OpenCV - Laplacian Transformation
The Laplacian operator is another derivative operator used to find edges in an image. It is a second-order derivative mask. Within this mask there are two further classifications: the positive Laplacian operator and the negative Laplacian operator. Unlike other operators, the Laplacian does not extract edges in any particular direction; instead, it extracts edges in the following two classifications:

Inward Edges
Outward Edges

You can perform the Laplacian transform operation on an image using the Laplacian() method of the Imgproc class. Following is the syntax of this method:

Laplacian(src, dst, ddepth)

This method accepts the following parameters −

src − A Mat object representing the source (input) image for this operation.
dst − A Mat object representing the destination (output) image for this operation.
ddepth − An integer representing the depth of the destination image.

The following program demonstrates how to perform the Laplacian transform operation on a given image.

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class LaplacianTest {
   public static void main(String args[]) {
      // Loading the OpenCV core library
      System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

      // Reading the image from the file and storing it into a Matrix object
      String file = "E:/OpenCV/chap18/laplacian_input.jpg";
      Mat src = Imgcodecs.imread(file);

      // Creating an empty matrix to store the result
      Mat dst = new Mat();

      // Applying the Laplacian operator on the image
      Imgproc.Laplacian(src, dst, 10);

      // Writing the image
      Imgcodecs.imwrite("E:/OpenCV/chap18/laplacian.jpg", dst);

      System.out.println("Image Processed");
   }
}

Assume that the following is the input image laplacian_input.jpg specified in the above program. On executing the program, you will get the following output −

Image Processed

If you open the specified path, you can observe the output image as follows −
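The same operation is also available through OpenCV's Python bindings. The following is a minimal sketch of the equivalent program, assuming the opencv-python package is installed; the file names are placeholders, not paths from the Java example above:

import cv2

# Read the input image (file name is a placeholder)
src = cv2.imread("laplacian_input.jpg")

# Apply the Laplacian operator; CV_16S keeps the negative derivative values
dst = cv2.Laplacian(src, cv2.CV_16S)

# Scale back to 8-bit so the result can be written and viewed
dst = cv2.convertScaleAbs(dst)

cv2.imwrite("laplacian.jpg", dst)
print("Image Processed")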
H2 Database - Backup
BACKUP is the command used to back up the database into a separate .zip file. Objects are not locked, and the transaction log is copied as part of the backup. Admin rights are required to execute this command.

Following is the generic syntax of the BACKUP command:

BACKUP TO fileNameString;

In this example, let us take a backup of the current database into a backup.zip file. Use the following command:

BACKUP TO 'backup.zip';

On executing the above command, you will get the backup.zip file in your local file system.
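Because the backup is an ordinary .zip archive, it can be inspected from outside the database. Below is a minimal Python sketch that lists the archived database files and runs a CRC check, assuming the backup.zip produced above is in the current directory:

import zipfile

# Open the archive created by BACKUP TO 'backup.zip'
with zipfile.ZipFile("backup.zip") as archive:
    # List the database files stored inside the backup
    for name in archive.namelist():
        print(name)
    # testzip() returns None when every entry passes its CRC check
    print("Archive OK" if archive.testzip() is None else "Corrupt entry found")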
Extract arrays separately from array of Objects in JavaScript
Suppose, we have an array of objects like this −

const arr = [{
   name : 'Client 1',
   total: 900,
   value: 12000
}, {
   name : 'Client 2',
   total: 10,
   value: 800
}, {
   name : 'Client 3',
   total: 5,
   value : 0
}];

We are required to write a JavaScript function that takes in one such array and extracts a separate array for each object property.

Therefore, we want one array for the name property of each object, one for total and one for value. Had there been more properties, we would have extracted more arrays.

The code for this will be −

const arr = [{
   name : 'Client 1',
   total: 900,
   value: 12000
}, {
   name : 'Client 2',
   total: 10,
   value: 800
}, {
   name : 'Client 3',
   total: 5,
   value : 0
}];
const separateOut = arr => {
   if(!arr.length){
      return [];
   };
   const res = {};
   const keys = Object.keys(arr[0]);
   keys.forEach(key => {
      arr.forEach(el => {
         if(res.hasOwnProperty(key)){
            res[key].push(el[key])
         }else{
            res[key] = [el[key]];
         };
      });
   });
   return res;
};
console.log(separateOut(arr));

And the output in the console will be −

{
   name: [ 'Client 1', 'Client 2', 'Client 3' ],
   total: [ 900, 10, 5 ],
   value: [ 12000, 800, 0 ]
}
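The same transposition, from a list of records to one array per field, can be expressed in other languages as well. For reference, here is a minimal Python sketch of the same idea, with keys taken from the first record exactly as in the JavaScript version:

arr = [
    {"name": "Client 1", "total": 900, "value": 12000},
    {"name": "Client 2", "total": 10, "value": 800},
    {"name": "Client 3", "total": 5, "value": 0},
]

def separate_out(records):
    # An empty input has no keys to extract
    if not records:
        return {}
    # Build one list per key found on the first record
    return {key: [rec[key] for rec in records] for key in records[0]}

print(separate_out(arr))
# {'name': ['Client 1', 'Client 2', 'Client 3'], 'total': [900, 10, 5], 'value': [12000, 800, 0]}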
Equivalence Partitioning Testing
Equivalence partitioning is also called equivalence class partitioning, abbreviated as ECP. It is a software testing technique that divides the input test data of the application under test into partitions of equivalent data, from which test cases can be derived; each partition is covered at least once.

An advantage of this approach is that it reduces the time required for testing a piece of software, due to the smaller number of test cases.

The example below best describes equivalence class partitioning:

Assume that the application accepts an integer in the range 100 to 999.
Valid equivalence class partition: 100 to 999 inclusive.
Invalid equivalence class partitions: less than 100, more than 999, decimal numbers, and alphabetic/non-numeric characters.
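To make the partitions concrete, here is a minimal Python sketch; the validator and the representative values below are illustrative assumptions, picking one test case per equivalence class from the example above:

def accepts(value):
    # Valid partition: integers from 100 to 999 inclusive
    return isinstance(value, int) and 100 <= value <= 999

# One representative value per equivalence class
cases = {
    "valid (100-999)": (500, True),
    "below range": (99, False),
    "above range": (1000, False),
    "decimal number": (123.45, False),
    "non-numeric": ("abc", False),
}

for label, (value, expected) in cases.items():
    result = accepts(value)
    assert result == expected, f"{label}: got {result}"
    print(f"{label}: {value!r} -> {result}")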
Python Program to print the diamond shape
The looping features in Python can be used to create many nicely formatted diagrams using various characters from the keyboard. One such shape is the diamond, which involves multiple loops, because we have to print the character both vertically and horizontally. We also have to take care of the shape gradually growing from the top to the middle and then gradually shrinking from the middle to the bottom. For this reason, we use two for loops, each containing one more for loop inside it.

Below is the code for creating the diamond shape.

def Shape_of_Diamond(shape):
    a = 0
    # Upper half of the diamond (including the middle row)
    for m in range(1, shape + 1):
        for n in range(1, (shape - m) + 1):
            print(end=" ")
        while a != (2 * m - 1):
            print("@", end="")
            a = a + 1
        a = 0
        print()

    s = 1
    c = 1
    # Lower half of the diamond
    for m in range(1, shape):
        for n in range(1, s + 1):
            print(end=" ")
        s = s + 1
        while c <= (2 * (shape - m) - 1):
            print("@", end="")
            c = c + 1
        c = 1
        print()

shape = 8
Shape_of_Diamond(shape)

Running the above code gives us the following result:

       @
      @@@
     @@@@@
    @@@@@@@
   @@@@@@@@@
  @@@@@@@@@@@
 @@@@@@@@@@@@@
@@@@@@@@@@@@@@@
 @@@@@@@@@@@@@
  @@@@@@@@@@@
   @@@@@@@@@
    @@@@@@@
     @@@@@
      @@@
       @
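The same shape can also be produced more compactly with string multiplication and centering. A minimal alternative sketch:

def diamond(rows):
    width = 2 * rows - 1
    # Odd @ counts, growing to the widest row and shrinking back
    counts = list(range(1, width + 1, 2)) + list(range(width - 2, 0, -2))
    for count in counts:
        # Center each row against the widest row, then drop trailing spaces
        print(("@" * count).center(width).rstrip())

diamond(8)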
Building a Machine Learning Pipeline with Scikit-Learn | by Ezgi Gumusbas | Towards Data Science
Data science projects need iterative progress. For example, we clean and prepare data for modeling by transforming it into the proper format, run the model, get results, improve or change the model, work on feature engineering, get new results, compare them with other results, and so on. Doing every step again and again by hand is neither easy nor smart. To solve this problem, we can use a pipeline to integrate the steps of a machine learning workflow.

Pipelines are super useful for transforming and training data quickly. Additionally, we can compare different models and tune hyperparameters by integrating grid search in our pipeline. In this article, I write about how to create pipelines in scikit-learn to show the magical world of them.

There are many ways to make a pipeline, but I will show one of the easiest and smartest versions of them in this blog.

To use the pipeline function of scikit-learn, we have to import the Pipeline module.

from sklearn.pipeline import Pipeline

The pipeline is built using a list of (key, value) pairs. Here, the key is a string containing the name you want to give and the value is the estimator object. Very simple example code to show how to use it:

estimators = [('reduce_dim', PCA()), ('clf', SVC())]
pipe = Pipeline(estimators)

For more details, you can check the scikit-learn documentation.

make_pipeline is a utility function that is a shorthand for constructing pipelines. It takes a variable number of estimators and returns a pipeline, filling in the names automatically.

I would like to explain the usage of pipelines with examples, because I think it is easier to figure out when we see the application of the modules in code. In real life, data sets generally consist of numerical and categorical columns. We have to transform these columns with different techniques: while we scale numerical columns with a scaler, we encode categorical columns with an encoder. It is easy to do this transform the first time, but generally, in data science projects we would like to try different scalers and encoders. To make this quick and easy, we use pipelines.

For one of my projects, I predicted the status of water wells in Tanzania using classification techniques. I used different scalers, encoders, and classification models with the pipeline. If you want to see the full Jupyter notebook with the data and how the pipeline is used in the modeling process, you can find it here on my Github. A pipeline example from that project:

I only show how to import the pipeline module here. But of course, we need to import all the libraries and modules which we plan to use, such as pandas, NumPy, RobustScaler, category_encoders, train_test_split, etc.

from sklearn.pipeline import make_pipeline

df = pd.read_csv('clean_data.csv')

If your data has some meaningless features, null/wrong values, or if it needs any type of cleaning process, you can do it at this stage, because the quality of the data affects the quality of the model. In this blog, my aim is to show the pipeline process, so I skip this part and use the cleaned version of my data.
cat_col = ['basin','region','extraction_type_group','management','payment','water_quality','quantity','source','waterpoint_type','decade','installer_cat','funder_cat']
num_col = ['gps_height','longitude','latitude','district_code','population','public_meeting','permit']

target = 'status_group'
used_cols = [c for c in df.columns.tolist() if c not in [target]]
X = df[used_cols]
y = df[target]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# making the pipeline
scaler = RobustScaler()
encoder = ce.TargetEncoder(cols=cat_col)

# putting numeric columns to the scaler and categorical columns to the encoder
num_transformer = make_pipeline(scaler)
cat_transformer = make_pipeline(encoder)

# getting our scaler and encoder together with a preprocessor
preprocessor = ColumnTransformer(
    transformers=[('num', num_transformer, num_col),
                  ('cat', cat_transformer, cat_col)])

# choosing the model
model_name = LogisticRegression(class_weight='balanced', solver='lbfgs', random_state=42)

# giving all values to the pipeline
pipe = make_pipeline(preprocessor, model_name)
pipe.fit(X_train, y_train)

# make predictions on the training set
y_pred = pipe.predict(X_train)

# make predictions on the test set
y_pred_test = pipe.predict(X_test)

# print the results in a readable way
print("Accuracy:"); print("=" * len("Accuracy:"))
print(f"TRAIN: {accuracy_score(y_train, y_pred)}")
print(f"TEST: {accuracy_score(y_test, y_pred_test)}")
print("\nBalanced Accuracy:"); print("=" * len("Balanced Accuracy:"))
print(f"TRAIN: {balanced_accuracy_score(y_train, y_pred)}")
print(f"TEST: {balanced_accuracy_score(y_test, y_pred_test)}")

In this example, we can see that we can add features to our pipelines, such as the preprocessor, which contains the scaler and the encoder. We can also add a binner or more functions to the pipeline for different problems. This helps transform our data in a quick and simple way. Here, the make_pipeline function automatically applied the scaler, the encoder, and our model to the pipe, and we fit it very easily.

When we write a function, put our pipeline inside it, and return the results, it is also super easy to change the model. For this example, when we only change model_name to try another classification model and rerun the cell which calls the corresponding function, it easily returns results from within the pipeline. In short, we do not need to change our data sets to transform the data. We can do every transform in the pipeline and keep our data set in its original form.

We can also use make_pipeline to put together an imputer and a scaler for the numerical columns:

# Imputing nulls and scaling for numeric columns
num_imputer = SimpleImputer(strategy='median')
scaler = RobustScaler()

# Imputing nulls through the encoding for categorical columns
encoder = ce.TargetEncoder(cols=cat_col, handle_missing="value")

# Defining different transformers for numeric and categorical columns
num_transformer = make_pipeline(num_imputer, scaler)
cat_transformer = make_pipeline(encoder)

# getting our scaler and encoder together with a preprocessor
preprocessor = ColumnTransformer(
    transformers=[('num', num_transformer, num_col),
                  ('cat', cat_transformer, cat_col)])

# choosing the model
model_name = LogisticRegression(class_weight='balanced', solver='lbfgs', random_state=42)

# giving all values to the pipeline
pipe = make_pipeline(preprocessor, model_name)

In this example, I also added an imputer to my pipeline. As you can see, it is very easy to add and change modules when we build a good pipeline, without transforming our data manually.
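The introduction mentioned tuning hyperparameters by integrating grid search in the pipeline, and a pipeline built with make_pipeline plugs straight into GridSearchCV. The sketch below is a minimal illustration on top of the pipe defined above; the grid values are arbitrary, and the step name follows make_pipeline's convention of lowercasing the class name:

from sklearn.model_selection import GridSearchCV

# Steps created by make_pipeline are named after their class, lowercased,
# so the LogisticRegression step is addressed as 'logisticregression'
param_grid = {
    'logisticregression__C': [0.1, 1.0, 10.0],
}

search = GridSearchCV(pipe, param_grid, cv=5, scoring='balanced_accuracy')
search.fit(X_train, y_train)

print(search.best_params_)
print(search.best_score_)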
The aim of the pipeline is to combine several steps that can be cross-validated together while setting different parameters. Gathering these steps helps us add new parameters or make changes to our model easily. It also makes our code more readable and understandable. By building an understandable workflow, it helps the reproducibility of the project. When we use a pipeline, we do not transform our data at the beginning of the process: the pipeline does the needed transforms for us and keeps the original data.

If you would like to dive deeper into the scikit-learn library documentation, there are some useful links here.

scikit-learn.org

If you have any feedback or suggestions for this article, feel free to connect with me via LinkedIn.
Latent Semantic Analysis — Deduce the hidden topic from the document | by Sanket Doshi | Towards Data Science
Making computers learn and understand human language is still one of the most difficult tasks. Language contains a huge vocabulary, each word has a different meaning depending on the context, and making computers learn the context is an open question. In this article, we will try to deduce the hidden topics represented by a text and will use that knowledge for document clustering.

A topic model is an unsupervised way of deducing the hidden topics represented by a text or document. The topics here are not actual topics such as sports, news, or business; instead, they are the words that can be used to represent the text in the best possible way.

This technique is very powerful and can be used for document clustering in an unsupervised way. If you have used Google News, then you have seen the clustering of news from various sources when the news items represent a similar topic. This is one of the applications of topic modelling.

Latent Semantic Analysis is an efficient way of analysing text and finding the hidden topics by understanding the context of the text.

Latent Semantic Analysis (LSA) is used to find the hidden topics represented by a document or text. These hidden topics are then used for clustering similar documents together. LSA is an unsupervised algorithm, so we do not know the actual topic of a document in advance.

The simplest way of finding similar documents is by using a vector representation of the text and cosine similarity. The vector representation represents each document in the form of a vector. This vector is known as the document-term matrix.

For example:

a1 = "the petrol in this car is low"
a2 = "the vehicle is short on fuel"

Consider the above two strings; from the context we can understand that they are similar. We will try to find how similar these strings are using the vector representation.

The document-term matrix for the above example is:

The size of the document-term matrix is (number of documents) * (vocabulary size). Vocabulary size is the number of unique words present across all the documents. Here the vocabulary size is 11 and the number of documents is 2.

Similarity between documents is found using cosine similarity between the document vectors. The similarity between documents a1 and a2 is 0.3086067, which is too low, given that the documents are mostly similar in context. This is the disadvantage of the document-term matrix and, hence, of this vector representation technique. Another disadvantage is the vocabulary size: the language has a huge vocabulary, causing the matrix to be bigger and computationally expensive.
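That similarity score can be reproduced with a few lines of scikit-learn. A minimal sketch (CountVectorizer's default tokenizer drops single-character tokens, which does not affect these two sentences):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity

docs = ["the petrol in this car is low", "the vehicle is short on fuel"]

# Build the 2 x 11 document-term matrix
X = CountVectorizer().fit_transform(docs)

# Only 'the' and 'is' are shared, so the similarity is low (~0.3086067)
print(cosine_similarity(X[0], X[1]))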
These disadvantages of the vector representation led to the need for a new technique for finding the similarity among documents and finding the hidden topics: a technique that can solve the problem of synonyms and is also computationally inexpensive. The technique that was suggested is Latent Semantic Analysis.

The term co-occurrence matrix has dimensions (vocabulary size) * (vocabulary size). It represents the frequency with which words occur together in the dataset, and it helps us understand which words belong together. For the above example, the term co-occurrence matrix is:

As we can see, the words "the" and "is" are the most common but are not very useful for the meaning of the sentences. We will see how to use this matrix and its benefits later in this blog.

LSA returns concepts instead of topics to represent a given document. A concept is a list of words that represents the document in the best possible way.

For example, in a dataset of sports documents the concepts can be:

Concept 1: ball, shoes, goals, win
Concept 2: ball, bat, score, umpire

We can see two concepts: concept 1 represents football and concept 2 represents cricket. However, concepts can have overlapping words, so the whole set of words represents one concept together, rather than each word individually. LSA tries to find the best set of words, known as a concept, to represent the document, using the term co-occurrence matrix.

A concept is also a way of representing the document through dimension reduction.

We can see that the document-term matrix is very sparse and large in size. Computation on such a large matrix is expensive, the results are not very significant, and many of the values in the matrix are zero. To reduce the computational complexity and to get more relevant and useful results, SVD is used.

SVD decomposes the matrix into three different matrices: an orthogonal column matrix, an orthogonal row matrix, and a singular-value matrix.

The main advantage of SVD is that we can reduce the size of the matrix substantially, from millions to hundreds or thousands. In the image above, K is the rank of the matrix. It means that if we use only k columns and rows, we can still approximately calculate the matrix A without any major loss.

During SVD calculations we calculate A*(A'T), which represents the term co-occurrence matrix. That means the value with index (i, j) in that matrix represents the number of times term(i) and term(j) occur together in the dataset of documents. You can learn more about SVD here.

Implementation is the best way to understand a concept. We will implement LSA using a small example that will help us understand the working and the output of LSA.

The documents we will be using are:

a1 = "He is a good dog."
a2 = "The dog is too lazy."
a3 = "That is a brown cat."
a4 = "The cat is very active."
a5 = "I have brown cat and dog."

Here we can see that two concepts should be generated, one representing dog and another representing cat.

Convert this list of documents to a DataFrame:

import pandas as pd

df = pd.DataFrame()
df["documents"] = [a1, a2, a3, a4, a5]
df

The df would look like:

The most important part of any machine learning algorithm is data preprocessing: the more noise present in the data, the lower the accuracy of the model.

We will perform four types of processing on the data:

Remove all the special characters from the text.
Remove all the words with fewer than 3 letters.
Lowercase all the characters.
Remove stop words.

# remove special characters
df['clean_documents'] = df['documents'].str.replace("[^a-zA-Z#]", " ")

# remove words having fewer than 3 letters
df['clean_documents'] = df['clean_documents'].fillna('').apply(lambda x: ' '.join([w for w in x.split() if len(w) > 2]))

# lowercase all characters
df['clean_documents'] = df['clean_documents'].fillna('').apply(lambda x: x.lower())

For removing stop words, we will tokenise the string and then append back all the words which are not stop words.
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords

stop_words = stopwords.words('english')

# tokenization
tokenized_doc = df['clean_documents'].fillna('').apply(lambda x: x.split())

# remove stop-words
tokenized_doc = tokenized_doc.apply(lambda x: [item for item in x if item not in stop_words])

# de-tokenization
detokenized_doc = []
for i in range(len(df)):
    t = ' '.join(tokenized_doc[i])
    detokenized_doc.append(t)

df['clean_documents'] = detokenized_doc

After this preprocessing, our data will look like:

We will use sklearn for generating the document-term matrix.

from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(stop_words='english', smooth_idf=True)
X = vectorizer.fit_transform(df['clean_documents'])

We used TfidfVectorizer instead of CountVectorizer, as tf-idf is a more effective vectorizer. You can learn about the various parameters passed to TfidfVectorizer here, and to learn about tf-idf you can check this link.

The shape of X will be (5, 6), where the rows represent the number of documents, which is 5, and the columns represent the terms, of which there are 6.

To see the terms:

dictionary = vectorizer.get_feature_names()
dictionary

(In recent scikit-learn versions this method is named get_feature_names_out().) This will give an array of words:

['active', 'brown', 'cat', 'dog', 'good', 'lazy']

from sklearn.decomposition import TruncatedSVD

# SVD represents documents and terms as vectors
svd_model = TruncatedSVD(n_components=2, algorithm='randomized', n_iter=100, random_state=122)
lsa = svd_model.fit_transform(X)

TruncatedSVD performs the SVD function on the document-term matrix and gives us the vectors after dimensionality reduction. If you only want to fit the model without returning the transformed matrix, you can call fit instead of fit_transform.

n_components is the dimension of the output data, and its value represents the number of different topics. You can learn more about sklearn's SVD here.

Now we will check the topics assigned to our documents:

pd.options.display.float_format = '{:,.16f}'.format
topic_encoded_df = pd.DataFrame(lsa, columns=["topic_1", "topic_2"])
topic_encoded_df["documents"] = df['clean_documents']
display(topic_encoded_df[["documents", "topic_1", "topic_2"]])

The output will look like:

We can see the topics assigned to each document. The documents about dogs are represented by topic_2 and the documents about cats are represented by topic_1. The last document, which has both cat and dog, is represented more by topic_1 but belongs to topic_2 too. It leans toward topic_1 because it contains both the words brown and cat, which carry higher weight in topic_1.

We can also see the weight given to the terms in each topic:

encoding_matrix = pd.DataFrame(svd_model.components_, index=["topic_1", "topic_2"], columns=(dictionary)).T
encoding_matrix

We can see above that both the terms brown and cat have higher weight in topic_1 than in topic_2.

We have seen the implementation and working of LSA. LSA is the pioneer of LSI and of dimensionality reduction algorithms. Its main applications are:
Dimensionality reduction: we can reduce the vector size drastically, from millions to mere thousands, without losing any context. This helps reduce both the computation power and the time taken to perform the computation.

Search engines: Latent Semantic Indexing (LSI) is an algorithm developed on top of LSA. The documents matching a search query are found using the vectors developed from LSA.

Document clustering: since LSA assigns topics to each document, we can cluster the documents based on the assigned topics.
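As a final sketch (not part of the original article), the LSA vectors computed above can be fed straight into a clustering algorithm. With the five toy documents, two clusters should roughly separate the dog documents from the cat documents:

from sklearn.cluster import KMeans

# Cluster the documents in the 2-dimensional LSA topic space
kmeans = KMeans(n_clusters=2, n_init=10, random_state=42)
labels = kmeans.fit_predict(lsa)

for doc, label in zip(df['clean_documents'], labels):
    print(label, doc)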
[ { "code": null, "e": 539, "s": 172, "text": "Making computers learn and understand the human language is still the most difficult task. Language contains huge vocabulary and each work has different meaning based on the context and making computers learn the context is an open question. In this we will try to deduce the hidden topics represented by the text and will use that knowledge for document clustering." }, { "code": null, "e": 801, "s": 539, "text": "Topic model is an unsupervised way of deducing the hidden topics represented by the text or document. This topic is not the actual topics such as sports, news or business instead are the words that can be used for representing the text in the best possible way." }, { "code": null, "e": 1074, "s": 801, "text": "This technique is very powerful and can be used for document clustering in an unsupervised way. If you’ve used Google News then you’ve seen the clustering of news from various sources if the news represents similar topic. This is one of the application of topic modelling." }, { "code": null, "e": 1213, "s": 1074, "text": "Latent Semantic Analysis is an efficient way of analysing the text and finding the hidden topics by understanding the context of the text." }, { "code": null, "e": 1485, "s": 1213, "text": "Latent Semantic Analysis(LSA) is used to find the hidden topics represented by the document or text. This hidden topics then are used for clustering the similar documents together. LSA is an unsupervised algorithm and hence we don’t know the actual topic of the document." }, { "code": null, "e": 1711, "s": 1485, "text": "Most simple way of finding similar documents is by using vector representation of text and cosine similarity. Vector representation represents each document in the form of vector. This vector is known as document-term matrix." }, { "code": null, "e": 1724, "s": 1711, "text": "For example:" }, { "code": null, "e": 1796, "s": 1724, "text": "a1 = \"the petrol in this car is low\"a2 = \"the vehicle is short on fuel\"" }, { "code": null, "e": 1978, "s": 1796, "text": "Consider above two strings and form the context we can understand that both the strings are similar. We’ll try to find how much this strings are similar using vector representation." }, { "code": null, "e": 2029, "s": 1978, "text": "The document term matrix for the above example is:" }, { "code": null, "e": 2263, "s": 2029, "text": "The size of the document-term matrix is (number of documents) * (vocabulary size). Vocabulary size is the number of unique words present in all the documents all together. Here the vocabulary size is 11 and number of documents are 2." }, { "code": null, "e": 2727, "s": 2263, "text": "Similarity between documents if found out using cosine similarity between the documents matrix. The similarity between documents a1 and a2 is 0.3086067 which is too low since the documents are mostly similar in context. This is the disadvantage of document-term matrix and hence, the vector representation technique. Another disadvantage is the vocabulary size as the language has the huge vocabulary causing the matrix to be bigger and computationally expensive." }, { "code": null, "e": 3062, "s": 2727, "text": "This disadvantages of the vector representation has led to the requirement of new technique for finding the similarity among the documents and finding the hidden topics. The technique which can solve the problem of synonyms and is also computationally not expensive. And the technique which was suggested was Latent Semantic Analysis." 
}, { "code": null, "e": 3267, "s": 3062, "text": "This matrix is of dimension (vocabulary size) * (vocabulary size). It represents the frequency of the words coming together in the dataset. The matrix helps us understand the words which belongs together." }, { "code": null, "e": 3323, "s": 3267, "text": "For the above example the term co-occurrence matrix is:" }, { "code": null, "e": 3502, "s": 3323, "text": "As we can see the words the and is are the most common but are not very useful in meanings of the sentence. We’ll see how to use this matrix and it’s benefits later in this blog." }, { "code": null, "e": 3669, "s": 3502, "text": "The LSA returns concepts instead of topics which represents the given document. The concepts are list of words which represents the document in the best possible way." }, { "code": null, "e": 3736, "s": 3669, "text": "For example, in the dataset of sports document the concepts can be" }, { "code": null, "e": 3771, "s": 3736, "text": "Concept 1: ball, shoes, goals, win" }, { "code": null, "e": 3807, "s": 3771, "text": "Concept 2: ball, bat, score, umpire" }, { "code": null, "e": 4170, "s": 3807, "text": "As we can see two concepts — concept 1 represents football and concept 2 represents cricket. But we can see that the concepts can have overlapping of words so, the whole set of words represent one concept together rather than the individual word. LSA tries to find the best set of words known as concept to represent the document using term co-occurrence matrix." }, { "code": null, "e": 4250, "s": 4170, "text": "Concept is also a way of representing the document through dimension reduction." }, { "code": null, "e": 4561, "s": 4250, "text": "We can see that the document-term matrix is very sparse and large in size. The computation on such a large matrix is expensive along with not very significant results and much of the values in the matrix are zero. To reduce the computation complexity and to get the more relevant and useful result SVD is used." }, { "code": null, "e": 4691, "s": 4561, "text": "SVD decomposes the matrix into three different matrixes: orthogonal column matrix, orthogonal row matrix and one singular matrix." }, { "code": null, "e": 4988, "s": 4691, "text": "The main advantage of SVD is that we can reduce the the size of the matrix substantially from millions to 100 or 1000. In the above image K is the rank of the matrix. It resembles that if we use only k columns and rows then also we can approximately calculate the matrix A without any major loss." }, { "code": null, "e": 5263, "s": 4988, "text": "During SVD calculations we calculate A*(A'T) which represents the term co-occurrence matrix. That means the value with index (i,j) in the above matrix represents the number of times term(i) and term(j) exist together in a dataset of documents. You learn about SVD more here." }, { "code": null, "e": 5423, "s": 5263, "text": "Implementation is the best way to understand the concept. We’ll implement LSA using a small example that will help us understand the working and output of LSA." }, { "code": null, "e": 5456, "s": 5423, "text": "The documents we’ll be using are" }, { "code": null, "e": 5597, "s": 5456, "text": "a1 = \"He is a good dog.\"a2 = \"The dog is too lazy.\"a3 = \"That is a brown cat.\"a4 = \"The cat is very active.\"a5 = \"I have brown cat and dog.\"" }, { "code": null, "e": 5700, "s": 5597, "text": "Here we can see that 2 concepts must be generated one which represents cat and another represents dog." 
}, { "code": null, "e": 5745, "s": 5700, "text": "Convert this list of documents to DataFrame:" }, { "code": null, "e": 5820, "s": 5745, "text": "import pandas as pddf = pd.DataFrame()df[\"documents\"] = [a1,a2,a3,a4,a5]df" }, { "code": null, "e": 5844, "s": 5820, "text": "The df would look like:" }, { "code": null, "e": 5986, "s": 5844, "text": "The most important part of any machine learning algorithm is data preprocessing. More the noise present in data lesser the accuracy of model." }, { "code": null, "e": 6034, "s": 5986, "text": "We’ll perform four types of processing on data:" }, { "code": null, "e": 6176, "s": 6034, "text": "Remove all the special characters from the text.Remove all the words with less than 3 letters.Lowercase all the characters.Remove stop words." }, { "code": null, "e": 6225, "s": 6176, "text": "Remove all the special characters from the text." }, { "code": null, "e": 6272, "s": 6225, "text": "Remove all the words with less than 3 letters." }, { "code": null, "e": 6302, "s": 6272, "text": "Lowercase all the characters." }, { "code": null, "e": 6321, "s": 6302, "text": "Remove stop words." }, { "code": null, "e": 6682, "s": 6321, "text": "#remove special charactersdf['clean_documents'] = df['documents'].str.replace(\"[^a-zA-Z#]\", \" \")#remove words have letters less than 3df['clean_documents'] = df['clean_documents'].fillna('').apply(lambda x: ' '.join([w for w in x.split() if len(w)>2]))#lowercase all charactersdf['clean_documents'] = df['clean_documents'].fillna('').apply(lambda x: x.lower())" }, { "code": null, "e": 6794, "s": 6682, "text": "For removing stop words we’ll tokenise the string and than again append all the words which are not stop words." }, { "code": null, "e": 7268, "s": 6794, "text": "import nltknltk.download('stopwords')from nltk.corpus import stopwordsstop_words = stopwords.words('english')# tokenizationtokenized_doc = df['clean_documents'].fillna('').apply(lambda x: x.split())# remove stop-wordstokenized_doc = tokenized_doc.apply(lambda x: [item for item in x if item not in stop_words])# de-tokenizationdetokenized_doc = []for i in range(len(df)): t = ' '.join(tokenized_doc[i]) detokenized_doc.append(t)df['clean_documents'] = detokenized_doc" }, { "code": null, "e": 7318, "s": 7268, "text": "After this preprocessing our data will look like:" }, { "code": null, "e": 7377, "s": 7318, "text": "We’ll use sklearn for generating the document-term matrix." }, { "code": null, "e": 7555, "s": 7377, "text": "from sklearn.feature_extraction.text import TfidfVectorizervectorizer = TfidfVectorizer(stop_words='english', smooth_idf=True)X = vectorizer.fit_transform(df['clean_documents'])" }, { "code": null, "e": 7763, "s": 7555, "text": "We used TfidfVectorizer instead of CountVectorizer as tf-idf is more efficient vectorizer. You can learn about various params passed to TfidfVectorizer here and to learn about tf-idf you can check this link." }, { "code": null, "e": 7894, "s": 7763, "text": "The shape of X will be (5,6) where rows represents the number of documents that is 5 and columns represents the terms which are 6." 
}, { "code": null, "e": 7911, "s": 7894, "text": "To see the terms" }, { "code": null, "e": 7965, "s": 7911, "text": "dictionary = vectorizer.get_feature_names()dictionary" }, { "code": null, "e": 7999, "s": 7965, "text": "which will give an array of words" }, { "code": null, "e": 8049, "s": 7999, "text": "['active', 'brown', 'cat', 'dog', 'good', 'lazy']" }, { "code": null, "e": 8269, "s": 8049, "text": "from sklearn.decomposition import TruncatedSVD# SVD represent documents and terms in vectors svd_model = TruncatedSVD(n_components=2, algorithm='randomized', n_iter=100, random_state=122)lsa = svd_model.fit_transform(X)" }, { "code": null, "e": 8490, "s": 8269, "text": "TruncatedSVD performs SVD function on the document-term matrix and gives us the vector after dimensionality reduction. If you want the matrix without dimensionality reduction you should use fit instead of fit_transform ." }, { "code": null, "e": 8644, "s": 8490, "text": "n_components is the dimension of output data. The value of n_components represents number of different topics. You can learn more about sklearn SVD here." }, { "code": null, "e": 8697, "s": 8644, "text": "Now we’ll check the topics assigned to our documents" }, { "code": null, "e": 8934, "s": 8697, "text": "pd.options.display.float_format = '{:,.16f}'.formattopic_encoded_df = pd.DataFrame(lsa, columns = [\"topic_1\", \"topic_2\"])topic_encoded_df[\"documents\"] = df['clean_documents']display(topic_encoded_df[[\"documents\", \"topic_1\", \"topic_2\"]])" }, { "code": null, "e": 8960, "s": 8934, "text": "The output will look like" }, { "code": null, "e": 9345, "s": 8960, "text": "We can see the topics assigned to each document. The documents regarding dog’s is represented by topic_2 and the documents regarding cats is represented by topic_1. The last documents which has both cat and dog is represented more by topic_1 but belong to topic_2 too. It’s more resembled by topic_1 as the document contains word brown and cat both which has higher weight in topic_1." }, { "code": null, "e": 9406, "s": 9345, "text": "We can also see the weight given to the terms in each topic." }, { "code": null, "e": 9532, "s": 9406, "text": "encoding_matrix = pd.DataFrame(svd_model.components_, index = [\"topic_1\",\"topic_2\"], columns = (dictionary)).Tencoding_matrix" }, { "code": null, "e": 9624, "s": 9532, "text": "We can see above that both term brown and cat has higher weightage in topic_1 than topic_2." }, { "code": null, "e": 9674, "s": 9624, "text": "We’ve seen the implementation and working of LSA." }, { "code": null, "e": 9746, "s": 9674, "text": "The LSA is the pioneer for LSI and dimensionality reduction algorithms." }, { "code": null, "e": 10341, "s": 9746, "text": "The LSA is used for dimensionality reduction. We can reduce the vector size drastically from millions to mere thousands without losing any context. This will help us in reducing the computation power and the time taken to perform the computation.The LSA is used in search engines. Latent Semantic Indexing(LSI) is the algorithm developed on LSA. The documents matching the search query are found using the vector developed from LSA.LSA can also be used for document clustering. As we can see that the LSA assigns topics to each document based on the assigned topic we can cluster the documents." }, { "code": null, "e": 10588, "s": 10341, "text": "The LSA is used for dimensionality reduction. We can reduce the vector size drastically from millions to mere thousands without losing any context. 
This will help us in reducing the computation power and the time taken to perform the computation." }, { "code": null, "e": 10775, "s": 10588, "text": "The LSA is used in search engines. Latent Semantic Indexing(LSI) is the algorithm developed on LSA. The documents matching the search query are found using the vector developed from LSA." } ]
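A minimal supplementary sketch of the search-engine use of LSA described above: rank the five documents against a free-text query by cosine similarity in the reduced topic space. It assumes the vectorizer, svd_model, lsa and df objects fitted above are still in scope; the query string is purely illustrative.

# Rank documents against a query using the LSA vectors fitted above
from sklearn.metrics.pairwise import cosine_similarity

query = "brown cat"  # illustrative query

# project the query into the same 2-dimensional topic space as the documents
query_vec = svd_model.transform(vectorizer.transform([query]))

# cosine similarity between the query vector and every document vector
scores = cosine_similarity(query_vec, lsa)[0]

# print the documents from most to least similar to the query
for idx in scores.argsort()[::-1]:
    print(f"score={scores[idx]:.3f}  {df['clean_documents'][idx]}")

For this query the cat documents should rank above the dog documents, which is exactly how an LSI-style search engine orders its results.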
java.time.LocalDate.of() Method Example
The java.time.LocalDate.of(int year, Month month, int dayOfMonth) method obtains an instance of LocalDate from a year, month and day. Following is the declaration for java.time.LocalDate.of(int year, Month month, int dayOfMonth) method. public static LocalDate of(int year, Month month, int dayOfMonth) year − the year to represent, from MIN_YEAR to MAX_YEAR year − the year to represent, from MIN_YEAR to MAX_YEAR month − the month-of-year to represent month − the month-of-year to represent dayOfMonth − the day-of-month to represent, from 1 to 31 dayOfMonth − the day-of-month to represent, from 1 to 31 the local date, not null. The following example shows the usage of java.time.LocalDate.of(int year, Month month, int dayOfMonth) method. package com.tutorialspoint; import java.time.LocalDate; import java.time.Month; public class LocalDateDemo { public static void main(String[] args) { LocalDate date = LocalDate.of(2017,Month.FEBRUARY,3); System.out.println(date); } } Let us compile and run the above program, this will produce the following result − 2017-02-03
[ { "code": null, "e": 2049, "s": 1915, "text": "The java.time.LocalDate.of(int year, Month month, int dayOfMonth) method obtains an instance of LocalDate from a year, month and day." }, { "code": null, "e": 2152, "s": 2049, "text": "Following is the declaration for java.time.LocalDate.of(int year, Month month, int dayOfMonth) method." }, { "code": null, "e": 2219, "s": 2152, "text": "public static LocalDate of(int year, Month month, int dayOfMonth)\n" }, { "code": null, "e": 2275, "s": 2219, "text": "year − the year to represent, from MIN_YEAR to MAX_YEAR" }, { "code": null, "e": 2331, "s": 2275, "text": "year − the year to represent, from MIN_YEAR to MAX_YEAR" }, { "code": null, "e": 2370, "s": 2331, "text": "month − the month-of-year to represent" }, { "code": null, "e": 2409, "s": 2370, "text": "month − the month-of-year to represent" }, { "code": null, "e": 2466, "s": 2409, "text": "dayOfMonth − the day-of-month to represent, from 1 to 31" }, { "code": null, "e": 2523, "s": 2466, "text": "dayOfMonth − the day-of-month to represent, from 1 to 31" }, { "code": null, "e": 2549, "s": 2523, "text": "the local date, not null." }, { "code": null, "e": 2660, "s": 2549, "text": "The following example shows the usage of java.time.LocalDate.of(int year, Month month, int dayOfMonth) method." }, { "code": null, "e": 2920, "s": 2660, "text": "package com.tutorialspoint;\n\nimport java.time.LocalDate;\nimport java.time.Month;\n\npublic class LocalDateDemo {\n public static void main(String[] args) {\n\t \n LocalDate date = LocalDate.of(2017,Month.FEBRUARY,3);\n System.out.println(date); \n }\n}" }, { "code": null, "e": 3003, "s": 2920, "text": "Let us compile and run the above program, this will produce the following result −" }, { "code": null, "e": 3015, "s": 3003, "text": "2017-02-03\n" }, { "code": null, "e": 3022, "s": 3015, "text": " Print" }, { "code": null, "e": 3033, "s": 3022, "text": " Add Notes" } ]
CSS | columns Property - GeeksforGeeks
08 Aug, 2019 In CSS the columns property is used to set the number of columns and the width of the columns. This is a shorthand property and can take multiple values at a time. Syntax: columns: column-width column-count | auto | initial | inherit; Property Values: auto: This sets the column-width and column-count values to their browser default values.Syntax:columns: auto auto; Example 1: Using auto as the value.<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: auto auto; /* Chrome, Safari, Opera */ -moz-columns: auto auto; /* Firefox */ columns: auto auto; } </style></head> <body> <h1>The column Property</h1> <div class="GFG"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends.
</p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html> Output: Note: If any of the values among the column-width and column-count are not specified, then the browser assumes their value as auto by default. integer: This is used to specify the column-width and the column-count using integer values.Syntax:columns: column-width column-count; Example 2:<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: 60px 5; /* Chrome, Safari, Opera */ -moz-columns: 60px 5; /* Firefox */ columns: 60px 5; } </style></head> <body> <h1>The column Property</h1> <div class="GFG"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html>Output: Syntax: columns: column-width column-count; Example 2: <!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: 60px 5; /* Chrome, Safari, Opera */ -moz-columns: 60px 5; /* Firefox */ columns: 60px 5; } </style></head> <body> <h1>The column Property</h1> <div class="GFG"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. 
He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html> Output: initial: Initializes the values to their initial default values.Syntax:columns: initial initial; Example 3:<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: initial initial; /* Chrome, Safari, Opera */ -moz-columns: initial initial; /* Firefox */ columns: initial initial; } </style></head> <body> <h1>The column Property</h1> <div class="GFG"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html>Output: Syntax: columns: initial initial; Example 3: <!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: initial initial; /* Chrome, Safari, Opera */ -moz-columns: initial initial; /* Firefox */ columns: initial initial; } </style></head> <body> <h1>The column Property</h1> <div class="GFG"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? 
This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html> Output: inherit: Inherits the value from its parent element.Syntax:columns: inherit inherit; Example 4:<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: inherit inherit; /* Chrome, Safari, Opera */ -moz-columns: inherit inherit; /* Firefox */ columns: inherit inherit; } </style></head> <body> <h1>The column Property</h1> <div class="GFG"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. 
</p> </p> </div></body> </html> </html>Output: Syntax: columns: inherit inherit; Example 4: <!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: inherit inherit; /* Chrome, Safari, Opera */ -moz-columns: inherit inherit; /* Firefox */ columns: inherit inherit; } </style></head> <body> <h1>The column Property</h1> <div class="GFG"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html> </html> Output: Supported Browser: The browsers supported by columns Property are listed below: Chrome 50.0, 4.0 -webkit- Edge 10.0 Firefox 52.0, 9.0 -moz- Opera 37.0, 15.0 -webkit- 11.1 Safari 9.0, 3.1 -webkit- CSS-Properties Picked CSS Web Technologies Writing code in comment? Please use ide.geeksforgeeks.org, generate link and share the link here. How to insert spaces/tabs in text using HTML/CSS? Top 10 Projects For Beginners To Practice HTML and CSS Skills How to create footer to stay at the bottom of a Web page? How to update Node.js and NPM to next version ? CSS to put icon inside an input element in a form Remove elements from a JavaScript Array Installation of Node.js on Linux Convert a string to an integer in JavaScript How to fetch data from an API in ReactJS ? How to insert spaces/tabs in text using HTML/CSS?
[ { "code": null, "e": 24983, "s": 24955, "text": "\n08 Aug, 2019" }, { "code": null, "e": 25147, "s": 24983, "text": "In CSS the columns property is used to set the number of columns and the width of the columns. This is a shorthand property and can take multiple values at a time." }, { "code": null, "e": 25155, "s": 25147, "text": "Syntax:" }, { "code": null, "e": 25220, "s": 25155, "text": "columns: column-width columns-count | auto | initial | inherit;\n" }, { "code": null, "e": 25236, "s": 25220, "text": "PropertyValues:" }, { "code": null, "e": 28037, "s": 25236, "text": "auto: This sets the column-width and column-count values to their browser default values.Syntax:columns: auto auto;\nExample 1: Using auto as the value.<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: auto auto; /* Chrome, Safari, Opera */ -moz-columns: auto auto; /* Firefox */ columns: auto auto; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html>Output:Note: If any of the values among the column-width and column-count are not specified, then the browser assumes their value as auto by default." }, { "code": null, "e": 28045, "s": 28037, "text": "Syntax:" }, { "code": null, "e": 28066, "s": 28045, "text": "columns: auto auto;\n" }, { "code": null, "e": 28102, "s": 28066, "text": "Example 1: Using auto as the value." }, { "code": "<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: auto auto; /* Chrome, Safari, Opera */ -moz-columns: auto auto; /* Firefox */ columns: auto auto; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. 
<p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html>", "e": 30603, "s": 28102, "text": null }, { "code": null, "e": 30611, "s": 30603, "text": "Output:" }, { "code": null, "e": 30754, "s": 30611, "text": "Note: If any of the values among the column-width and column-count are not specified, then the browser assumes their value as auto by default." }, { "code": null, "e": 33398, "s": 30754, "text": "integer: This is used to specify the column-width and the column-count using integer values.Syntax:columns: column-width column-count;\nExample 2:<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: 60px 5; /* Chrome, Safari, Opera */ -moz-columns: 60px 5; /* Firefox */ columns: 60px 5; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. 
</p> </p> </div></body> </html>Output:" }, { "code": null, "e": 33406, "s": 33398, "text": "Syntax:" }, { "code": null, "e": 33443, "s": 33406, "text": "columns: column-width column-count;\n" }, { "code": null, "e": 33454, "s": 33443, "text": "Example 2:" }, { "code": "<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: 60px 5; /* Chrome, Safari, Opera */ -moz-columns: 60px 5; /* Firefox */ columns: 60px 5; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html>", "e": 35946, "s": 33454, "text": null }, { "code": null, "e": 35954, "s": 35946, "text": "Output:" }, { "code": null, "e": 38587, "s": 35954, "text": "initial: Initializes the values to their initial default values.Syntax:columns: initial initial;\nExample 3:<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: initial initial; /* Chrome, Safari, Opera */ -moz-columns: initial initial; /* Firefox */ columns: initial initial; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. 
When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html>Output:" }, { "code": null, "e": 38595, "s": 38587, "text": "Syntax:" }, { "code": null, "e": 38622, "s": 38595, "text": "columns: initial initial;\n" }, { "code": null, "e": 38633, "s": 38622, "text": "Example 3:" }, { "code": "<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: initial initial; /* Chrome, Safari, Opera */ -moz-columns: initial initial; /* Firefox */ columns: initial initial; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html>", "e": 41152, "s": 38633, "text": null }, { "code": null, "e": 41160, "s": 41152, "text": "Output:" }, { "code": null, "e": 43792, "s": 41160, "text": "inherit: Inherits the value from its parent element.Syntax:columns: inherit inherit;\nExample 4:<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: inherit inherit; /* Chrome, Safari, Opera */ -moz-columns: inherit inherit; /* Firefox */ columns: inherit inherit; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. 
<p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. </p> </p> </div></body> </html> </html>Output:" }, { "code": null, "e": 43800, "s": 43792, "text": "Syntax:" }, { "code": null, "e": 43827, "s": 43800, "text": "columns: inherit inherit;\n" }, { "code": null, "e": 43838, "s": 43827, "text": "Example 4:" }, { "code": "<!DOCTYPE html><html> <head> <title> CSS | columns Property </title> <style> body { text-align: center; color: green; } .GFG { -webkit-columns: inherit inherit; /* Chrome, Safari, Opera */ -moz-columns: inherit inherit; /* Firefox */ columns: inherit inherit; } </style></head> <body> <h1>The column Property</h1> <div class=\"GFG\"> <h2>Welcome to the world of Geeks!!</h2> How many times were you frustrated while looking out for a good collection of programming/algorithm/interview questions? What did you expect and what did you get? This portal has been created to provide well written, well thought and well-explained solutions for selected questions. <p> <strong>Our team includes:</strong> <p> Sandeep Jain: An IIT Roorkee alumnus and founder of GeeksforGeeks. He loves to solve programming problems in most efficient ways. Apart from GeeksforGeeks, he has worked with DE Shaw and Co. as a software developer and JIIT Noida as an assistant professor. </p> <p> Vaibhav Bajpai: Amazed by computer science,he is a technology enthusiast who enjoys being a part of a development. Off from work, you canfind him in love with movies, food, and friends. </p> <p> Shikhar Goel: A Computer Science graduate who likes to make things simpler. When he's not working, you can find him surfing the web, learning facts, tricks and life hacks. He also enjoys movies in his leisure time. </p> <p> Dharmesh Singh: A software developer who is always trying to push boundaries in search of great breakthroughs. Off from his desk, you can find him cheering up his buddies and enjoying life. </p> <p> Shubham Baranwal: A passionate developer who always tries to learn new technology and software. In his free time, either he reads some articles or learns some other stuff. 
</p> </p> </div></body> </html> </html>", "e": 46368, "s": 43838, "text": null }, { "code": null, "e": 46376, "s": 46368, "text": "Output:" }, { "code": null, "e": 46456, "s": 46376, "text": "Supported Browser: The browsers supported by columns Property are listed below:" }, { "code": null, "e": 46482, "s": 46456, "text": "Chrome 50.0, 4.0 -webkit-" }, { "code": null, "e": 46492, "s": 46482, "text": "Edge 10.0" }, { "code": null, "e": 46516, "s": 46492, "text": "Firefox 52.0, 9.0 -moz-" }, { "code": null, "e": 46547, "s": 46516, "text": "Opera 37.0, 15.0 -webkit- 11.1" }, { "code": null, "e": 46572, "s": 46547, "text": "Safari 9.0, 3.1 -webkit-" }, { "code": null, "e": 46587, "s": 46572, "text": "CSS-Properties" }, { "code": null, "e": 46594, "s": 46587, "text": "Picked" }, { "code": null, "e": 46598, "s": 46594, "text": "CSS" }, { "code": null, "e": 46615, "s": 46598, "text": "Web Technologies" }, { "code": null, "e": 46713, "s": 46615, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 46763, "s": 46713, "text": "How to insert spaces/tabs in text using HTML/CSS?" }, { "code": null, "e": 46825, "s": 46763, "text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills" }, { "code": null, "e": 46883, "s": 46825, "text": "How to create footer to stay at the bottom of a Web page?" }, { "code": null, "e": 46931, "s": 46883, "text": "How to update Node.js and NPM to next version ?" }, { "code": null, "e": 46981, "s": 46931, "text": "CSS to put icon inside an input element in a form" }, { "code": null, "e": 47021, "s": 46981, "text": "Remove elements from a JavaScript Array" }, { "code": null, "e": 47054, "s": 47021, "text": "Installation of Node.js on Linux" }, { "code": null, "e": 47099, "s": 47054, "text": "Convert a string to an integer in JavaScript" }, { "code": null, "e": 47142, "s": 47099, "text": "How to fetch data from an API in ReactJS ?" } ]
move_to_element method - Action Chains in Selenium Python - GeeksforGeeks
15 May, 2020 Selenium’s Python Module is built to perform automated testing with Python. ActionChains are a way to automate low-level interactions such as mouse movements, mouse button actions, keypress, and context menu interactions. This is useful for doing more complex actions like hover over and drag and drop. Action chain methods are used by advanced scripts where we need to drag an element, click an element, double click, etc. This article revolves around the move_to_element method on Action Chains in Python Selenium. The move_to_element method is used to move the mouse to the middle of an element. Syntax – move_to_element(to_element) to_element: The WebElement to move to. Example – <input type ="text" name ="passwd" id ="passwd-id" /> To find an element one needs to use one of the locating strategies. For example, element = driver.find_element_by_id("passwd-id") element = driver.find_element_by_name("passwd") Now one can use the move_to_element method as an Action chain as below – move_to_element(to_element=element) To demonstrate the move_to_element method of Action Chains in Selenium Python, let’s visit https://www.geeksforgeeks.org/ and operate on an element. Program – # import webdriver from selenium import webdriver # import Action chains from selenium.webdriver.common.action_chains import ActionChains # create webdriver object driver = webdriver.Firefox() # get geeksforgeeks.org driver.get("https://www.geeksforgeeks.org/") # get element element = driver.find_element_by_link_text("Courses") # create action chain object action = ActionChains(driver) # perform the operation action.move_to_element(element).click().perform() Output –
[ { "code": null, "e": 24574, "s": 24546, "text": "\n15 May, 2020" }, { "code": null, "e": 25172, "s": 24574, "text": "Selenium’s Python Module is built to perform automated testing with Python. ActionChains are a way to automate low-level interactions such as mouse movements, mouse button actions, keypress, and context menu interactions. This is useful for doing more complex actions like hover over and drag and drop. Action chain methods are used by advanced scripts where we need to drag an element, click an element, double click, etc.This article revolves around move_to_element method on Action Chains in Python Selenium. move_to_element method is used to move the mouse to the middle of an element.Syntax –" }, { "code": null, "e": 25200, "s": 25172, "text": "move_to_element(to_element)" }, { "code": null, "e": 25239, "s": 25200, "text": "to_element: The WebElement to move to." }, { "code": null, "e": 25249, "s": 25239, "text": "Example –" }, { "code": "<input type =\"text\" name =\"passwd\" id =\"passwd-id\" />", "e": 25303, "s": 25249, "text": null }, { "code": null, "e": 25384, "s": 25303, "text": "To find an element one needs to use one of the locating strategies, For example," }, { "code": "element = driver.find_element_by_id(\"passwd-id\")element = driver.find_element_by_name(\"passwd\")", "e": 25480, "s": 25384, "text": null }, { "code": null, "e": 25549, "s": 25480, "text": "Now one can use move_to_element method as an Action chain as below –" }, { "code": null, "e": 25586, "s": 25549, "text": "move_to_element(to_element=element)\n" }, { "code": null, "e": 25733, "s": 25586, "text": "To demonstrate, move_to_element method of Action Chains in Selenium Python. Let’ s visit https://www.geeksforgeeks.org/ and operate on an element." }, { "code": null, "e": 25743, "s": 25733, "text": "Program –" }, { "code": "# import webdriverfrom selenium import webdriver # import Action chains from selenium.webdriver.common.action_chains import ActionChains # create webdriver objectdriver = webdriver.Firefox() # get geeksforgeeks.orgdriver.get(\"https://www.geeksforgeeks.org/\") # get element element = driver.find_element_by_link_text(\"Courses\") # create action chain objectaction = ActionChains(driver) # perform the operationaction.move_to_element(element).click().perform()", "e": 26208, "s": 25743, "text": null }, { "code": null, "e": 26217, "s": 26208, "text": "Output –" }, { "code": null, "e": 26233, "s": 26217, "text": "Python-selenium" }, { "code": null, "e": 26242, "s": 26233, "text": "selenium" }, { "code": null, "e": 26249, "s": 26242, "text": "Python" }, { "code": null, "e": 26347, "s": 26249, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 26356, "s": 26347, "text": "Comments" }, { "code": null, "e": 26369, "s": 26356, "text": "Old Comments" }, { "code": null, "e": 26387, "s": 26369, "text": "Python Dictionary" }, { "code": null, "e": 26409, "s": 26387, "text": "Enumerate() in Python" }, { "code": null, "e": 26441, "s": 26409, "text": "How to Install PIP on Windows ?" 
}, { "code": null, "e": 26483, "s": 26441, "text": "Different ways to create Pandas Dataframe" }, { "code": null, "e": 26509, "s": 26483, "text": "Python String | replace()" }, { "code": null, "e": 26546, "s": 26509, "text": "Create a Pandas DataFrame from Lists" }, { "code": null, "e": 26601, "s": 26546, "text": "Selecting rows in pandas DataFrame based on conditions" }, { "code": null, "e": 26645, "s": 26601, "text": "Reading and Writing to text files in Python" }, { "code": null, "e": 26670, "s": 26645, "text": "sum() function in Python" } ]
How to create a gradient Video Progress Bar? - GeeksforGeeks
28 May, 2020 Dynamic Progress Bars in videos are quite visually attractive in themselves. Here, we’ll be creating a progress bar that changes its color gradientally using simple HTML, CSS, and some JavaScript. The gradient here refers to change in color from one to another quite smoothly. Here we will be changing the gradient color from green to cyan. Pre-built function used:Mainly we will just need the setInterval() method. Syntax: setInterval(function_reference, time interval) Let us start by creating the HTML code:Example: HTML code<!DOCTYPE html><html><head><title>Gradient Progress Bar</title></head><body><h2>Welcome To GFG</h2><figure> <video id="video" src="GFG.mp4"> </video> <figcaption> <button id="play" aria-label="Play" role="button"> Play </button> <progress id="progress" max="100" value="0"> Progress </progress> </figcaption></figure></body></html>The main line here is:<progress id="progress" max="100" value="0">Progress</progress> Here, the max attribute tells us that the maximum value in the video here will be 100 and the value attribute tells that we are starting from 0.We chose this as it is quite simple to understand in terms of percentage of the video played with 100 being completely played and 0 is not started yet. Let us start by creating the HTML code: Example: HTML code <!DOCTYPE html><html><head><title>Gradient Progress Bar</title></head><body><h2>Welcome To GFG</h2><figure> <video id="video" src="GFG.mp4"> </video> <figcaption> <button id="play" aria-label="Play" role="button"> Play </button> <progress id="progress" max="100" value="0"> Progress </progress> </figcaption></figure></body></html> The main line here is: <progress id="progress" max="100" value="0">Progress</progress> Here, the max attribute tells us that the maximum value in the video here will be 100 and the value attribute tells that we are starting from 0. We chose this as it is quite simple to understand in terms of percentage of the video played with 100 being completely played and 0 is not started yet. Next, we add the CSS of our choice:Example: CSS codeprogress[value] { appearance: none; border: none; color: green; } progress[value]::-webkit-progress-value { background-image: linear-gradient( to right, #00ff00, #014f01 ); transition: width 1s linear;}Now, begins the JavaScript coding for the progress bar. We’ll be doing this step by step so that you could understand quite easily. Next, we add the CSS of our choice: Example: CSS code progress[value] { appearance: none; border: none; color: green; } progress[value]::-webkit-progress-value { background-image: linear-gradient( to right, #00ff00, #014f01 ); transition: width 1s linear;} Now, begins the JavaScript coding for the progress bar. We’ll be doing this step by step so that you could understand quite easily. Getting Progress Values from the video:Let’s do this in steps:Get the current time of the video.Get the total duration of the video.Calculate the progress value using the basic math formula of percentages: (currentTime/duration = progressValue)The Math.round() function will then update the results to nearest whole integers.Example: Javascript code for progress value.const progress = document.getElementById("progress");function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); });} progressLoop(); Getting Progress Values from the video: Let’s do this in steps: Get the current time of the video. Get the total duration of the video. 
Calculate the progress value using the basic math formula of percentages: (currentTime/duration = progressValue) The Math.round() function will then update the results to nearest whole integers. Example: Javascript code for progress value. const progress = document.getElementById("progress");function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); });} progressLoop(); The basic task is done, You will have a gradient progress bar using the above steps. Further, we’ll be adding some extra features listed below: Showing the current time. Adding a play button. Showing the current time:This is quite easy to do! We can just count the seconds as the video plays and show them in a timer next to the progress bar.Example:Add a timer label in the HTML file:<figure> <video controls id="video" src="GFG.mp4"> </video> <figcaption> <label id="timer" for="progress" role="timer"></label> <progress id="progress" max="100" value="0"> Progress </progress> </figcaption></figure> Showing the current time: This is quite easy to do! We can just count the seconds as the video plays and show them in a timer next to the progress bar. Example: Add a timer label in the HTML file: <figure> <video controls id="video" src="GFG.mp4"> </video> <figcaption> <label id="timer" for="progress" role="timer"></label> <progress id="progress" max="100" value="0"> Progress </progress> </figcaption></figure> Now we just need to assign it a variable and use the innerHTML to print the current value inside it:Example:const progress = document.getElementById("progress");const timer = document.getElementById( "timer" ); function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); timer.innerHTML = Math.round(video.currentTime) + " seconds"; });} progressLoop();And you’ll get a timer showing the seconds passed. Now we just need to assign it a variable and use the innerHTML to print the current value inside it: Example: const progress = document.getElementById("progress");const timer = document.getElementById( "timer" ); function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); timer.innerHTML = Math.round(video.currentTime) + " seconds"; });} progressLoop(); And you’ll get a timer showing the seconds passed. Adding the Play Button:A video tag has a control attribute which when called comes with the play, progress, volume, skip video features. Let us drop this attribute and create a Play/Pause button:Example:Add a button to the HTML:<figure> <video id="video" src="GFG.mp4"> </video> <figcaption> <label id="timer" for="progress" role="timer"></label> <button id="play" aria-label="Play" role="button"> Play </button> <progress id="progress" max="100" value="0"> Progress </progress> </figcaption></figure> Adding the Play Button: A video tag has a control attribute which when called comes with the play, progress, volume, skip video features. 
Let us drop this attribute and create a Play/Pause button: Example: Add a button to the HTML: <figure> <video id="video" src="GFG.mp4"> </video> <figcaption> <label id="timer" for="progress" role="timer"></label> <button id="play" aria-label="Play" role="button"> Play </button> <progress id="progress" max="100" value="0"> Progress </progress> </figcaption></figure> Now connect it with a JavaScript function that toggles the video between Play and Pause:Example:button = document.getElementById( "play" ); function playPause() { if ( video.paused ) { video.play(); button.innerHTML = "Pause"; } else { video.pause(); button.innerHTML = "Play"; }} button.addEventListener( "click", playPause );video.addEventListener("play", progressLoop);Now, you’ll see that the pre-built control panel has been removed and the button has replaced it.Finally, just add your required CSS on background, video box, and button and you’ll have a Gradient Video Progress Bar. Now connect it with a JavaScript function that toggles the video between Play and Pause: Example: button = document.getElementById( "play" ); function playPause() { if ( video.paused ) { video.play(); button.innerHTML = "Pause"; } else { video.pause(); button.innerHTML = "Play"; }} button.addEventListener( "click", playPause );video.addEventListener("play", progressLoop); Now, you’ll see that the pre-built control panel has been removed and the button has replaced it. Finally, just add your required CSS on background, video box, and button and you’ll have a Gradient Video Progress Bar. Complete code: <!DOCTYPE html><html> <head> <title>Gradient Video Progress Bar</title> <style> body { background-image: radial-gradient(circle at top right, green, cyan); display: grid; height: 100vh; place-items: center; width: 100%; } figure { width: 50%; } video { display: block; width: 100%; } figcaption { align-items: center; background: #eaeaea; display: grid; grid-gap: 1rem; grid-template-columns: 50px auto min(115px); padding: 0.5rem; } button { border: 0; background: green; display: inline; color: white; order: 1; padding: 0.5rem; transition: opacity 0.25s ease-out; width: 100%; } button:hover { cursor: pointer; opacity: 0.8; } label { order: 2; text-align: center; } /* Fallback stuff */ progress[value] { appearance: none; border: none; border-radius: 3px; box-shadow: 0 2px 3px rgba(0, 0, 0, 0.25) inset; color: dodgerblue; display: inline; height: 15px; order: 1; position: relative; width: 100%; } /* WebKit styles */ progress[value]::-webkit-progress-bar { background-color: white; border-radius: 3px; box-shadow: 0 2px 3px rgba(0, 0, 0, 0.25) inset; } progress[value]::-webkit-progress-value { background-image: linear-gradient(to right, green, cyan); border-radius: 3px; position: relative; transition: width 1s linear; } </style> </head> <body> <h2 style="color: black; font-size: 70px;"> Welcome To GFG </h2> <figure> <video id="video" src="https://media.geeksforgeeks.org/wp-content/uploads/20191016154640/geeks6.mp4"> </video> <figcaption> <label id="timer" for="progress" role="timer"> </label> <button id="play" aria-label="Play" role="button"> Play </button> <progress id="progress" max="100" value="0"> Progress </progress> </figcaption> </figure> <script> const progress = document.getElementById("progress"); const timer = document.getElementById("timer"); button = document.getElementById("play"); function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); timer.innerHTML = 
Math.round(video.currentTime) + " seconds"; }); } function playPause() { if (video.paused) { video.play(); button.innerHTML = "Pause"; } else { video.pause(); button.innerHTML = "Play"; } } button.addEventListener("click", playPause); video.addEventListener("play", progressLoop); </script> </body></html> Output:
[ { "code": null, "e": 26731, "s": 26703, "text": "\n28 May, 2020" }, { "code": null, "e": 27072, "s": 26731, "text": "Dynamic Progress Bars in videos are quite visually attractive in themselves. Here, we’ll be creating a progress bar that changes its color gradientally using simple HTML, CSS, and some JavaScript. The gradient here refers to change in color from one to another quite smoothly. Here we will be changing the gradient color from green to cyan." }, { "code": null, "e": 27147, "s": 27072, "text": "Pre-built function used:Mainly we will just need the setInterval() method." }, { "code": null, "e": 27155, "s": 27147, "text": "Syntax:" }, { "code": null, "e": 27202, "s": 27155, "text": "setInterval(function_reference, time interval)" }, { "code": null, "e": 28020, "s": 27202, "text": "Let us start by creating the HTML code:Example: HTML code<!DOCTYPE html><html><head><title>Gradient Progress Bar</title></head><body><h2>Welcome To GFG</h2><figure> <video id=\"video\" src=\"GFG.mp4\"> </video> <figcaption> <button id=\"play\" aria-label=\"Play\" role=\"button\"> Play </button> <progress id=\"progress\" max=\"100\" value=\"0\"> Progress </progress> </figcaption></figure></body></html>The main line here is:<progress id=\"progress\" max=\"100\" value=\"0\">Progress</progress>\nHere, the max attribute tells us that the maximum value in the video here will be 100 and the value attribute tells that we are starting from 0.We chose this as it is quite simple to understand in terms of percentage of the video played with 100 being completely played and 0 is not started yet." }, { "code": null, "e": 28060, "s": 28020, "text": "Let us start by creating the HTML code:" }, { "code": null, "e": 28079, "s": 28060, "text": "Example: HTML code" }, { "code": "<!DOCTYPE html><html><head><title>Gradient Progress Bar</title></head><body><h2>Welcome To GFG</h2><figure> <video id=\"video\" src=\"GFG.mp4\"> </video> <figcaption> <button id=\"play\" aria-label=\"Play\" role=\"button\"> Play </button> <progress id=\"progress\" max=\"100\" value=\"0\"> Progress </progress> </figcaption></figure></body></html>", "e": 28459, "s": 28079, "text": null }, { "code": null, "e": 28482, "s": 28459, "text": "The main line here is:" }, { "code": null, "e": 28547, "s": 28482, "text": "<progress id=\"progress\" max=\"100\" value=\"0\">Progress</progress>\n" }, { "code": null, "e": 28692, "s": 28547, "text": "Here, the max attribute tells us that the maximum value in the video here will be 100 and the value attribute tells that we are starting from 0." }, { "code": null, "e": 28844, "s": 28692, "text": "We chose this as it is quite simple to understand in terms of percentage of the video played with 100 being completely played and 0 is not started yet." }, { "code": null, "e": 29251, "s": 28844, "text": "Next, we add the CSS of our choice:Example: CSS codeprogress[value] { appearance: none; border: none; color: green; } progress[value]::-webkit-progress-value { background-image: linear-gradient( to right, #00ff00, #014f01 ); transition: width 1s linear;}Now, begins the JavaScript coding for the progress bar. We’ll be doing this step by step so that you could understand quite easily." 
}, { "code": null, "e": 29287, "s": 29251, "text": "Next, we add the CSS of our choice:" }, { "code": null, "e": 29305, "s": 29287, "text": "Example: CSS code" }, { "code": "progress[value] { appearance: none; border: none; color: green; } progress[value]::-webkit-progress-value { background-image: linear-gradient( to right, #00ff00, #014f01 ); transition: width 1s linear;}", "e": 29529, "s": 29305, "text": null }, { "code": null, "e": 29661, "s": 29529, "text": "Now, begins the JavaScript coding for the progress bar. We’ll be doing this step by step so that you could understand quite easily." }, { "code": null, "e": 30236, "s": 29661, "text": "Getting Progress Values from the video:Let’s do this in steps:Get the current time of the video.Get the total duration of the video.Calculate the progress value using the basic math formula of percentages: (currentTime/duration = progressValue)The Math.round() function will then update the results to nearest whole integers.Example: Javascript code for progress value.const progress = document.getElementById(\"progress\");function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); });} progressLoop();" }, { "code": null, "e": 30276, "s": 30236, "text": "Getting Progress Values from the video:" }, { "code": null, "e": 30300, "s": 30276, "text": "Let’s do this in steps:" }, { "code": null, "e": 30335, "s": 30300, "text": "Get the current time of the video." }, { "code": null, "e": 30372, "s": 30335, "text": "Get the total duration of the video." }, { "code": null, "e": 30486, "s": 30372, "text": "Calculate the progress value using the basic math formula of percentages: (currentTime/duration = progressValue)" }, { "code": null, "e": 30568, "s": 30486, "text": "The Math.round() function will then update the results to nearest whole integers." }, { "code": null, "e": 30613, "s": 30568, "text": "Example: Javascript code for progress value." }, { "code": "const progress = document.getElementById(\"progress\");function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); });} progressLoop();", "e": 30818, "s": 30613, "text": null }, { "code": null, "e": 30903, "s": 30818, "text": "The basic task is done, You will have a gradient progress bar using the above steps." }, { "code": null, "e": 30962, "s": 30903, "text": "Further, we’ll be adding some extra features listed below:" }, { "code": null, "e": 30988, "s": 30962, "text": "Showing the current time." }, { "code": null, "e": 31010, "s": 30988, "text": "Adding a play button." }, { "code": null, "e": 31453, "s": 31010, "text": "Showing the current time:This is quite easy to do! We can just count the seconds as the video plays and show them in a timer next to the progress bar.Example:Add a timer label in the HTML file:<figure> <video controls id=\"video\" src=\"GFG.mp4\"> </video> <figcaption> <label id=\"timer\" for=\"progress\" role=\"timer\"></label> <progress id=\"progress\" max=\"100\" value=\"0\"> Progress </progress> </figcaption></figure>" }, { "code": null, "e": 31479, "s": 31453, "text": "Showing the current time:" }, { "code": null, "e": 31605, "s": 31479, "text": "This is quite easy to do! We can just count the seconds as the video plays and show them in a timer next to the progress bar." 
}, { "code": null, "e": 31614, "s": 31605, "text": "Example:" }, { "code": null, "e": 31650, "s": 31614, "text": "Add a timer label in the HTML file:" }, { "code": "<figure> <video controls id=\"video\" src=\"GFG.mp4\"> </video> <figcaption> <label id=\"timer\" for=\"progress\" role=\"timer\"></label> <progress id=\"progress\" max=\"100\" value=\"0\"> Progress </progress> </figcaption></figure>", "e": 31900, "s": 31650, "text": null }, { "code": null, "e": 32379, "s": 31900, "text": "Now we just need to assign it a variable and use the innerHTML to print the current value inside it:Example:const progress = document.getElementById(\"progress\");const timer = document.getElementById( \"timer\" ); function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); timer.innerHTML = Math.round(video.currentTime) + \" seconds\"; });} progressLoop();And you’ll get a timer showing the seconds passed." }, { "code": null, "e": 32480, "s": 32379, "text": "Now we just need to assign it a variable and use the innerHTML to print the current value inside it:" }, { "code": null, "e": 32489, "s": 32480, "text": "Example:" }, { "code": "const progress = document.getElementById(\"progress\");const timer = document.getElementById( \"timer\" ); function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); timer.innerHTML = Math.round(video.currentTime) + \" seconds\"; });} progressLoop();", "e": 32810, "s": 32489, "text": null }, { "code": null, "e": 32861, "s": 32810, "text": "And you’ll get a timer showing the seconds passed." }, { "code": null, "e": 33399, "s": 32861, "text": "Adding the Play Button:A video tag has a control attribute which when called comes with the play, progress, volume, skip video features. Let us drop this attribute and create a Play/Pause button:Example:Add a button to the HTML:<figure> <video id=\"video\" src=\"GFG.mp4\"> </video> <figcaption> <label id=\"timer\" for=\"progress\" role=\"timer\"></label> <button id=\"play\" aria-label=\"Play\" role=\"button\"> Play </button> <progress id=\"progress\" max=\"100\" value=\"0\"> Progress </progress> </figcaption></figure>" }, { "code": null, "e": 33423, "s": 33399, "text": "Adding the Play Button:" }, { "code": null, "e": 33596, "s": 33423, "text": "A video tag has a control attribute which when called comes with the play, progress, volume, skip video features. 
Let us drop this attribute and create a Play/Pause button:" }, { "code": null, "e": 33605, "s": 33596, "text": "Example:" }, { "code": null, "e": 33631, "s": 33605, "text": "Add a button to the HTML:" }, { "code": "<figure> <video id=\"video\" src=\"GFG.mp4\"> </video> <figcaption> <label id=\"timer\" for=\"progress\" role=\"timer\"></label> <button id=\"play\" aria-label=\"Play\" role=\"button\"> Play </button> <progress id=\"progress\" max=\"100\" value=\"0\"> Progress </progress> </figcaption></figure>", "e": 33941, "s": 33631, "text": null }, { "code": null, "e": 34551, "s": 33941, "text": "Now connect it with a JavaScript function that toggles the video between Play and Pause:Example:button = document.getElementById( \"play\" ); function playPause() { if ( video.paused ) { video.play(); button.innerHTML = \"Pause\"; } else { video.pause(); button.innerHTML = \"Play\"; }} button.addEventListener( \"click\", playPause );video.addEventListener(\"play\", progressLoop);Now, you’ll see that the pre-built control panel has been removed and the button has replaced it.Finally, just add your required CSS on background, video box, and button and you’ll have a Gradient Video Progress Bar." }, { "code": null, "e": 34640, "s": 34551, "text": "Now connect it with a JavaScript function that toggles the video between Play and Pause:" }, { "code": null, "e": 34649, "s": 34640, "text": "Example:" }, { "code": "button = document.getElementById( \"play\" ); function playPause() { if ( video.paused ) { video.play(); button.innerHTML = \"Pause\"; } else { video.pause(); button.innerHTML = \"Play\"; }} button.addEventListener( \"click\", playPause );video.addEventListener(\"play\", progressLoop);", "e": 34947, "s": 34649, "text": null }, { "code": null, "e": 35045, "s": 34947, "text": "Now, you’ll see that the pre-built control panel has been removed and the button has replaced it." }, { "code": null, "e": 35165, "s": 35045, "text": "Finally, just add your required CSS on background, video box, and button and you’ll have a Gradient Video Progress Bar." 
}, { "code": null, "e": 35180, "s": 35165, "text": "Complete code:" }, { "code": "<!DOCTYPE html><html> <head> <title>Gradient Video Progress Bar</title> <style> body { background-image: radial-gradient(circle at top right, green, cyan); display: grid; height: 100vh; place-items: center; width: 100%; } figure { width: 50%; } video { display: block; width: 100%; } figcaption { align-items: center; background: #eaeaea; display: grid; grid-gap: 1rem; grid-template-columns: 50px auto min(115px); padding: 0.5rem; } button { border: 0; background: green; display: inline; color: white; order: 1; padding: 0.5rem; transition: opacity 0.25s ease-out; width: 100%; } button:hover { cursor: pointer; opacity: 0.8; } label { order: 2; text-align: center; } /* Fallback stuff */ progress[value] { appearance: none; border: none; border-radius: 3px; box-shadow: 0 2px 3px rgba(0, 0, 0, 0.25) inset; color: dodgerblue; display: inline; height: 15px; order: 1; position: relative; width: 100%; } /* WebKit styles */ progress[value]::-webkit-progress-bar { background-color: white; border-radius: 3px; box-shadow: 0 2px 3px rgba(0, 0, 0, 0.25) inset; } progress[value]::-webkit-progress-value { background-image: linear-gradient(to right, green, cyan); border-radius: 3px; position: relative; transition: width 1s linear; } </style> </head> <body> <h2 style=\"color: black; font-size: 70px;\"> Welcome To GFG </h2> <figure> <video id=\"video\" src=\"https://media.geeksforgeeks.org/wp-content/uploads/20191016154640/geeks6.mp4\"> </video> <figcaption> <label id=\"timer\" for=\"progress\" role=\"timer\"> </label> <button id=\"play\" aria-label=\"Play\" role=\"button\"> Play </button> <progress id=\"progress\" max=\"100\" value=\"0\"> Progress </progress> </figcaption> </figure> <script> const progress = document.getElementById(\"progress\"); const timer = document.getElementById(\"timer\"); button = document.getElementById(\"play\"); function progressLoop() { setInterval(function () { progress.value = Math.round((video.currentTime / video.duration) * 100); timer.innerHTML = Math.round(video.currentTime) + \" seconds\"; }); } function playPause() { if (video.paused) { video.play(); button.innerHTML = \"Pause\"; } else { video.pause(); button.innerHTML = \"Play\"; } } button.addEventListener(\"click\", playPause); video.addEventListener(\"play\", progressLoop); </script> </body></html> ", "e": 38245, "s": 35180, "text": null }, { "code": null, "e": 38253, "s": 38245, "text": "Output:" }, { "code": null, "e": 38262, "s": 38253, "text": "CSS-Misc" }, { "code": null, "e": 38278, "s": 38262, "text": "JavaScript-Misc" }, { "code": null, "e": 38282, "s": 38278, "text": "CSS" }, { "code": null, "e": 38287, "s": 38282, "text": "HTML" }, { "code": null, "e": 38298, "s": 38287, "text": "JavaScript" }, { "code": null, "e": 38315, "s": 38298, "text": "Web Technologies" }, { "code": null, "e": 38320, "s": 38315, "text": "HTML" }, { "code": null, "e": 38418, "s": 38320, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 38457, "s": 38418, "text": "How to set space between the flexbox ?" }, { "code": null, "e": 38494, "s": 38457, "text": "Design a web page using HTML and CSS" }, { "code": null, "e": 38523, "s": 38494, "text": "Form validation using jQuery" }, { "code": null, "e": 38558, "s": 38523, "text": "How to style a checkbox using CSS?" 
}, { "code": null, "e": 38600, "s": 38558, "text": "Search Bar using HTML, CSS and JavaScript" }, { "code": null, "e": 38660, "s": 38600, "text": "How to set the default value for an HTML <select> element ?" }, { "code": null, "e": 38713, "s": 38660, "text": "Hide or show elements in HTML using display property" }, { "code": null, "e": 38774, "s": 38713, "text": "How to set input type date in dd-mm-yyyy format using HTML ?" }, { "code": null, "e": 38798, "s": 38774, "text": "REST API (Introduction)" } ]
Boosting performance with XGBoost | by Shubham Goyal | Towards Data Science
In this blog, we are going to see how XGBoost works and some of its important features with the help of an example. Many of us have heard about tree models and boosting techniques. Let's put these concepts together and talk about XGBoost, one of the most powerful machine learning algorithms out there.

XGBoost stands for eXtreme Gradient Boosted trees. The name, though, actually refers to the engineering goal of pushing the limit of computational resources for boosted tree algorithms, which is the reason why many people use XGBoost. Ever since its introduction in 2014, XGBoost has shown high predictive power and is almost 10 times faster than other gradient boosting techniques. It also includes a variety of regularization options which reduce overfitting and improve overall performance. Hence it is also known as the 'regularized boosting' technique. XGBoost has proved its mettle in terms of performance and speed.

Okay, don't worry, let's talk about boosting first. We have encountered a lot of tree-based algorithms like decision trees, in which we train a single model on a particular dataset, maybe with some parameter tuning. Also, in ensemble models, we train all the models separately. Boosting is also an ensemble technique, which combines many models to give a final one, but rather than evaluating all models separately, boosting trains models in sequence. That means every new model is trained to correct the errors of the previous model, and the sequence stops when there is no further improvement. That is why it is more accurate.

There is a comprehensive installation guide on the XGBoost documentation website. It covers installation for Linux, Mac OS X, and Windows. It also covers installation on platforms such as R and Python.

So, the first thing is to prepare the data for our model. We are going to use the iris flower dataset from Scikit Learn. Here, we load our dataset from Sklearn in Python and also import the XGBoost library:

from sklearn import datasets
import xgboost as xgb

iris = datasets.load_iris()
X = iris.data
y = iris.target

Next, we have to split our dataset into two parts: train and test data. This is an important step to see how well our model performs. So, we are going to split our data into an 80%-20% split.

from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)

Unlike the rest of the algorithms, XGBoost needs our data to be transformed into a specific format, i.e. DMatrix. DMatrix is an internal data structure used by XGBoost which is optimized for both memory efficiency and training speed.

D_train = xgb.DMatrix(X_train, label=Y_train)
D_test = xgb.DMatrix(X_test, label=Y_test)

Now we have our NumPy arrays of data converted to DMatrix format to feed our model. But before that, we need to define our model.

The first thing we have to do is to define the parameters of our gradient boosting ensemble. There is a large number of parameters available for our model, but for now, we are going to focus on some of the important ones. The full list of possible parameters is available on the official XGBoost website.

param = {
    'eta': 0.2,
    'max_depth': 4,
    'objective': 'multi:softprob',
    'num_class': 3
}
epochs = 20

So here are our parameters:

max_depth: maximum depth of the decision trees being trained
objective: the loss function being used
num_class: the number of classes in the dataset
eta: the learning rate

As we already know, this kind of model works in a sequential way, which makes it more complex.
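To make the sequential idea concrete, here is a minimal sketch of gradient boosting for regression, written from scratch with NumPy and Scikit Learn's DecisionTreeRegressor. It is an illustration of the principle only, not what XGBoost does internally (XGBoost adds regularization, second-order gradients, and many optimizations):

import numpy as np
from sklearn.tree import DecisionTreeRegressor

# toy data: y = x^2 plus noise
rng = np.random.RandomState(0)
x = rng.uniform(-3, 3, size=(200, 1))
y = x[:, 0] ** 2 + rng.normal(scale=0.3, size=200)

eta = 0.2                             # learning rate, as in the param dict above
prediction = np.full(200, y.mean())   # start from a constant prediction
trees = []

for step in range(20):
    residuals = y - prediction           # what the ensemble still gets wrong
    tree = DecisionTreeRegressor(max_depth=4)
    tree.fit(x, residuals)               # the next tree learns the remaining error
    prediction += eta * tree.predict(x)  # add its contribution with weight eta
    trees.append(tree)

Each tree only learns the residuals of the ensemble built so far, and eta shrinks its contribution before it is added.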
This technique is very prone to overfitting. The eta parameter (the learning rate) helps our algorithm prevent overfitting: instead of adding the predictions of new trees to the ensemble with full weight, the residuals being added are multiplied by eta to reduce their weight.

Note: It is advised to use small values of eta, in the range of 0.1 to 0.3.

We have our model defined now, let's train it:

model = xgb.train(param, D_train, epochs)

It is a very similar process to Scikit Learn, and running an evaluation is also very familiar.

import numpy as np
from sklearn.metrics import precision_score, recall_score, accuracy_score

preds = model.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])

print("Precision = {}".format(precision_score(Y_test, best_preds, average='macro')))
print("Recall = {}".format(recall_score(Y_test, best_preds, average='macro')))
print("Accuracy = {}".format(accuracy_score(Y_test, best_preds)))

Output:

That's great, we achieved an accuracy above 90%.

As mentioned above, we have a lot of parameters, and choosing the wrong ones might affect your model performance a lot. So the question here is: how to choose the right parameters? Well, it is quite easy: compare model performance with different values. Let's see it.

Setting the optimal hyperparameters of any ML model can be a challenge. So why not let Scikit Learn do it for you? We can combine Scikit Learn's grid search with an XGBoost classifier quite easily:

from sklearn.model_selection import GridSearchCV

clf = xgb.XGBClassifier()
parameters = {
    "eta": [0.05, 0.10, 0.15, 0.20, 0.25, 0.30],
    "max_depth": [3, 4, 5, 6, 8, 10, 12, 15],
    "min_child_weight": [1, 3, 5, 7],
    "gamma": [0.0, 0.1, 0.2, 0.3, 0.4],
    "colsample_bytree": [0.3, 0.4, 0.5, 0.7]
}
grid = GridSearchCV(clf, parameters, n_jobs=4, scoring="neg_log_loss", cv=3)
grid.fit(X_train, Y_train)

Output:

Only do that on a big dataset if you have time to kill: doing a grid search is essentially training an ensemble of decision trees many times over!

Once your XGBoost model is trained, you can dump a human-readable description of it into a text file:

model.dump_model('dump.raw.txt')

So this is how we can create an XGBoost model and choose ideal hyperparameters for it.

Stay tuned, happy learning.

originally posted at: https://blog.knoldus.com/machinex-boosting-performance-with-xgboost/

Follow me on LinkedIn and Twitter for more:
[ { "code": null, "e": 299, "s": 172, "text": "In this blog, we are going to see how XGBoost works and some of the important features of XGBoost with the help of an example." }, { "code": null, "e": 477, "s": 299, "text": "So, many of us heard about tree models and boosting techniques. Let’s put these concepts together and talk about XGBoost, the most powerful machine learning Algorithm out there." }, { "code": null, "e": 528, "s": 477, "text": "XGboost called for eXtreme Gradient Boosted trees." }, { "code": null, "e": 716, "s": 528, "text": "The name XGBoost, though, actually refers to the engineering goal to push the limit of computations resources for boosted tree algorithms. Which is the reason why many people use XGBoost." }, { "code": null, "e": 1094, "s": 716, "text": "Ever since its introduction in 2014, XGBoost has high predictive power and is almost 10 times faster than the other gradient boosting techniques. It also includes a variety of regularization which reduces overfitting and improves overall performance. Hence it is also known as ‘regularized boosting‘ technique. XGBoost has proved its mettle in terms of performance — and speed." }, { "code": null, "e": 1141, "s": 1094, "text": "Okay!!! don’t worry, let's talk about boosting" }, { "code": null, "e": 1383, "s": 1141, "text": "we have encountered a lot of tree-based algorithms like decision trees, in those we used to train our single model on a particular dataset maybe with some parameter tuning. Also in ensemble models, we used to train all the models separately." }, { "code": null, "e": 1738, "s": 1383, "text": "Boosting is also an ensemble technique, which combines many models to give a final one but rather than evaluating all models separately, boosting trains models in sequence. that means, every new model is trained to correct the error of the previous model and the sequence got stopped when there is no further improvement. That is why it is more accurate." }, { "code": null, "e": 1820, "s": 1738, "text": "There is a comprehensive installation guide on the XGBoost documentation website." }, { "code": null, "e": 1877, "s": 1820, "text": "It covers installation for Linux, Mac OS X, and Windows." }, { "code": null, "e": 1940, "s": 1877, "text": "It also covers installation on platforms such as R and Python." }, { "code": null, "e": 2057, "s": 1940, "text": "So, the first thing is to prepare data for our model. we are going to use the iris flower dataset from Scikit Learn." }, { "code": null, "e": 2145, "s": 2057, "text": "Here, we have loaded out dataset from Sklearn in python and also import XGBoost library" }, { "code": null, "e": 2250, "s": 2145, "text": "from sklearn import datasetsimport xgboost as xgbiris = datasets.load_iris()X = iris.datay = iris.target" }, { "code": null, "e": 2441, "s": 2250, "text": "Next, we have to split our dataset into two parts: train and test data. This is an important step to see how well our model performs. So, we are going to split our data into an 80%-20% part." }, { "code": null, "e": 2566, "s": 2441, "text": "from sklearn.model_selection import train_test_splitX_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)" }, { "code": null, "e": 2679, "s": 2566, "text": "Unlike the rest of the algorithms, XGBoost needs our data to be transformed into a specific format i.e. DMatrix." }, { "code": null, "e": 2799, "s": 2679, "text": "DMatrix is an internal data structure used by XGBoost which is optimized for both memory efficiency and training speed." 
}, { "code": null, "e": 2887, "s": 2799, "text": "D_train = xgb.DMatrix(X_train, label=Y_train)D_test = xgb.DMatrix(X_test, label=Y_test)" }, { "code": null, "e": 3016, "s": 2887, "text": "Now we have our NumPy arrays of data converted to DMatix format to feed our model. But before that, we need to define our model." }, { "code": null, "e": 3307, "s": 3016, "text": "The first thing we have to do is to define the parameters of our gradient descent ensemble. we have N number of parameters available for our model but for now, we are going to focus on some of the important. The full list of possible parameters is available on the official XGBoost website." }, { "code": null, "e": 3424, "s": 3307, "text": "param = { 'eta': 0.2, 'max_depth': 4, 'objective': 'multi:softprob', 'num_class': 4 }epochs = 20" }, { "code": null, "e": 3452, "s": 3424, "text": "so here are our parameters:" }, { "code": null, "e": 3513, "s": 3452, "text": "max_depth: maximum depth of the decision trees being trained" }, { "code": null, "e": 3550, "s": 3513, "text": "objective: the loss function is used" }, { "code": null, "e": 3598, "s": 3550, "text": "num_class: the number of classes in the dataset" }, { "code": null, "e": 3621, "s": 3598, "text": "eta: the learning rate" }, { "code": null, "e": 3761, "s": 3621, "text": "As we already know, this kind of model worked in a sequential way, which make it more complex. this technique is very prone to overfitting." }, { "code": null, "e": 4002, "s": 3761, "text": "The eta parameter/ learning rate helps our algorithm to prevent overfitting by not just adding the prediction of new trees to the ensemble with full weight but the eta will be multiplied by the residual being adding to reduce their weights." }, { "code": null, "e": 4077, "s": 4002, "text": "Note: It is advised to have small values of eta in the range of 0.1 to 0.3" }, { "code": null, "e": 4122, "s": 4077, "text": "we have our model defined now, lets train it" }, { "code": null, "e": 4163, "s": 4122, "text": "model = xgb.train(param, D_train, steps)" }, { "code": null, "e": 4257, "s": 4163, "text": "It is a very similar process to Scikit Learn and running an evaluation is also very familiar." }, { "code": null, "e": 4665, "s": 4257, "text": "import numpy as npfrom sklearn.metrics import precision_score, recall_score, accuracy_scorepreds = model.predict(D_test)best_preds = np.asarray([np.argmax(line) for line in preds])print(\"Precision = {}\".format(precision_score(Y_test, best_preds, average='macro')))print(\"Recall = {}\".format(recall_score(Y_test, best_preds, average='macro')))print(\"Accuracy = {}\".format(accuracy_score(Y_test, best_preds)))" }, { "code": null, "e": 4673, "s": 4665, "text": "output:" }, { "code": null, "e": 4718, "s": 4673, "text": "That's great, we achieved accuracy above 90%" }, { "code": null, "e": 4843, "s": 4718, "text": "As mentioned above, we have a lot of parameters and choosing the wrong parameter, might affect your model performance a lot." }, { "code": null, "e": 4904, "s": 4843, "text": "So the question here is: how to choose the right parameters?" }, { "code": null, "e": 4991, "s": 4904, "text": "well, it is too easy, to compare model performance with different values. let's see it" }, { "code": null, "e": 5189, "s": 4991, "text": "Setting the optimal hyperparameters of any ML model can be a challenge. So why not let Scikit Learn do it for you? 
We can combine Scikit Learn’s grid search with an XGBoost classifier quite easily:" }, { "code": null, "e": 5697, "s": 5189, "text": "from sklearn.model_selection import GridSearchCVclf = xgb.XGBClassifier()parameters = { \"eta\" : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] , \"max_depth\" : [ 3, 4, 5, 6, 8, 10, 12, 15], \"min_child_weight\" : [ 1, 3, 5, 7 ], \"gamma\" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ], \"colsample_bytree\" : [ 0.3, 0.4, 0.5 , 0.7 ] }grid = GridSearchCV(clf, parameters, n_jobs=4, scoring=\"neg_log_loss\", cv=3)grid.fit(X_train, Y_train)" }, { "code": null, "e": 5705, "s": 5697, "text": "output:" }, { "code": null, "e": 5853, "s": 5705, "text": "Only do that on a big dataset if you have time to kill — doing a grid search is essentially training an ensemble of decision trees many times over!" }, { "code": null, "e": 5955, "s": 5853, "text": "Once your XGBoost model is trained, you can dump a human-readable description of it into a text file:" }, { "code": null, "e": 5988, "s": 5955, "text": "model.dump_model('dump.raw.txt')" }, { "code": null, "e": 6076, "s": 5988, "text": "So this is how we can create an XGBoost model and choose ideal hyper-parameters for it." }, { "code": null, "e": 6103, "s": 6076, "text": "Stay Tunes, happy learning" }, { "code": null, "e": 6194, "s": 6103, "text": "originally posted at: https://blog.knoldus.com/machinex-boosting-performance-with-xgboost/" } ]
ASP.NET MVC - Actions
ASP.NET MVC action methods are responsible for executing requests and generating responses to them. By default, an action method generates a response in the form of an ActionResult. Actions typically have a one-to-one mapping with user interactions. For example, entering a URL into the browser, clicking on a particular link, submitting a form, etc. Each of these user interactions causes a request to be sent to the server. In each case, the URL of the request includes information that the MVC framework uses to invoke an action method. The one restriction on action methods is that they have to be instance methods, so they cannot be static methods. Also, there are no return value restrictions, so you can return a string, an integer, etc.

Actions are the ultimate request destination in an MVC application, and they use the controller base class. Let's take a look at the request processing.

When a URL arrives, like /Home/index, it is the UrlRoutingModule that inspects and understands that something configured within the routing table knows how to handle that URL.

The UrlRoutingModule puts together the information we've configured in the routing table and hands over control to the MVC route handler.

The MVC route handler passes the controller over to the MvcHandler, which is an HTTP handler.

MvcHandler uses a controller factory to instantiate the controller, and it knows what controller to instantiate because it looks in the RouteData for that controller value.

Once the MvcHandler has a controller, the only thing that MvcHandler knows about is the IController interface, so it simply tells the controller to execute.

When it tells the controller to execute, that behavior is derived from the MVC's Controller base class.
ContentResult Returns a string FileContentResult Returns file content FilePathResult Returns file content FileStreamResult Returns file content EmptyResult Returns nothing JavaScriptResult Returns script for execution JsonResult Returns JSON formatted data RedirectToResult Redirects to the specified URL HttpUnauthorizedResult Returns 403 HTTP Status code RedirectToRouteResult Redirects to different action/different controller action ViewResult Received as a response for view engine PartialViewResult Received as a response for view engine Let’s have a look at a simple example from the previous chapter in which we have created an EmployeeController. using System; using System.Collections.Generic; using System.Linq; using System.Web; using System.Web.Mvc; namespace MVCControllerDemo.Controllers { public class EmployeeController : Controller{ // GET: Employee public ActionResult Search(string name){ var input = Server.HtmlEncode(name); return Content(input); } } } When you request the following URL http://localhost:61465/Employee/Mark, then you will receive the following output as an action. Let us add one another controller. Step 1 − Right-click on Controllers folder and select Add → Controller. It will display the Add Scaffold dialog. Step 2 − Select the MVC 5 Controller – Empty option and click ‘Add’ button. The Add Controller dialog will appear. Step 3 − Set the name to CustomerController and click ‘Add’ button. Now you will see a new C# file ‘CustomerController.cs’ in the Controllers folder, which is open for editing in Visual Studio as well. Similarly, add one more controller with name HomeController. Following is the HomeController.cs class implementation. using System; using System.Collections.Generic; using System.Linq; using System.Web; using System.Web.Mvc; namespace MVCControllerDemo.Controllers { public class HomeController : Controller{ // GET: Home public string Index(){ return "This is Home Controller"; } } } Step 4 − Run this application and you will receive the following output. Step 5 − Add the following code in Customer controller, which we have created above. public string GetAllCustomers(){ return @"<ul> <li>Ali Raza</li> <li>Mark Upston</li> <li>Allan Bommer</li> <li>Greg Jerry</li> </ul>"; } Step 6 − Run this application and request for http://localhost:61465/Customer/GetAllCustomers. You will see the following output. You can also redirect to actions for the same controller or even for a different controller. Following is a simple example in which we will redirect from HomeController to Customer Controller by changing the code in HomeController using the following code. using System; using System.Collections.Generic; using System.Linq; using System.Web; using System.Web.Mvc; namespace MVCControllerDemo.Controllers{ public class HomeController : Controller{ // GET: Home public ActionResult Index(){ return RedirectToAction("GetAllCustomers","Customer"); } } } As you can see, we have used the RedirectToAction() method ActionResult, which takes two parameters, action name and controller name. When you run this application, you will see the default route will redirect it to /Customer/GetAllCustomers 51 Lectures 5.5 hours Anadi Sharma 44 Lectures 4.5 hours Kaushik Roy Chowdhury 42 Lectures 18 hours SHIVPRASAD KOIRALA 57 Lectures 3.5 hours University Code 40 Lectures 2.5 hours University Code 138 Lectures 9 hours Bhrugen Patel Print Add Notes Bookmark this page
[ { "code": null, "e": 2495, "s": 2269, "text": "ASP.NET MVC Action Methods are responsible to execute requests and generate responses to it. By default, it generates a response in the form of ActionResult. Actions typically have a one-to-one mapping with user interactions." }, { "code": null, "e": 2981, "s": 2495, "text": "For example, enter a URL into the browser, click on any particular link, and submit a form, etc. Each of these user interactions causes a request to be sent to the server. In each case, the URL of the request includes information that the MVC framework uses to invoke an action method. The one restriction on action method is that they have to be instance method, so they cannot be static methods. Also there is no return value restrictions. So you can return the string, integer, etc." }, { "code": null, "e": 3132, "s": 2981, "text": "Actions are the ultimate request destination in an MVC application and it uses the controller base class. Let's take a look at the request processing." }, { "code": null, "e": 3308, "s": 3132, "text": "When a URL arrives, like /Home/index, it is the UrlRoutingModule that inspects and understands that something configured within the routing table knows how to handle that URL." }, { "code": null, "e": 3484, "s": 3308, "text": "When a URL arrives, like /Home/index, it is the UrlRoutingModule that inspects and understands that something configured within the routing table knows how to handle that URL." }, { "code": null, "e": 3622, "s": 3484, "text": "The UrlRoutingModule puts together the information we've configured in the routing table and hands over control to the MVC route handler." }, { "code": null, "e": 3760, "s": 3622, "text": "The UrlRoutingModule puts together the information we've configured in the routing table and hands over control to the MVC route handler." }, { "code": null, "e": 3853, "s": 3760, "text": "The MVC route handler passes the controller over to the MvcHandler which is an HTTP handler." }, { "code": null, "e": 3946, "s": 3853, "text": "The MVC route handler passes the controller over to the MvcHandler which is an HTTP handler." }, { "code": null, "e": 4118, "s": 3946, "text": "MvcHandler uses a controller factory to instantiate the controller and it knows what controller to instantiate because it looks in the RouteData for that controller value." }, { "code": null, "e": 4290, "s": 4118, "text": "MvcHandler uses a controller factory to instantiate the controller and it knows what controller to instantiate because it looks in the RouteData for that controller value." }, { "code": null, "e": 4443, "s": 4290, "text": "Once the MvcHandler has a controller, the only thing that MvcHandler knows about is IController Interface, so it simply tells the controller to execute." }, { "code": null, "e": 4596, "s": 4443, "text": "Once the MvcHandler has a controller, the only thing that MvcHandler knows about is IController Interface, so it simply tells the controller to execute." }, { "code": null, "e": 4831, "s": 4596, "text": "When it tells the controller to execute, that's been derived from the MVC's controller base class. The Execute method creates an action invoker and tells that action invoker to go and find a method to invoke, find an action to invoke." }, { "code": null, "e": 5066, "s": 4831, "text": "When it tells the controller to execute, that's been derived from the MVC's controller base class. 
The Execute method creates an action invoker and tells that action invoker to go and find a method to invoke, find an action to invoke." }, { "code": null, "e": 5198, "s": 5066, "text": "The action invoker, again, looks in the RouteData and finds that action parameter that's been passed along from the routing engine." }, { "code": null, "e": 5330, "s": 5198, "text": "The action invoker, again, looks in the RouteData and finds that action parameter that's been passed along from the routing engine." }, { "code": null, "e": 5525, "s": 5330, "text": "Actions basically return different types of action results. The ActionResult class is the base for all action results. Following is the list of different kind of action results and its behavior." }, { "code": null, "e": 5539, "s": 5525, "text": "ContentResult" }, { "code": null, "e": 5556, "s": 5539, "text": "Returns a string" }, { "code": null, "e": 5574, "s": 5556, "text": "FileContentResult" }, { "code": null, "e": 5595, "s": 5574, "text": "Returns file content" }, { "code": null, "e": 5610, "s": 5595, "text": "FilePathResult" }, { "code": null, "e": 5631, "s": 5610, "text": "Returns file content" }, { "code": null, "e": 5648, "s": 5631, "text": "FileStreamResult" }, { "code": null, "e": 5669, "s": 5648, "text": "Returns file content" }, { "code": null, "e": 5681, "s": 5669, "text": "EmptyResult" }, { "code": null, "e": 5697, "s": 5681, "text": "Returns nothing" }, { "code": null, "e": 5714, "s": 5697, "text": "JavaScriptResult" }, { "code": null, "e": 5743, "s": 5714, "text": "Returns script for execution" }, { "code": null, "e": 5754, "s": 5743, "text": "JsonResult" }, { "code": null, "e": 5782, "s": 5754, "text": "Returns JSON formatted data" }, { "code": null, "e": 5799, "s": 5782, "text": "RedirectToResult" }, { "code": null, "e": 5830, "s": 5799, "text": "Redirects to the specified URL" }, { "code": null, "e": 5853, "s": 5830, "text": "HttpUnauthorizedResult" }, { "code": null, "e": 5882, "s": 5853, "text": "Returns 403 HTTP Status code" }, { "code": null, "e": 5904, "s": 5882, "text": "RedirectToRouteResult" }, { "code": null, "e": 5962, "s": 5904, "text": "Redirects to different action/different controller action" }, { "code": null, "e": 5973, "s": 5962, "text": "ViewResult" }, { "code": null, "e": 6012, "s": 5973, "text": "Received as a response for view engine" }, { "code": null, "e": 6030, "s": 6012, "text": "PartialViewResult" }, { "code": null, "e": 6069, "s": 6030, "text": "Received as a response for view engine" }, { "code": null, "e": 6181, "s": 6069, "text": "Let’s have a look at a simple example from the previous chapter in which we have created an EmployeeController." }, { "code": null, "e": 6544, "s": 6181, "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\n\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace MVCControllerDemo.Controllers {\n public class EmployeeController : Controller{\n // GET: Employee\n public ActionResult Search(string name){\n var input = Server.HtmlEncode(name);\n return Content(input);\n }\n }\n}" }, { "code": null, "e": 6674, "s": 6544, "text": "When you request the following URL http://localhost:61465/Employee/Mark, then you will receive the following output as an action." }, { "code": null, "e": 6709, "s": 6674, "text": "Let us add one another controller." }, { "code": null, "e": 6781, "s": 6709, "text": "Step 1 − Right-click on Controllers folder and select Add → Controller." 
}, { "code": null, "e": 6822, "s": 6781, "text": "It will display the Add Scaffold dialog." }, { "code": null, "e": 6898, "s": 6822, "text": "Step 2 − Select the MVC 5 Controller – Empty option and click ‘Add’ button." }, { "code": null, "e": 6937, "s": 6898, "text": "The Add Controller dialog will appear." }, { "code": null, "e": 7005, "s": 6937, "text": "Step 3 − Set the name to CustomerController and click ‘Add’ button." }, { "code": null, "e": 7139, "s": 7005, "text": "Now you will see a new C# file ‘CustomerController.cs’ in the Controllers folder, which is open for editing in Visual Studio as well." }, { "code": null, "e": 7257, "s": 7139, "text": "Similarly, add one more controller with name HomeController. Following is the HomeController.cs class implementation." }, { "code": null, "e": 7559, "s": 7257, "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\n\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace MVCControllerDemo.Controllers {\n public class HomeController : Controller{\n // GET: Home\n public string Index(){\n return \"This is Home Controller\";\n }\n }\n}" }, { "code": null, "e": 7632, "s": 7559, "text": "Step 4 − Run this application and you will receive the following output." }, { "code": null, "e": 7717, "s": 7632, "text": "Step 5 − Add the following code in Customer controller, which we have created above." }, { "code": null, "e": 7885, "s": 7717, "text": "public string GetAllCustomers(){\n return @\"<ul>\n <li>Ali Raza</li>\n <li>Mark Upston</li>\n <li>Allan Bommer</li>\n <li>Greg Jerry</li>\n </ul>\";\n}" }, { "code": null, "e": 8015, "s": 7885, "text": "Step 6 − Run this application and request for http://localhost:61465/Customer/GetAllCustomers. You will see the following output." }, { "code": null, "e": 8108, "s": 8015, "text": "You can also redirect to actions for the same controller or even for a different controller." }, { "code": null, "e": 8272, "s": 8108, "text": "Following is a simple example in which we will redirect from HomeController to Customer Controller by changing the code in HomeController using the following code." }, { "code": null, "e": 8600, "s": 8272, "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\n\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace MVCControllerDemo.Controllers{\n public class HomeController : Controller{\n // GET: Home\n public ActionResult Index(){\n return RedirectToAction(\"GetAllCustomers\",\"Customer\");\n }\n }\n}" }, { "code": null, "e": 8734, "s": 8600, "text": "As you can see, we have used the RedirectToAction() method ActionResult, which takes two parameters, action name and controller name." 
}, { "code": null, "e": 8842, "s": 8734, "text": "When you run this application, you will see the default route will redirect it to /Customer/GetAllCustomers" }, { "code": null, "e": 8877, "s": 8842, "text": "\n 51 Lectures \n 5.5 hours \n" }, { "code": null, "e": 8891, "s": 8877, "text": " Anadi Sharma" }, { "code": null, "e": 8926, "s": 8891, "text": "\n 44 Lectures \n 4.5 hours \n" }, { "code": null, "e": 8949, "s": 8926, "text": " Kaushik Roy Chowdhury" }, { "code": null, "e": 8983, "s": 8949, "text": "\n 42 Lectures \n 18 hours \n" }, { "code": null, "e": 9003, "s": 8983, "text": " SHIVPRASAD KOIRALA" }, { "code": null, "e": 9038, "s": 9003, "text": "\n 57 Lectures \n 3.5 hours \n" }, { "code": null, "e": 9055, "s": 9038, "text": " University Code" }, { "code": null, "e": 9090, "s": 9055, "text": "\n 40 Lectures \n 2.5 hours \n" }, { "code": null, "e": 9107, "s": 9090, "text": " University Code" }, { "code": null, "e": 9141, "s": 9107, "text": "\n 138 Lectures \n 9 hours \n" }, { "code": null, "e": 9156, "s": 9141, "text": " Bhrugen Patel" }, { "code": null, "e": 9163, "s": 9156, "text": " Print" }, { "code": null, "e": 9174, "s": 9163, "text": " Add Notes" } ]
How to Setup Your JupyterLab Project Environment | by Frank Zickert | Quantum Machine Learning | Towards Data Science
Create and Customize Your Containerized and Script-controlled JupyterLab Project Environment in a minute.

This post is part of the book: Hands-On Quantum Machine Learning With Python. Get the first three chapters for free here.

The JupyterLab-Configuration lets you easily create your JupyterLab configuration that runs JupyterLab in a container and automates the whole setup using scripts. A container is a separate environment that encapsulates the libraries you install in it without affecting your host computer. Scripts automate executing all the commands you would normally need to run manually. Because you can review and edit the scripts, you get full control of your configuration at any time.

In this post, you'll see how this JupyterLab configuration works and how you can customize it to cater to your needs.

Dammit Jim. I'm a Data Scientist, not a DevOps Engineer

The list of requirements of a Data Scientist is very long. It contains math and statistics, programming and databases, communication and visualization, domain knowledge, and many more. "Please, don't add DevOps to the list," you think? Ok! How do these processes sound to you?

Create your JupyterLab configuration:

1. The JupyterLab-Configuration lets you easily create your custom configuration
2. Download and unzip your configuration
3. Customize it to your needs (optional)

The following picture shows the JupyterLab configuration in action. Use it with two simple steps:

1. Execute sh {path_to_your_project}/run.sh
2. Open localhost:8888 in a browser

Certainly, some hours, Sir. But ya don't have some hours, so I'll do it for ya in a few minutes.

The remainder of this post gives you an overview of how this JupyterLab configuration works, conceptually. It explains the building blocks and enables you to customize the configuration to your needs, e.g.:

add software packages
add your own Python modules
customize the Jupyter notebook server

In 2018, Project Jupyter launched JupyterLab, an interactive development environment for working with notebooks, code, and data.
Installing these dependencies manually is not a good idea, either. You would have no control over all the things you installed. What if you wanted to work on this project on another computer? How much time and work would it require you to set up the project again? What if someone asked you for all the third-party-libraries you are using? Among all the libraries you installed on your host computer, how would you identify those you are using in this project? A container is a virtual environment that is separated from the host computer. It creates its own runtime environment that can adapt to your specific project needs. It interacts with its host only in specified ways. Any change of the container does not affect your host computer or vice versa. Docker is one of the most prominent and widely used platforms for virtualization of project environments. The following picture depicts the Docker process that contains two steps: (1) build an image from the Dockerfile and (2) run the image in a container. Our configuration automates this process in the run.sh -script. This is a shell script (sh or bash) that runs on the host computer. Likewise, this script is your entry point to start your JupyterLab project. Simply open a terminal and run: sh {path_to_your_project}/run.sh The Dockerfile is the script that tells Docker how to configure the system within the container. During the docker build-step, Docker creates an image of this system. An image is an executable package that includes everything needed to run an application — the code, a runtime environment, libraries, environment variables, and configuration files. While Docker supports building up systems from the scratch, it is best practice to start from an existing image, e.g. an image containing an operating system or even a full configuration. The configuration starts with an existing image. You can find the corresponding Dockerfile in this GitHub-Repository. This image contains the following software and libraries: Ubuntu 18.04 Python 3.7.0 Pip Jupyter and JupyterLab Bash and Jupyter Bash-Kernel Document (pdf) tools (pandoc, texlive-xetex) Build tools (e.g., build-essential,python3-setuptools, checkinstall) Communication tools (openssl,wget,requests,curl) Various Python development libraries If you require further software libraries, the Dockerfile is your place to go. Just add a new line after the FROM statement. This new line needs to start with RUN and contains any shell command you may want to execute, usually something like apt-get install or pip install. For example, you can use pip to install some major data science packages with the following statements: RUN pip install numpyRUN pip install scipyRUN pip install pandas Changes in the Dockerfile become effective during the build-step. If you already started the container, you’ll need to stop it (e.g. use ctrl+c in your terminal) and restart it (sh {path_to_your_project}/run.sh). When you edited your Dockerfile, the build-step may take some time. Docker tries to reuse existing images, it is very fast in subsequent starts when you did not change anything. If you remove commands from your Dockerfile and rerun the run.sh-script, Docker creates a new image of the system. You do not need to uninstall anything from the system. Because the removed command has never been part of this resulting system. This keeps your configuration clean at all times. You can experimentally install libraries without worrying. If you don’t need them, just remove them. 
You will get a system image that never installed them in the first place. The following image depicts how the Dockerfile configures the system: it installs the software as specified in its RUN-commands. The docker run-command executes this image in a container. Further, it defines how the system running within the container connects to the outside world, i.e. the host computer. There are two main types of connections: volumes and ports. A volume is a link between a directory at the host computer and one in the container. These directories synchronize, i.e. any change in the host-directory will affect the directory in the container and vice versa. A port-mapping lets Docker forward any request (e.g. HTTP-requests) made to the host computer’s port to the mapped port of the container. The following image depicts our configuration thus far. The run.sh-script takes care of the Docker build and run steps. Once you execute the script, it creates a running container that connects with your host computer via a file system volume and a port mapping. When you download the files from the Git-Hub-Repository, you will get the following file structure in the .zip file: {path_to_your_project}/├─ config/│ ├─ {projectname}.Dockerfile│ ├─ jupyter_notebook_configuration.py│ └─ run_jupyter.sh├─ libs/│ └─ nbimport.py├─ notebooks/│ └─ ...└─ run.sh The config-folder contains the configuration files of your JupyterLab project. These files configure the Docker-container, install the software packages, and configure the JupyterLab environment. The libs-folder contains the software libraries that are not installed as packages but that you add as files, e.g. Python-modules that you wrote yourself in other projects. The notebooks-folder is the directory where we put the Jupyter-Notebooks. In the Dockerfile, we set environment variables that point to these directories. For the scripts in the configuration uses these environment variables, you can edit them if you like. Just make sure that the path in the variable matches the actual path. ENV MAIN_PATH=/usr/local/bin/{projectname}ENV LIBS_PATH=${MAIN_PATH}/libsENV CONFIG_PATH=${MAIN_PATH}/configENV NOTEBOOK_PATH=${MAIN_PATH}/notebooks In the configuration, we map the current working directory ({path_to_your_project}) to the ${MAIN_PATH}-folder in the container. So, any file you put into this directory is available in your JupyterLab project. Vice versa, any file you add or change within JupyterLab (e.g. Jupyter notebooks) will appear on your host computer. Further, in the EXPOSE command of the Dockerfile, we specify that the configuration provides the JupyterLab port 8888. This port inside the container is mapped to the port of your host computer. The following image depicts how the container connects its file system and port to the host computer. The final command in our Dockerfile is the CMD-command. It tells Docker that this instruction is something you want to execute whenever you start the container. In the configuration, we execute the run_jupyter.sh-script. This script allows us to do some last-minute preparations, like: put the jupyter_notebook_configuration.py file at the location where JupyterLab expects it configure a custom Jupyter-Kernel that automatically loads the nbimport.py Python module The jupyter_notebook_configuration.py lets you configure the Jupyter notebook server, e.g. setting a password to use for web authentication. A list of available options can be found here. The custom Python kernel adds the ${LIBS_PATH} to your Python sys.path. 
This allows you to import any Python module from the ${LIBS_PATH}-folder, e.g. import libs.nbimport. This nbimport.py-module further enables you to import Jupyter-notebooks that are located in the ${NOTEBOOK_PATH}-folder. Whenever you start a Jupyter notebook with a Python kernel, the system does these things automatically for you. Finally, the run_jupyter.sh-script starts JupyterLab. You can now open localhost:8888 in a browser, where 8888 is the port you specified. The following image depicts the complete JupyterLab configuration. The JupyterLab-Configurator lets you easily create your custom configuration. This JupyterLab-Configuration runs JupyterLab in a container. It separates the environment JupyterLab runs in from the host environment. Thus, you can change the JupyterLab environment (e.g. un-/installing packages) without affecting your host computer or any other project. This JupyterLab-Configuration automates the whole setup using scripts. These scripts: Enable you running JupyterLab with a single command (e.g. sh run.sh) Make your project portable: just move or copy the directory to another host computer Reveal what is part of your configuration and allow you to review and edit your configuration Make your configuration part of your sources. You can version-control them like you can version-control your code The GitHub-repository provides the whole source code. Using the JupyterLab configuration is very easy: Execute sh {path_to_your_project}/run.shOpen localhost:8888 in a browser Execute sh {path_to_your_project}/run.sh Open localhost:8888 in a browser NOTE: The ability to run Docker containers is the only requirement of this JupyterLab-Configuration to the host computer. Docker is available on Windows and recently got the ability to run Linux-based containers. Thus, there is no reason, why Jupyterlab should not run on Windows. If you want to try it, you will need to have Docker running and move the docker build and docker run commands of the run.sh to a .cmd file that you can execute on Windows.
[ { "code": null, "e": 278, "s": 172, "text": "Create and Customize Your Containerized and Script-controlled JupyterLab Project Environment in a minute." }, { "code": null, "e": 356, "s": 278, "text": "This post is part of the book: Hands-On Quantum Machine Learning With Python." }, { "code": null, "e": 400, "s": 356, "text": "Get the first three chapters for free here." }, { "code": null, "e": 867, "s": 400, "text": "The JupyterLab-Configuration lets you easily create your JupyterLab configuration that runs JupyterLab in a container and automates the whole setup using scripts. A container is a separate environment that encapsulates the libraries you install in it without affecting your host computer. Scripts automate executing all the commands you would normally need to run manually. For you can review and edit scripts, you get full control of your configuration at any time." }, { "code": null, "e": 985, "s": 867, "text": "In this post, you’ll see how this JupyterLab configuration works and how you can customize it to cater to your needs." }, { "code": null, "e": 1041, "s": 985, "text": "Dammit Jim. I’m a Data Scientist, not a DevOps Engineer" }, { "code": null, "e": 1226, "s": 1041, "text": "The list of requirements of a Data Scientist is very long. It contains math and statistics, programming and databases, communication and visualization, domain knowledge, and many more." }, { "code": null, "e": 1318, "s": 1226, "text": "”Please, don’t add DevOps to the list,” you think? Ok! How do these processes sound to you?" }, { "code": null, "e": 1356, "s": 1318, "text": "Create your JupyterLab configuration:" }, { "code": null, "e": 1508, "s": 1356, "text": "The JupyterLab-Configuration lets you easily create your custom configurationDownload and unzip your configurationCustomize it to your needs (optional)" }, { "code": null, "e": 1586, "s": 1508, "text": "The JupyterLab-Configuration lets you easily create your custom configuration" }, { "code": null, "e": 1624, "s": 1586, "text": "Download and unzip your configuration" }, { "code": null, "e": 1662, "s": 1624, "text": "Customize it to your needs (optional)" }, { "code": null, "e": 1760, "s": 1662, "text": "The following picture shows the JupyterLab configuration in action. Use it with two simple steps:" }, { "code": null, "e": 1833, "s": 1760, "text": "Execute sh {path_to_your_project}/run.shOpen localhost:8888 in a browser" }, { "code": null, "e": 1874, "s": 1833, "text": "Execute sh {path_to_your_project}/run.sh" }, { "code": null, "e": 1907, "s": 1874, "text": "Open localhost:8888 in a browser" }, { "code": null, "e": 2004, "s": 1907, "text": "Certainly, some hours, Sir. But ya don’t have some hours, so I’ll do it for ya in a few minutes." }, { "code": null, "e": 2210, "s": 2004, "text": "The remainder of this post gives you an overview of how this JupyterLab configuration works, conceptually. It explains the building blocks and enables you to customize the configuration to your needs, e.g." }, { "code": null, "e": 2232, "s": 2210, "text": "add software packages" }, { "code": null, "e": 2260, "s": 2232, "text": "add your own Python modules" }, { "code": null, "e": 2298, "s": 2260, "text": "customize the Jupyter notebook server" }, { "code": null, "e": 2624, "s": 2298, "text": "In 2018, Project Jupyter launched JupyterLab — an interactive development environment for working with notebooks, code, and data. 
JupyterLab has full support for Jupyter notebooks and enables you to use text editors, terminals, data file viewers, and other custom components side by side with notebooks in a tabbed work area." }, { "code": null, "e": 2752, "s": 2624, "text": "Provided you run a Unix-based operating system (macOS or Linux), you can install and start JupyterLab with two simple commands:" }, { "code": null, "e": 2796, "s": 2752, "text": "python -m pip install jupyterlabjupyter lab" }, { "code": null, "e": 2985, "s": 2796, "text": "But wait! As simple as the manual setup of JupyterLab may look at first sight as likely it is to not cater to all the things you need to do in your data science project. You may also need:" }, { "code": null, "e": 3033, "s": 2985, "text": "Jupyter-kernels (e.g. bash, Javascript, R, ...)" }, { "code": null, "e": 3078, "s": 3033, "text": "File converters (e.g. Pandoc, Markdown, ...)" }, { "code": null, "e": 3134, "s": 3078, "text": "Libraries (e.g. NumPy, SciPy, TensorFlow, PyTorch, ...)" }, { "code": null, "e": 3175, "s": 3134, "text": "Supporting software (Git, NbSphinx, ...)" }, { "code": null, "e": 3323, "s": 3175, "text": "Installing these dependencies directly on your computer is not a good idea because you would have a hard time ensuring to keep your computer clean." }, { "code": null, "e": 3516, "s": 3323, "text": "What if you had different projects that require different versions of a library? Would you uninstall the old version and install the correct version every time you switch between the projects?" }, { "code": null, "e": 3718, "s": 3516, "text": "What if you do not need a library anymore? Would you remove it right away and reinstall it, if you discover that you need it after all? Or would you wait until you forgot to remove this library at all?" }, { "code": null, "e": 3846, "s": 3718, "text": "Installing these dependencies manually is not a good idea, either. You would have no control over all the things you installed." }, { "code": null, "e": 3983, "s": 3846, "text": "What if you wanted to work on this project on another computer? How much time and work would it require you to set up the project again?" }, { "code": null, "e": 4179, "s": 3983, "text": "What if someone asked you for all the third-party-libraries you are using? Among all the libraries you installed on your host computer, how would you identify those you are using in this project?" }, { "code": null, "e": 4579, "s": 4179, "text": "A container is a virtual environment that is separated from the host computer. It creates its own runtime environment that can adapt to your specific project needs. It interacts with its host only in specified ways. Any change of the container does not affect your host computer or vice versa. Docker is one of the most prominent and widely used platforms for virtualization of project environments." }, { "code": null, "e": 4730, "s": 4579, "text": "The following picture depicts the Docker process that contains two steps: (1) build an image from the Dockerfile and (2) run the image in a container." }, { "code": null, "e": 4970, "s": 4730, "text": "Our configuration automates this process in the run.sh -script. This is a shell script (sh or bash) that runs on the host computer. Likewise, this script is your entry point to start your JupyterLab project. 
Simply open a terminal and run:" }, { "code": null, "e": 5003, "s": 4970, "text": "sh {path_to_your_project}/run.sh" }, { "code": null, "e": 5352, "s": 5003, "text": "The Dockerfile is the script that tells Docker how to configure the system within the container. During the docker build-step, Docker creates an image of this system. An image is an executable package that includes everything needed to run an application — the code, a runtime environment, libraries, environment variables, and configuration files." }, { "code": null, "e": 5540, "s": 5352, "text": "While Docker supports building up systems from the scratch, it is best practice to start from an existing image, e.g. an image containing an operating system or even a full configuration." }, { "code": null, "e": 5716, "s": 5540, "text": "The configuration starts with an existing image. You can find the corresponding Dockerfile in this GitHub-Repository. This image contains the following software and libraries:" }, { "code": null, "e": 5729, "s": 5716, "text": "Ubuntu 18.04" }, { "code": null, "e": 5742, "s": 5729, "text": "Python 3.7.0" }, { "code": null, "e": 5746, "s": 5742, "text": "Pip" }, { "code": null, "e": 5769, "s": 5746, "text": "Jupyter and JupyterLab" }, { "code": null, "e": 5798, "s": 5769, "text": "Bash and Jupyter Bash-Kernel" }, { "code": null, "e": 5843, "s": 5798, "text": "Document (pdf) tools (pandoc, texlive-xetex)" }, { "code": null, "e": 5912, "s": 5843, "text": "Build tools (e.g., build-essential,python3-setuptools, checkinstall)" }, { "code": null, "e": 5961, "s": 5912, "text": "Communication tools (openssl,wget,requests,curl)" }, { "code": null, "e": 5998, "s": 5961, "text": "Various Python development libraries" }, { "code": null, "e": 6376, "s": 5998, "text": "If you require further software libraries, the Dockerfile is your place to go. Just add a new line after the FROM statement. This new line needs to start with RUN and contains any shell command you may want to execute, usually something like apt-get install or pip install. For example, you can use pip to install some major data science packages with the following statements:" }, { "code": null, "e": 6441, "s": 6376, "text": "RUN pip install numpyRUN pip install scipyRUN pip install pandas" }, { "code": null, "e": 6832, "s": 6441, "text": "Changes in the Dockerfile become effective during the build-step. If you already started the container, you’ll need to stop it (e.g. use ctrl+c in your terminal) and restart it (sh {path_to_your_project}/run.sh). When you edited your Dockerfile, the build-step may take some time. Docker tries to reuse existing images, it is very fast in subsequent starts when you did not change anything." }, { "code": null, "e": 7301, "s": 6832, "text": "If you remove commands from your Dockerfile and rerun the run.sh-script, Docker creates a new image of the system. You do not need to uninstall anything from the system. Because the removed command has never been part of this resulting system. This keeps your configuration clean at all times. You can experimentally install libraries without worrying. If you don’t need them, just remove them. You will get a system image that never installed them in the first place." }, { "code": null, "e": 7430, "s": 7301, "text": "The following image depicts how the Dockerfile configures the system: it installs the software as specified in its RUN-commands." }, { "code": null, "e": 7608, "s": 7430, "text": "The docker run-command executes this image in a container. 
Further, it defines how the system running within the container connects to the outside world, i.e. the host computer." }, { "code": null, "e": 8020, "s": 7608, "text": "There are two main types of connections: volumes and ports. A volume is a link between a directory at the host computer and one in the container. These directories synchronize, i.e. any change in the host-directory will affect the directory in the container and vice versa. A port-mapping lets Docker forward any request (e.g. HTTP-requests) made to the host computer’s port to the mapped port of the container." }, { "code": null, "e": 8283, "s": 8020, "text": "The following image depicts our configuration thus far. The run.sh-script takes care of the Docker build and run steps. Once you execute the script, it creates a running container that connects with your host computer via a file system volume and a port mapping." }, { "code": null, "e": 8400, "s": 8283, "text": "When you download the files from the Git-Hub-Repository, you will get the following file structure in the .zip file:" }, { "code": null, "e": 8574, "s": 8400, "text": "{path_to_your_project}/├─ config/│ ├─ {projectname}.Dockerfile│ ├─ jupyter_notebook_configuration.py│ └─ run_jupyter.sh├─ libs/│ └─ nbimport.py├─ notebooks/│ └─ ...└─ run.sh" }, { "code": null, "e": 8770, "s": 8574, "text": "The config-folder contains the configuration files of your JupyterLab project. These files configure the Docker-container, install the software packages, and configure the JupyterLab environment." }, { "code": null, "e": 8943, "s": 8770, "text": "The libs-folder contains the software libraries that are not installed as packages but that you add as files, e.g. Python-modules that you wrote yourself in other projects." }, { "code": null, "e": 9017, "s": 8943, "text": "The notebooks-folder is the directory where we put the Jupyter-Notebooks." }, { "code": null, "e": 9270, "s": 9017, "text": "In the Dockerfile, we set environment variables that point to these directories. For the scripts in the configuration uses these environment variables, you can edit them if you like. Just make sure that the path in the variable matches the actual path." }, { "code": null, "e": 9419, "s": 9270, "text": "ENV MAIN_PATH=/usr/local/bin/{projectname}ENV LIBS_PATH=${MAIN_PATH}/libsENV CONFIG_PATH=${MAIN_PATH}/configENV NOTEBOOK_PATH=${MAIN_PATH}/notebooks" }, { "code": null, "e": 9747, "s": 9419, "text": "In the configuration, we map the current working directory ({path_to_your_project}) to the ${MAIN_PATH}-folder in the container. So, any file you put into this directory is available in your JupyterLab project. Vice versa, any file you add or change within JupyterLab (e.g. Jupyter notebooks) will appear on your host computer." }, { "code": null, "e": 9942, "s": 9747, "text": "Further, in the EXPOSE command of the Dockerfile, we specify that the configuration provides the JupyterLab port 8888. This port inside the container is mapped to the port of your host computer." }, { "code": null, "e": 10044, "s": 9942, "text": "The following image depicts how the container connects its file system and port to the host computer." }, { "code": null, "e": 10330, "s": 10044, "text": "The final command in our Dockerfile is the CMD-command. It tells Docker that this instruction is something you want to execute whenever you start the container. In the configuration, we execute the run_jupyter.sh-script. 
This script allows us to do some last-minute preparations, like:" }, { "code": null, "e": 10421, "s": 10330, "text": "put the jupyter_notebook_configuration.py file at the location where JupyterLab expects it" }, { "code": null, "e": 10510, "s": 10421, "text": "configure a custom Jupyter-Kernel that automatically loads the nbimport.py Python module" }, { "code": null, "e": 10698, "s": 10510, "text": "The jupyter_notebook_configuration.py lets you configure the Jupyter notebook server, e.g. setting a password to use for web authentication. A list of available options can be found here." }, { "code": null, "e": 11104, "s": 10698, "text": "The custom Python kernel adds the ${LIBS_PATH} to your Python sys.path. This allows you to import any Python module from the ${LIBS_PATH}-folder, e.g. import libs.nbimport. This nbimport.py-module further enables you to import Jupyter-notebooks that are located in the ${NOTEBOOK_PATH}-folder. Whenever you start a Jupyter notebook with a Python kernel, the system does these things automatically for you." }, { "code": null, "e": 11242, "s": 11104, "text": "Finally, the run_jupyter.sh-script starts JupyterLab. You can now open localhost:8888 in a browser, where 8888 is the port you specified." }, { "code": null, "e": 11309, "s": 11242, "text": "The following image depicts the complete JupyterLab configuration." }, { "code": null, "e": 11662, "s": 11309, "text": "The JupyterLab-Configurator lets you easily create your custom configuration. This JupyterLab-Configuration runs JupyterLab in a container. It separates the environment JupyterLab runs in from the host environment. Thus, you can change the JupyterLab environment (e.g. un-/installing packages) without affecting your host computer or any other project." }, { "code": null, "e": 11748, "s": 11662, "text": "This JupyterLab-Configuration automates the whole setup using scripts. These scripts:" }, { "code": null, "e": 11817, "s": 11748, "text": "Enable you running JupyterLab with a single command (e.g. sh run.sh)" }, { "code": null, "e": 11902, "s": 11817, "text": "Make your project portable: just move or copy the directory to another host computer" }, { "code": null, "e": 11996, "s": 11902, "text": "Reveal what is part of your configuration and allow you to review and edit your configuration" }, { "code": null, "e": 12110, "s": 11996, "text": "Make your configuration part of your sources. You can version-control them like you can version-control your code" }, { "code": null, "e": 12164, "s": 12110, "text": "The GitHub-repository provides the whole source code." }, { "code": null, "e": 12213, "s": 12164, "text": "Using the JupyterLab configuration is very easy:" }, { "code": null, "e": 12286, "s": 12213, "text": "Execute sh {path_to_your_project}/run.shOpen localhost:8888 in a browser" }, { "code": null, "e": 12327, "s": 12286, "text": "Execute sh {path_to_your_project}/run.sh" }, { "code": null, "e": 12360, "s": 12327, "text": "Open localhost:8888 in a browser" } ]
Optional of() method in Java with examples - GeeksforGeeks
30 Jul, 2019

The of() method of the java.util.Optional class in Java is used to get an instance of this Optional class with the specified value of the specified type.

Syntax:

public static <T> Optional<T> of(T value)

Parameters: This method accepts value as a parameter of type T to create an Optional instance with this value.

Return value: This method returns an instance of this Optional class with the specified value of the specified type.

Exception: This method throws NullPointerException if the specified value is null.

Below programs illustrate the of() method.

Program 1:

// Java program to demonstrate
// Optional.of() method

import java.util.*;

public class GFG {
    public static void main(String[] args)
    {
        // create an Optional with a non-null value
        Optional<Integer> op = Optional.of(9455);

        // print value
        System.out.println("Optional: " + op);
    }
}

Output:

Optional: Optional[9455]

Program 2:

// Java program to demonstrate
// Optional.of() method

import java.util.*;

public class GFG {
    public static void main(String[] args)
    {
        try {
            // of() rejects null: this line throws
            Optional<Integer> op = Optional.of(null);

            // print value
            System.out.println("Optional: " + op);
        }
        catch (Exception e) {
            System.out.println(e);
        }
    }
}

Output:

java.lang.NullPointerException

Reference: https://docs.oracle.com/javase/9/docs/api/java/util/Optional.html#of-T-
Postorder traversal of Binary Tree without recursion and without stack - GeeksforGeeks
01 Jul, 2021

Prerequisite - Inorder/preorder/postorder traversal of tree

Given a binary tree, perform postorder traversal.

We have discussed below methods for postorder traversal:
1) Recursive postorder traversal.
2) Postorder traversal using one stack.
3) Postorder traversal using two stacks.

In this post, a DFS based solution is discussed. We keep track of visited nodes in a hash table.

C++

// CPP program or postorder traversal
#include <bits/stdc++.h>
using namespace std;

/* A binary tree node has data, pointer to left child
and a pointer to right child */
struct Node {
    int data;
    struct Node *left, *right;
};

/* Prints postorder traversal using a hash table
of visited nodes. */
void postorder(struct Node* head)
{
    struct Node* temp = head;
    unordered_set<Node*> visited;
    while (temp && visited.find(temp) == visited.end()) {

        // Visit left subtree first
        if (temp->left
            && visited.find(temp->left) == visited.end())
            temp = temp->left;

        // Then visit right subtree
        else if (temp->right
                 && visited.find(temp->right) == visited.end())
            temp = temp->right;

        // Print node and restart from head
        else {
            printf("%d ", temp->data);
            visited.insert(temp);
            temp = head;
        }
    }
}

/* Helper function that allocates a new node with the
given data and NULL left and right pointers. */
struct Node* newNode(int data)
{
    struct Node* node = new Node;
    node->data = data;
    node->left = NULL;
    node->right = NULL;
    return (node);
}

/* Driver program to test above functions*/
int main()
{
    struct Node* root = newNode(8);
    root->left = newNode(3);
    root->right = newNode(10);
    root->left->left = newNode(1);
    root->left->right = newNode(6);
    root->left->right->left = newNode(4);
    root->left->right->right = newNode(7);
    root->right->right = newNode(14);
    root->right->right->left = newNode(13);
    postorder(root);
    return 0;
}

Java

// JAVA program or postorder traversal
import java.util.*;

/* A binary tree node has data, pointer to left child
and a pointer to right child */
class Node {
    int data;
    Node left, right;
    Node(int data)
    {
        this.data = data;
        this.left = this.right = null;
    }
}

class GFG {
    Node root;

    /* Prints postorder traversal using a set
    of visited nodes. */
    void postorder(Node head)
    {
        Node temp = head;
        HashSet<Node> visited = new HashSet<>();
        while (temp != null && !visited.contains(temp)) {

            // Visit left subtree first
            if (temp.left != null
                && !visited.contains(temp.left))
                temp = temp.left;

            // Then visit right subtree
            else if (temp.right != null
                     && !visited.contains(temp.right))
                temp = temp.right;

            // Print node and restart from head
            else {
                System.out.printf("%d ", temp.data);
                visited.add(temp);
                temp = head;
            }
        }
    }

    /* Driver program to test above functions*/
    public static void main(String[] args)
    {
        GFG gfg = new GFG();
        gfg.root = new Node(8);
        gfg.root.left = new Node(3);
        gfg.root.right = new Node(10);
        gfg.root.left.left = new Node(1);
        gfg.root.left.right = new Node(6);
        gfg.root.left.right.left = new Node(4);
        gfg.root.left.right.right = new Node(7);
        gfg.root.right.right = new Node(14);
        gfg.root.right.right.left = new Node(13);
        gfg.postorder(gfg.root);
    }
}

// This code is contributed by Rajput-Ji

Python

# Python program or postorder traversal

''' A binary tree node has data, pointer to left child
and a pointer to right child '''
class newNode:

    # Constructor to create a newNode
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

''' Prints postorder traversal using a set
of visited nodes. '''
def postorder(head):
    temp = head
    visited = set()
    while (temp and temp not in visited):

        # Visit left subtree first
        if (temp.left and temp.left not in visited):
            temp = temp.left

        # Then visit right subtree
        elif (temp.right and temp.right not in visited):
            temp = temp.right

        # Print node and restart from head
        else:
            print(temp.data, end = " ")
            visited.add(temp)
            temp = head

''' Driver program to test above functions'''
if __name__ == '__main__':
    root = newNode(8)
    root.left = newNode(3)
    root.right = newNode(10)
    root.left.left = newNode(1)
    root.left.right = newNode(6)
    root.left.right.left = newNode(4)
    root.left.right.right = newNode(7)
    root.right.right = newNode(14)
    root.right.right.left = newNode(13)
    postorder(root)

# This code is contributed by
# SHUBHAMSINGH10

C#

// C# program or postorder traversal
using System;
using System.Collections.Generic;

/* A binary tree node has data, pointer to left child
and a pointer to right child */
public class Node {
    public int data;
    public Node left, right;
    public Node(int data)
    {
        this.data = data;
        this.left = this.right = null;
    }
}

class GFG {
    Node root;

    /* Prints postorder traversal using a set
    of visited nodes. */
    void postorder(Node head)
    {
        Node temp = head;
        HashSet<Node> visited = new HashSet<Node>();
        while (temp != null && !visited.Contains(temp)) {

            // Visit left subtree first
            if (temp.left != null
                && !visited.Contains(temp.left))
                temp = temp.left;

            // Then visit right subtree
            else if (temp.right != null
                     && !visited.Contains(temp.right))
                temp = temp.right;

            // Print node and restart from head
            else {
                Console.Write(temp.data + " ");
                visited.Add(temp);
                temp = head;
            }
        }
    }

    /* Driver code*/
    public static void Main(String[] args)
    {
        GFG gfg = new GFG();
        gfg.root = new Node(8);
        gfg.root.left = new Node(3);
        gfg.root.right = new Node(10);
        gfg.root.left.left = new Node(1);
        gfg.root.left.right = new Node(6);
        gfg.root.left.right.left = new Node(4);
        gfg.root.left.right.right = new Node(7);
        gfg.root.right.right = new Node(14);
        gfg.root.right.right.left = new Node(13);
        gfg.postorder(gfg.root);
    }
}

// This code is contributed by Rajput-Ji

Javascript

<script>
// JavaScript program or postorder traversal

/* A binary tree node has data, pointer to left child
and a pointer to right child */
class Node {
    constructor(data)
    {
        this.data = data;
        this.left = null;
        this.right = null;
    }
}

var root = null;

/* Prints postorder traversal using a set
of visited nodes. */
function postorder(head)
{
    var temp = head;
    var visited = new Set();
    while (temp != null && !visited.has(temp)) {

        // Visit left subtree first
        if (temp.left != null && !visited.has(temp.left))
            temp = temp.left;

        // Then visit right subtree
        else if (temp.right != null && !visited.has(temp.right))
            temp = temp.right;

        // Print node and restart from head
        else {
            document.write(temp.data + " ");
            visited.add(temp);
            temp = head;
        }
    }
}

/* Driver code*/
root = new Node(8);
root.left = new Node(3);
root.right = new Node(10);
root.left.left = new Node(1);
root.left.right = new Node(6);
root.left.right.left = new Node(4);
root.left.right.right = new Node(7);
root.right.right = new Node(14);
root.right.right.left = new Node(13);
postorder(root);
</script>

Output:

1 4 7 6 3 13 14 10 8

Alternate Solution: We can keep a visited flag with every node instead of a separate hash table.

C++

// CPP program or postorder traversal
#include <bits/stdc++.h>
using namespace std;

/* A binary tree node has data, pointers to left and
right children, and a visited flag */
struct Node {
    int data;
    struct Node *left, *right;
    bool visited;
};

void postorder(struct Node* head)
{
    struct Node* temp = head;
    while (temp && temp->visited == false) {

        // Visit left subtree first
        if (temp->left && temp->left->visited == false)
            temp = temp->left;

        // Then visit right subtree
        else if (temp->right && temp->right->visited == false)
            temp = temp->right;

        // Print node and restart from head
        else {
            printf("%d ", temp->data);
            temp->visited = true;
            temp = head;
        }
    }
}

struct Node* newNode(int data)
{
    struct Node* node = new Node;
    node->data = data;
    node->left = NULL;
    node->right = NULL;
    node->visited = false;
    return (node);
}

/* Driver program to test above functions*/
int main()
{
    struct Node* root = newNode(8);
    root->left = newNode(3);
    root->right = newNode(10);
    root->left->left = newNode(1);
    root->left->right = newNode(6);
    root->left->right->left = newNode(4);
    root->left->right->right = newNode(7);
    root->right->right = newNode(14);
    root->right->right->left = newNode(13);
    postorder(root);
    return 0;
}

Java

// Java program or postorder traversal
class GFG {

    /* A binary tree node has data, pointers to left and
    right children, and a visited flag */
    static class Node {
        int data;
        Node left, right;
        boolean visited;
    }

    static void postorder(Node head)
    {
        Node temp = head;
        while (temp != null && temp.visited == false) {

            // Visit left subtree first
            if (temp.left != null && temp.left.visited == false)
                temp = temp.left;

            // Then visit right subtree
            else if (temp.right != null && temp.right.visited == false)
                temp = temp.right;

            // Print node and restart from head
            else {
                System.out.printf("%d ", temp.data);
                temp.visited = true;
                temp = head;
            }
        }
    }

    static Node newNode(int data)
    {
        Node node = new Node();
        node.data = data;
        node.left = null;
        node.right = null;
        node.visited = false;
        return (node);
    }

    /* Driver code*/
    public static void main(String[] args)
    {
        Node root = newNode(8);
        root.left = newNode(3);
        root.right = newNode(10);
        root.left.left = newNode(1);
        root.left.right = newNode(6);
        root.left.right.left = newNode(4);
        root.left.right.right = newNode(7);
        root.right.right = newNode(14);
        root.right.right.left = newNode(13);
        postorder(root);
    }
}

// This code is contributed by Arnab Kundu

Python3

"""Python3 program or postorder traversal """

# A Binary Tree Node
# Utility function to create a new tree node
class newNode:

    # Constructor to create a newNode
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.visited = False

def postorder(head):
    temp = head
    while (temp and temp.visited == False):

        # Visit left subtree first
        if (temp.left and temp.left.visited == False):
            temp = temp.left

        # Then visit right subtree
        elif (temp.right and temp.right.visited == False):
            temp = temp.right

        # Print node and restart from head
        else:
            print(temp.data, end = " ")
            temp.visited = True
            temp = head

# Driver Code
if __name__ == '__main__':
    root = newNode(8)
    root.left = newNode(3)
    root.right = newNode(10)
    root.left.left = newNode(1)
    root.left.right = newNode(6)
    root.left.right.left = newNode(4)
    root.left.right.right = newNode(7)
    root.right.right = newNode(14)
    root.right.right.left = newNode(13)
    postorder(root)

# This code is contributed by
# SHUBHAMSINGH10

C#

// C# program or postorder traversal
using System;

class GFG {

    /* A binary tree node has data, pointers to left and
    right children, and a visited flag */
    class Node {
        public int data;
        public Node left, right;
        public bool visited;
    }

    static void postorder(Node head)
    {
        Node temp = head;
        while (temp != null && temp.visited == false) {

            // Visit left subtree first
            if (temp.left != null && temp.left.visited == false)
                temp = temp.left;

            // Then visit right subtree
            else if (temp.right != null && temp.right.visited == false)
                temp = temp.right;

            // Print node and restart from head
            else {
                Console.Write("{0} ", temp.data);
                temp.visited = true;
                temp = head;
            }
        }
    }

    static Node newNode(int data)
    {
        Node node = new Node();
        node.data = data;
        node.left = null;
        node.right = null;
        node.visited = false;
        return (node);
    }

    /* Driver code*/
    public static void Main(String[] args)
    {
        Node root = newNode(8);
        root.left = newNode(3);
        root.right = newNode(10);
        root.left.left = newNode(1);
        root.left.right = newNode(6);
        root.left.right.left = newNode(4);
        root.left.right.right = newNode(7);
        root.right.right = newNode(14);
        root.right.right.left = newNode(13);
        postorder(root);
    }
}

// This code is contributed by 29AjayKumar

Javascript

<script>
// JavaScript program or postorder traversal

/* A binary tree node has data, pointers to left and
right children, and a visited flag */
class Node {
    constructor()
    {
        this.data;
        this.left;
        this.right;
        this.visited;
    }
}

function postorder(head)
{
    let temp = head;
    while (temp != null && temp.visited == false) {

        // Visit left subtree first
        if (temp.left != null && temp.left.visited == false)
            temp = temp.left;

        // Then visit right subtree
        else if (temp.right != null && temp.right.visited == false)
            temp = temp.right;

        // Print node and restart from head
        else {
            document.write(temp.data + " ");
            temp.visited = true;
            temp = head;
        }
    }
}

function newNode(data)
{
    let node = new Node();
    node.data = data;
    node.left = null;
    node.right = null;
    node.visited = false;
    return (node);
}

let root = newNode(8);
root.left = newNode(3);
root.right = newNode(10);
root.left.left = newNode(1);
root.left.right = newNode(6);
root.left.right.left = newNode(4);
root.left.right.right = newNode(7);
root.right.right = newNode(14);
root.right.right.left = newNode(13);
postorder(root);
</script>

Output:

1 4 7 6 3 13 14 10 8

Time complexity of the above solutions is O(n^2) in the worst case, because we move the pointer back to the head after visiting every node. An alternate solution using unordered_map stores each node's parent, so we do not have to move the pointer back to the head; its time complexity is O(n).

C++

// CPP program or postorder traversal
#include <bits/stdc++.h>
using namespace std;

/* A binary tree node has data, pointer to left child
and a pointer to right child */
struct Node {
    int data;
    struct Node *left, *right;
    bool visited;
};

void postorder(Node* root)
{
    Node* n = root;
    unordered_map<Node*, Node*> parentMap;
    parentMap.insert(pair<Node*, Node*>(root, nullptr));
    while (n) {

        // Discover and descend into an unvisited left child
        if (n->left
            && parentMap.find(n->left) == parentMap.end()) {
            parentMap.insert(pair<Node*, Node*>(n->left, n));
            n = n->left;
        }

        // Discover and descend into an unvisited right child
        else if (n->right
                 && parentMap.find(n->right) == parentMap.end()) {
            parentMap.insert(pair<Node*, Node*>(n->right, n));
            n = n->right;
        }

        // Print node and climb to its parent
        else {
            cout << n->data << " ";
            n = (parentMap.find(n))->second;
        }
    }
}

struct Node* newNode(int data)
{
    struct Node* node = new Node;
    node->data = data;
    node->left = NULL;
    node->right = NULL;
    node->visited = false;
    return (node);
}

/* Driver program to test above functions*/
int main()
{
    struct Node* root = newNode(8);
    root->left = newNode(3);
    root->right = newNode(10);
    root->left->left = newNode(1);
    root->left->right = newNode(6);
    root->left->right->left = newNode(4);
    root->left->right->right = newNode(7);
    root->right->right = newNode(14);
    root->right->right->left = newNode(13);
    postorder(root);
    return 0;
}

Output:

1 4 7 6 3 13 14 10 8
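The parent-map variant above is given only in C++; the following is a short Python sketch of the same idea, added here for illustration (the function and variable names are our own, not from the original code):

Python3

def postorder_parent_map(root):
    # Discovery is recorded by storing each node's parent; after printing a
    # node we follow the parent link instead of restarting from the head,
    # which keeps the traversal O(n).
    parent = {root: None}
    n = root
    while n:
        if n.left and n.left not in parent:
            parent[n.left] = n
            n = n.left
        elif n.right and n.right not in parent:
            parent[n.right] = n
            n = n.right
        else:
            print(n.data, end=" ")
            n = parent[n]

Calling it on the tree built in the driver programs above prints the same output: 1 4 7 6 3 13 14 10 8.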
Public vs Protected in C++ with Examples
29 Oct, 2019

Public

All the class members declared under public will be available to everyone. The data members and member functions declared public can be accessed by other classes too. The public members of a class can be accessed from anywhere in the program using the direct member access operator (.) with the object of that class.

Example:

// C++ program to demonstrate public
// access modifier
#include <iostream>
using namespace std;

// class definition
class Circle {
public:
    double radius;

    double compute_area()
    {
        return 3.14 * radius * radius;
    }
};

// main function
int main()
{
    Circle obj;

    // accessing public data member outside class
    obj.radius = 5.5;

    cout << "Radius is: " << obj.radius << "\n";
    cout << "Area is: " << obj.compute_area();
    return 0;
}

Output:

Radius is: 5.5
Area is: 94.985

In the above program, the data member radius is public, so we are allowed to access it outside the class.

Protected

The protected access modifier is similar to the private access modifier; the difference is that class members declared as protected are inaccessible outside the class, but they can be accessed by any subclass (derived class) of that class.

Example:

// C++ program to demonstrate
// protected access modifier
#include <bits/stdc++.h>
using namespace std;

// base class
class Parent {
    // protected data members
protected:
    int id_protected;
};

// sub class or derived class
class Child : public Parent {
public:
    void setId(int id)
    {
        // Child class is able to access the inherited
        // protected data members of base class
        id_protected = id;
    }

    void displayId()
    {
        cout << "id_protected is: " << id_protected << endl;
    }
};

// main function
int main()
{
    Child obj1;

    // member function of the derived class can
    // access the protected data members of the base class
    obj1.setId(81);
    obj1.displayId();
    return 0;
}

Output:

id_protected is: 81
Python | Pandas Timestamp.year
08 Jan, 2019

Python is a great language for doing data analysis, primarily because of the fantastic ecosystem of data-centric Python packages. Pandas is one of those packages and makes importing and analyzing data much easier.

The Pandas Timestamp.year attribute returns the year in which the date in the given Timestamp object lies.

Syntax: Timestamp.year

Parameters: None

Return: year

Example #1: Use the Timestamp.year attribute to find the year in which the date present in the given Timestamp object lies.

# importing pandas as pd
import pandas as pd

# Create the Timestamp object
ts = pd.Timestamp(year = 2011, month = 11, day = 21,
                  hour = 10, second = 49, tz = 'US/Central')

# Print the Timestamp object
print(ts)

Output:

2011-11-21 10:00:49-06:00

Now we will use the Timestamp.year attribute to find the year value of the date.

# return the year
ts.year

Output:

2011

As we can see in the output, the Timestamp.year attribute has returned 2011, indicating that the year value of the date in the given Timestamp object is 2011.

Example #2: Use the Timestamp.year attribute to find the year in which the date present in the given Timestamp object lies.

# importing pandas as pd
import pandas as pd

# Create the Timestamp object
ts = pd.Timestamp(year = 2009, month = 5, day = 31,
                  hour = 4, second = 49, tz = 'Europe/Berlin')

# Print the Timestamp object
print(ts)

Output:

2009-05-31 04:00:49+02:00

Now we will use the Timestamp.year attribute to find the year value of the date.

# return the year
ts.year

Output:

2009

As we can see in the output, the Timestamp.year attribute has returned 2009, indicating that the year value of the date in the given Timestamp object is 2009.
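As one further illustration (an addition, not part of the original examples), the attribute works the same way on a Timestamp parsed from a string:

# importing pandas as pd
import pandas as pd

# Timestamp.year on a Timestamp parsed from an ISO-8601 string
ts = pd.Timestamp("2023-03-14 09:26:53")
print(ts.year)

Output:

2023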
Meta Class in Models – Django
Django is a high-level Python Web framework that encourages rapid development and clean, pragmatic design. Built by experienced developers, it takes care of much of the hassle of Web development, so you can focus on writing your app without needing to reinvent the wheel. It's free and open source. Do also go through Django Models prior to moving ahead.

Model Meta is the inner class of your model class. It is used to change the behavior of your model, for example its ordering options, its verbose_name, and a lot of other options. Adding a Meta class to your model is completely optional. In order to use model meta you have to add class Meta in your model as shown below:

class student(models.Model):
    class Meta:
        options........

Model Meta has a lot of options that you can give your model in its internal class Meta.

1. abstract

If abstract = True, this model will be an abstract base class.

class student(models.Model):
    class Meta:
        abstract = True

2. app_label

If a model is defined outside of applications in INSTALLED_APPS, it must declare which app it belongs to:

class student(models.Model):
    class Meta:
        app_label = 'myapp'  # add app name here

3. verbose_name

verbose_name is a human-readable name for your model.

class student(models.Model):
    class Meta:
        verbose_name = "stu"  # add verbose_name here

4. ordering

ordering changes the default order in which query results for your model are returned. It takes a list of field names; prefixing a field name with '-' sorts in descending order.

class student(models.Model):
    class Meta:
        ordering = ['-id']

Here ['-id'] returns the rows in descending order of id.

5. proxy

If we add proxy = True, a model which subclasses another model will be treated as a proxy model.

class Teacher(models.Model):
    pass

class Student(Teacher):
    class Meta:
        proxy = True

This is how we can make a proxy model.

6. permissions

Extra permissions to enter into the permissions table when creating this object. Add, change, delete and view permissions are automatically created for each model.

class student(models.Model):
    class Meta:
        permissions = []

You can add extra permissions inside the list.

7. db_table

We can overwrite the table name by using db_table in the Meta class.

class student(models.Model):
    class Meta:
        db_table = 'X'

This will change the table name to X.

8. get_latest_by

It names the field used by the latest() and earliest() queryset methods, typically a DateField, DateTimeField, or IntegerField.

class student(models.Model):
    class Meta:
        get_latest_by = "order_date"

latest() will return the object with the greatest order_date.
[ { "code": null, "e": 28, "s": 0, "text": "\n27 May, 2022" }, { "code": null, "e": 384, "s": 28, "text": "Django is a high-level Python Web framework that encourages rapid development and clean, pragmatic design. Built by experienced developers, it takes care of much of the hassle of Web development, so you can focus on writing your app without needing to reinvent the wheel. It’s free and open source. Do also go through Django Models prior to moving ahead. " }, { "code": null, "e": 744, "s": 384, "text": "Model Meta is basically the inner class of your model class. Model Meta is basically used to change the behavior of your model fields like changing order options,verbose_name, and a lot of other options. It’s completely optional to add a Meta class to your model. In order to use model meta you have to add class Meta in your model as shown below as follows: " }, { "code": null, "e": 813, "s": 744, "text": "class student(models.Model):\n class Meta:\n options........" }, { "code": null, "e": 901, "s": 813, "text": "Model Meta has a lot of options that you can give your model in its internal class meta" }, { "code": null, "e": 913, "s": 901, "text": "1. abstract" }, { "code": null, "e": 976, "s": 913, "text": "If abstract = True, this model will be an abstract base class" }, { "code": null, "e": 984, "s": 976, "text": "Python3" }, { "code": "class student(models.Model): class Meta: abstract = True", "e": 1047, "s": 984, "text": null }, { "code": null, "e": 1060, "s": 1047, "text": "2. app_label" }, { "code": null, "e": 1167, "s": 1060, "text": "If a model is defined outside of applications in INSTALLED_APPS, it must declare which app it belongs to:" }, { "code": null, "e": 1175, "s": 1167, "text": "Python3" }, { "code": "class student(models.Model): class Meta: app_label = 'myapp' # add app name here", "e": 1262, "s": 1175, "text": null }, { "code": null, "e": 1278, "s": 1262, "text": "3. verbose_name" }, { "code": null, "e": 1341, "s": 1278, "text": "verbose_name is basically a human-readable name for your model" }, { "code": null, "e": 1349, "s": 1341, "text": "Python3" }, { "code": "class student(models.Model): class Meta: verbose_name = \"stu\" # add verbose_name here", "e": 1442, "s": 1349, "text": null }, { "code": null, "e": 1455, "s": 1442, "text": "4. ordering " }, { "code": null, "e": 1524, "s": 1455, "text": "Ordering is basically used to change the order of your model fields." }, { "code": null, "e": 1532, "s": 1524, "text": "Python3" }, { "code": "class student(models.Model): class Meta: ordering = [-1]", "e": 1595, "s": 1532, "text": null }, { "code": null, "e": 1664, "s": 1595, "text": "Add ordering like this [-1] it changes the order in descending order" }, { "code": null, "e": 1673, "s": 1664, "text": "5. proxy" }, { "code": null, "e": 1768, "s": 1673, "text": "If we add proxy = True a model which subclasses another model will be treated as a proxy model" }, { "code": null, "e": 1776, "s": 1768, "text": "Python3" }, { "code": "class Teacher(models.Model): pass class Student(Teacher): class Meta: proxy = True", "e": 1866, "s": 1776, "text": null }, { "code": null, "e": 1906, "s": 1866, "text": "This is how can we make a proxy model." }, { "code": null, "e": 1922, "s": 1906, "text": "6. permissions " }, { "code": null, "e": 2086, "s": 1922, "text": "Extra permissions to enter into the permissions table when creating this object. Add, change, delete and view permissions are automatically created for each model." 
}, { "code": null, "e": 2094, "s": 2086, "text": "Python3" }, { "code": "class student(models.Model): class Meta: permissions = [] ", "e": 2162, "s": 2094, "text": null }, { "code": null, "e": 2208, "s": 2162, "text": "You can add extra permission inside the list." }, { "code": null, "e": 2220, "s": 2208, "text": "7. db_table" }, { "code": null, "e": 2285, "s": 2220, "text": "We can overwrite the table name by using db_table in meta class." }, { "code": null, "e": 2293, "s": 2285, "text": "Python3" }, { "code": "class student(models.Model): class Meta: db_table = 'X'", "e": 2355, "s": 2293, "text": null }, { "code": null, "e": 2393, "s": 2355, "text": "This will change the table name to X." }, { "code": null, "e": 2410, "s": 2393, "text": "8. get_latest_by" }, { "code": null, "e": 2540, "s": 2410, "text": "It returns the latest object in the table based on the given field, used for typically DateField, DateTimeField, or IntegerField." }, { "code": null, "e": 2548, "s": 2540, "text": "Python3" }, { "code": "class student(models.Model): class Meta: get_latest_by = \"order_date\"", "e": 2624, "s": 2548, "text": null }, { "code": null, "e": 2667, "s": 2624, "text": "Return the latest by ascending order_date." }, { "code": null, "e": 2675, "s": 2667, "text": "isha307" }, { "code": null, "e": 2689, "s": 2675, "text": "Python Django" }, { "code": null, "e": 2706, "s": 2689, "text": "Python Framework" }, { "code": null, "e": 2713, "s": 2706, "text": "Python" } ]
INSERT ON DUPLICATE KEY UPDATE in MySQL
INSERT ON DUPLICATE KEY UPDATE statement is available in MySQL as an extension to the INSERT statement. Whenever a new row is inserted into a table and the row causes a duplicate entry in the UNIQUE index or PRIMARY KEY, MySQL will throw an error.

When the ON DUPLICATE KEY UPDATE option is defined in the INSERT statement, the existing rows are updated with the new values instead.

Syntax :

INSERT INTO table (column_names)
VALUES (values)
ON DUPLICATE KEY UPDATE
    col1 = val1,
    col2 = val2 ;

Along with the INSERT statement, the ON DUPLICATE KEY UPDATE clause defines a list of column & value assignments to apply in case of a duplicate.

How it works :
The statement first attempts to insert a new row into the table. When a duplicate entry causes an error, MySQL instead updates the existing row with the values specified in the ON DUPLICATE KEY UPDATE clause.

Example –
Let us create a table named 'geek_demo' as follows.

CREATE TABLE geek_demo
(
id INT AUTO_INCREMENT PRIMARY KEY,
name VARCHAR(100)
);

Inserting data into geek_demo :

INSERT INTO geek_demo (name)
VALUES('Neha'), ('Nisha'), ('Sara') ;

Reading data from table :

SELECT id, name
FROM geek_demo;

Output :

id | name
1  | Neha
2  | Nisha
3  | Sara

Now, one row will be inserted into the table.

INSERT INTO geek_demo(name)
VALUES ('Sneha')
ON DUPLICATE KEY UPDATE name = 'Sneha';

As there was no duplicate, MySQL inserts a new row into the table. The output of the above statement is similar to the output of the statement below.

INSERT INTO geek_demo(name)
VALUES ('Sneha');

Reading data :

SELECT id, name
FROM geek_demo;

Output :

id | name
1  | Neha
2  | Nisha
3  | Sara
4  | Sneha

Let us insert a row with a duplicate value in the id column as follows.

INSERT INTO geek_demo (id, name)
VALUES (4, 'Mona')
ON DUPLICATE KEY UPDATE name = 'Mona';

Below is the output :

2 row(s) affected

MySQL reports two affected rows here because, with ON DUPLICATE KEY UPDATE, the affected-rows count is 1 for a plain insert and 2 when an existing row is updated. Because a row with id 4 already exists in the geek_demo table, the statement updates the name from Sneha to Mona.

Reading data :

SELECT id, name
FROM geek_demo;

Output :

id | name
1  | Neha
2  | Nisha
3  | Sara
4  | Mona
[ { "code": null, "e": 28, "s": 0, "text": "\n23 Dec, 2020" }, { "code": null, "e": 280, "s": 28, "text": "INSERT ON DUPLICATE KEY UPDATE statement is available in MySQL as an extension to the INSERT statement. Whenever a new row is inserted into a table in case the row causes a duplicate entry in the UNIQUE index or PRIMARY KEY, MySQL will throw an error." }, { "code": null, "e": 415, "s": 280, "text": "When the ON DUPLICATE KEY UPDATE option is defined in the INSERT statement, the existing rows are updated with the new values instead." }, { "code": null, "e": 424, "s": 415, "text": "Syntax :" }, { "code": null, "e": 531, "s": 424, "text": "INSERT INTO table (column_names)\nVALUES (values)\nON DUPLICATE KEY UPDATE\n col1 = val1, \n col2 = val2 ;\n" }, { "code": null, "e": 665, "s": 531, "text": "Along with the INSERT statement, ON DUPLICATE KEY UPDATE statement defines a list of column & value assignments in case of duplicate." }, { "code": null, "e": 889, "s": 665, "text": "How it works :The statement first attempts to insert a new row into the table. When a duplicate entry is their error occurs, MySQL will update the existing row with the value specified in the ON DUPLICATE KEY UPDATE clause." }, { "code": null, "e": 950, "s": 889, "text": "Example –Let us create a table named ‘geek_demo’ as follows." }, { "code": null, "e": 1034, "s": 950, "text": "CREATE TABLE geek_demo\n(\nid INT AUTO_INCREMENT PRIMARY KEY, \nname VARCHAR(100) \n);\n" }, { "code": null, "e": 1066, "s": 1034, "text": "Inserting data into geek_demo :" }, { "code": null, "e": 1134, "s": 1066, "text": "INSERT INTO geek_demo (name)\nVALUES('Neha'), ('Nisha'), ('Sara') ;\n" }, { "code": null, "e": 1160, "s": 1134, "text": "Reading data from table :" }, { "code": null, "e": 1192, "s": 1160, "text": "SELECT id, name\nFROM geek_demo;" }, { "code": null, "e": 1201, "s": 1192, "text": "Output :" }, { "code": null, "e": 1242, "s": 1201, "text": "Now, one row will insert into the table." }, { "code": null, "e": 1330, "s": 1242, "text": "INSERT INTO geek_demo(name) \nVALUES ('Sneha') \nON DUPLICATE KEY UPDATE name = 'Sneha';\n" }, { "code": null, "e": 1484, "s": 1330, "text": "As there was no duplicate, MySQL inserts a new row into the table. The output of the above statement is similar to the output below statement as follows." }, { "code": null, "e": 1531, "s": 1484, "text": "INSERT INTO geek_demo(name)\nVALUES ('Sneha');\n" }, { "code": null, "e": 1546, "s": 1531, "text": "Reading data :" }, { "code": null, "e": 1580, "s": 1546, "text": "SELECT id, name\nFROM geek_demo; \n" }, { "code": null, "e": 1589, "s": 1580, "text": "Output :" }, { "code": null, "e": 1661, "s": 1589, "text": "Let us insert a row with a duplicate value in the id column as follows." 
}, { "code": null, "e": 1754, "s": 1661, "text": "INSERT INTO geek_demo (id, name) \nVALUES (4, 'Mona')\nON DUPLICATE KEY UPDATE name = 'Mona';\n" }, { "code": null, "e": 1776, "s": 1754, "text": "Below is the output :" }, { "code": null, "e": 1795, "s": 1776, "text": "2 row(s) affected\n" }, { "code": null, "e": 1923, "s": 1795, "text": "Because a row with id 4 already exists in the geek_demo table, the statement updates the name from Sneha to Mona.Reading data :" }, { "code": null, "e": 1956, "s": 1923, "text": "SELECT id, name\nFROM geek_demo;\n" }, { "code": null, "e": 1965, "s": 1956, "text": "Output :" }, { "code": null, "e": 1973, "s": 1965, "text": "almut77" }, { "code": null, "e": 1982, "s": 1973, "text": "DBMS-SQL" }, { "code": null, "e": 1988, "s": 1982, "text": "mysql" }, { "code": null, "e": 1992, "s": 1988, "text": "SQL" }, { "code": null, "e": 1996, "s": 1992, "text": "SQL" } ]
GATE | GATE CS 2021 | Set 2 | Question 42
Let S be the following schedule of operations of three transactions T1, T2 and T3 in a relational database system:

R2(Y), R1(X), R3(Z), R1(Y), W1(X), R2(Z), W2(Y), R3(X), W3(Z)

Consider the statements P and Q below:

P: S is conflict-serializable.
Q: If T3 commits before T1 finishes, then S is recoverable.

Which one of the following choices is correct?
(A) Both P and Q are true
(B) P is true and Q is false
(C) P is false and Q is true
(D) Both P and Q are false

Answer: (B)

Explanation:
The conflicting pairs in S are R1(Y)-W2(Y), giving the edge T1 -> T2; W1(X)-R3(X), giving the edge T1 -> T3; and R2(Z)-W3(Z), giving the edge T2 -> T3. The precedence graph with these edges has no cycle, so S is equivalent to the serial order T1, T2, T3.

Hence, statement P is true: S is conflict-serializable.

A schedule S is recoverable if, whenever a transaction Tj reads data written by an uncommitted transaction Ti, Tj commits only after Ti commits. Here T3 reads X after T1 has written it (R3(X) follows W1(X)), so T3 must commit after T1 commits. If T3 commits before T1 finishes, then S is not recoverable.

Hence, statement Q is false.
[ { "code": null, "e": 28, "s": 0, "text": "\n23 May, 2021" }, { "code": null, "e": 143, "s": 28, "text": "Let S be the following schedule of operations of three transactions T1, T2 and T3 in a relational database system:" }, { "code": null, "e": 197, "s": 143, "text": "R2(Y),R1(X),R3(Z),R1(Y)W1(X),R2(Z),W2(Y),R3(X),W3(Z) " }, { "code": null, "e": 236, "s": 197, "text": "Consider the statements P and Q below:" }, { "code": null, "e": 267, "s": 236, "text": "P: S is conflict-serializable." }, { "code": null, "e": 327, "s": 267, "text": "Q: If T3 commits before T1 finishes, then S is recoverable." }, { "code": null, "e": 504, "s": 327, "text": "Which one of the following choices is correct?(A) Both P and Q are true(B) P is true and Q is false(C) P is false and Q is true(D) Both P and Q are falseAnswer: (B)Explanation:" }, { "code": null, "e": 549, "s": 504, "text": "Hence, Statement P is conflict serializable." }, { "code": null, "e": 673, "s": 549, "text": "Schedule S is recoverable, if Tj creating the dirty read by reading the written data by Ti and Tj commits after Ti commits." }, { "code": null, "e": 816, "s": 673, "text": "T1 and T2 must be commit before T3 as T3 dirty reads the value at T1 and T2, so if T3 commits before T1 finishes, then S will not recoverable." }, { "code": null, "e": 845, "s": 816, "text": "Hence Statement Q is false. " }, { "code": null, "e": 867, "s": 845, "text": "Quiz of this Question" }, { "code": null, "e": 872, "s": 867, "text": "GATE" }, { "code": null, "e": 970, "s": 872, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 1012, "s": 970, "text": "GATE | GATE-CS-2014-(Set-2) | Question 65" }, { "code": null, "e": 1074, "s": 1012, "text": "GATE | Sudo GATE 2020 Mock I (27 December 2019) | Question 33" }, { "code": null, "e": 1116, "s": 1074, "text": "GATE | GATE-CS-2015 (Set 3) | Question 65" }, { "code": null, "e": 1150, "s": 1116, "text": "GATE | GATE CS 2008 | Question 46" }, { "code": null, "e": 1192, "s": 1150, "text": "GATE | GATE-CS-2014-(Set-3) | Question 65" }, { "code": null, "e": 1226, "s": 1192, "text": "GATE | GATE CS 1996 | Question 63" }, { "code": null, "e": 1260, "s": 1226, "text": "GATE | GATE-CS-2001 | Question 50" }, { "code": null, "e": 1294, "s": 1260, "text": "GATE | Gate IT 2005 | Question 52" }, { "code": null, "e": 1328, "s": 1294, "text": "GATE | GATE CS 2012 | Question 18" } ]
Homogeneity of Variance Test in R Programming
In statistics, a sequence of random variables is homoscedastic if all its random variables have the same finite variance. This is also known as homogeneity of variance. In this article, let's explain methods for checking the homogeneity of variances in R programming across two or more groups. Some statistical tests, such as the two independent samples T-test and the ANOVA test, assume that variances are equal across groups. There are various variance tests that can be used to evaluate the equality of variances. These include:

F-test: It compares the variances of two groups. The data must be normally distributed in this test.
Bartlett's test: It compares the variances of two or more groups. The data must be normally distributed in this test also.
Levene's test: A robust alternative to Bartlett's test that is less sensitive to deviations from normality.
Fligner-Killeen test: A non-parametric test that is very robust against departures from normality.

Before explaining each test let's prepare and understand the data set first. Consider one of the standard learning data sets included in R, the "ToothGrowth" data set. The tooth growth data set is the length of the teeth in each of 10 guinea pigs at three vitamin C dosage levels (0.5, 1, and 2 mg) with two delivery methods (orange juice or ascorbic acid). The file contains 60 observations of 3 variables:

len: Tooth length
supp: Supplement type (VC or OJ)
dose: Dose in milligrams

# Exploring the ToothGrowth data set
print(head(ToothGrowth, 10))
print(str(ToothGrowth))

Output:

    len supp dose
1   4.2   VC  0.5
2  11.5   VC  0.5
3   7.3   VC  0.5
4   5.8   VC  0.5
5   6.4   VC  0.5
6  10.0   VC  0.5
7  11.2   VC  0.5
8  11.2   VC  0.5
9   5.2   VC  0.5
10  7.0   VC  0.5
'data.frame': 60 obs. of 3 variables:
 $ len : num 4.2 11.5 7.3 5.8 6.4 10 11.2 11.2 5.2 7 ...
 $ supp: Factor w/ 2 levels "OJ","VC": 2 2 2 2 2 2 2 2 2 2 ...
 $ dose: num 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 ...
NULL

F-test

It is used to compare the variances of two groups. The F-test is used to evaluate whether the variances of two populations are equal or not. The data must be normally distributed in this test.

Statistical Hypothesis:

A hypothesis is a statement about the given problem. Hypothesis testing is a statistical method that is used in making a statistical decision using experimental data. Hypothesis testing is basically an assumption that we make about a population parameter. It evaluates two mutually exclusive statements about a population to determine which statement is best supported by the sample data. To know more about the statistical hypothesis please refer to Understanding Hypothesis Testing. For the F-test the statistical hypotheses are:

Null Hypothesis: The variances of the two groups are equal
Alternative Hypothesis: The variances are different

Implementation in R:

With the help of the var.test() method, one can perform the F-test between two normal populations under the hypothesis that the variances of the two populations are equal in R programming.

Syntax:
var.test(formula, dataset)

Parameters:
formula: a formula of the form values ~ groups
dataset: a matrix or data frame

Example:

# R program to illustrate
# F-test

# Using var.test()
result = var.test(len ~ supp, data = ToothGrowth)

# print the result
print(result)

Output:

F test to compare two variances

data: len by supp
F = 0.6386, num df = 29, denom df = 29, p-value = 0.2331
alternative hypothesis: true ratio of variances is not equal to 1
95 percent confidence interval:
 0.3039488 1.3416857
sample estimates:
ratio of variances
         0.6385951

Interpretation:

The p-value is p = 0.2 which is greater than the significance level 0.05. In conclusion, there is no significant difference between the two variances.

Bartlett's test

Bartlett's test is used to test if k samples are from populations with equal variances. Equal variances across populations are called homoscedasticity or homogeneity of variances. Some statistical tests, for example the ANOVA test, assume that variances are equal across groups or samples. The Bartlett test can be used to verify that assumption. Bartlett's test enables us to compare the variances of two or more samples to decide whether they are drawn from populations with equal variance. It is suitable for normally distributed data.

Statistical Hypothesis:

Null Hypothesis: All populations variances are equal
Alternative Hypothesis: At least two of them differ

Implementation in R:

R provides a function bartlett.test(), available in the stats package, which can be used to compute Bartlett's test. The syntax for this function is given below:

Syntax:
bartlett.test(formula, dataset)

Parameters:
formula: a formula of the form values ~ groups
dataset: a matrix or data frame

Example:

# R program to illustrate
# Bartlett's test

# Using bartlett.test()
result = bartlett.test(len ~ supp, data = ToothGrowth)

# print the result
print(result)

Output:

Bartlett test of homogeneity of variances

data: len by supp
Bartlett's K-squared = 1.4217, df = 1, p-value = 0.2331

Levene's test

In statistics, Levene's test is an inferential statistic used to evaluate the equality of variances for a variable determined for two or more groups. Some standard statistical procedures assume that the variances of the populations from which various samples are formed are equal. Levene's test assesses this assumption. It examines the null hypothesis that the population variances are equal, called homogeneity of variance or homoscedasticity. It compares the variances of k samples, where k can be more than two samples. It's an alternative to Bartlett's test that is less sensitive to departures from normality.

Statistical Hypothesis:

Null Hypothesis: All populations variances are equal
Alternative Hypothesis: At least two of them differ

Implementation in R:

R provides a function leveneTest(), available in the car package, which can be used to compute Levene's test. The syntax for this function is given below:

Syntax:
leveneTest(formula, dataset)

Parameters:
formula: a formula of the form values ~ groups
dataset: a matrix or data frame

Example:

# R program to illustrate
# Levene's test

# Import required package
library(car)

# Using leveneTest()
result = leveneTest(len ~ supp, data = ToothGrowth)

# print the result
print(result)

Output:

Levene's Test for Homogeneity of Variance (center = median)
      Df F value Pr(>F)
group  1  1.2136 0.2752
      58

Fligner-Killeen test

The Fligner-Killeen test is a non-parametric test for homogeneity of group variances based on ranks. It is useful when the data are non-normally distributed or when problems related to outliers in the dataset cannot be resolved. It is also one of the many tests for homogeneity of variances which is most robust against departures from normality.

Statistical Hypothesis:

Null Hypothesis: All populations variances are equal
Alternative Hypothesis: At least two of them differ

Implementation in R:

R provides a function fligner.test(), available in the stats package, which can be used to compute the Fligner-Killeen test. The syntax for this function is given below:

Syntax:
fligner.test(formula, dataset)

Parameters:
formula: a formula of the form values ~ groups
dataset: a matrix or data frame

Example:

# R program to illustrate
# Fligner-Killeen test

# Import required package
library(stats)

# Using fligner.test()
result = fligner.test(len ~ supp, data = ToothGrowth)

# print the result
print(result)

Output:

Fligner-Killeen test of homogeneity of variances

data: len by supp
Fligner-Killeen:med chi-squared = 0.97034, df = 1, p-value = 0.3246
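All of these tests extend beyond the two supplement groups. As a small addition to the article (not from the original), the sketch below checks variance homogeneity of tooth length across the three dose levels of the same data set; dose is numeric, so it is converted to a factor first:

# Import required package for leveneTest()
library(car)

# Treat the three dose levels (0.5, 1, 2 mg) as groups
ToothGrowth$dose_f = as.factor(ToothGrowth$dose)

# Bartlett's test across three groups (assumes normality)
bartlett.test(len ~ dose_f, data = ToothGrowth)

# Levene's test across three groups (robust alternative)
leveneTest(len ~ dose_f, data = ToothGrowth)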
Syntax: var.test(formula, dataset) Parameters: formula: a formula of the form values ~ groups dataset: a matrix or data frame Example: R # R program to illustrate# F-test # Using var.test()result = var.test(len ~ supp, data = ToothGrowth) # print the resultprint(result) Output: F test to compare two variances data: len by supp F = 0.6386, num df = 29, denom df = 29, p-value = 0.2331 alternative hypothesis: true ratio of variances is not equal to 1 95 percent confidence interval: 0.3039488 1.3416857 sample estimates: ratio of variances 0.6385951 Interpretation: The p-value is p = 0.2 which is greater than the significance level 0.05. In conclusion, there is no significant difference between the two variances. Bartlett’s test is used to test if k samples are from populations with equal variances. Equal variances across populations are called homoscedasticity or homogeneity of variances. Some statistical tests, for example, the ANOVA test, assume that variances are equal across groups or samples. The Bartlett test can be used to verify that assumption. Bartlett’s test enables us to compare the variance of two or more samples to decide whether they are drawn from populations with equal variance. It is fitting for normally distributed data. Statistical Hypothesis: Null Hypothesis: All populations variances are equal Alternative Hypothesis: At least two of them differ Implementation in R: The R provides a function bartlett.test() which is available in stats package can be used to compute Barlett’s test. The syntax for this function is given below: Syntax: bartlett.test(formula, dataset) Parameters: formula: a formula of the form values ~ groups dataset: a matrix or data frame Example: R # R program to illustrate# Barlett’s test # Using bartlett.test()result = bartlett.test(len ~ supp, data = ToothGrowth) # print the resultprint(result) Output: Bartlett test of homogeneity of variances data: len by supp Bartlett's K-squared = 1.4217, df = 1, p-value = 0.2331 In statistics, Levene’s test is an inferential statistic used to evaluate the equality of variances for a variable determined for two or more groups. Some standard statistical procedures find that variances of the populations from which various samples are formed are equal. Levene’s test assesses this assumption. It examines the null hypothesis that the population variances are equal called homogeneity of variance or homoscedasticity. It compares the variances of k samples, where k can be more than two samples. It’s an alternative to Bartlett’s test that is less sensitive to departures from normality. Statistical Hypothesis: Null Hypothesis: All populations variances are equal Alternative Hypothesis: At least two of them differ Implementation in R: The R provides a function leveneTest() which is available in car package that can be used to compute Levene’s test. The syntax for this function is given below: Syntax: leveneTest(formula, dataset) Parameters: formula: a formula of the form values ~ groups dataset: a matrix or data frame Example: R # R program to illustrate# Levene's test # Import required packagelibrary(car) # Using leveneTest()result = leveneTest(len ~ supp, data = ToothGrowth) # print the resultprint(result) Output: Levene's Test for Homogeneity of Variance (center = median) Df F value Pr(>F) group 1 1.2136 0.2752 58 The Fligner-Killeen test is a non-parametric test for homogeneity of group variances based on ranks. 
It is useful when the data are non-normally distributed or when problems related to outliers in the dataset cannot be resolved. It is also one of the many tests for homogeneity of variances which is most robust against departures from normality. Statistical Hypothesis: Null Hypothesis: All populations variances are equal Alternative Hypothesis: At least two of them differ Implementation in R: The R provides a function fligner.test() which is available in stats package that can be used to compute the Fligner-Killeen test. The syntax for this function is given below: Syntax: fligner.test(formula, dataset) Parameters: formula: a formula of the form values ~ groups dataset: a matrix or data frame Example: R # R program to illustrate# Fligner-Killeen test # Import required packagelibrary(stats) # Using fligner.test()result = fligner.test(len ~ supp, data = ToothGrowth) # print the resultprint(result) Output: Fligner-Killeen test of homogeneity of variances data: len by supp Fligner-Killeen:med chi-squared = 0.97034, df = 1, p-value = 0.3246 R Data-science R Language Writing code in comment? Please use ide.geeksforgeeks.org, generate link and share the link here.
[ { "code": null, "e": 28, "s": 0, "text": "\n12 Oct, 2020" }, { "code": null, "e": 557, "s": 28, "text": "In statistics, a sequence of random variables is homoscedastic if all its random variables have the same finite variance. This is also known as homogeneity of variance. In this article, let’s explain methods for checking the homogeneity of variances test in R programming across two or more groups. Some statistical tests, such as two independent samples T-test and ANOVA test, assume that variances are equal across groups. There are various variance tests that can be used to evaluate the equality of variances. These include:" }, { "code": null, "e": 658, "s": 557, "text": "F-test: It compares the variances of two groups. The data must be normally distributed in this test." }, { "code": null, "e": 781, "s": 658, "text": "Bartlett’s test: It compares the variances of two or more groups. The data must be normally distributed in this test also." }, { "code": null, "e": 889, "s": 781, "text": "Levene’s test: A robust alternative to Bartlett’s test that is less sensitive to deviations from normality." }, { "code": null, "e": 988, "s": 889, "text": "Fligner-Killeen test: A non-parametric test that is very robust against departures from normality." }, { "code": null, "e": 1397, "s": 988, "text": "Before explaining each test let’s prepare and understand the data set first. Consider one of the standard learning data sets included in R is the “ToothGrowth” data set. The tooth growth data set is the length of the teeth in each of 10 guinea pigs at three vitamin C dosage levels (0.5, 1, and 2 mg) with two delivery methods (orange juice or ascorbic acid). The file contains 60 observations of 3 variables" }, { "code": null, "e": 1415, "s": 1397, "text": "len: Tooth length" }, { "code": null, "e": 1448, "s": 1415, "text": "supp: Supplement type (VC or OJ)" }, { "code": null, "e": 1473, "s": 1448, "text": "dose: Dose in milligrams" }, { "code": null, "e": 1475, "s": 1473, "text": "R" }, { "code": "# Exploring the ToothGrowth data setprint(head(ToothGrowth, 10))print(str(ToothGrowth))", "e": 1563, "s": 1475, "text": null }, { "code": null, "e": 1571, "s": 1563, "text": "Output:" }, { "code": null, "e": 1998, "s": 1571, "text": " len supp dose\n1 4.2 VC 0.5\n2 11.5 VC 0.5\n3 7.3 VC 0.5\n4 5.8 VC 0.5\n5 6.4 VC 0.5\n6 10.0 VC 0.5\n7 11.2 VC 0.5\n8 11.2 VC 0.5\n9 5.2 VC 0.5\n10 7.0 VC 0.5\n'data.frame': 60 obs. of 3 variables:\n $ len : num 4.2 11.5 7.3 5.8 6.4 10 11.2 11.2 5.2 7 ...\n $ supp: Factor w/ 2 levels \"OJ\",\"VC\": 2 2 2 2 2 2 2 2 2 2 ...\n $ dose: num 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 ...\nNULL\n\n" }, { "code": null, "e": 2195, "s": 1998, "text": "It is used to compare the variances of the two groups. The F-test is used to evaluate whether the variances of two populations are equal or not. The data must be normally distributed in this test." }, { "code": null, "e": 2219, "s": 2195, "text": "Statistical Hypothesis:" }, { "code": null, "e": 2747, "s": 2219, "text": "A hypothesis is a statement about the given problem. Hypothesis testing is a statistical method that is used in making a statistical decision using experimental data. Hypothesis testing is basically an assumption that we make about a population parameter. It evaluates two mutually exclusive statements about a population to determine which statement is best supported by the sample data. To know more about the statistical hypothesis please refer to Understanding Hypothesis Testing. 
For F-test the statistical hypotheses are:" }, { "code": null, "e": 2806, "s": 2747, "text": "Null Hypothesis: The variances of the two groups are equal" }, { "code": null, "e": 2858, "s": 2806, "text": "Alternative Hypothesis: The variances are different" }, { "code": null, "e": 2879, "s": 2858, "text": "Implementation in R:" }, { "code": null, "e": 3056, "s": 2879, "text": "With the help of var.test() method, one can perform the f-test between two normal populations with some hypothesis that variances of two populations are equal in R programming." }, { "code": null, "e": 3064, "s": 3056, "text": "Syntax:" }, { "code": null, "e": 3091, "s": 3064, "text": "var.test(formula, dataset)" }, { "code": null, "e": 3105, "s": 3093, "text": "Parameters:" }, { "code": null, "e": 3152, "s": 3105, "text": "formula: a formula of the form values ~ groups" }, { "code": null, "e": 3184, "s": 3152, "text": "dataset: a matrix or data frame" }, { "code": null, "e": 3193, "s": 3184, "text": "Example:" }, { "code": null, "e": 3195, "s": 3193, "text": "R" }, { "code": "# R program to illustrate# F-test # Using var.test()result = var.test(len ~ supp, data = ToothGrowth) # print the resultprint(result)", "e": 3331, "s": 3195, "text": null }, { "code": null, "e": 3339, "s": 3331, "text": "Output:" }, { "code": null, "e": 3626, "s": 3339, "text": "F test to compare two variances\n\ndata: len by supp\nF = 0.6386, num df = 29, denom df = 29, p-value = 0.2331\nalternative hypothesis: true ratio of variances is not equal to 1\n95 percent confidence interval:\n 0.3039488 1.3416857\nsample estimates:\nratio of variances \n 0.6385951 \n" }, { "code": null, "e": 3642, "s": 3626, "text": "Interpretation:" }, { "code": null, "e": 3793, "s": 3642, "text": "The p-value is p = 0.2 which is greater than the significance level 0.05. In conclusion, there is no significant difference between the two variances." }, { "code": null, "e": 4331, "s": 3793, "text": "Bartlett’s test is used to test if k samples are from populations with equal variances. Equal variances across populations are called homoscedasticity or homogeneity of variances. Some statistical tests, for example, the ANOVA test, assume that variances are equal across groups or samples. The Bartlett test can be used to verify that assumption. Bartlett’s test enables us to compare the variance of two or more samples to decide whether they are drawn from populations with equal variance. It is fitting for normally distributed data." }, { "code": null, "e": 4355, "s": 4331, "text": "Statistical Hypothesis:" }, { "code": null, "e": 4408, "s": 4355, "text": "Null Hypothesis: All populations variances are equal" }, { "code": null, "e": 4460, "s": 4408, "text": "Alternative Hypothesis: At least two of them differ" }, { "code": null, "e": 4481, "s": 4460, "text": "Implementation in R:" }, { "code": null, "e": 4643, "s": 4481, "text": "The R provides a function bartlett.test() which is available in stats package can be used to compute Barlett’s test. 
The syntax for this function is given below:" }, { "code": null, "e": 4651, "s": 4643, "text": "Syntax:" }, { "code": null, "e": 4683, "s": 4651, "text": "bartlett.test(formula, dataset)" }, { "code": null, "e": 4697, "s": 4685, "text": "Parameters:" }, { "code": null, "e": 4744, "s": 4697, "text": "formula: a formula of the form values ~ groups" }, { "code": null, "e": 4776, "s": 4744, "text": "dataset: a matrix or data frame" }, { "code": null, "e": 4785, "s": 4776, "text": "Example:" }, { "code": null, "e": 4787, "s": 4785, "text": "R" }, { "code": "# R program to illustrate# Barlett’s test # Using bartlett.test()result = bartlett.test(len ~ supp, data = ToothGrowth) # print the resultprint(result)", "e": 4941, "s": 4787, "text": null }, { "code": null, "e": 4949, "s": 4941, "text": "Output:" }, { "code": null, "e": 5068, "s": 4949, "text": "Bartlett test of homogeneity of variances\n\ndata: len by supp\nBartlett's K-squared = 1.4217, df = 1, p-value = 0.2331\n" }, { "code": null, "e": 5678, "s": 5068, "text": "In statistics, Levene’s test is an inferential statistic used to evaluate the equality of variances for a variable determined for two or more groups. Some standard statistical procedures find that variances of the populations from which various samples are formed are equal. Levene’s test assesses this assumption. It examines the null hypothesis that the population variances are equal called homogeneity of variance or homoscedasticity. It compares the variances of k samples, where k can be more than two samples. It’s an alternative to Bartlett’s test that is less sensitive to departures from normality. " }, { "code": null, "e": 5702, "s": 5678, "text": "Statistical Hypothesis:" }, { "code": null, "e": 5755, "s": 5702, "text": "Null Hypothesis: All populations variances are equal" }, { "code": null, "e": 5807, "s": 5755, "text": "Alternative Hypothesis: At least two of them differ" }, { "code": null, "e": 5828, "s": 5807, "text": "Implementation in R:" }, { "code": null, "e": 5989, "s": 5828, "text": "The R provides a function leveneTest() which is available in car package that can be used to compute Levene’s test. The syntax for this function is given below:" }, { "code": null, "e": 5997, "s": 5989, "text": "Syntax:" }, { "code": null, "e": 6026, "s": 5997, "text": "leveneTest(formula, dataset)" }, { "code": null, "e": 6040, "s": 6028, "text": "Parameters:" }, { "code": null, "e": 6087, "s": 6040, "text": "formula: a formula of the form values ~ groups" }, { "code": null, "e": 6119, "s": 6087, "text": "dataset: a matrix or data frame" }, { "code": null, "e": 6128, "s": 6119, "text": "Example:" }, { "code": null, "e": 6130, "s": 6128, "text": "R" }, { "code": "# R program to illustrate# Levene's test # Import required packagelibrary(car) # Using leveneTest()result = leveneTest(len ~ supp, data = ToothGrowth) # print the resultprint(result)", "e": 6316, "s": 6130, "text": null }, { "code": null, "e": 6324, "s": 6316, "text": "Output:" }, { "code": null, "e": 6448, "s": 6324, "text": "Levene's Test for Homogeneity of Variance (center = median)\n Df F value Pr(>F)\ngroup 1 1.2136 0.2752\n 58 \n\n" }, { "code": null, "e": 6795, "s": 6448, "text": "The Fligner-Killeen test is a non-parametric test for homogeneity of group variances based on ranks. It is useful when the data are non-normally distributed or when problems related to outliers in the dataset cannot be resolved. 
It is also one of the many tests for homogeneity of variances which is most robust against departures from normality." }, { "code": null, "e": 6819, "s": 6795, "text": "Statistical Hypothesis:" }, { "code": null, "e": 6872, "s": 6819, "text": "Null Hypothesis: All populations variances are equal" }, { "code": null, "e": 6924, "s": 6872, "text": "Alternative Hypothesis: At least two of them differ" }, { "code": null, "e": 6945, "s": 6924, "text": "Implementation in R:" }, { "code": null, "e": 7121, "s": 6945, "text": "The R provides a function fligner.test() which is available in stats package that can be used to compute the Fligner-Killeen test. The syntax for this function is given below:" }, { "code": null, "e": 7129, "s": 7121, "text": "Syntax:" }, { "code": null, "e": 7160, "s": 7129, "text": "fligner.test(formula, dataset)" }, { "code": null, "e": 7174, "s": 7162, "text": "Parameters:" }, { "code": null, "e": 7221, "s": 7174, "text": "formula: a formula of the form values ~ groups" }, { "code": null, "e": 7253, "s": 7221, "text": "dataset: a matrix or data frame" }, { "code": null, "e": 7262, "s": 7253, "text": "Example:" }, { "code": null, "e": 7264, "s": 7262, "text": "R" }, { "code": "# R program to illustrate# Fligner-Killeen test # Import required packagelibrary(stats) # Using fligner.test()result = fligner.test(len ~ supp, data = ToothGrowth) # print the resultprint(result)", "e": 7463, "s": 7264, "text": null }, { "code": null, "e": 7471, "s": 7463, "text": "Output:" }, { "code": null, "e": 7610, "s": 7471, "text": "Fligner-Killeen test of homogeneity of variances\n\ndata: len by supp\nFligner-Killeen:med chi-squared = 0.97034, df = 1, p-value = 0.3246\n\n" }, { "code": null, "e": 7625, "s": 7610, "text": "R Data-science" }, { "code": null, "e": 7636, "s": 7625, "text": "R Language" } ]
Standard Normal Distribution (SND) – Java Program
The standard normal distribution is a special case of the normal distribution. It occurs when a normal random variable has a mean of 0 and a standard deviation of 1. The normal random variable of a standard normal distribution is called a standard score or a z score. A conversion from a Normally distributed value to a Standard Normally distributed value occurs via the formula,

Z = (X - u) / s
where:
Z = value on the standard normal distribution
X = value on the original distribution
u = mean of the original distribution
s = standard deviation of the original distribution

Code –

// Java code to demonstrate the naive method
// of finding Z-value

import java.io.*;
import java.util.*;

class SDN {
    public static void main(String[] args)
    {
        // initialization of variables
        double Z, X, s, u;
        X = 26;
        u = 50;
        s = 10;

        // master formula
        Z = (X - u) / s;

        // print the z-value
        System.out.println("the Z-value obtained is: " + Z);
    }
}

Output –

the Z-value obtained is: -2.4

Generating a Random Standard Normal Function – Using nextGaussian() in Java:
The nextGaussian() method is used to get the next random, Normally distributed double value with mean 0.0 and standard deviation 1.0.

Declaration:
public double nextGaussian()
Parameters:
NA
Return Value:
The method call returns the random, Normally distributed double value with mean 0.0 and standard deviation 1.0.
Exception:
NA

The following example shows the usage of java.util.Random.nextGaussian():

Code –

// Java code to demonstrate the working
// of nextGaussian()
import java.util.*;

public class NextGaussian {

    public static void main(String[] args)
    {
        // create random object
        Random ran = new Random();

        // generating a Gaussian-distributed double
        double nxt = ran.nextGaussian();

        // Printing the random Number
        System.out.println("The next Gaussian value generated is : " + nxt);
    }
}

Output –

The next Gaussian value generated is : -0.24283691098606316
[ { "code": null, "e": 28, "s": 0, "text": "\n06 Feb, 2018" }, { "code": null, "e": 397, "s": 28, "text": "The standard normal distribution is a special case of the normal distribution. It occurs when a normal random variable has a mean of 0 and a standard deviation of 1. The normal random variable of a standard normal distribution is called a standard score or a z score.A conversion from Normally distributed to Standard Normally distributed value occurs via the formula," }, { "code": null, "e": 596, "s": 397, "text": "Z = (X - u) / s\nwhere:\nZ = value on the standard normal distribution\nX = value on the original distribution\nu = mean of the original distribution\ns = standard deviation of the original distribution\n" }, { "code": null, "e": 603, "s": 596, "text": "Code –" }, { "code": "// Java code to demonstrate the naive method// of finding Z-value import java.io.*;import java.util.*; class SDN { public static void main(String[] args) { // initialization of variables double Z, X, s, u; X = 26; u = 50; s = 10; // master formula Z = (X - u) / s; // print the z-value System.out.println(\"the Z-value obtained is: \" + Z); }}", "e": 1025, "s": 603, "text": null }, { "code": null, "e": 1034, "s": 1025, "text": "Output –" }, { "code": null, "e": 1065, "s": 1034, "text": "the Z-value obtained is: -2.4\n" }, { "code": null, "e": 1276, "s": 1065, "text": "Generating a Random Standard Normal Function – Using nextGaussian() in Java :The nextGaussian() method is used to get the next random, Normally distributed double value with mean 0.0 and standard deviation 1.0." }, { "code": null, "e": 1478, "s": 1276, "text": "Declaration :\npublic double nextGaussian()\nParameters :\nNA\nReturn Value :\nThe method call returns the random, Normally distributed double value\nwith mean 0.0 and standard deviation 1.0.\nException :\nNA\n" }, { "code": null, "e": 1552, "s": 1478, "text": "The following example shows the usage of java.util.Random.nextGaussian():" }, { "code": null, "e": 1559, "s": 1552, "text": "Code –" }, { "code": "// Java code to demonstrate the working// of nextGaussian()import java.util.*; public class NextGaussian { public static void main(String[] args) { // create random object Random ran = new Random(); // generating integer double nxt = ran.nextGaussian(); // Printing the random Number System.out.println(\"The next Gaussian value generated is : \" + nxt); }}", "e": 1975, "s": 1559, "text": null }, { "code": null, "e": 1984, "s": 1975, "text": "Output –" }, { "code": null, "e": 2045, "s": 1984, "text": "The next Gaussian value generated is : -0.24283691098606316\n" }, { "code": null, "e": 2050, "s": 2045, "text": "Java" }, { "code": null, "e": 2064, "s": 2050, "text": "Java Programs" }, { "code": null, "e": 2077, "s": 2064, "text": "Mathematical" }, { "code": null, "e": 2090, "s": 2077, "text": "Mathematical" }, { "code": null, "e": 2095, "s": 2090, "text": "Java" } ]
Egg Dropping Puzzle | DP-11
The following is a description of the instance of this famous puzzle involving n = 2 eggs and a building with k = 36 floors.

Suppose that we wish to know which stories in a 36-storey building are safe to drop eggs from, and which will cause the eggs to break on landing. We make a few assumptions:

An egg that survives a fall can be used again.
A broken egg must be discarded.
The effect of a fall is the same for all eggs.
If an egg breaks when dropped, then it would break if dropped from a higher floor.
If an egg survives a fall, then it would survive a shorter fall.
It is not ruled out that the first-floor windows break eggs, nor is it ruled out that the 36th-floor windows do not cause an egg to break.

If only one egg is available and we wish to be sure of obtaining the right result, the experiment can be carried out in only one way. Drop the egg from the first-floor window; if it survives, drop it from the second-floor window. Continue upward until it breaks. In the worst case, this method may require 36 droppings. Suppose 2 eggs are available. What is the least number of egg-droppings that is guaranteed to work in all cases?

The problem is not actually to find the critical floor, but merely to decide floors from which eggs should be dropped so that the total number of trials is minimized. Source: Wiki for Dynamic Programming

Method 1: Recursion.

In this post, we will discuss a solution to a general problem with 'n' eggs and 'k' floors. The solution is to try dropping an egg from every floor (from 1 to k) and recursively calculate the minimum number of droppings needed in the worst case. The floor which gives the minimum value in the worst case is going to be part of the solution. In the following solutions, we return the minimum number of trials in the worst case; these solutions can be easily modified to print floor numbers of every trial also.

Meaning of a worst-case scenario: The worst case scenario gives the user the surety of the threshold floor. For example, if we have '1' egg and 'k' floors, we will start dropping the egg from the first floor till the egg breaks, suppose on the 'kth' floor, so the number of tries that gives us surety is 'k'.

1) Optimal Substructure: When we drop an egg from a floor x, there can be two cases (1) The egg breaks (2) The egg doesn't break.

If the egg breaks after dropping from the 'xth' floor, then we only need to check for floors lower than 'x' with the remaining eggs, as some floor should exist lower than 'x' at which the egg would not break; so the problem reduces to x-1 floors and n-1 eggs.
If the egg doesn't break after dropping from the 'xth' floor, then we only need to check for floors higher than 'x'; so the problem reduces to k-x floors and n eggs.

Since we need to minimize the number of trials in the worst case, we take the maximum of these two cases for every floor and choose the floor which yields the minimum number of trials.
k ==> Number of floors
n ==> Number of eggs
eggDrop(n, k) ==> Minimum number of trials needed to find the critical floor in the worst case.

eggDrop(n, k) = 1 + min{max(eggDrop(n - 1, x - 1), eggDrop(n, k - x)),
                        where x is in {1, 2, ..., k}}

Concept of worst case: For example, let there be '2' eggs and '2' floors.

If we try throwing from the '1st' floor:
Number of tries in worst case = 1 + max(0, 1)
0 => If the egg breaks from the first floor then it is the threshold floor (best case possibility).
1 => If the egg does not break from the first floor we will now have '2' eggs and 1 floor to test, which gives the answer '1' (worst case possibility).
We take the worst case possibility into account, so 1 + max(0, 1) = 2.

If we try throwing from the '2nd' floor:
Number of tries in worst case = 1 + max(1, 0)
1 => If the egg breaks from the second floor then we will have 1 egg and 1 floor to find the threshold floor (worst case).
0 => If the egg does not break from the second floor then it is the threshold floor (best case).
We take the worst case possibility for surety, so 1 + max(1, 0) = 2.

The final answer is min(1st, 2nd, 3rd, ....., kth floor), so the answer here is '2'.

Below is the implementation of the above approach:

C++

#include <bits/stdc++.h>
using namespace std;

// A utility function to get
// maximum of two integers
int max(int a, int b)
{
    return (a > b) ? a : b;
}

// Function to get minimum
// number of trials needed in worst
// case with n eggs and k floors
int eggDrop(int n, int k)
{
    // If there are no floors,
    // then no trials needed.
    // OR if there is one floor,
    // one trial needed.
    if (k == 1 || k == 0)
        return k;

    // We need k trials for one
    // egg and k floors
    if (n == 1)
        return k;

    int min = INT_MAX, x, res;

    // Consider all droppings from
    // 1st floor to kth floor and
    // return the minimum of these
    // values plus 1.
    for (x = 1; x <= k; x++) {
        res = max(
            eggDrop(n - 1, x - 1),
            eggDrop(n, k - x));
        if (res < min)
            min = res;
    }

    return min + 1;
}

// Driver program to test
// to printDups
int main()
{
    int n = 2, k = 10;
    cout << "Minimum number of trials "
            "in worst case with "
         << n << " eggs and " << k
         << " floors is "
         << eggDrop(n, k) << endl;
    return 0;
}

// This code is contributed
// by Akanksha Rai

C

#include <limits.h>
#include <stdio.h>

// A utility function to get
// maximum of two integers
int max(int a, int b)
{
    return (a > b) ? a : b;
}

/* Function to get minimum number of
trials needed in worst case with
n eggs and k floors */
int eggDrop(int n, int k)
{
    // If there are no floors, then no
    // trials needed. OR if there is
    // one floor, one trial needed.
    if (k == 1 || k == 0)
        return k;

    // We need k trials for one egg and
    // k floors
    if (n == 1)
        return k;

    int min = INT_MAX, x, res;

    // Consider all droppings from 1st
    // floor to kth floor and
    // return the minimum of these values
    // plus 1.
    for (x = 1; x <= k; x++) {
        res = max(
            eggDrop(n - 1, x - 1),
            eggDrop(n, k - x));
        if (res < min)
            min = res;
    }

    return min + 1;
}

/* Driver program to test to printDups*/
int main()
{
    int n = 2, k = 10;
    printf("\nMinimum number of trials in "
           "worst case with %d eggs and "
           "%d floors is %d \n",
           n, k, eggDrop(n, k));
    return 0;
}

Java

public class GFG {

    /* Function to get minimum number of
    trials needed in worst case with n
    eggs and k floors */
    static int eggDrop(int n, int k)
    {
        // If there are no floors, then
        // no trials needed. OR if there
        // is one floor, one trial needed.
        if (k == 1 || k == 0)
            return k;

        // We need k trials for one egg
        // and k floors
        if (n == 1)
            return k;

        int min = Integer.MAX_VALUE;
        int x, res;

        // Consider all droppings from
        // 1st floor to kth floor and
        // return the minimum of these
        // values plus 1.
        for (x = 1; x <= k; x++) {
            res = Math.max(eggDrop(n - 1, x - 1),
                           eggDrop(n, k - x));
            if (res < min)
                min = res;
        }

        return min + 1;
    }

    // Driver code
    public static void main(String args[])
    {
        int n = 2, k = 10;
        System.out.print("Minimum number of "
                         + "trials in worst case with "
                         + n + " eggs and " + k
                         + " floors is " + eggDrop(n, k));
    }
    // This code is contributed by Ryuga.
}

Python 3

import sys

# Function to get minimum number of trials
# needed in worst case with n eggs and k floors
def eggDrop(n, k):

    # If there are no floors, then no trials
    # needed. OR if there is one floor, one
    # trial needed.
    if (k == 1 or k == 0):
        return k

    # We need k trials for one egg
    # and k floors
    if (n == 1):
        return k

    min = sys.maxsize

    # Consider all droppings from 1st
    # floor to kth floor and return
    # the minimum of these values plus 1.
    for x in range(1, k + 1):
        res = max(eggDrop(n - 1, x - 1),
                  eggDrop(n, k - x))
        if (res < min):
            min = res

    return min + 1

# Driver Code
if __name__ == "__main__":
    n = 2
    k = 10
    print("Minimum number of trials in worst case with",
          n, "eggs and", k, "floors is", eggDrop(n, k))

# This code is contributed by ita_c

C#

using System;

class GFG {

    /* Function to get minimum number of
    trials needed in worst case with n
    eggs and k floors */
    static int eggDrop(int n, int k)
    {
        // If there are no floors, then
        // no trials needed. OR if there
        // is one floor, one trial needed.
        if (k == 1 || k == 0)
            return k;

        // We need k trials for one egg
        // and k floors
        if (n == 1)
            return k;

        int min = int.MaxValue;
        int x, res;

        // Consider all droppings from
        // 1st floor to kth floor and
        // return the minimum of these
        // values plus 1.
        for (x = 1; x <= k; x++) {
            res = Math.Max(eggDrop(n - 1, x - 1),
                           eggDrop(n, k - x));
            if (res < min)
                min = res;
        }

        return min + 1;
    }

    // Driver code
    static void Main()
    {
        int n = 2, k = 10;
        Console.Write("Minimum number of "
                      + "trials in worst case with "
                      + n + " eggs and " + k
                      + " floors is " + eggDrop(n, k));
    }
}

// This code is contributed by Sam007.

Javascript

<script>

/* Function to get minimum number of
trials needed in worst case with n
eggs and k floors */
function eggDrop(n, k)
{
    // If there are no floors, then
    // no trials needed. OR if there
    // is one floor, one trial needed.
    if (k == 1 || k == 0)
        return k;

    // We need k trials for one egg
    // and k floors
    if (n == 1)
        return k;

    let min = Number.MAX_VALUE;
    let x, res;

    // Consider all droppings from
    // 1st floor to kth floor and
    // return the minimum of these
    // values plus 1.
    for (x = 1; x <= k; x++) {
        res = Math.max(eggDrop(n - 1, x - 1),
                       eggDrop(n, k - x));
        if (res < min)
            min = res;
    }

    return min + 1;
}

// Driver code
let n = 2, k = 10;
document.write("Minimum number of "
               + "trials in worst case with "
               + n + " eggs and " + k
               + " floors is " + eggDrop(n, k));

// This code is contributed by avanitrachhadiya2155
</script>

Output:

Minimum number of trials in worst case with 2 eggs and 10 floors is 4

It should be noted that the above function computes the same subproblems again and again. See the following partial recursion tree; E(2, 2) is being evaluated twice. There will be many repeated subproblems when you draw the complete recursion tree even for small values of n and k.
E(2, 4) | ------------------------------------- | | | | | | | | x=1/ x=2/ x=3/ x=4/ / / .... .... / / E(1, 0) E(2, 3) E(1, 1) E(2, 2) / /... / x=1/ ..... / E(1, 0) E(2, 2) / ...... Partial recursion tree for 2 eggs and 4 floors. Complexity Analysis: Time Complexity: As there is a case of overlapping sub-problems the time complexity is exponential. Auxiliary Space :O(1). As there was no use of any data structure for storing values. Since same subproblems are called again, this problem has Overlapping Subproblems property. So Egg Dropping Puzzle has both properties (see this and this) of a dynamic programming problem. Like other typical Dynamic Programming(DP) problems, recomputations of same subproblems can be avoided by constructing a temporary array eggFloor[][] in bottom up manner.Method 2: Dynamic Programming.In this approach, we work on the same idea as described above neglecting the case of calculating the answers to sub-problems again and again.. The approach will be to make a table which will store the results of sub-problems so that to solve a sub-problem, it would only require a look-up from the table which will take constant time, which earlier took exponential time.Formally for filling DP[i][j] state where ‘i’ is the number of eggs and ‘j’ is the number of floors: We have to traverse for each floor ‘x’ from ‘1’ to ‘j’ and find minimum of: (1 + max( DP[i-1][j-1], DP[i][j-x] )). This simulation will make things clear: i => Number of eggs j => Number of floors Look up find maximum Lets fill the table for the following case: Floors = ‘4’ Eggs = ‘2’1 2 3 41 2 3 4 => 1 1 2 2 3 => 2 For ‘egg-1’ each case is the base case so the number of attempts is equal to floor number.For ‘egg-2’ it will take ‘1’ attempt for 1st floor which is base case.For floor-2 => Taking 1st floor 1 + max(0, DP[1][1]) Taking 2nd floor 1 + max(DP[1][1], 0) DP[2][2] = min(1 + max(0, DP[1][1]), 1 + max(DP[1][1], 0))For floor-3 => Taking 1st floor 1 + max(0, DP[2][2]) Taking 2nd floor 1 + max(DP[1][1], DP[2][1]) Taking 3rd floor 1 + max(0, DP[2][2]) DP[2][3]= min(‘all three floors’) = 2For floor-4 => Taking 1st floor 1 + max(0, DP[2][3]) Taking 2nd floor 1 + max(DP[1][1], DP[2][2]) Taking 3rd floor 1 + max(DP[1][2], DP[2][1]) Taking 4th floor 1 + max(0, DP[2][3]) DP[2][4]= min(‘all four floors’) = 3 C++ C Java Python3 C# PHP Javascript // A Dynamic Programming based for// the Egg Dropping Puzzle#include <bits/stdc++.h>using namespace std; // A utility function to get// maximum of two integersint max(int a, int b){ return (a > b) ? a : b;} /* Function to get minimumnumber of trials needed in worstcase with n eggs and k floors */int eggDrop(int n, int k){ /* A 2D table where entry eggFloor[i][j] will represent minimum number of trials needed for i eggs and j floors. */ int eggFloor[n + 1][k + 1]; int res; int i, j, x; // We need one trial for one floor and 0 // trials for 0 floors for (i = 1; i <= n; i++) { eggFloor[i][1] = 1; eggFloor[i][0] = 0; } // We always need j trials for one egg // and j floors. 
for (j = 1; j <= k; j++) eggFloor[1][j] = j; // Fill rest of the entries in table using // optimal substructure property for (i = 2; i <= n; i++) { for (j = 2; j <= k; j++) { eggFloor[i][j] = INT_MAX; for (x = 1; x <= j; x++) { res = 1 + max( eggFloor[i - 1][x - 1], eggFloor[i][j - x]); if (res < eggFloor[i][j]) eggFloor[i][j] = res; } } } // eggFloor[n][k] holds the result return eggFloor[n][k];} /* Driver program to test to printDups*/int main(){ int n = 2, k = 36; cout << "\nMinimum number of trials " "in worst case with " << n<< " eggs and "<< k<< " floors is "<< eggDrop(n, k); return 0;} // this code is contributed by shivanisinghss2110 // A Dynamic Programming based for// the Egg Dropping Puzzle#include <limits.h>#include <stdio.h> // A utility function to get// maximum of two integersint max(int a, int b){ return (a > b) ? a : b;} /* Function to get minimumnumber of trials needed in worstcase with n eggs and k floors */int eggDrop(int n, int k){ /* A 2D table where entry eggFloor[i][j] will represent minimum number of trials needed for i eggs and j floors. */ int eggFloor[n + 1][k + 1]; int res; int i, j, x; // We need one trial for one floor and 0 // trials for 0 floors for (i = 1; i <= n; i++) { eggFloor[i][1] = 1; eggFloor[i][0] = 0; } // We always need j trials for one egg // and j floors. for (j = 1; j <= k; j++) eggFloor[1][j] = j; // Fill rest of the entries in table using // optimal substructure property for (i = 2; i <= n; i++) { for (j = 2; j <= k; j++) { eggFloor[i][j] = INT_MAX; for (x = 1; x <= j; x++) { res = 1 + max( eggFloor[i - 1][x - 1], eggFloor[i][j - x]); if (res < eggFloor[i][j]) eggFloor[i][j] = res; } } } // eggFloor[n][k] holds the result return eggFloor[n][k];} /* Driver program to test to printDups*/int main(){ int n = 2, k = 36; printf("\nMinimum number of trials " "in worst case with %d eggs and " "%d floors is %d \n", n, k, eggDrop(n, k)); return 0;} // A Dynamic Programming based Java// Program for the Egg Dropping Puzzleclass EggDrop { // A utility function to get // maximum of two integers static int max(int a, int b) { return (a > b) ? a : b; } /* Function to get minimum number of trials needed in worst case with n eggs and k floors */ static int eggDrop(int n, int k) { /* A 2D table where entry eggFloor[i][j] will represent minimum number of trialsneeded for i eggs and j floors. */ int eggFloor[][] = new int[n + 1][k + 1]; int res; int i, j, x; // We need one trial for one floor and // 0 trials for 0 floors for (i = 1; i <= n; i++) { eggFloor[i][1] = 1; eggFloor[i][0] = 0; } // We always need j trials for one egg // and j floors. 
        for (j = 1; j <= k; j++)
            eggFloor[1][j] = j;

        // Fill rest of the entries in table using
        // optimal substructure property
        for (i = 2; i <= n; i++) {
            for (j = 2; j <= k; j++) {
                eggFloor[i][j] = Integer.MAX_VALUE;
                for (x = 1; x <= j; x++) {
                    res = 1 + max(eggFloor[i - 1][x - 1],
                                  eggFloor[i][j - x]);
                    if (res < eggFloor[i][j])
                        eggFloor[i][j] = res;
                }
            }
        }

        // eggFloor[n][k] holds the result
        return eggFloor[n][k];
    }

    /* Driver code */
    public static void main(String args[])
    {
        int n = 2, k = 36;
        System.out.println("Minimum number of trials in worst"
                           + " case with " + n + " eggs and "
                           + k + " floors is " + eggDrop(n, k));
    }
}
/* This code is contributed by Rajat Mishra */

# A Dynamic Programming based Python Program for the Egg Dropping Puzzle
INT_MAX = 32767

# Function to get minimum number of trials needed in worst
# case with n eggs and k floors
def eggDrop(n, k):

    # A 2D table where entry eggFloor[i][j] will represent minimum
    # number of trials needed for i eggs and j floors.
    eggFloor = [[0 for x in range(k + 1)] for x in range(n + 1)]

    # We need one trial for one floor and 0 trials for 0 floors
    for i in range(1, n + 1):
        eggFloor[i][1] = 1
        eggFloor[i][0] = 0

    # We always need j trials for one egg and j floors.
    for j in range(1, k + 1):
        eggFloor[1][j] = j

    # Fill rest of the entries in table using optimal substructure
    # property
    for i in range(2, n + 1):
        for j in range(2, k + 1):
            eggFloor[i][j] = INT_MAX
            for x in range(1, j + 1):
                res = 1 + max(eggFloor[i-1][x-1], eggFloor[i][j-x])
                if res < eggFloor[i][j]:
                    eggFloor[i][j] = res

    # eggFloor[n][k] holds the result
    return eggFloor[n][k]

# Driver code
n = 2
k = 36
print("Minimum number of trials in worst case with " + str(n) +
      " eggs and " + str(k) + " floors is " + str(eggDrop(n, k)))

# This code is contributed by Bhavya Jain

// A Dynamic Programming based C# Program
// for the Egg Dropping Puzzle
using System;

class GFG {

    // A utility function to get maximum of
    // two integers
    static int max(int a, int b)
    {
        return (a > b) ? a : b;
    }

    /* Function to get minimum number of
    trials needed in worst case with n
    eggs and k floors */
    static int eggDrop(int n, int k)
    {
        /* A 2D table where entry eggFloor[i][j] will
        represent minimum number of trials needed for
        i eggs and j floors. */
        int[, ] eggFloor = new int[n + 1, k + 1];
        int res;
        int i, j, x;

        // We need one trial for one floor and 0
        // trials for 0 floors
        for (i = 1; i <= n; i++) {
            eggFloor[i, 1] = 1;
            eggFloor[i, 0] = 0;
        }

        // We always need j trials for one egg
        // and j floors.
*/ $eggFloor = array(array());; // We need one trial for one // floor and0 trials for 0 floors for ($i = 1; $i <=$n;$i++) { $eggFloor[$i][1] = 1; $eggFloor[$i][0] = 0; } // We always need j trials // for one egg and j floors. for ($j = 1; $j <= $k; $j++) $eggFloor[1][$j] = $j; // Fill rest of the entries in // table using optimal substructure // property for ($i = 2; $i <= $n; $i++) { for ($j = 2; $j <= $k; $j++) { $eggFloor[$i][$j] = 999999; for ($x = 1; $x <= $j; $x++) { $res = 1 + max($eggFloor[$i - 1][$x - 1], $eggFloor[$i][$j - $x]); if ($res < $eggFloor[$i][$j]) $eggFloor[$i][$j] = $res; } } } // eggFloor[n][k] holds the result return $eggFloor[$n][$k];} // Driver Code $n = 2; $k = 36; echo "Minimum number of trials in worst case with " .$n. " eggs and " .$k. " floors is " .eggDrop($n, $k) ; // This code is contributed by Sam007?> <script> // A Dynamic Programming based Javascript// Program for the Egg Dropping Puzzle // A utility function to get // maximum of two integersfunction max(a,b){ return (a > b) ? a : b;} /* Function to get minimum number of trials needed in worst case with n eggs and k floors */function eggDrop(n,k){ /* A 2D table where entry eggFloor[i][j] will represent minimum number of trialsneeded for i eggs and j floors. */ let eggFloor = new Array(n + 1); for(let i=0;i<(n+1);i++) { eggFloor[i]=new Array(k+1); } let res; let i, j, x; // We need one trial for one floor and // 0 trials for 0 floors for (i = 1; i <= n; i++) { eggFloor[i][1] = 1; eggFloor[i][0] = 0; } // We always need j trials for one egg // and j floors. for (j = 1; j <= k; j++) eggFloor[1][j] = j; // Fill rest of the entries in table using // optimal substructure property for (i = 2; i <= n; i++) { for (j = 2; j <= k; j++) { eggFloor[i][j] = Number.MAX_VALUE; for (x = 1; x <= j; x++) { res = 1 + max( eggFloor[i - 1][x - 1], eggFloor[i][j - x]); if (res < eggFloor[i][j]) eggFloor[i][j] = res; } } } // eggFloor[n][k] holds the result return eggFloor[n][k];} /* Driver program to test to printDups*/let n = 2, k = 36;document.write("Minimum number of trials in worst" + " case with " + n + " eggs and " + k + " floors is " + eggDrop(n, k)); // This code is contributed by ab2127 </script> Minimum number of trials in worst case with 2 eggs and 36 floors is 8 Complexity Analysis: Time Complexity: O(n*k^2). Where ‘n’ is the number of eggs and ‘k’ is the number of floors, as we use a nested for loop ‘k^2’ times for each egg Auxiliary Space: O(n*k). As a 2-D array of size ‘n*k’ is used for storing elements. Method 3: Dynamic Programming using memoization. 
C++ C Java Python3 C# Javascript

#include <bits/stdc++.h>
using namespace std;
#define MAX 1000

vector<vector<int>> memo(MAX, vector<int> (MAX, -1));

int solveEggDrop(int n, int k)
{
    // Return the memoized answer if this state was solved before
    if (memo[n][k] != -1) {
        return memo[n][k];
    }
    if (k == 1 || k == 0)
        return k;
    if (n == 1)
        return k;

    int min = INT_MAX, x, res;
    for (x = 1; x <= k; x++) {
        res = max(solveEggDrop(n - 1, x - 1),
                  solveEggDrop(n, k - x));
        if (res < min)
            min = res;
    }

    memo[n][k] = min + 1;
    return min + 1;
}

int main()
{
    int n = 2, k = 36;
    cout << solveEggDrop(n, k);
    return 0;
}

// contributed by Shivam Agrawal(shivamagrawal3)

#include <stdio.h>
#include <limits.h>
#include <string.h>
#define MAX 1000

int memo[MAX][MAX];

int solveEggDrop(int n, int k)
{
    // Return the memoized answer if this state was solved before
    if (memo[n][k] != -1) {
        return memo[n][k];
    }
    if (k == 1 || k == 0)
        return k;
    if (n == 1)
        return k;

    int min = INT_MAX, x, res;
    for (x = 1; x <= k; x++) {
        int a = solveEggDrop(n - 1, x - 1);
        int b = solveEggDrop(n, k - x);
        res = a > b ? a : b;
        if (res < min)
            min = res;
    }

    memo[n][k] = min + 1;
    return min + 1;
}

int main()
{
    // Mark every state as unsolved (-1)
    memset(memo, -1, MAX * MAX * sizeof(int));
    int n = 2, k = 36;
    printf("%d", solveEggDrop(n, k));
    return 0;
}

// This code is contributed by repakaeswaripriya.

import java.util.Arrays;

class GFG {

    static final int MAX = 1000;
    static int[][] memo = new int[MAX][MAX];

    static int solveEggDrop(int n, int k)
    {
        // Return the memoized answer if this state was solved before
        if (memo[n][k] != -1) {
            return memo[n][k];
        }
        if (k == 1 || k == 0)
            return k;
        if (n == 1)
            return k;

        int min = Integer.MAX_VALUE, x, res;
        for (x = 1; x <= k; x++) {
            res = Math.max(solveEggDrop(n - 1, x - 1),
                           solveEggDrop(n, k - x));
            if (res < min)
                min = res;
        }

        memo[n][k] = min + 1;
        return min + 1;
    }

    public static void main(String[] args)
    {
        // Mark every state as unsolved (-1)
        for (int i = 0; i < memo.length; i++)
            Arrays.fill(memo[i], -1);
        int n = 2, k = 36;
        System.out.print(solveEggDrop(n, k));
    }
}

// This code is contributed by umadevi9616

import sys

MAX = 1000

# memo[n][k] == -1 marks an unsolved state
memo = [[-1 for i in range(MAX)] for j in range(MAX)]

def solveEggDrop(n, k):
    # Return the memoized answer if this state was solved before
    if (memo[n][k] != -1):
        return memo[n][k]
    if (k == 1 or k == 0):
        return k
    if (n == 1):
        return k

    min = sys.maxsize
    res = 0
    for x in range(1, k + 1):
        res = max(solveEggDrop(n - 1, x - 1), solveEggDrop(n, k - x))
        if (res < min):
            min = res

    memo[n][k] = min + 1
    return min + 1

# Driver code
if __name__ == '__main__':
    n = 2
    k = 36
    print(solveEggDrop(n, k))

# This code is contributed by gauravrajput1

using System;

public class GFG {

    static readonly int MAX = 1000;
    static int[, ] memo = new int[MAX, MAX];

    static int solveEggDrop(int n, int k)
    {
        // Return the memoized answer if this state was solved before
        if (memo[n, k] != -1) {
            return memo[n, k];
        }
        if (k == 1 || k == 0)
            return k;
        if (n == 1)
            return k;

        int min = int.MaxValue, x, res;
        for (x = 1; x <= k; x++) {
            res = Math.Max(solveEggDrop(n - 1, x - 1),
                           solveEggDrop(n, k - x));
            if (res < min)
                min = res;
        }

        memo[n, k] = min + 1;
        return min + 1;
    }

    public static void Main(String[] args)
    {
        // Mark every state as unsolved (-1)
        for (int i = 0; i < memo.GetLength(0); i++)
            for (int j = 0; j < memo.GetLength(1); j++)
                memo[i, j] = -1;
        int n = 2, k = 36;
        Console.Write(solveEggDrop(n, k));
    }
}

// This code is contributed by gauravrajput1

<script>

var MAX = 1000;

// memo[n][k] == -1 marks an unsolved state
var memo = Array(MAX).fill().map(() => Array(MAX).fill(-1));

function solveEggDrop(n, k)
{
    // Return the memoized answer if this state was solved before
    if (memo[n][k] != -1) {
        return memo[n][k];
    }
    if (k == 1 || k == 0)
        return k;
    if (n == 1)
        return k;

    var min = Number.MAX_VALUE, x, res;
    for (x = 1; x <= k; x++) {
        res = Math.max(solveEggDrop(n - 1, x - 1),
                       solveEggDrop(n, k - x));
        if (res < min)
            min = res;
    }

    memo[n][k] = min + 1;
    return min + 1;
}

var n = 2, k = 36;
document.write(solveEggDrop(n, k));

// This code is contributed by gauravrajput1

</script>

8
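The same memoized recursion can also be expressed very compactly in Python with functools.lru_cache instead of a hand-managed memo table. The following sketch is our addition, not part of the original article:

from functools import lru_cache

@lru_cache(maxsize=None)
def solve_egg_drop(n, k):
    # Base cases: 0 or 1 floors need k trials; one egg forces a linear scan.
    if k == 0 or k == 1:
        return k
    if n == 1:
        return k
    # Try every first drop x and keep the best worst case.
    return 1 + min(max(solve_egg_drop(n - 1, x - 1),
                       solve_egg_drop(n, k - x))
                   for x in range(1, k + 1))

print(solve_egg_drop(2, 36))  # prints 8, matching the output above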
As an exercise, you may try modifying the above DP solution to print all intermediate floors (the floors used for the minimum-trial solution); one possible sketch is given below.

More efficient solutions: Eggs Dropping Puzzle (Binomial Coefficient and Binary Search Solution), Egg Dropping Puzzle with 2 Eggs and K Floors, 2 Eggs and 100 Floor Puzzle. A sketch of the binomial-coefficient idea also follows below.

Video: Egg Dropping Problem - Approach to write the code (Dynamic Programming) | GeeksforGeeks, https://www.youtube.com/watch?v=KVfxgpI3Tv0

References: http://archive.ite.journal.informs.org/Vol4No1/Sniedovich/index.php

Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.

Egg-Dropping
Dynamic Programming
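Here is a minimal sketch of that exercise (our addition, not part of the original article; the helper names egg_drop_with_choice and worst_case_floors are ours). It fills the Method 2 table while also recording, for each state, one first drop that achieves the minimum, and then replays a worst-case branch to list the floors tried:

import sys

def egg_drop_with_choice(n, k):
    INT_MAX = sys.maxsize
    dp = [[0] * (k + 1) for _ in range(n + 1)]
    choice = [[0] * (k + 1) for _ in range(n + 1)]  # best first drop per state
    for i in range(1, n + 1):
        dp[i][1], dp[i][0] = 1, 0
        choice[i][1] = 1
    for j in range(1, k + 1):
        dp[1][j] = j
        choice[1][j] = 1
    for i in range(2, n + 1):
        for j in range(2, k + 1):
            dp[i][j] = INT_MAX
            for x in range(1, j + 1):
                res = 1 + max(dp[i - 1][x - 1], dp[i][j - x])
                if res < dp[i][j]:
                    dp[i][j], choice[i][j] = res, x
    return dp, choice

def worst_case_floors(n, k):
    # Follow the branch that realizes the worst case at every step.
    dp, choice = egg_drop_with_choice(n, k)
    floors, base = [], 0
    while n >= 1 and k >= 1:
        x = choice[n][k]
        floors.append(base + x)
        if dp[n - 1][x - 1] >= dp[n][k - x]:
            n, k = n - 1, x - 1        # egg broke: search the floors below x
        else:
            base, k = base + x, k - x  # egg survived: search the floors above x
    return floors

print(worst_case_floors(2, 36))
# [8, 1, 2, 3, 4, 5, 6, 7] -> 8 trials, one worst-case sequence for eggDrop(2, 36)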
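For the more efficient solution linked above, the key counting fact is that with t trials and n eggs one can distinguish C(t,1) + C(t,2) + ... + C(t,n) floors, so the answer is the smallest t for which this sum reaches k. The sketch below is our addition (the function names are illustrative, and math.comb requires Python 3.8+); it binary-searches on t for O(n log k) work overall:

from math import comb

def floors_coverable(t, n):
    # Number of floors distinguishable with t trials and n eggs.
    return sum(comb(t, i) for i in range(1, n + 1))

def egg_drop_fast(n, k):
    lo, hi = 1, k  # k trials always suffice (drop floor by floor)
    while lo < hi:
        mid = (lo + hi) // 2
        if floors_coverable(mid, n) >= k:
            hi = mid
        else:
            lo = mid + 1
    return lo

print(egg_drop_fast(2, 36))   # 8, matching the DP result above
print(egg_drop_fast(2, 100))  # 14, the classic 2-eggs-100-floors answer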
Python | Merge Range Characters in List
19 Feb, 2020

Sometimes we need to merge some of the elements of a list into a single element. This usually comes up when converting characters to a string, and in development it is a common way to merge name parts into one element. Let's discuss certain ways in which this can be performed.

Method #1: Using join() + list slicing
The join() function can be coupled with list slicing: the slice picks the range of characters, and join() merges them into a single element.

# Python3 code to demonstrate
# Merge Range Characters in List
# using join() + list slicing

# initializing list
test_list = ['I', 'L', 'O', 'V', 'E', 'G', 'F', 'G']

# printing original list
print("The original list is : " + str(test_list))

# initializing Range, i, j
i, j = 3, 7

# using join() + list slicing
# Merge Range Characters in List
test_list[i : j] = [''.join(test_list[i : j])]

# printing result
print("The list after merging elements : " + str(test_list))

The original list is : ['I', 'L', 'O', 'V', 'E', 'G', 'F', 'G']
The list after merging elements : ['I', 'L', 'O', 'VEGF', 'G']

Method #2: Using reduce() + lambda + list slicing
The task of joining each element in a range is performed by reduce() and a lambda: reduce() applies the lambda cumulatively across the sliced range. As written, this works on Python 2 only, where reduce() is a built-in; a Python 3 variant is sketched below.

# Python code to demonstrate
# Merge Range Characters in List
# using reduce() + lambda + list slicing

# initializing list
test_list = ['I', 'L', 'O', 'V', 'E', 'G', 'F', 'G']

# printing original list
print("The original list is : " + str(test_list))

# initializing strt, end
strt, end = 3, 7

# using reduce() + lambda + list slicing
# Merge Range Characters in List
test_list[strt : end] = [reduce(lambda i, j: i + j, test_list[strt : end])]

# printing result
print("The list after merging elements : " + str(test_list))

The original list is : ['I', 'L', 'O', 'V', 'E', 'G', 'F', 'G']
The list after merging elements : ['I', 'L', 'O', 'VEGF', 'G']

Python list-programs
Python
Python Programs
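On Python 3 the same reduce()-based merge still works once reduce() is imported from functools. The following small sketch is our addition, not part of the original article (operator.add stands in for the lambda):

from functools import reduce
from operator import add

test_list = ['I', 'L', 'O', 'V', 'E', 'G', 'F', 'G']
strt, end = 3, 7

# Merge the characters in positions [strt, end) into one string element.
test_list[strt : end] = [reduce(add, test_list[strt : end])]

print("The list after merging elements : " + str(test_list))
# The list after merging elements : ['I', 'L', 'O', 'VEGF', 'G']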
Golang Program that uses String Switch
04 May, 2020

With the help of a switch statement we can implement the functionality of a chain of if statements. In Golang, switch cases can work with strings as well as integer and floating-point values, and a single case may list several values.

Syntax:

switch optstatement; optexpression {
case expression1:
    Statement..
case expression2:
    Statement..
...
default:
    Statement..
}

Example 1: In this example the variable holds a string, and an expressionless switch selects the first case whose string-equality condition is true.

// Golang program that uses string switch
package main

// Here "fmt" is formatted IO which
// is same as C’s printf and scanf.
import "fmt"

// Main function
func main() {

    day := "Tue"

    // Use switch on the day variable.
    switch {
    case day == "Mon":
        fmt.Println("Monday")
    case day == "Tue":
        fmt.Println("Tuesday")
    case day == "Wed":
        fmt.Println("Wednesday")
    }
}

Output :

Tuesday

Example 2:

// Golang program that uses string switch
package main

// Here "fmt" is formatted IO which
// is same as C’s printf and scanf.
import "fmt"

// Main function
func main() {

    gfg := "Geek"

    // Use switch on the gfg variable.
    switch {
    case gfg == "Geek":
        fmt.Println("Geek")
    case gfg == "For":
        fmt.Println("For")
    case gfg == "Geeks":
        fmt.Println("Geeks")
    }
}

Output:

Geek

Golang-Program
Picked
Go Language
Laravel | Delete Records
27 Apr, 2020

To delete records we can use the DB facade with the delete method. To do so, follow the below steps one by one:

Step 1: Create the controller UserController by executing this command:

php artisan make:controller UserController

Step 2: We can delete records in two ways, described below.

First Method: The first way is to delete directly using a database command. Write the following code in App/Http/Controllers/UserController.php:

<?php
namespace App\Http\Controllers;
use Illuminate\Http\Request;
use DB;
class UserController extends Controller {
    public function index() {
        $users = DB::select('SELECT * FROM users');
        return view('user', ['users'=>$users]);
    }
    public function destroy($id) {
        DB::delete('DELETE FROM users WHERE id = ?', [$id]);
        echo ("User Record deleted successfully.");
        return redirect()->route('users.index');
    }
}

Second Method: The second way is to delete using the Laravel delete function and the User model (the easier one):

<?php
namespace App\Http\Controllers;
use Illuminate\Http\Request;
use App\User;
class UserController extends Controller {
    public function index() {
        $users = User::all();
        return view('user', ['users'=>$users]);
    }
    public function destroy($id) {
        $user = User::where('id', $id)->firstOrFail()->delete();
        echo ("User Record deleted successfully.");
        return redirect()->route('users.index');
    }
}

Step 3: Implementation or driver code: create web routes for the above code in routes/web.php:

<?php
Route::get('/user', 'UserController@index')->name('users.index');
Route::delete('/user/{id}', 'UserController@destroy')
    ->name('users.destroy');
?>

Step 4: Create a view file, from where we display our users, in the resources/views directory, named user.blade.php.
Write the following HTML code:

<!DOCTYPE html>
<html>

<head>
    <title>Users Record</title>
    <style type="text/css">
        table {
            color: #333;
            font-family: sans-serif;
            width: 640px;
            border-collapse: collapse;
            border-spacing: 0;
        }

        td, th {
            border: 1px solid #CCC;
            height: 30px;
        }

        th {
            background: #F3F3F3;
            font-weight: bold;
        }

        td {
            background: #FAFAFA;
            text-align: center;
        }
    </style>
</head>

<body>
    <table>
        <tr>
            <td>ID</td>
            <td>Name</td>
            <td>Email</td>
            <td>Delete</td>
        </tr>
        @foreach ($users as $user)
        <tr>
            <td>{{ $user->id }}</td>
            <td>{{ $user->name }}</td>
            <td>{{ $user->email }}</td>
            <td><a href="{{ route('users.index') }}"
                   onclick="event.preventDefault();
                   document.getElementById(
                     'delete-form-{{$user->id}}').submit();">
                    Delete
                </a>
            </td>
            <form id="delete-form-{{$user->id}}"
                  action="{{ route('users.destroy', $user->id) }}"
                  method="post">
                @csrf
                @method('DELETE')
            </form>
        </tr>
        @endforeach
    </table>
</body>

</html>

Step 5: Start the server by executing the php artisan serve command and go to http://localhost:8000/user, and the output will be:

Output: Click on the delete button to get the record deleted. After deleting two records the output is:

Laravel
Picked
PHP
Web Technologies
Web technologies Questions
PHP
C# | Convert.ToDateTime(String, IFormatProvider) Method
02 Sep, 2021

This method is used to convert the specified string representation of a date and time to an equivalent DateTime value, using the specified culture-specific formatting information.

Syntax:

public static DateTime ToDateTime (string value, IFormatProvider provider);

Parameters:

value: A string that contains a date and time to convert.
provider: An object that supplies culture-specific formatting information.

Return Value: This method returns the date and time equivalent of the value of value, or the date and time equivalent of MinValue if value is null.

Exception: This method throws a FormatException if the value is not a properly formatted date and time string.

Below programs illustrate the use of the Convert.ToDateTime(String, IFormatProvider) method.

Example 1:

// C# program to demonstrate the
// Convert.ToDateTime() Method
using System;
using System.Globalization;

class GFG {

    // Main Method
    public static void Main()
    {
        try {

            // creating object of CultureInfo
            CultureInfo cultures = new CultureInfo("en-US");

            // declaring and initializing String array
            string[] values = {"01/02/09", "2009/02/03",
                               "01/2009/03", "01/02/2009", "01/02/23"};

            // calling get() Method
            Console.WriteLine("Converted string value" +
                              " to specified DateTime: ");

            for (int j = 0; j < values.Length; j++) {
                get(values[j], cultures);
            }
        }
        catch (FormatException e) {
            Console.WriteLine("\n");
            Console.Write("Exception Thrown: ");
            Console.Write("{0}", e.GetType(), e.Message);
        }
    }

    // Defining get() method
    public static void get(string s, CultureInfo cultures)
    {
        // converting string to the specified DateTime
        DateTime val = Convert.ToDateTime(s, cultures);

        // display the converted DateTime
        Console.Write(" {0}, ", val);
    }
}

Example 2: For FormatException

// C# program to demonstrate the
// Convert.ToDateTime() Method
using System;
using System.Globalization;

class GFG {

    // Main Method
    public static void Main()
    {
        try {

            // creating object of CultureInfo
            CultureInfo cultures = new CultureInfo("en-US");

            // declaring and initializing String array
            string[] values = {"01/02/09", "2009/02/03",
                               "01/2009/03", "01/02/2009", "01/02/23"};

            // calling get() Method
            Console.WriteLine("Converted string value " +
                              "to specified DateTime: ");

            for (int j = 0; j < values.Length; j++) {
                get(values[j], cultures);
            }

            Console.WriteLine("\n\nvalue is not a properly " +
                              "formatted date and time string.");

            // converting string to the specified DateTime
            DateTime val = Convert.ToDateTime("21/2009/03", cultures);

            // display the converted DateTime
            Console.Write(" {0}, ", val);
        }
        catch (FormatException e) {
            Console.Write("Exception Thrown: ");
            Console.Write("{0}", e.GetType(), e.Message);
        }
    }

    // Defining get() method
    public static void get(string s, CultureInfo cultures)
    {
        // converting string to the specified DateTime
        DateTime val = Convert.ToDateTime(s, cultures);

        // display the converted DateTime
        Console.Write(" {0}, ", val);
    }
}

Reference: https://docs.microsoft.com/en-us/dotnet/api/system.convert.todatetime?view=netframework-4.7.2#System_Convert_ToDateTime_System_String_System_IFormatProvider_
JavaScript | Get the text of a span element
23 May, 2019

Given an HTML document, the task is to get the text of a <span> element. There are two properties used to get the text of span elements, which are discussed below:

HTML DOM textContent Property: This property sets/returns the text content of the defined node and all its descendants. By setting the textContent property, the child nodes are removed and replaced by a single Text node containing the specified string.

Syntax:

Return the text content of a node:
node.textContent

Set the text content of a node:
node.textContent = text

Property values: It contains a single value, text, which specifies the text content of the specified node.

Return value: It returns a string representing the text of the node and all its descendants. It returns null if the element is a document, a document type, or a notation.

HTML DOM innerText Property: This property sets/returns the text content of the defined node and all its descendants. By setting the innerText property, any child nodes are removed and replaced by a single Text node containing the specified string.

Syntax:

Return the text content of a node:
node.innerText

Set the text content of a node:
node.innerText = text

Property values: It contains a single value, text, which specifies the text content of the specified node.

Return value: It returns a string representing the "rendered" text content of a node and all its descendants.

Example 1: This example gets the content by using the textContent property.

<!DOCTYPE HTML>
<html>

<head>
    <title>
        JavaScript | Get the text of a span element
    </title>
</head>

<body style="text-align:center;" id="body">
    <h1 style="color:green;">
        GeeksForGeeks
    </h1>
    <span id="GFG_Span"
          style="font-size: 15px; font-weight: bold;">
        This is text of Span element.
    </span>
    <br><br>
    <button onclick="gfg_Run()">
        Click here
    </button>
    <p id="GFG_DOWN"
       style="color:green; font-size: 20px; font-weight: bold;">
    </p>
    <script>
        var span = document.getElementById("GFG_Span");
        var el_down = document.getElementById("GFG_DOWN");

        function gfg_Run() {
            el_down.innerHTML = span.textContent;
        }
    </script>
</body>

</html>

Output:

Before clicking on the button:
After clicking on the button:

Example 2: This example gets the content by using the innerText property.
<!DOCTYPE HTML>
<html>

<head>
    <title>
        JavaScript | Get the text of a span element
    </title>
</head>

<body style="text-align:center;" id="body">
    <h1 style="color:green;">
        GeeksForGeeks
    </h1>
    <span id="GFG_Span"
          style="font-size: 15px; font-weight: bold;">
        This is text of Span element.
    </span>
    <br><br>
    <button onclick="gfg_Run()">
        Click here
    </button>
    <p id="GFG_DOWN"
       style="color:green; font-size: 20px; font-weight: bold;">
    </p>
    <script>
        var span = document.getElementById("GFG_Span");
        var el_down = document.getElementById("GFG_DOWN");

        function gfg_Run() {
            el_down.innerHTML = span.innerText;
        }
    </script>
</body>

</html>

Output:

Before clicking on the button:
After clicking on the button:
How do we use easy_install to install Python modules?
easy_install was released in 2004, as part of setuptools. It was notable at the time for installing packages from PyPI using requirement specifiers, and for automatically installing dependencies.

pip came later, in 2008, as an alternative to easy_install, although it was still largely built on top of setuptools components. You should be using pip instead of easy_install for installing Python modules. If you have easy_install, you can use it to install pip. Type the following command:

$ easy_install pip

Now you can use pip to install the module you want. For example, to install the latest version of "SomeProject":

$ pip install 'SomeProject'

To install a specific version:

$ pip install 'SomeProject==1.4'

To install greater than or equal to one version and less than another:

$ pip install 'SomeProject>=1,<2'
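To confirm from inside Python that a package actually installed, you can query its metadata. The snippet below is a minimal sketch using the standard library (Python 3.8+); "SomeProject" is just the placeholder name used above, so substitute a real distribution name.

from importlib.metadata import version, PackageNotFoundError

# Look up the installed version of a distribution by name.
try:
    print(version("SomeProject"))
except PackageNotFoundError:
    print("SomeProject is not installed")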
Apache NiFi - Custom Processor
Apache NiFi is an open source platform and gives developers the option to add their own custom processors to the NiFi library. Follow these steps to create a custom processor.

Download the latest version of Maven from the link given below.
https://maven.apache.org/download.cgi

Add an environment variable named M2_HOME and set its value to the installation directory of Maven.

Download the Eclipse IDE from the below link.
https://www.eclipse.org/downloads/download.php

Open a command prompt and execute the Maven Archetype command.

> mvn archetype:generate

Search for the nifi type in the archetype projects.

Select the org.apache.nifi:nifi-processor-bundle-archetype project.

Then from the list of versions select the latest version, i.e. 1.7.1 for this tutorial.

Enter the groupId, artifactId, version, package, and artifactBaseName, etc.

Then a Maven project will be created having two directories.

nifi-<artifactBaseName>-processors
nifi-<artifactBaseName>-nar

Run the below command in the nifi-<artifactBaseName>-processors directory to add the project to Eclipse.
46 Lectures 3.5 hours Arnab Chakraborty 23 Lectures 1.5 hours Mukund Kumar Mishra 16 Lectures 1 hours Nilay Mehta 52 Lectures 1.5 hours Bigdata Engineer 14 Lectures 1 hours Bigdata Engineer 23 Lectures 1 hours Bigdata Engineer Print Add Notes Bookmark this page
Lines and Indentation in Python
Python provides no braces to indicate blocks of code for class and function definitions or flow control. Blocks of code are denoted by line indentation, which is rigidly enforced.

The number of spaces in the indentation is variable, but all statements within the block must be indented the same amount. For example −

if True:
   print "True"
else:
   print "False"

However, the following block generates an error −

if True:
print "Answer"
print "True"
else:
print "Answer"
print "False"

Thus, in Python all the continuous lines indented with the same number of spaces would form a block. The following example has various statement blocks −

Note − Do not try to understand the logic at this point of time. Just make sure you understood various blocks even if they are without braces.

#!/usr/bin/python
import sys

try:
   # open file stream
   file = open(file_name, "w")
except IOError:
   print "There was an error writing to", file_name
   sys.exit()

print "Enter '", file_finish,
print "' When finished"

while file_text != file_finish:
   file_text = raw_input("Enter text: ")
   if file_text == file_finish:
      # close the file
      file.close
      break
   file.write(file_text)
   file.write("\n")

file.close()
file_name = raw_input("Enter filename: ")

if len(file_name) == 0:
   print "Next time please enter something"
   sys.exit()

try:
   file = open(file_name, "r")
except IOError:
   print "There was an error reading file"
   sys.exit()

file_text = file.read()
file.close()

print file_text
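As the error example above suggests, Python rejects inconsistently indented blocks before the code ever runs. Below is a minimal sketch of this (it uses Python 3 print syntax, unlike the Python 2 examples above):

bad_block = (
    "if True:\n"
    "    print('Answer')\n"
    "   print('True')\n"   # one space less than its sibling line
)

try:
    # Compiling is enough to trigger the check; nothing is executed.
    compile(bad_block, "<example>", "exec")
except IndentationError as err:
    print("IndentationError:", err)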
TD3: Learning To Run With AI. Learn to build one of the most powerful... | by Donal Byrne | Towards Data Science
This article looks at one of the most powerful and state-of-the-art algorithms in Reinforcement Learning (RL), Twin Delayed Deep Deterministic Policy Gradients (TD3) (Fujimoto et al., 2018). By the end of this article you should have a solid understanding of what makes TD3 perform so well, be capable of implementing the algorithm yourself, and be able to use TD3 to train an agent to successfully run in the HalfCheetah environment.

However, before tackling TD3 you should already have a good understanding of RL and the common algorithms such as Deep Q Networks and DDPG, which TD3 is built upon. If you need to brush up on your knowledge, check out these excellent resources: DeepMind Lecture Series, Let’s make a DQN, Spinning Up: DDPG. This article will cover the following:

What is TD3
Explanation of each core mechanic
Implementation & code walkthrough
Results & Benchmarking

The full code can be found here on my github. If you want to quickly follow along with the code used here, click on the icon below to be taken to a Google Colab workbook with everything ready to go.

What is TD3

TD3 is the successor to the Deep Deterministic Policy Gradient (DDPG) (Lillicrap et al., 2016). Up until recently, DDPG was one of the most used algorithms for continuous control problems such as robotics and autonomous driving. Although DDPG is capable of providing excellent results, it has its drawbacks. Like many RL algorithms, training DDPG can be unstable and heavily reliant on finding the correct hyperparameters for the current task (OpenAI Spinning Up, 2018). This is caused by the algorithm continuously overestimating the Q values of the critic (value) network. These estimation errors build up over time and can lead to the agent falling into a local optimum or experiencing catastrophic forgetting. TD3 addresses this issue by focusing on reducing the overestimation bias seen in previous algorithms. This is done with the addition of 3 key features:

Using a pair of critic networks (The twin part of the title)
Delayed updates of the actor (The delayed part)
Action noise regularisation (This part didn’t make it to the title :/ )

Twin Critic Networks

The first feature added to TD3 is the use of two critic networks. This was inspired by the technique seen in Deep Reinforcement Learning with Double Q-learning (Van Hasselt et al., 2016), which involved estimating the current Q value using a separate target value function, thus reducing the bias. However, the technique doesn’t work perfectly for actor-critic methods.
Target networks are a great tool for introducing stability to an agents training, however in the case of actor critic methods there are some issues to this technique. This is caused by the interaction between the policy (actor) and critic (value) networks. The training of the agent diverges when a poor policy is overestimated. Our agents policy will then continue to get worse as it is updating on states with a lot of error. In order to fix this we simply need to carry out updates of the policy network less frequently than the value network. This allows the value network to become more stable and reduce errors before it is used to update the policy network. In practice, the policy network is updated after a fixed period of time steps, while the value network continues to update after each time step. These less frequent policy updates will have value estimate with lower variance and therefore should result in a better policy. Bottom Line: TD3 uses a delayed update of the actor network, only updating it every 2 time steps instead of after each time step, resulting in more stable and efficient training. The final portion of TD3 looks at smoothing the target policy. Deterministic policy methods have a tendency to produce target values with high variance when updating the critic. This is caused by overfitting to spikes in the value estimate. In order to reduce this variance, TD3 uses a regularisation technique known as target policy smoothing. Ideally there would be no variance between target values, with similar actions receiving similar values. TD3 reduces this variance by adding a small amount of random noise to the target and averaging over mini batches. The range of noise is clipped in order to keep the target value close to the original action. By adding this additional noise to the value estimate, policies tend to be more stable as the target value is returning a higher value for actions that are more robust to noise and interference. Bottom Line: Clipped noise is added to the selected action when calculating the targets. This preferences higher values for actions that are more robust. This implementation is based off the original repo for the paper found here.The major sections of code are covered below with the complete self contained notebook found here. This implementation is written in pytorch, if you are not familiar I would suggest checking out some of the example documentation here. All network architecture and hyper parameters are the same as the ones used in the original paper. Below is the pseudo code from the paper. Although this may look complicated, when you break it down and get past the mathematical equation format, it is actually very intuitive. I have broken up the previous pseudo code into logical steps that you can follow in order to implement the TD3 algorithm as follows: Initialise networksInitialise replay bufferSelect and carry out action with exploration noiseStore transitionsUpdate criticUpdate actorUpdate target networksRepeat until sentient Initialise networks Initialise replay buffer Select and carry out action with exploration noise Store transitions Update critic Update actor Update target networks Repeat until sentient This is a fairly standard set up for both Actor and Critic networks. Note that the critic class actually contains both networks to be used. The critics forward() method returns the Q values for both critics to be used later. The get_Q method simply returns the first critic network. 
Step 1: Initialise networks

This is a fairly standard set up for both the Actor and Critic networks. Note that the Critic class actually contains both networks to be used. The critic's forward() method returns the Q values for both critics, to be used later. The get_Q method simply returns the first critic network.

class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()

        self.l1 = nn.Linear(state_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, action_dim)

        self.max_action = max_action

    def forward(self, x):
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = self.max_action * torch.tanh(self.l3(x))
        return x

class Critic(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()

        # Q1 architecture
        self.l1 = nn.Linear(state_dim + action_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, 1)

        # Q2 architecture
        self.l4 = nn.Linear(state_dim + action_dim, 400)
        self.l5 = nn.Linear(400, 300)
        self.l6 = nn.Linear(300, 1)

    def forward(self, x, u):
        xu = torch.cat([x, u], 1)

        x1 = F.relu(self.l1(xu))
        x1 = F.relu(self.l2(x1))
        x1 = self.l3(x1)

        x2 = F.relu(self.l4(xu))
        x2 = F.relu(self.l5(x2))
        x2 = self.l6(x2)
        return x1, x2

    def get_Q(self, x, u):
        xu = torch.cat([x, u], 1)

        x1 = F.relu(self.l1(xu))
        x1 = F.relu(self.l2(x1))
        x1 = self.l3(x1)
        return x1

Step 2: Initialise replay buffer

This is a standard replay buffer, borrowed from the OpenAI baselines repo here. A minimal sketch of such a buffer follows.
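The article links to the OpenAI baselines buffer rather than reprinting it; the version below is a simplified stand-in, written to match the add() and sample() calls used in the later sections.

import numpy as np

class ReplayBuffer:
    """Simplified stand-in for the baselines buffer used in the article."""

    def __init__(self, max_size=1_000_000):
        self.storage = []
        self.max_size = max_size
        self.ptr = 0

    def add(self, transition):
        # transition = (state, next_state, action, reward, done)
        if len(self.storage) == self.max_size:
            self.storage[self.ptr] = transition   # overwrite the oldest entry
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(transition)

    def sample(self, batch_size):
        idx = np.random.randint(0, len(self.storage), size=batch_size)
        s, s_, a, r, d = [], [], [], [], []
        for i in idx:
            state, next_state, action, reward, done = self.storage[i]
            s.append(np.array(state, copy=False))
            s_.append(np.array(next_state, copy=False))
            a.append(np.array(action, copy=False))
            r.append(reward)
            d.append(done)
        return (np.array(s), np.array(s_), np.array(a),
                np.array(r).reshape(-1, 1), np.array(d).reshape(-1, 1))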
This is done by getting the MSE of each current critic and the target Q value we just calculated. We then carry out the optimisation of the critic as normal. # Get current Q estimatescurrent_Q1, current_Q2 = self.critic(state, action)# Compute critic losscritic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)# Optimize the criticself.critic_optimizer.zero_grad()critic_loss.backward()self.critic_optimizer.step() The actor is much simpler to update when compared to the critic. First we make sure that we are only updating the actor every d time steps. In our case and in the paper, the actor was updated every 2nd time step. # Delayed policy updatesif it % policy_freq == 0: # Compute actor loss actor_loss = -self.critic.Q1(state, self.actor(state)).mean() # Optimize the actor self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() # Update the frozen target models for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) The actor’s loss function simply gets the mean of the -Q values from our critic network with our actor choosing what action to take given the mini batch of states. Just like before, we optimise our actor network through backpropagation. Finally we update our frozen target networks using a soft update. This is done along side the actor update and is also delayed. # Update the frozen target models for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) The authors results in the original paper boasts excellent scores across a variety of benchmarking environments such as the MuJoco control suite. The results below show how TD3 outperforms almost all algorithms, including the recent SAC (Haarnoja et al., 2018) algorithm and PPO (Schulman et al., 2017) which is OpenAI’s go to algorithm used for ground breaking research such as their DOTA 2 agent. The algorithms used as benchmark included the OpenAI baseline implementation of DDPG, PPO, ACKTR (Wu et al., 2017) and TRPO (Schulman et al., 2015). SAC was implemented from the authors github. However, since the release of TD3, improvements have been made to SAC, as seen in Soft Actor-Critic Algorithms and Applications (Haarnoja et al., 2019). Here Haarnoja shows new results that outperform TD3 across the board. In order to make an unbiased review of the algorithm we can see benchmarking results from OpenAI:Spinning Up’s implementations of the main RL algorithms. As you can see in Fig 6, TD3 manages to outperform SAC in the Ant environment. However, SAC achieves a higher performance in the HalfCheetah environment. Below is the training results from my own implementation of TD3 tested on the Roboschool HalfCheetah environment. The graph above shows the agents average score over the last 100 episodes. As you can see the agent quickly learns to stand and then to walk successfully. Although it does briefly fall into a local optima, the agent is able to quickly recover, converging on an optimal policy after 500k time steps. 
The video below shows the results of the fully trained agent. Congratulations, we have covered everything you need to start implementing one of the best state-of-the-art reinforcement learning algorithms on the market! We have now gone through what TD3 is and explained the core mechanics that makes the algorithm perform so well. Not only that, but we have gone step by step through the algorithm and learned how to build the algorithm with pytorch. Finally we took a look at the results of the algorithm seen in the original paper and this articles implementation. I hope you found this article helpful and learned something about Reinforcement Learning! [1] Fujimoto, S., van Hoof, H., and Meger, D. Addressing function approximation error in actor-critic methods. arXiv preprint arXiv:1802.09477, 2018. [2] Lillicrap, T. P., Hunt, J. J., Pritzel, A., Heess, N., Erez, T., Tassa, Y., Silver, D., and Wierstra, D. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015. [3] OpenAI — Spinng Up, 2018: https://spinningup.openai.com/en/latest/algorithms/td3.html#background [4] Hado van Hasselt (2010). Double Q-learning. Advances in Neural Information Processing Systems 23 (NIPS 2010), Vancouver, British Columbia, Canada, pp. 2613–2622. [5] Van Hasselt, H., Guez, A., and Silver, D. Deep reinforcement learning with double q-learning. In AAAI, pp. 2094– 2100, 2016. [6] Haarnoja, T., Zhou, A., Abbeel, P., and Levine, S. Soft actor-critic: Off-policy maximum entropy deep reinforce- ment learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018. [7] Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. Proximal policy optimization algorithms.arXiv preprint arXiv:1707.06347, 2017. [8] Schulman, J., Levine, S., Abbeel, P., Jordan, M., and Moritz, P. Trust region policy optimization. In International Conference on Machine Learning, pp. 1889–1897, 2015. [9] Wu, Y., Mansimov, E., Grosse, R. B., Liao, S., and Ba, J. Scalable trust-region method for deep reinforcement learning using kronecker-factored approximation. In Ad- vances in Neural Information Processing Systems, pp. 5285–5294, 2017. [10] Haarnoja, T., Zhou, A., Abbeel, P., and Levine, S. Soft Actor-Critic Algorithms and Applications. arXiv preprint arXiv:1812.05905v2, 2019
This is because the policy and target networks are updated so slowly that they look very similar, which brings bias back into the picture. Instead, an older implementation seen in Double Q Learning (Van Hasselt, 2010) is used. TD3 uses clipped double Q learning where it takes the smallest value of the two critic networks (The lesser of two evils if you will)." }, { "code": null, "e": 3575, "s": 3295, "text": "This method favours underestimation of Q values. This underestimation bias isn’t a problem as the low values will not be propagated through the algorithm, unlike overestimate values. This provides a more stable approximation, thus improving the stability of the entire algorithm." }, { "code": null, "e": 3689, "s": 3575, "text": "Bottom Line: TD3 uses two separate critic networks, using the smallest value of the two when forming its targets." }, { "code": null, "e": 4117, "s": 3689, "text": "Target networks are a great tool for introducing stability to an agents training, however in the case of actor critic methods there are some issues to this technique. This is caused by the interaction between the policy (actor) and critic (value) networks. The training of the agent diverges when a poor policy is overestimated. Our agents policy will then continue to get worse as it is updating on states with a lot of error." }, { "code": null, "e": 4627, "s": 4117, "text": "In order to fix this we simply need to carry out updates of the policy network less frequently than the value network. This allows the value network to become more stable and reduce errors before it is used to update the policy network. In practice, the policy network is updated after a fixed period of time steps, while the value network continues to update after each time step. These less frequent policy updates will have value estimate with lower variance and therefore should result in a better policy." }, { "code": null, "e": 4806, "s": 4627, "text": "Bottom Line: TD3 uses a delayed update of the actor network, only updating it every 2 time steps instead of after each time step, resulting in more stable and efficient training." }, { "code": null, "e": 5464, "s": 4806, "text": "The final portion of TD3 looks at smoothing the target policy. Deterministic policy methods have a tendency to produce target values with high variance when updating the critic. This is caused by overfitting to spikes in the value estimate. In order to reduce this variance, TD3 uses a regularisation technique known as target policy smoothing. Ideally there would be no variance between target values, with similar actions receiving similar values. TD3 reduces this variance by adding a small amount of random noise to the target and averaging over mini batches. The range of noise is clipped in order to keep the target value close to the original action." }, { "code": null, "e": 5659, "s": 5464, "text": "By adding this additional noise to the value estimate, policies tend to be more stable as the target value is returning a higher value for actions that are more robust to noise and interference." }, { "code": null, "e": 5813, "s": 5659, "text": "Bottom Line: Clipped noise is added to the selected action when calculating the targets. This preferences higher values for actions that are more robust." }, { "code": null, "e": 6401, "s": 5813, "text": "This implementation is based off the original repo for the paper found here.The major sections of code are covered below with the complete self contained notebook found here. 
This implementation is written in pytorch, if you are not familiar I would suggest checking out some of the example documentation here. All network architecture and hyper parameters are the same as the ones used in the original paper. Below is the pseudo code from the paper. Although this may look complicated, when you break it down and get past the mathematical equation format, it is actually very intuitive." }, { "code": null, "e": 6534, "s": 6401, "text": "I have broken up the previous pseudo code into logical steps that you can follow in order to implement the TD3 algorithm as follows:" }, { "code": null, "e": 6713, "s": 6534, "text": "Initialise networksInitialise replay bufferSelect and carry out action with exploration noiseStore transitionsUpdate criticUpdate actorUpdate target networksRepeat until sentient" }, { "code": null, "e": 6733, "s": 6713, "text": "Initialise networks" }, { "code": null, "e": 6758, "s": 6733, "text": "Initialise replay buffer" }, { "code": null, "e": 6809, "s": 6758, "text": "Select and carry out action with exploration noise" }, { "code": null, "e": 6827, "s": 6809, "text": "Store transitions" }, { "code": null, "e": 6841, "s": 6827, "text": "Update critic" }, { "code": null, "e": 6854, "s": 6841, "text": "Update actor" }, { "code": null, "e": 6877, "s": 6854, "text": "Update target networks" }, { "code": null, "e": 6899, "s": 6877, "text": "Repeat until sentient" }, { "code": null, "e": 7182, "s": 6899, "text": "This is a fairly standard set up for both Actor and Critic networks. Note that the critic class actually contains both networks to be used. The critics forward() method returns the Q values for both critics to be used later. The get_Q method simply returns the first critic network." }, { "code": null, "e": 8455, "s": 7182, "text": "class Actor(nn.Module): def __init__(self, state_dim, action_dim, max_action): super(Actor, self).__init__() self.l1 = nn.Linear(state_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, action_dim) self.max_action = max_action def forward(self, x): x = F.relu(self.l1(x)) x = F.relu(self.l2(x)) x = self.max_action * torch.tanh(self.l3(x)) return xclass Critic(nn.Module): def __init__(self, state_dim, action_dim): super(Critic, self).__init__() # Q1 architecture self.l1 = nn.Linear(state_dim + action_dim, 400) self.l2 = nn.Linear(400, 300) self.l3 = nn.Linear(300, 1) # Q2 architecture self.l4 = nn.Linear(state_dim + action_dim, 400) self.l5 = nn.Linear(400, 300) self.l6 = nn.Linear(300, 1) def forward(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) x2 = F.relu(self.l4(xu)) x2 = F.relu(self.l5(x2)) x2 = self.l6(x2) return x1, x2 def get_Q(self, x, u): xu = torch.cat([x, u], 1) x1 = F.relu(self.l1(xu)) x1 = F.relu(self.l2(x1)) x1 = self.l3(x1) return x1" }, { "code": null, "e": 8532, "s": 8455, "text": "This is a standard replay buffer borrowed from the OpenAI baseline repo here" }, { "code": null, "e": 8672, "s": 8532, "text": "This is a standard step in the markov decision process of the environment. Here the agent will pick an action with exploration noise added." 
}, { "code": null, "e": 9015, "s": 8672, "text": "state = torch.FloatTensor(state.reshape(1, -1)).to(device) action = self.actor(state).cpu().data.numpy().flatten()if noise != 0: action = (action + np.random.normal(0, noise, size=self.env.action_space.shape[0])) return action.clip(self.env.action_space.low,self.env.action_space.high)" }, { "code": null, "e": 9172, "s": 9015, "text": "After taking an action we store the information about that time step in the replay buffer. These transitions will be used later while updating our networks." }, { "code": null, "e": 9238, "s": 9172, "text": "replay_buffer.add((self.obs, new_obs, action, reward, done_bool))" }, { "code": null, "e": 9619, "s": 9238, "text": "Once we have carried out a full time step through the environment, we train our model for several iterations. The first step in the update carried it involves the critic. This is one of the most important parts of the algorithm and where most of the TD3 additional features are implemented. First thing to do is to sample a mini batch of stored transitions from the replay buffer." }, { "code": null, "e": 9894, "s": 9619, "text": "# Sample mini batchs, s_, a, r, d = replay_buffer.sample(batch_size)state = torch.FloatTensor(s).to(device)action = torch.FloatTensor(s_).to(device)next_state = torch.FloatTensor(y).to(device)done = torch.FloatTensor(1 - d).to(device)reward = torch.FloatTensor(r).to(device)" }, { "code": null, "e": 10266, "s": 9894, "text": "Next we are going to select an action for each of the states that we have pulled in from our mini batch and apply the target policy smoothing. As described earlier, this is just picking an action with our target actor network and we add noise to that action that has been clipped in order to ensure that the noisy action isn’t too far away from the original action value." }, { "code": null, "e": 10535, "s": 10266, "text": "# Select action with the actor target and apply clipped noisenoise = torch.FloatTensor(u).data.normal_(0, policy_noise).to(device)noise = noise.clamp(-noise_clip, noise_clip)next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)" }, { "code": null, "e": 10769, "s": 10535, "text": "Next we need to compute our target Q values of the critic. This is where the double critic networks come into play. We are going to get the Q values for each target critic and then take the smallest of the two for our target Q value." }, { "code": null, "e": 10963, "s": 10769, "text": "# Compute the target Q valuetarget_Q1, target_Q2 = self.critic_target(next_state, next_action)target_Q = torch.min(target_Q1, target_Q2)target_Q = reward + (done * discount * target_Q).detach()" }, { "code": null, "e": 11188, "s": 10963, "text": "Finally we calculate the loss for the two current critic networks. This is done by getting the MSE of each current critic and the target Q value we just calculated. We then carry out the optimisation of the critic as normal." }, { "code": null, "e": 11471, "s": 11188, "text": "# Get current Q estimatescurrent_Q1, current_Q2 = self.critic(state, action)# Compute critic losscritic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)# Optimize the criticself.critic_optimizer.zero_grad()critic_loss.backward()self.critic_optimizer.step()" }, { "code": null, "e": 11684, "s": 11471, "text": "The actor is much simpler to update when compared to the critic. First we make sure that we are only updating the actor every d time steps. 
In our case and in the paper, the actor was updated every 2nd time step." }, { "code": null, "e": 12383, "s": 11684, "text": "# Delayed policy updatesif it % policy_freq == 0: # Compute actor loss actor_loss = -self.critic.Q1(state, self.actor(state)).mean() # Optimize the actor self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() # Update the frozen target models for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data) for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)" }, { "code": null, "e": 12620, "s": 12383, "text": "The actor’s loss function simply gets the mean of the -Q values from our critic network with our actor choosing what action to take given the mini batch of states. Just like before, we optimise our actor network through backpropagation." }, { "code": null, "e": 12748, "s": 12620, "text": "Finally we update our frozen target networks using a soft update. This is done along side the actor update and is also delayed." }, { "code": null, "e": 13146, "s": 12748, "text": "# Update the frozen target models for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()): target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)" }, { "code": null, "e": 13545, "s": 13146, "text": "The authors results in the original paper boasts excellent scores across a variety of benchmarking environments such as the MuJoco control suite. The results below show how TD3 outperforms almost all algorithms, including the recent SAC (Haarnoja et al., 2018) algorithm and PPO (Schulman et al., 2017) which is OpenAI’s go to algorithm used for ground breaking research such as their DOTA 2 agent." }, { "code": null, "e": 13739, "s": 13545, "text": "The algorithms used as benchmark included the OpenAI baseline implementation of DDPG, PPO, ACKTR (Wu et al., 2017) and TRPO (Schulman et al., 2015). SAC was implemented from the authors github." }, { "code": null, "e": 14270, "s": 13739, "text": "However, since the release of TD3, improvements have been made to SAC, as seen in Soft Actor-Critic Algorithms and Applications (Haarnoja et al., 2019). Here Haarnoja shows new results that outperform TD3 across the board. In order to make an unbiased review of the algorithm we can see benchmarking results from OpenAI:Spinning Up’s implementations of the main RL algorithms. As you can see in Fig 6, TD3 manages to outperform SAC in the Ant environment. However, SAC achieves a higher performance in the HalfCheetah environment." }, { "code": null, "e": 14539, "s": 14270, "text": "Below is the training results from my own implementation of TD3 tested on the Roboschool HalfCheetah environment. The graph above shows the agents average score over the last 100 episodes. As you can see the agent quickly learns to stand and then to walk successfully." }, { "code": null, "e": 14745, "s": 14539, "text": "Although it does briefly fall into a local optima, the agent is able to quickly recover, converging on an optimal policy after 500k time steps. The video below shows the results of the fully trained agent." 
}, { "code": null, "e": 15340, "s": 14745, "text": "Congratulations, we have covered everything you need to start implementing one of the best state-of-the-art reinforcement learning algorithms on the market! We have now gone through what TD3 is and explained the core mechanics that makes the algorithm perform so well. Not only that, but we have gone step by step through the algorithm and learned how to build the algorithm with pytorch. Finally we took a look at the results of the algorithm seen in the original paper and this articles implementation. I hope you found this article helpful and learned something about Reinforcement Learning!" }, { "code": null, "e": 15490, "s": 15340, "text": "[1] Fujimoto, S., van Hoof, H., and Meger, D. Addressing function approximation error in actor-critic methods. arXiv preprint arXiv:1802.09477, 2018." }, { "code": null, "e": 15691, "s": 15490, "text": "[2] Lillicrap, T. P., Hunt, J. J., Pritzel, A., Heess, N., Erez, T., Tassa, Y., Silver, D., and Wierstra, D. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015." }, { "code": null, "e": 15792, "s": 15691, "text": "[3] OpenAI — Spinng Up, 2018: https://spinningup.openai.com/en/latest/algorithms/td3.html#background" }, { "code": null, "e": 15958, "s": 15792, "text": "[4] Hado van Hasselt (2010). Double Q-learning. Advances in Neural Information Processing Systems 23 (NIPS 2010), Vancouver, British Columbia, Canada, pp. 2613–2622." }, { "code": null, "e": 16087, "s": 15958, "text": "[5] Van Hasselt, H., Guez, A., and Silver, D. Deep reinforcement learning with double q-learning. In AAAI, pp. 2094– 2100, 2016." }, { "code": null, "e": 16282, "s": 16087, "text": "[6] Haarnoja, T., Zhou, A., Abbeel, P., and Levine, S. Soft actor-critic: Off-policy maximum entropy deep reinforce- ment learning with a stochastic actor. arXiv preprint arXiv:1801.01290, 2018." }, { "code": null, "e": 16433, "s": 16282, "text": "[7] Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. Proximal policy optimization algorithms.arXiv preprint arXiv:1707.06347, 2017." }, { "code": null, "e": 16606, "s": 16433, "text": "[8] Schulman, J., Levine, S., Abbeel, P., Jordan, M., and Moritz, P. Trust region policy optimization. In International Conference on Machine Learning, pp. 1889–1897, 2015." }, { "code": null, "e": 16846, "s": 16606, "text": "[9] Wu, Y., Mansimov, E., Grosse, R. B., Liao, S., and Ba, J. Scalable trust-region method for deep reinforcement learning using kronecker-factored approximation. In Ad- vances in Neural Information Processing Systems, pp. 5285–5294, 2017." } ]
Functions in Power BI — (Table.FindText) | by Peter Hui | Towards Data Science
I’ve always been amazed at the simple find function. You can find anything that matches your criteria, even if your search item is hidden in different worksheets, open text fields or tables. Just imagine: you can search for anything that appears anywhere in your raw data, even if it is hidden somewhere in an open text field. Quite amazing! How is this helpful? It’s very helpful if you are looking for a keyword or a set of numbers that matters, but you just don’t know where to look. This number could be in an open-text field, it could be in the name field, etc. Once found, you can combine these search results and tell a story with them. Sometimes, doing a find is just out of curiosity, but sometimes you may be surprised by what you’ve found. I’ve used this Table.FindText function in a fraud detection exercise; it came in very handy because my tables were huge and I was looking for one single individual. There are other ways this simple function can be helpful. You can use it for a historical search of account records and to answer some questions you may have. Let’s get started! Let’s take a look at the Table.FindText function on Microsoft. In plain English, this is a function that takes a table as input and searches for an item within that table. It then returns a table with the rows that match your criteria. Let’s try this out. The first step is tricky: maybe you are lucky enough to have a column of tables already set up for you, but I have to convert my binary files into table format from a folder. It’s not that difficult, and I use this function:
(FOLDER_PATH, FILENAME) =>
let
    Source = Csv.Document(File.Contents(FOLDER_PATH & FILENAME)),
    #"Promoted Headers" = Table.PromoteHeaders(Source, [PromoteAllScalars=true])
in
    #"Promoted Headers"
If you connect Power BI using the folders option and have a list of binary files, you can convert these binary files into tables using the above function. To add in a new function, simply go to Power Query > New Blank Query > Advanced Editor > Paste in the script. The FOLDER_PATH and FILENAME parameters in this query are the columns Power Query prepared for you once you connect to it. Here is an example. Now I will apply the function using the Invoke function selection. If you are confused about how to use functions, here is an article to help you get started. Functions are great! After I apply the function, I get this output. Great! Now I have a column of tables to apply my Table.FindText(). I’ll remove the columns I don’t need and keep only the table column. All we need to do now is apply our function by creating a new column. I want to see if the word “Apple” occurs anywhere in my tables. Now when you drill down you can see “Apple” occurs in the table under the flavors column. If I move to another selection, you can see “Apple” occurs in the Stand column under “Appleton” as well. Now, to analyze your results, you can just combine all the files, and the combined result will contain only the rows that contain the word “Apple”. Yes, my example is quite simple and I only have 10 tables, but think of it this way: if you are able to understand how to build a simple prototype, you can use this across any number of tables you have in your regular work :) For readers who prototype outside Power BI, a pandas analogue of this pattern is sketched below.
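Here is a minimal pandas sketch of the same idea (search a keyword across a collection of tables and keep only the matching rows). It is an illustrative analogue of Table.FindText written in Python, not Power Query M, and the two small tables below are made-up stand-ins for the column of tables built above:
import pandas as pd

# Hypothetical stand-ins for the "column of tables" built above
tables = {
    'file1.csv': pd.DataFrame({'Stand': ['Appleton', 'Berryville'],
                               'Flavour': ['Apple', 'Grape']}),
    'file2.csv': pd.DataFrame({'Stand': ['Cherry Hill'],
                               'Flavour': ['Apple Cinnamon']}),
}

def find_text(df: pd.DataFrame, text: str) -> pd.DataFrame:
    # Keep rows where ANY column contains the search text
    mask = df.astype(str).apply(lambda col: col.str.contains(text, case=False)).any(axis=1)
    return df[mask]

# Combine the per-file matches, like combining the files in Power Query
matches = pd.concat({name: find_text(df, 'Apple') for name, df in tables.items()})
print(matches)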
A few takeaways: 1. Table.FindText() is a neat function. Be imaginative with it; you can find your search selection in any column and any table. 2. Remove some columns you don’t need in the beginning; it makes this run a lot faster! 3. The tricky part isn’t using the search function but converting the binaries into tables. There are other functions which you may find useful. These functions will be useful if you already know the keywords or numbers you are looking for and you know which columns to look in. They are Table.Contains(), Table.ContainsAll() and Table.ContainsAny(). I’ll be writing an article about these in the future as well. Stay safe, and thank you for reading!
[ { "code": null, "e": 357, "s": 172, "text": "I’ve always been amazed at the simple find function. You can find anything that matches your criteria, even if your search item is hidden in different work sheets, open text or tables." }, { "code": null, "e": 508, "s": 357, "text": "Just imagine, you can search for anything that appears anywhere in your raw data. Even if it is hidden somewhere in an open text field. Quite amazing!" }, { "code": null, "e": 739, "s": 508, "text": "How is this helpful? It’s very helpful if you are looking for a key word or a set of numbers that has importance, but you just don’t know where to look. This number can be in the open-text field, it could be in the name field etc." }, { "code": null, "e": 921, "s": 739, "text": "Once found, you can combine these search results and tell a story with it. Sometimes, doing a find is just out of curiosity, but sometimes you may be surprised by what you’ve found." }, { "code": null, "e": 1249, "s": 921, "text": "I’ve used this Table.FindText function as a fraud detection exercise, It came in very handy because my tables were huge and I was just looking for one single individual. There are other ways this simple function can be helpful. You can use it as a historical search of account records and to answer some questions you may have." }, { "code": null, "e": 1268, "s": 1249, "text": "Let’s get started!" }, { "code": null, "e": 1331, "s": 1268, "text": "Let’s take a look at the Table.FindText function on Microsoft." }, { "code": null, "e": 1505, "s": 1331, "text": "In plain English, this is a function that uses a table as input and searches for an item within that table. It then returns a table with the rows that matches your criteria." }, { "code": null, "e": 1525, "s": 1505, "text": "Let’s try this out." }, { "code": null, "e": 1694, "s": 1525, "text": "The first step is tricky — maybe you are lucky to have a column of tables already set up for you, but I have to convert my binary files into table format from a folder." }, { "code": null, "e": 1743, "s": 1694, "text": "It’s not that difficult and I use this function." }, { "code": null, "e": 1942, "s": 1743, "text": "(FOLDER_PATH , FILENAME ) =>let Source = Csv.Document(File.Contents(FOLDER_PATH&FILENAME)), #\"Promoted Headers\" = Table.PromoteHeaders(Source, [PromoteAllScalars=true])in #\"Promoted Headers\"" }, { "code": null, "e": 2097, "s": 1942, "text": "If you connect Power BI using the folders option and have a list of binary files, you can convert these binary files into tables using the above function." }, { "code": null, "e": 2206, "s": 2097, "text": "To add in a new function, simply go to Power Query > New Blank Query > Advanced Editor > Paste in the script" }, { "code": null, "e": 2329, "s": 2206, "text": "The FOLDER_PATH and FILENAME parameters in this query are the columns Power Query prepared for you once you connect to it." }, { "code": null, "e": 2349, "s": 2329, "text": "Here is an example." }, { "code": null, "e": 2537, "s": 2349, "text": "Now I will apply the function using the Invoke function selection. If you are confused about how to use functions, here is an article for you to help you get started. Functions are great!" }, { "code": null, "e": 2584, "s": 2537, "text": "After I apply the function, I get this output." }, { "code": null, "e": 2651, "s": 2584, "text": "Great! Now I have a column of tables to apply my Table.FindText()." 
}, { "code": null, "e": 2723, "s": 2651, "text": "I’ll remove the columns I don’t need and only reflect the table column." }, { "code": null, "e": 2857, "s": 2723, "text": "All we need to do now is apply our function by creating a new column. I want to see if the word “Apple” occurs anywhere in my tables." }, { "code": null, "e": 2947, "s": 2857, "text": "Now when you drill down you can see “Apple” occurs in the table under the flavors column." }, { "code": null, "e": 3055, "s": 2947, "text": "If I move it to another selection, you can see “Apple” occurs in the Stand column under “Appleton” as well." }, { "code": null, "e": 3187, "s": 3055, "text": "Now to analyze your results, you can just combine all the files and the files will contain rows that only contain the word “Apple”." }, { "code": null, "e": 3413, "s": 3187, "text": "Yes, my example is quite simple and I only have 10 tables, but think of it this way, if you are able to understand how to build a simple prototype, you can use this across any number of tables you have in your regular work :)" }, { "code": null, "e": 3538, "s": 3413, "text": "Table.FindText() is a neat function. Be imaginative with it, you can find your search selection in any column and any table." }, { "code": null, "e": 3663, "s": 3538, "text": "Table.FindText() is a neat function. Be imaginative with it, you can find your search selection in any column and any table." }, { "code": null, "e": 3751, "s": 3663, "text": "2. Remove some columns you don’t need in the beginning, it makes this run a lot faster!" }, { "code": null, "e": 3843, "s": 3751, "text": "3. The tricky part isn’t using the search function but converting the binaries into Tables." }, { "code": null, "e": 3896, "s": 3843, "text": "There are other functions which you may find useful." }, { "code": null, "e": 4096, "s": 3896, "text": "These functions will be useful if you already know the key words or numbers you are looking for and you know which columns to look. They are Table.Contains(), Table.ContainsAll() Table.ContainsAny()." }, { "code": null, "e": 4158, "s": 4096, "text": "I’ll be writing an article in the future about these as well." } ]
Convert Java Object to Json String using GSON - GeeksforGeeks
04 Apr, 2019
JSON stands for JavaScript Object Notation. It is a standard text-based format which represents structured data based on JavaScript object syntax. It is commonly used for transmitting data in web applications, and it is highly recommended for transmitting data between a server and a web application. To convert a Java object into JSON, the following methods can be used:
GSON: an open-source Java library which is used to serialize and deserialize Java objects to JSON.
Jackson API.
In this article, a Java object is converted into JSON using GSON. The steps to do this are as follows:
Add the Gson jar files (in the case of a Maven project, add the Gson dependency to the pom.xml file):
<dependency>
    <groupId>com.google.code.gson</groupId>
    <artifactId>gson</artifactId>
    <version>2.6.2</version>
</dependency>
Below is the screenshot showing this step.
Create a POJO (Plain Old Java Object) to be converted into JSON:
package GeeksforGeeks.Geeks;

public class Organisation {
    private String organisation_name;
    private String description;
    private int Employees;

    // Getters and setters
    public String getOrganisation_name() { return organisation_name; }
    public void setOrganisation_name(String organisation_name) { this.organisation_name = organisation_name; }
    public String getDescription() { return description; }
    public void setDescription(String description) { this.description = description; }
    public int getEmployees() { return Employees; }
    public void setEmployees(int employees) { Employees = employees; }

    // Overriding toString
    @Override
    public String toString() {
        return "Organisation [organisation_name=" + organisation_name + ", description=" + description + ", Employees=" + Employees + "]";
    }
}
Below is the screenshot showing this step.
Create a Java class for converting the Organisation object into JSON:
package GeeksforGeeks.Geeks;

import com.google.gson.Gson;

public class ObjectToJson {
    public static void main(String[] a) {
        /** Creating an object of Organisation **/
        Organisation org = new Organisation();

        /** Insert the data into the object **/
        org = getObjectData(org);
        System.out.println("Json representation of the Organisation object is ");

        // Below we create a new Gson object,
        // call its built-in toJson function
        // and pass it the Organisation object
        System.out.println(new Gson().toJson(org));
    }

    /** Get the data to be inserted into the object **/
    public static Organisation getObjectData(Organisation org) {
        /** Insert the data **/
        org.setOrganisation_name("GeeksforGeeks");
        org.setDescription("A computer Science portal for Geeks");
        org.setEmployees(2000);

        /** Return the object **/
        return org;
    }
}
Below is the screenshot showing this step.
Execute the process.
Output (the field names follow the POJO, so the count appears under "Employees"):
{
    "organisation_name": "GeeksforGeeks",
    "description": "A computer Science portal for Geeks",
    "Employees": 2000
}
Below is the screenshot showing the output on the console.
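For comparison, here is a minimal Python sketch of the same serialization idea using the standard json module; this is an illustrative aside, not part of the original GeeksforGeeks tutorial:
import json

class Organisation:
    def __init__(self, organisation_name, description, employees):
        self.organisation_name = organisation_name
        self.description = description
        self.Employees = employees

org = Organisation("GeeksforGeeks", "A computer Science portal for Geeks", 2000)
# vars(org) exposes the instance attributes as a dict, which json can serialize
print(json.dumps(vars(org)))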
[ { "code": null, "e": 24842, "s": 24814, "text": "\n04 Apr, 2019" }, { "code": null, "e": 25127, "s": 24842, "text": "JSON Stand for JavaScript Object Notation. It’s a standard text-based format which shows structured data based on JavaScript object syntax. It is commonly used for transmitting data in web applications. JSON is highly recommended to transmit data between a server and web application." }, { "code": null, "e": 25198, "s": 25127, "text": "To convert a Java object into JSON, the following methods can be used:" }, { "code": null, "e": 25303, "s": 25198, "text": "GSON: It is an open-source Java library which is used to serialize and deserialize Java objects to JSON." }, { "code": null, "e": 25315, "s": 25303, "text": "Jackson API" }, { "code": null, "e": 25383, "s": 25315, "text": "In this article, Java object is converted into the JSON using GSON:" }, { "code": null, "e": 25420, "s": 25383, "text": "The steps to do this are as follows:" }, { "code": null, "e": 28064, "s": 25420, "text": "Add jar files of Jackson (in case of Maven project add Gson dependencies in the pom.xml file)<dependency> <groupId>com.google.code.gson</groupId> <artifactId>gson</artifactId> <version>2.6.2</version> </dependency>Below is the screenshot showing this step:-Create a POJO (Plain Old Java Object) to be converted into JSONpackage GeeksforGeeks.Geeks; public class Organisation { private String organisation_name; private String description; private int Employees; // Calling getters and setters public String getOrganisation_name() { return organisation_name; } public void setOrganisation_name(String organisation_name) { this.organisation_name = organisation_name; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public int getEmployees() { return Employees; } public void setEmployees(int employees) { Employees = employees; } // Creating toString @Override public String toString() { return \"Organisation [organisation_name=\" + organisation_name + \", description=\" + description + \", Employees=\" + Employees + \"]\"; }}Below is the screenshot showing this step:-Create a Java class for converting the Organisation object into JSON.package GeeksforGeeks.Geeks; import com.google.gson.Gson; public class ObjectToJson { public static void main(String[] a) { /**Creating object of Organisation **/ Organisation org = new Organisation(); /** Insert the data into the object **/ org = getObjectData(org); System.out.println(\"Json represenatation\" + \" of Object organisation is \"); // In the below line // we have created a New Gson Object // and call it's toJson inbuid function // and passes the object of organisation System.out.println(new Gson().toJson(org)); } /** Get the data to be inserted into the object **/ public static Organisation getObjectData(Organisation org) { /**insert the data**/ org.setOrganisation_name(\"GeeksforGeeks\"); org.setDescription(\"A computer Science portal for Geeks\"); org.setEmployees(2000); /**Return Object**/ return org; }}Below is the screenshot showing this step:-Execute the processOutput JsonOutput\n{\n \"organisation_name\" : \"GeeksforGeeks\",\n \"description\" : \"A computer Science portal for Geeks\",\n \"Employee\" : \"2000\"\n}" }, { "code": null, "e": 28342, "s": 28064, "text": "Add jar files of Jackson (in case of Maven project add Gson dependencies in the pom.xml file)<dependency> <groupId>com.google.code.gson</groupId> <artifactId>gson</artifactId> <version>2.6.2</version> 
</dependency>Below is the screenshot showing this step:-" }, { "code": "<dependency> <groupId>com.google.code.gson</groupId> <artifactId>gson</artifactId> <version>2.6.2</version> </dependency>", "e": 28484, "s": 28342, "text": null }, { "code": null, "e": 28528, "s": 28484, "text": "Below is the screenshot showing this step:-" }, { "code": null, "e": 29626, "s": 28528, "text": "Create a POJO (Plain Old Java Object) to be converted into JSONpackage GeeksforGeeks.Geeks; public class Organisation { private String organisation_name; private String description; private int Employees; // Calling getters and setters public String getOrganisation_name() { return organisation_name; } public void setOrganisation_name(String organisation_name) { this.organisation_name = organisation_name; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public int getEmployees() { return Employees; } public void setEmployees(int employees) { Employees = employees; } // Creating toString @Override public String toString() { return \"Organisation [organisation_name=\" + organisation_name + \", description=\" + description + \", Employees=\" + Employees + \"]\"; }}Below is the screenshot showing this step:-" }, { "code": "package GeeksforGeeks.Geeks; public class Organisation { private String organisation_name; private String description; private int Employees; // Calling getters and setters public String getOrganisation_name() { return organisation_name; } public void setOrganisation_name(String organisation_name) { this.organisation_name = organisation_name; } public String getDescription() { return description; } public void setDescription(String description) { this.description = description; } public int getEmployees() { return Employees; } public void setEmployees(int employees) { Employees = employees; } // Creating toString @Override public String toString() { return \"Organisation [organisation_name=\" + organisation_name + \", description=\" + description + \", Employees=\" + Employees + \"]\"; }}", "e": 30618, "s": 29626, "text": null }, { "code": null, "e": 30662, "s": 30618, "text": "Below is the screenshot showing this step:-" }, { "code": null, "e": 31772, "s": 30662, "text": "Create a Java class for converting the Organisation object into JSON.package GeeksforGeeks.Geeks; import com.google.gson.Gson; public class ObjectToJson { public static void main(String[] a) { /**Creating object of Organisation **/ Organisation org = new Organisation(); /** Insert the data into the object **/ org = getObjectData(org); System.out.println(\"Json represenatation\" + \" of Object organisation is \"); // In the below line // we have created a New Gson Object // and call it's toJson inbuid function // and passes the object of organisation System.out.println(new Gson().toJson(org)); } /** Get the data to be inserted into the object **/ public static Organisation getObjectData(Organisation org) { /**insert the data**/ org.setOrganisation_name(\"GeeksforGeeks\"); org.setDescription(\"A computer Science portal for Geeks\"); org.setEmployees(2000); /**Return Object**/ return org; }}Below is the screenshot showing this step:-" }, { "code": "package GeeksforGeeks.Geeks; import com.google.gson.Gson; public class ObjectToJson { public static void main(String[] a) { /**Creating object of Organisation **/ Organisation org = new Organisation(); /** Insert the data into the object **/ org = getObjectData(org); System.out.println(\"Json 
represenatation\" + \" of Object organisation is \"); // In the below line // we have created a New Gson Object // and call it's toJson inbuid function // and passes the object of organisation System.out.println(new Gson().toJson(org)); } /** Get the data to be inserted into the object **/ public static Organisation getObjectData(Organisation org) { /**insert the data**/ org.setOrganisation_name(\"GeeksforGeeks\"); org.setDescription(\"A computer Science portal for Geeks\"); org.setEmployees(2000); /**Return Object**/ return org; }}", "e": 32770, "s": 31772, "text": null }, { "code": null, "e": 32814, "s": 32770, "text": "Below is the screenshot showing this step:-" }, { "code": null, "e": 32834, "s": 32814, "text": "Execute the process" }, { "code": null, "e": 32976, "s": 32834, "text": "Output JsonOutput\n{\n \"organisation_name\" : \"GeeksforGeeks\",\n \"description\" : \"A computer Science portal for Geeks\",\n \"Employee\" : \"2000\"\n}" }, { "code": null, "e": 33107, "s": 32976, "text": "Output\n{\n \"organisation_name\" : \"GeeksforGeeks\",\n \"description\" : \"A computer Science portal for Geeks\",\n \"Employee\" : \"2000\"\n}" }, { "code": null, "e": 33158, "s": 33107, "text": "Below is the screenshot showing Output on Console:" }, { "code": null, "e": 33179, "s": 33158, "text": "Java-String-Programs" }, { "code": null, "e": 33184, "s": 33179, "text": "JSON" }, { "code": null, "e": 33189, "s": 33184, "text": "Java" }, { "code": null, "e": 33194, "s": 33189, "text": "Java" }, { "code": null, "e": 33292, "s": 33194, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 33301, "s": 33292, "text": "Comments" }, { "code": null, "e": 33314, "s": 33301, "text": "Old Comments" }, { "code": null, "e": 33344, "s": 33314, "text": "HashMap in Java with Examples" }, { "code": null, "e": 33363, "s": 33344, "text": "Interfaces in Java" }, { "code": null, "e": 33414, "s": 33363, "text": "Object Oriented Programming (OOPs) Concept in Java" }, { "code": null, "e": 33432, "s": 33414, "text": "ArrayList in Java" }, { "code": null, "e": 33463, "s": 33432, "text": "How to iterate any Map in Java" }, { "code": null, "e": 33495, "s": 33463, "text": "Initialize an ArrayList in Java" }, { "code": null, "e": 33519, "s": 33495, "text": "Singleton Class in Java" }, { "code": null, "e": 33538, "s": 33519, "text": "Overriding in Java" }, { "code": null, "e": 33558, "s": 33538, "text": "Collections in Java" } ]
Java Program for Common Divisors of Two Numbers - GeeksforGeeks
04 Dec, 2018
Given two integer numbers, the task is to find the count of all common divisors of the given numbers. Every common divisor of a and b divides gcd(a, b), so the answer is simply the number of divisors of gcd(a, b); for example, gcd(12, 24) = 12, and 12 has exactly 6 divisors: 1, 2, 3, 4, 6 and 12.
Input : a = 12, b = 24
Output: 6
// all common divisors are 1, 2, 3,
// 4, 6 and 12

Input : a = 3, b = 17
Output: 1
// all common divisors are 1

Input : a = 20, b = 36
Output: 3
// all common divisors are 1, 2, 4
// Java implementation of the program
class Test {
    // method to calculate gcd of two numbers
    static int gcd(int a, int b)
    {
        if (a == 0)
            return b;
        return gcd(b % a, a);
    }

    // method to count all common divisors
    // of two given numbers
    // a, b --> input integer numbers
    static int commDiv(int a, int b)
    {
        // find gcd of a, b
        int n = gcd(a, b);

        // Count divisors of n.
        int result = 0;
        for (int i = 1; i <= Math.sqrt(n); i++) {
            // if 'i' is a factor of n
            if (n % i == 0) {
                // check if the paired divisors are equal
                if (n / i == i)
                    result += 1;
                else
                    result += 2;
            }
        }
        return result;
    }

    // Driver method
    public static void main(String args[])
    {
        int a = 12, b = 24;
        System.out.println(commDiv(a, b));
    }
}
Output:
6
Please refer to the complete article on Common Divisors of Two Numbers for more details!
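For comparison, here is a minimal Python sketch of the same approach (count the divisors of gcd(a, b)); it is an illustrative translation, not part of the original program:
import math

def common_divisors(a: int, b: int) -> int:
    # Every common divisor of a and b divides gcd(a, b),
    # so we only need to count the divisors of the gcd.
    n = math.gcd(a, b)
    count = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            # i and n // i are both divisors; count them once if equal
            count += 1 if i * i == n else 2
        i += 1
    return count

print(common_divisors(12, 24))  # 6
print(common_divisors(3, 17))   # 1
print(common_divisors(20, 36))  # 3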
[ { "code": null, "e": 26149, "s": 26121, "text": "\n04 Dec, 2018" }, { "code": null, "e": 26247, "s": 26149, "text": "Given two integer numbers, the task is to find the count of all common divisors of given numbers?" }, { "code": null, "e": 26464, "s": 26247, "text": "Input : a = 12, b = 24\nOutput: 6\n// all common divisors are 1, 2, 3, \n// 4, 6 and 12\n\nInput : a = 3, b = 17\nOutput: 1\n// all common divisors are 1\n\nInput : a = 20, b = 36\nOutput: 3\n// all common divisors are 1, 2, 4\n" }, { "code": "// Java implementation of program class Test { // method to calculate gcd of two numbers static int gcd(int a, int b) { if (a == 0) return b; return gcd(b % a, a); } // method to calculate all common divisors // of two given numbers // a, b --> input integer numbers static int commDiv(int a, int b) { // find gcd of a, b int n = gcd(a, b); // Count divisors of n. int result = 0; for (int i = 1; i <= Math.sqrt(n); i++) { // if 'i' is factor of n if (n % i == 0) { // check if divisors are equal if (n / i == i) result += 1; else result += 2; } } return result; } // Driver method public static void main(String args[]) { int a = 12, b = 24; System.out.println(commDiv(a, b)); }}", "e": 27398, "s": 26464, "text": null }, { "code": null, "e": 27401, "s": 27398, "text": "6\n" }, { "code": null, "e": 27483, "s": 27401, "text": "Please refer complete article on Common Divisors of Two Numbers for more details!" }, { "code": null, "e": 27492, "s": 27483, "text": "divisors" }, { "code": null, "e": 27500, "s": 27492, "text": "GCD-LCM" }, { "code": null, "e": 27514, "s": 27500, "text": "Java Programs" }, { "code": null, "e": 27527, "s": 27514, "text": "Mathematical" }, { "code": null, "e": 27540, "s": 27527, "text": "Mathematical" }, { "code": null, "e": 27638, "s": 27540, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 27670, "s": 27638, "text": "How to Iterate HashMap in Java?" }, { "code": null, "e": 27699, "s": 27670, "text": "Iterate through List in Java" }, { "code": null, "e": 27737, "s": 27699, "text": "Factory method design pattern in Java" }, { "code": null, "e": 27818, "s": 27737, "text": "Java program to count the occurrence of each character in a string using Hashmap" }, { "code": null, "e": 27875, "s": 27818, "text": "Java Program to Remove Duplicate Elements From the Array" }, { "code": null, "e": 27905, "s": 27875, "text": "Program for Fibonacci numbers" }, { "code": null, "e": 27920, "s": 27905, "text": "C++ Data Types" }, { "code": null, "e": 27980, "s": 27920, "text": "Write a program to print all permutations of a given string" }, { "code": null, "e": 28023, "s": 27980, "text": "Set in C++ Standard Template Library (STL)" } ]
Neighbourhood Segmentation and Clustering using Foursquare API | by Mathangpeddi | Towards Data Science
Whoever said money can’t buy happiness didn’t know where to go shopping. For many shoppers, visiting shopping malls is a great way to relax and enjoy themselves during weekends and holidays. Property developers are also taking advantage of this trend to build more shopping malls to cater to the demand. As a result, there are many shopping malls in the city of Hyderabad and many more are being built. Opening shopping malls allows property developers to earn consistent rental income. As with any business decision, opening a new shopping mall requires serious consideration and is a lot more complicated than it seems. In particular, the location of the shopping mall is one of the most important decisions that will determine whether the mall will be a success or a failure. The objective of this project is to analyze and select the best locations in the city of Hyderabad, India, to open a new shopping mall. This project is mainly focused on geospatial analysis of Hyderabad to understand which would be the best place to open a new mall. Using data science methodology and machine learning techniques like clustering, this project aims to answer the business question: in the city of Hyderabad, if a property developer is looking to open a new shopping mall, where would you recommend that they open it? To solve the problem, we will need the following data:
• A list of neighbourhoods in Hyderabad. This defines the scope of this project, which is confined to the city of Hyderabad, the capital city of Telangana in South India.
• Latitude and longitude coordinates of those neighbourhoods. These are required in order to plot the map and also to get the venue data.
• Venue data, particularly data related to shopping malls. We will use this data to perform clustering on the neighbourhoods.
This Wikipedia page lists 200 neighbourhoods in Hyderabad. I have used web scraping techniques to extract the data from the Wikipedia page, with the help of the Python requests and beautifulsoup packages. Then we can get the latitude and longitude coordinates of the neighbourhoods using the Python Geocoder package. After that, I have used the Foursquare API to get the venue data for those neighbourhoods. Foursquare provides many categories of venue data, and we are particularly interested in the Shopping Mall category in order to help us solve the business problem. This is a project that will make use of many data science skills, from web scraping (Wikipedia) and working with an API (Foursquare), to data cleaning, data wrangling, machine learning (k-means clustering) and map visualization (Folium). The Foursquare API allows application developers to interact with the Foursquare platform. The API itself is a RESTful set of addresses to which you can send requests, so there’s really nothing to download onto your server. Search for cafes in Hyderabad: cafes returned by Foursquare. On the left, you see the name, category and address of every cafe in Hyderabad. On the right, you see a map of the venues on the left. Exploring the cafes: if you click on the first one, The Coffee Cup, you are redirected to a page where you see all the information in the Foursquare dataset about The Coffee Cup. This includes its name, full address, working hours, tips and images that users have posted about the cafe. Similarly, you can explore the shopping malls in the city of Hyderabad; a minimal sketch of such an API request is shown below.
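As a taste of the API, the venues/explore endpoint can be queried with Python requests. This is a small illustrative sketch of the request pattern used later in this project; the empty CLIENT_ID and CLIENT_SECRET are placeholders for your own Foursquare credentials, and the coordinates are assumed approximate values for central Hyderabad:
import requests

CLIENT_ID = ''      # your Foursquare ID (placeholder)
CLIENT_SECRET = ''  # your Foursquare Secret (placeholder)
VERSION = '20180604'

# Ask Foursquare for shopping malls near the centre of Hyderabad
params = dict(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, v=VERSION,
              ll='17.38,78.46', query='Shopping Mall', limit=10)
results = requests.get('https://api.foursquare.com/v2/venues/explore',
                       params=params).json()['response']['groups'][0]['items']
for item in results:
    print(item['venue']['name'], '|', item['venue']['categories'][0]['name'])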
To explore Foursquare further, click here. Web Scraping: perform scraping using the Python requests and beautifulsoup packages to extract the list of neighbourhoods.
# Send the GET request
data = requests.get("https://en.wikipedia.org/wiki/Category:Neighbourhoods_in_Hyderabad,_India").text
# Parse data from the html into a beautifulsoup object
soup = BeautifulSoup(data, 'html.parser')
# Create a list to store neighbourhood data
neighborhoodList = []
# Append the data into the list
for row in soup.find_all("div", class_="mw-category")[0].findAll("li"):
    neighborhoodList.append(row.text)
# Create a new DataFrame from the list
kl_df = pd.DataFrame({"Neighborhood": neighborhoodList})
kl_df.head()
This is the data frame created after scraping the data. We need the geographical coordinates in the form of latitude and longitude in order to be able to use the Foursquare API. To do so, we will use the Geocoder package, which allows us to convert an address into geographical coordinates.
# Defining a function to get coordinates
def get_latlng(neighborhood):
    # Initialize your variable to None
    lat_lng_coords = None
    # Loop until you get the coordinates
    while(lat_lng_coords is None):
        g = geocoder.arcgis('{}, Hyderabad, India'.format(neighborhood))
        lat_lng_coords = g.latlng
    return lat_lng_coords
# Call the function to get the coordinates, storing them in a new list using a list comprehension
coords = [ get_latlng(neighborhood) for neighborhood in kl_df["Neighborhood"].tolist()]
We have obtained the latitude and longitude coordinates for all the places, so we need to merge the coordinates into the original data frame.
# Create temporary dataframe to populate the coordinates into Latitude and Longitude
df_coords = pd.DataFrame(coords, columns=['Latitude', 'Longitude'])
# Merge the coordinates into the original dataframe
kl_df['Latitude'] = df_coords['Latitude']
kl_df['Longitude'] = df_coords['Longitude']
print(kl_df.shape)
kl_df
(200, 3)
This is the combined data frame, which contains all the neighbourhoods along with their geographical coordinates.
# Getting the coordinates of Hyderabad
address = 'Hyderabad, India'
geolocator = Nominatim(user_agent="my-application")
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print('The geographical coordinate of Hyderabad, India {}, {}.'.format(latitude, longitude))
After gathering the data, we have to populate it into a pandas DataFrame and then visualize the neighbourhoods on a map using the Folium package.
map_kl = folium.Map(location=[latitude, longitude], zoom_start=11)
# Adding markers to map
for lat, lng, neighborhood in zip(kl_df['Latitude'], kl_df['Longitude'], kl_df['Neighborhood']):
    label = '{}'.format(neighborhood)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker([lat, lng], radius=5, popup=label, color='blue', fill=True, fill_color='#3186cc', fill_opacity=0.7).add_to(map_kl)
map_kl
Use the Foursquare API to explore the neighbourhoods:
CLIENT_ID = '' # your Foursquare ID
CLIENT_SECRET = '' # your Foursquare Secret
VERSION = '20180604'
radius = 2000
LIMIT = 100
venues = []
for lat, long, neighborhood in zip(kl_df['Latitude'], kl_df['Longitude'], kl_df['Neighborhood']):
    # Create the API request URL
    url = "https://api.foursquare.com/v2/venues/explore?client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}".format(CLIENT_ID, CLIENT_SECRET, VERSION, lat, long, radius, LIMIT)
    # Make the GET request
    results = requests.get(url).json()["response"]['groups'][0]['items']
    # Return only relevant information for each nearby venue
    for venue in results:
        venues.append((neighborhood, lat, long, venue['venue']['name'], venue['venue']['location']['lat'], venue['venue']['location']['lng'], venue['venue']['categories'][0]['name']))
After extracting all the venues, we have to convert the venues list into a new DataFrame. To know more about the Foursquare documentation, click here.
venues_df = pd.DataFrame(venues)
# Defining the column names
venues_df.columns = ['Neighborhood', 'Latitude', 'Longitude', 'VenueName', 'VenueLatitude', 'VenueLongitude', 'VenueCategory']
print(venues_df.shape)
venues_df.head()
# Let's check how many venues were returned for each neighbourhood
venues_df.groupby(["Neighborhood"]).count()
# Let's check how many unique categories can be curated from all the returned venues
print('There are {} unique categories.'.format(len(venues_df['VenueCategory'].unique())))
There are 174 unique categories.
# Displaying the first 50 venue category names
venues_df['VenueCategory'].unique()[:50]
Analyzing each neighbourhood: here we apply one-hot encoding to all the venues, so the number of columns becomes 175.
# One hot encoding
kl_onehot = pd.get_dummies(venues_df[['VenueCategory']], prefix="", prefix_sep="")
# Adding neighborhood column back to dataframe
kl_onehot['Neighborhoods'] = venues_df['Neighborhood']
# Moving neighbourhood column to the first column
fixed_columns = [kl_onehot.columns[-1]] + list(kl_onehot.columns[:-1])
kl_onehot = kl_onehot[fixed_columns]
print(kl_onehot.shape)
(6684, 175)
Next, let's group the rows by neighbourhood, taking the sum of the frequency of occurrence of each category.
kl_grouped = kl_onehot.groupby(["Neighborhoods"]).sum().reset_index()
print(kl_grouped.shape)
kl_grouped
(198, 175)
len(kl_grouped[kl_grouped["Shopping Mall"] > 0])
There are 66 neighbourhoods with at least one shopping mall in Hyderabad, which is quite high. So we have to select a suitable location where the number of shopping malls is low, so that our chances of successfully setting up a new shopping mall there are good.
# Creating a dataframe for Shopping Mall data only
kl_mall = kl_grouped[["Neighborhoods", "Shopping Mall"]]
Clustering the neighbourhoods: now we need to cluster all the neighbourhoods into different clusters. The results will allow us to identify which neighbourhoods have a higher concentration of shopping malls and which neighbourhoods have fewer shopping malls.
# Setting the number of clusters
kclusters = 3
kl_clustering = kl_mall.drop(["Neighborhoods"], axis=1)
# Run the k-means clustering algorithm
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(kl_clustering)
# Checking the cluster labels generated for the first rows of the dataframe
kmeans.labels_[0:10]

array([0, 0, 0, 1, 0, 1, 1, 0, 0, 0], dtype=int32)

We set the number of clusters to 3 and run the algorithm. After applying the K-Means clustering algorithm, every neighbourhood is assigned to one of the three clusters.

# Creating a new dataframe that includes the cluster label for each neighbourhood
kl_merged = kl_mall.copy()
# Add the clustering labels
kl_merged["Cluster Labels"] = kmeans.labels_
kl_merged.rename(columns={"Neighborhoods": "Neighborhood"}, inplace=True)
kl_merged.head(10)

Here the Shopping Mall column represents the number of shopping malls in that particular area, and Cluster Labels represents the cluster number (0, 1 or 2).

# Adding latitude and longitude values to the existing dataframe
kl_merged['Latitude'] = kl_df['Latitude']
kl_merged['Longitude'] = kl_df['Longitude']
# Sorting the results by Cluster Labels
kl_merged.sort_values(["Cluster Labels"], inplace=True)
kl_merged

Here we can clearly see all the places that belong to the first cluster (cluster 0), followed by clusters 1 and 2 in sorted order.

Visualizing the resulting clusters

# Creating the map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
# Setting the color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# Add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(kl_merged['Latitude'], kl_merged['Longitude'],
                                  kl_merged['Neighborhood'], kl_merged['Cluster Labels']):
    label = folium.Popup(str(poi) + ' - Cluster ' + str(cluster), parse_html=True)
    folium.CircleMarker(
        [lat, lon],
        radius=5,
        popup=label,
        color=rainbow[cluster-1],
        fill=True,
        fill_color=rainbow[cluster-1],
        fill_opacity=0.7).add_to(map_clusters)
map_clusters

Examining the clusters

len(kl_merged.loc[kl_merged['Cluster Labels'] == 0])
len(kl_merged.loc[kl_merged['Cluster Labels'] == 1])
len(kl_merged.loc[kl_merged['Cluster Labels'] == 2])

There are 132 places in cluster 0, the largest of the 3 clusters, and cluster 0 contains all the places that do not have a shopping mall. Cluster 1 contains 51 places, all of which contain exactly 1 shopping mall, while cluster 2 contains 15 places that each contain 2 or more shopping malls.
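The same per-cluster counts, together with the average number of malls per neighbourhood in each cluster, can be read off in one line with a groupby. A small sketch, again assuming the kl_merged frame above:

# Size of each cluster and mean number of malls per neighbourhood in it
print(kl_merged.groupby("Cluster Labels")["Shopping Mall"].agg(["count", "mean"]))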
The results from the K-means clustering show that we can categorize the neighbourhoods into 3 clusters based on the frequency of occurrence of "Shopping Mall":
• Cluster 0: Neighbourhoods with a very small number of shopping malls
• Cluster 1: Neighbourhoods with a moderate concentration of shopping malls
• Cluster 2: Neighbourhoods with a high concentration of shopping malls
We visualize the results of the clustering on the map with cluster 0 in red, cluster 1 in purple, and cluster 2 in mint green.

A good number of shopping malls are concentrated in the central area of Hyderabad city. Cluster 0 has very few malls; this represents a great opportunity and marks high-potential areas for new shopping malls, as there is little to no competition from existing malls. Meanwhile, shopping malls in cluster 2 are likely suffering from intense competition because of oversupply and a high concentration of shopping malls. Therefore, this project recommends that property developers capitalize on these findings and open new shopping malls in the cluster 0 neighbourhoods, which have little to no competition. Property developers with unique selling propositions that let them stand out from the competition can also open new shopping malls in the cluster 1 neighbourhoods, which have moderate competition. Lastly, property developers are advised to avoid the cluster 2 neighbourhoods, which already have a high concentration of shopping malls and suffer from intense competition.

We can apply the same approach to large datasets and easily distinguish venues by category. If there are, say, 400 restaurants in a city, we can easily segregate them into different clusters. The method works not only for shopping malls but also for restaurants, coffee shops, and much more. In this project we considered only one factor, the frequency of occurrence of shopping malls; other factors, such as the population and income of residents, could also influence the location decision for a new shopping mall.

When actually setting up a shopping mall, other factors also matter: the cost of rent, the surroundings of the mall, and the kind of people in the locality. If it is an affluent area, many people prefer going out, their lifestyle is different from others, and they tend to spend a lot. If we decide on a place where the competition is low, we still need to consider the people living in that locality. If the people in that area spend a lot and love going out, the mall is likely to succeed; if the people staying near the mall do not prefer going out, it is better to consider some other place with low competition and a good crowd.

You can always refer to my GitHub Repository for the entire project.

"We used to build civilizations. Now we build shopping malls." -Billy Bryson

Connect with me on LinkedIn

I hope you found the article insightful. I would love to hear feedback to improve it and come back with better content.

Thank you so much for reading!
How to delete different rows and columns of a matrix using a single line of code in R?
Deleting rows and columns of a matrix of any size is most easily done with single square brackets. To delete a row or column, we simply use its negative index; to delete several at once, we combine the negative indices with c(), as in c(-1,-2). If we want to delete more than one row or column in a sequence, a colon can be used, as in -2:-3.

> M<-matrix(1:100,nrow=10)
> M
      [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
 [1,]    1   11   21   31   41   51   61   71   81    91
 [2,]    2   12   22   32   42   52   62   72   82    92
 [3,]    3   13   23   33   43   53   63   73   83    93
 [4,]    4   14   24   34   44   54   64   74   84    94
 [5,]    5   15   25   35   45   55   65   75   85    95
 [6,]    6   16   26   36   46   56   66   76   86    96
 [7,]    7   17   27   37   47   57   67   77   87    97
 [8,]    8   18   28   38   48   58   68   78   88    98
 [9,]    9   19   29   39   49   59   69   79   89    99
[10,]   10   20   30   40   50   60   70   80   90   100

> M[-2:-3,-6:-7]
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
[1,]    1   11   21   31   41   71   81   91
[2,]    4   14   24   34   44   74   84   94
[3,]    5   15   25   35   45   75   85   95
[4,]    6   16   26   36   46   76   86   96
[5,]    7   17   27   37   47   77   87   97
[6,]    8   18   28   38   48   78   88   98
[7,]    9   19   29   39   49   79   89   99
[8,]   10   20   30   40   50   80   90  100

> M[-1:-3,-6:-10]
     [,1] [,2] [,3] [,4] [,5]
[1,]    4   14   24   34   44
[2,]    5   15   25   35   45
[3,]    6   16   26   36   46
[4,]    7   17   27   37   47
[5,]    8   18   28   38   48
[6,]    9   19   29   39   49
[7,]   10   20   30   40   50

> M[c(-1,-3),c(-6,-10)]
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
[1,]    2   12   22   32   42   62   72   82
[2,]    4   14   24   34   44   64   74   84
[3,]    5   15   25   35   45   65   75   85
[4,]    6   16   26   36   46   66   76   86
[5,]    7   17   27   37   47   67   77   87
[6,]    8   18   28   38   48   68   78   88
[7,]    9   19   29   39   49   69   79   89
[8,]   10   20   30   40   50   70   80   90

> M[c(-5,-8),c(-2,-9)]
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
[1,]    1   21   31   41   51   61   71   91
[2,]    2   22   32   42   52   62   72   92
[3,]    3   23   33   43   53   63   73   93
[4,]    4   24   34   44   54   64   74   94
[5,]    6   26   36   46   56   66   76   96
[6,]    7   27   37   47   57   67   77   97
[7,]    9   29   39   49   59   69   79   99
[8,]   10   30   40   50   60   70   80  100

> M[c(-5,-6:-8),c(-2,-9)]
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
[1,]    1   21   31   41   51   61   71   91
[2,]    2   22   32   42   52   62   72   92
[3,]    3   23   33   43   53   63   73   93
[4,]    4   24   34   44   54   64   74   94
[5,]    9   29   39   49   59   69   79   99
[6,]   10   30   40   50   60   70   80  100

> M[c(-5),c(-2,-5:-9)]
     [,1] [,2] [,3] [,4]
[1,]    1   21   31   91
[2,]    2   22   32   92
[3,]    3   23   33   93
[4,]    4   24   34   94
[5,]    6   26   36   96
[6,]    7   27   37   97
[7,]    8   28   38   98
[8,]    9   29   39   99
[9,]   10   30   40  100

> M[c(-2,-5),c(-2:-4,-5,-9)]
     [,1] [,2] [,3] [,4] [,5]
[1,]    1   51   61   71   91
[2,]    3   53   63   73   93
[3,]    4   54   64   74   94
[4,]    6   56   66   76   96
[5,]    7   57   67   77   97
[6,]    8   58   68   78   98
[7,]    9   59   69   79   99
[8,]   10   60   70   80  100
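One pitfall worth noting, not covered above: when the deletion leaves only a single row or column, R silently simplifies the result to a plain vector. Passing drop = FALSE keeps the matrix structure. A small illustrative sketch using the same M:

> M[-1:-9,-6:-10]
[1] 10 20 30 40 50
> M[-1:-9,-6:-10,drop=FALSE]
     [,1] [,2] [,3] [,4] [,5]
[1,]   10   20   30   40   50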
GATE | GATE CS 2019 | Question 56 - GeeksforGeeks
19 Feb, 2019

Suppose Y is distributed uniformly in the open interval (1, 6). The probability that the polynomial 3x2 + 6xY + 3Y + 6 has only real roots is (rounded off to 1 decimal place) _________.

Note: This was a Numerical Type question.
(A) 0.80
(B) 0.17
(C) 0.20
(D) 1

Answer: (A)

Explanation: For a quadratic polynomial ax2 + bx + c = 0, there are three conditions:

b2 - 4ac > 0 {real and distinct roots, i.e., two real roots}
b2 - 4ac = 0 {real and equal roots, i.e., only one real root}
b2 - 4ac < 0 {imaginary roots}

For the polynomial 3x2 + 6xY + 3Y + 6 to have only real roots,

⇒ b2 – 4ac ≥ 0
⇒ (6Y)2 – 4(3)(3Y + 6) ≥ 0
⇒ 36Y2 – 36Y – 72 ≥ 0
⇒ Y2 – Y – 2 ≥ 0
⇒ (Y – 2)(Y + 1) ≥ 0

Y ∈ (–∞, –1] ∪ [2, ∞), and since Y lies in (1, 6),
⇒ Y ∈ [2, 6)

Since Y is uniformly distributed in (1, 6), its probability density function is

f(y) = 1/5, 1 < y < 6

Hence, P(Y ∈ [2, 6)) = (6 – 2) × (1/5) = 4/5 = 0.8.

So, the answer is 0.8.
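As a quick sanity check of the 0.8 figure, we can estimate the probability numerically. The short Python sketch below, which is not part of the official solution, samples Y uniformly from (1, 6) and tests the discriminant condition directly:

import numpy as np

rng = np.random.default_rng(0)
y = rng.uniform(1, 6, size=1_000_000)        # Y ~ Uniform(1, 6)
disc = (6 * y)**2 - 4 * 3 * (3 * y + 6)      # b^2 - 4ac for 3x^2 + 6xY + (3Y + 6)
print((disc >= 0).mean())                    # prints approximately 0.8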
Construct a DataFrame in Pandas using string data in Python
Here we will see how we can construct a pandas DataFrame from string data. Pandas supports CSV files, but we can do the same with a plain string: we wrap the string in a file-like object, which lets pandas read it exactly as it would read a CSV file.

Here we use a string whose fields are separated by semicolons.

Let us see the following implementation to get a better understanding −

import pandas as pd
from io import StringIO
str_data = StringIO("""Id;Subject;Course_Fee
   10;DBMS;3000
   11;Basic Maths;2000
   12;Data Science;40000
   13;Algorithm;5000
   """)
df = pd.read_csv(str_data, sep=";")
print(df)

   Id       Subject  Course_Fee
0  10          DBMS        3000
1  11   Basic Maths        2000
2  12  Data Science       40000
3  13     Algorithm        5000
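The same trick also works in reverse: to_csv() with no path argument returns the frame as a delimited string, which is handy for round-tripping. A small sketch using the df built above:

# Serialize the DataFrame back into a semicolon-separated string
csv_string = df.to_csv(sep=";", index=False)
print(csv_string)
# Id;Subject;Course_Fee
# 10;DBMS;3000
# 11;Basic Maths;2000
# ...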
Spring Cloud - Service Discovery Using Eureka
Service discovery is one of the most critical parts when an application is deployed as microservices in the cloud. This is because, for any user operation, an application in a microservice architecture may require access to multiple services and communication amongst them.

Service discovery helps in tracking the service addresses and the ports where the service instances can be contacted. There are three components at play here −

Service Instances − Responsible for handling incoming requests for the service and responding to those requests.

Service Registry − Keeps track of the addresses of the service instances. The service instances are supposed to register their addresses with the service registry.

Service Client − The client which wants to place a request and get a response from the service instances. The service client contacts the service registry to get the addresses of the instances.

Apache Zookeeper, Eureka and Consul are a few well-known components used for service discovery. In this tutorial, we will use Eureka.

For setting up the Eureka server, we need to update the POM file to contain the following dependency −

<dependencies>
   <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-starter-netflix-eureka-server</artifactId>
   </dependency>
   <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-web</artifactId>
   </dependency>
</dependencies>

And then, annotate our Spring application class with the correct annotation, i.e., @EnableEurekaServer.

package com.tutorialspoint;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.netflix.eureka.server.EnableEurekaServer;

@SpringBootApplication
@EnableEurekaServer
public class RestaurantServiceRegistry{
   public static void main(String[] args) {
      SpringApplication.run(RestaurantServiceRegistry.class, args);
   }
}

We also need a properties file if we want to configure the registry and change its default values. Here are the changes we will make −

Update the port to 8900 rather than the default 8080.

In production, one would have more than one registry node for high availability; that is where we need peer-to-peer communication between registries. As we are executing this in standalone mode, we can simply set the client properties to false to avoid any errors.
So, this is how our application.yml file will look −

server:
   port: 8900
eureka:
   client:
      register-with-eureka: false
      fetch-registry: false

And that is it. Let us now compile the project and run the program by using the following command −

java -jar .\target\spring-cloud-eureka-server-1.0.jar

Now we can see the logs in the console −

...
2021-03-07 13:33:10.156 INFO 17660 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8900 (http)
2021-03-07 13:33:10.172 INFO 17660 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat]
...
2021-03-07 13:33:16.483 INFO 17660 --- [ main] DiscoveryClientOptionalArgsConfiguration : Eureka HTTP Client uses Jersey
...
2021-03-07 13:33:16.632 INFO 17660 --- [ main] o.s.c.n.eureka.InstanceInfoFactory : Setting initial instance status as: STARTING
2021-03-07 13:33:16.675 INFO 17660 --- [ main] com.netflix.discovery.DiscoveryClient : Initializing Eureka in region us-east-1
2021-03-07 13:33:16.675 INFO 17660 --- [ main] com.netflix.discovery.DiscoveryClient : Client configured to neither register nor query for data.
2021-03-07 13:33:16.686 INFO 17660 --- [ main] com.netflix.discovery.DiscoveryClient : Discovery Client initialized at timestamp 1615104196685 with initial instances count: 0
...
2021-03-07 13:33:16.873 INFO 17660 --- [ Thread-10] e.s.EurekaServerInitializerConfiguration : Started Eureka Server
2021-03-07 13:33:18.609 INFO 17660 --- [ main] c.t.RestaurantServiceRegistry : Started RestaurantServiceRegistry in 15.219 seconds (JVM running for 16.068)

As we can see from the above logs, the Eureka registry has been set up. We also get a dashboard for Eureka (see the following image) which is hosted on the server URL.

Now, we will set up the service instances which will register with the Eureka server. For setting up the Eureka client, we will use a separate Maven project and update the POM file to contain the following dependency −

<dependencies>
   <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
   </dependency>
   <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-web</artifactId>
   </dependency>
</dependencies>

And then, annotate our Spring application class with the correct annotation, i.e., @EnableDiscoveryClient.

package com.tutorialspoint;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;

@SpringBootApplication
@EnableDiscoveryClient
public class RestaurantCustomerService{
   public static void main(String[] args) {
      SpringApplication.run(RestaurantCustomerService.class, args);
   }
}

We also need a properties file if we want to configure the client and change its default values. Here are the changes we will make −

We will provide the port at runtime while executing the jar.

We will specify the URL at which the Eureka server is running.

So, this is how our application.yml file will look −

spring:
   application:
      name: customer-service
server:
   port: ${app_port}
eureka:
   client:
      serviceURL:
         defaultZone: http://localhost:8900/eureka
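One detail worth knowing before launching several copies on one machine: Spring Cloud derives each instance's registry id from the hostname, application name and port, which is why the logs that follow show ids such as localhost:customer-service:8081, and two instances on different ports do not collide. If explicit ids are ever needed, a commonly used snippet, shown here only as an illustrative sketch and not part of this tutorial's code, is −

eureka:
   instance:
      instance-id: ${spring.application.name}:${random.uuid}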
For execution, we will have two service instances running. To do that, let's open up two shells and then execute the following command on one shell −

java -Dapp_port=8081 -jar .\target\spring-cloud-eureka-client-1.0.jar

And execute the following on the other shell −

java -Dapp_port=8082 -jar .\target\spring-cloud-eureka-client-1.0.jar

Now we can see the logs in the console −

...
2021-03-07 15:22:22.474 INFO 16920 --- [ main] com.netflix.discovery.DiscoveryClient : Starting heartbeat executor: renew interval is: 30
2021-03-07 15:22:22.482 INFO 16920 --- [ main] c.n.discovery.InstanceInfoReplicator : InstanceInfoReplicator onDemand update allowed rate per min is 4
2021-03-07 15:22:22.490 INFO 16920 --- [ main] com.netflix.discovery.DiscoveryClient : Discovery Client initialized at timestamp 1615110742488 with initial instances count: 0
2021-03-07 15:22:22.492 INFO 16920 --- [ main] o.s.c.n.e.s.EurekaServiceRegistry : Registering application CUSTOMER-SERVICE with eureka with status UP
2021-03-07 15:22:22.494 INFO 16920 --- [ main] com.netflix.discovery.DiscoveryClient : Saw local status change event StatusChangeEvent [timestamp=1615110742494, current=UP, previous=STARTING]
2021-03-07 15:22:22.500 INFO 16920 --- [nfoReplicator-0] com.netflix.discovery.DiscoveryClient : DiscoveryClient_CUSTOMER-SERVICE/localhost:customer-service:8081: registering service...
2021-03-07 15:22:22.588 INFO 16920 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8081 (http) with context path ''
2021-03-07 15:22:22.591 INFO 16920 --- [ main] .s.c.n.e.s.EurekaAutoServiceRegistration : Updating port to 8081
2021-03-07 15:22:22.705 INFO 16920 --- [nfoReplicator-0] com.netflix.discovery.DiscoveryClient : DiscoveryClient_CUSTOMER-SERVICE/localhost:customer-service:8081 - registration status: 204
...

As we can see from the above logs, the client instance has been set up. We can also look at the Eureka server dashboard we saw earlier. As we can see, there are two instances of "CUSTOMER-SERVICE" running that the Eureka server is aware of −

Our Eureka server now has the registered client instances of the "Customer-Service" set up. We can now set up a consumer which can ask the Eureka server for the addresses of the "Customer-Service" nodes.

For this purpose, let us add a controller which can get the information from the Eureka registry. This controller will be added to our earlier Eureka client itself, i.e., the "Customer Service". Let us create the following controller in the client.

package com.tutorialspoint;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cloud.client.ServiceInstance;
import org.springframework.cloud.client.discovery.DiscoveryClient;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
class RestaurantCustomerInstancesController {
   @Autowired
   private DiscoveryClient eurekaConsumer;
   @RequestMapping("/customer_service_instances")
   List<ServiceInstance> getCustomerServiceInstances() {
      // Ask the registry for every instance registered under "customer-service"
      return eurekaConsumer.getInstances("customer-service");
   }
}

Note the DiscoveryClient interface, which is what the Spring framework provides to talk to the registry; we inject it here with @Autowired. Let us now recompile our Eureka clients. For execution, we will have two service instances running.
To do that, let's open up two shells and then execute the following command on one shell −

java -Dapp_port=8081 -jar .\target\spring-cloud-eureka-client-1.0.jar

And execute the following on the other shell −

java -Dapp_port=8082 -jar .\target\spring-cloud-eureka-client-1.0.jar

Once the clients on both shells have started, let us hit the http://localhost:8081/customer_service_instances endpoint we created in the controller. This URL displays complete information about both instances.

[
   {
      "scheme": "http",
      "host": "localhost",
      "port": 8081,
      "metadata": { "management.port": "8081" },
      "secure": false,
      "instanceInfo": {
         "instanceId": "localhost:customer-service:8081",
         "app": "CUSTOMER-SERVICE",
         "appGroupName": null,
         "ipAddr": "10.0.75.1",
         "sid": "na",
         "homePageUrl": "http://localhost:8081/",
         "statusPageUrl": "http://localhost:8081/actuator/info",
         "healthCheckUrl": "http://localhost:8081/actuator/health",
         "secureHealthCheckUrl": null,
         "vipAddress": "customer-service",
         "secureVipAddress": "customer-service",
         "countryId": 1,
         "dataCenterInfo": {
            "@class": "com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo",
            "name": "MyOwn"
         },
         "hostName": "localhost",
         "status": "UP",
         "overriddenStatus": "UNKNOWN",
         "leaseInfo": {
            "renewalIntervalInSecs": 30,
            "durationInSecs": 90,
            "registrationTimestamp": 1616667914313,
            "lastRenewalTimestamp": 1616667914313,
            "evictionTimestamp": 0,
            "serviceUpTimestamp": 1616667914313
         },
         "isCoordinatingDiscoveryServer": false,
         "metadata": { "management.port": "8081" },
         "lastUpdatedTimestamp": 1616667914313,
         "lastDirtyTimestamp": 1616667914162,
         "actionType": "ADDED",
         "asgName": null
      },
      "instanceId": "localhost:customer-service:8081",
      "serviceId": "CUSTOMER-SERVICE",
      "uri": "http://localhost:8081"
   },
   {
      "scheme": "http",
      "host": "localhost",
      "port": 8082,
      "metadata": { "management.port": "8082" },
      "secure": false,
      "instanceInfo": {
         "instanceId": "localhost:customer-service:8082",
         "app": "CUSTOMER-SERVICE",
         "appGroupName": null,
         "ipAddr": "10.0.75.1",
         "sid": "na",
         "homePageUrl": "http://localhost:8082/",
         "statusPageUrl": "http://localhost:8082/actuator/info",
         "healthCheckUrl": "http://localhost:8082/actuator/health",
         "secureHealthCheckUrl": null,
         "vipAddress": "customer-service",
         "secureVipAddress": "customer-service",
         "countryId": 1,
         "dataCenterInfo": {
            "@class": "com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo",
            "name": "MyOwn"
         },
         "hostName": "localhost",
         "status": "UP",
         "overriddenStatus": "UNKNOWN",
         "leaseInfo": {
            "renewalIntervalInSecs": 30,
            "durationInSecs": 90,
            "registrationTimestamp": 1616667913690,
            "lastRenewalTimestamp": 1616667913690,
            "evictionTimestamp": 0,
            "serviceUpTimestamp": 1616667913690
         },
         "isCoordinatingDiscoveryServer": false,
         "metadata": { "management.port": "8082" },
         "lastUpdatedTimestamp": 1616667913690,
         "lastDirtyTimestamp": 1616667913505,
         "actionType": "ADDED",
         "asgName": null
      },
      "instanceId": "localhost:customer-service:8082",
      "serviceId": "CUSTOMER-SERVICE",
      "uri": "http://localhost:8082"
   }
]

The Eureka server provides various APIs for the client instances or the services to talk to. A lot of these APIs are abstracted and can be used directly via the DiscoveryClient we defined and used earlier. Just to note, their HTTP counterparts also exist and can be useful for non-Spring-framework usage of Eureka.
In fact, the API that we used earlier, i.e., to get the information about the clients running "Customer-Service", can also be invoked via the browser using http://localhost:8900/eureka/apps/customer-service, as can be seen here −

<application>
   <name>CUSTOMER-SERVICE</name>
   <instance>
      <instanceId>localhost:customer-service:8082</instanceId>
      <hostName>localhost</hostName>
      <app>CUSTOMER-SERVICE</app>
      <ipAddr>10.0.75.1</ipAddr>
      <status>UP</status>
      <overriddenstatus>UNKNOWN</overriddenstatus>
      <port enabled="true">8082</port>
      <securePort enabled="false">443</securePort>
      <countryId>1</countryId>
      <dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
         <name>MyOwn</name>
      </dataCenterInfo>
      <leaseInfo>
         <renewalIntervalInSecs>30</renewalIntervalInSecs>
         <durationInSecs>90</durationInSecs>
         <registrationTimestamp>1616667913690</registrationTimestamp>
         <lastRenewalTimestamp>1616668273546</lastRenewalTimestamp>
         <evictionTimestamp>0</evictionTimestamp>
         <serviceUpTimestamp>1616667913690</serviceUpTimestamp>
      </leaseInfo>
      <metadata>
         <management.port>8082</management.port>
      </metadata>
      <homePageUrl>http://localhost:8082/</homePageUrl>
      <statusPageUrl>http://localhost:8082/actuator/info</statusPageUrl>
      <healthCheckUrl>http://localhost:8082/actuator/health</healthCheckUrl>
      <vipAddress>customer-service</vipAddress>
      <secureVipAddress>customer-service</secureVipAddress>
      <isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
      <lastUpdatedTimestamp>1616667913690</lastUpdatedTimestamp>
      <lastDirtyTimestamp>1616667913505</lastDirtyTimestamp>
      <actionType>ADDED</actionType>
   </instance>
   <instance>
      <instanceId>localhost:customer-service:8081</instanceId>
      <hostName>localhost</hostName>
      <app>CUSTOMER-SERVICE</app>
      <ipAddr>10.0.75.1</ipAddr>
      <status>UP</status>
      <overriddenstatus>UNKNOWN</overriddenstatus>
      <port enabled="true">8081</port>
      <securePort enabled="false">443</securePort>
      <countryId>1</countryId>
      <dataCenterInfo class="com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo">
         <name>MyOwn</name>
      </dataCenterInfo>
      <leaseInfo>
         <renewalIntervalInSecs>30</renewalIntervalInSecs>
         <durationInSecs>90</durationInSecs>
         <registrationTimestamp>1616667914313</registrationTimestamp>
         <lastRenewalTimestamp>1616668274227</lastRenewalTimestamp>
         <evictionTimestamp>0</evictionTimestamp>
         <serviceUpTimestamp>1616667914313</serviceUpTimestamp>
      </leaseInfo>
      <metadata>
         <management.port>8081</management.port>
      </metadata>
      <homePageUrl>http://localhost:8081/</homePageUrl>
      <statusPageUrl>http://localhost:8081/actuator/info</statusPageUrl>
      <healthCheckUrl>http://localhost:8081/actuator/health</healthCheckUrl>
      <vipAddress>customer-service</vipAddress>
      <secureVipAddress>customer-service</secureVipAddress>
      <isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>
      <lastUpdatedTimestamp>1616667914313</lastUpdatedTimestamp>
      <lastDirtyTimestamp>1616667914162</lastDirtyTimestamp>
      <actionType>ADDED</actionType>
   </instance>
</application>

A few other useful endpoints are, for example, GET /eureka/apps to list every registered application, DELETE /eureka/apps/{appName}/{instanceId} to de-register an instance, and PUT /eureka/apps/{appName}/{instanceId} to send a heartbeat for an instance. More details about the programmatic API can be found here: https://javadoc.io/doc/com.netflix.eureka/eureka-client/latest/index.html
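The registry returns XML by default, as seen above; the same endpoint also honours content negotiation, so JSON can be requested instead. A quick illustrative call, assuming curl is available and the registry is running on port 8900 −

curl -H "Accept: application/json" http://localhost:8900/eureka/apps/customer-service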
We have been using the Eureka server in standalone mode. However, in a production environment, we should ideally have more than one instance of the Eureka server running. This ensures that even if one machine goes down, the machine with the other Eureka server keeps running. Let us try to set up the Eureka server in high-availability mode. For our example, we will use two instances. For this, we will use the following application-ha.yml to start the Eureka server.

Points to note −

We have parameterized the port so that we can start multiple instances using the same config file.

We have added an address, again parameterized, to pass the other Eureka server's address.

We are naming the app as "Eureka-Server".

spring:
   application:
      name: eureka-server
server:
   port: ${app_port}
eureka:
   client:
      serviceURL:
         defaultZone: ${eureka_other_server_url}

Let us now recompile our Eureka server project. For execution, we will have two service instances running. To do that, let's open two shells and then execute the following command on one shell −

java -Dapp_port=8900 '-Deureka_other_server_url=http://localhost:8901/eureka' -jar .\target\spring-cloud-eureka-server-1.0.jar --spring.config.location=classpath:application-ha.yml

And execute the following on the other shell −

java -Dapp_port=8901 '-Deureka_other_server_url=http://localhost:8900/eureka' -jar .\target\spring-cloud-eureka-server-1.0.jar --spring.config.location=classpath:application-ha.yml

We can verify that the servers are up and running in high-availability mode by looking at the dashboard. For example, here is the dashboard on Eureka server 1 −

And here is the dashboard of Eureka server 2 −

So, as we see, we have two Eureka servers running and in sync. Even if one server goes down, the other server would keep functioning.

We can also update the service instance application to have the addresses of both Eureka servers by listing comma-separated server addresses.

spring:
   application:
      name: customer-service
server:
   port: ${app_port}
eureka:
   client:
      serviceURL:
         defaultZone: http://localhost:8900/eureka, http://localhost:8901/eureka

Eureka also supports the concept of zone awareness. Zone awareness is very useful when we have a cluster spread across different geographies. Say we get an incoming request for a service and we need to choose the server which should handle it: instead of sending the request to a far-away server, it is more fruitful to choose a server in the same zone. This is because network bottlenecks are very common in a distributed application, and we should avoid them.

Let us now try to set up Eureka clients and make them zone aware. For doing that, let us add application-za.yml.

spring:
   application:
      name: customer-service
server:
   port: ${app_port}
eureka:
   instance:
      metadataMap:
         zone: ${zone_name}
   client:
      serviceURL:
         defaultZone: http://localhost:8900/eureka

Let us now recompile our Eureka client project. For execution, we will have two service instances running. To do that, let's open two shells and then execute the following command on one shell −

java -Dapp_port=8080 -Dzone_name=USA -jar .\target\spring-cloud-eureka-client-1.0.jar --spring.config.location=classpath:application-za.yml

And execute the following on the other shell −

java -Dapp_port=8081 -Dzone_name=EU -jar .\target\spring-cloud-eureka-client-1.0.jar --spring.config.location=classpath:application-za.yml

We can go back to the dashboard to verify that the Eureka server registers the zones of the services. As seen in the following image, we now have two availability zones instead of the one we had been seeing till now.
Now, any client can look at the zone it is present in. Say the client is located in the USA zone; it would then prefer the service instances registered in the USA zone. It can get this zone information from the Eureka server.
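To make a client act on this zone information, the Netflix Eureka client exposes a zone-affinity flag. The fragment below is only an illustrative sketch of the commonly used properties for zone affinity and is not part of this tutorial's code −

eureka:
   instance:
      metadataMap:
         zone: USA
   client:
      prefer-same-zone-eureka: true

With prefer-same-zone-eureka enabled, a client tagged with zone USA will favour the same-zone registry and, through the zone metadata, same-zone instances, falling back to other zones only when none are available.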
Here are the changes we will make −" }, { "code": null, "e": 4545, "s": 4492, "text": "Update the port to 8900 rather than the default 8080" }, { "code": null, "e": 4598, "s": 4545, "text": "Update the port to 8900 rather than the default 8080" }, { "code": null, "e": 4869, "s": 4598, "text": "In production, one would have more than one node for registry for its high availability. That is where we need peer-to-peer communication between registries. As we are executing this in standalone mode, we can simply set client properties to false to avoid any errors." }, { "code": null, "e": 5140, "s": 4869, "text": "In production, one would have more than one node for registry for its high availability. That is where we need peer-to-peer communication between registries. As we are executing this in standalone mode, we can simply set client properties to false to avoid any errors." }, { "code": null, "e": 5198, "s": 5140, "text": "So, this is how our application.yml file will look like −" }, { "code": null, "e": 5301, "s": 5198, "text": "server:\n port: 8900\neureka:\n client:\n register-with-eureka: false\n fetch-registry: false" }, { "code": null, "e": 5401, "s": 5301, "text": "And that is it, let us now compile the project and run the program by using the following command −" }, { "code": null, "e": 5455, "s": 5401, "text": "java -jar .\\target\\spring-cloud-eureka-server-1.0.jar" }, { "code": null, "e": 5496, "s": 5455, "text": "Now we can see the logs in the console −" }, { "code": null, "e": 6731, "s": 5496, "text": "...\n2021-03-07 13:33:10.156 INFO 17660 --- [ main]\no.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8900\n(http)\n2021-03-07 13:33:10.172 INFO 17660 --- [ main]\no.apache.catalina.core.StandardService : Starting service [Tomcat]\n...\n2021-03-07 13:33:16.483 INFO 17660 --- [ main]\nDiscoveryClientOptionalArgsConfiguration : Eureka HTTP Client uses Jersey\n...\n2021-03-07 13:33:16.632 INFO 17660 --- [ main]\no.s.c.n.eureka.InstanceInfoFactory : Setting initial instance status as:\nSTARTING\n2021-03-07 13:33:16.675 INFO 17660 --- [ main]\ncom.netflix.discovery.DiscoveryClient : Initializing Eureka in region useast-\n1\n2021-03-07 13:33:16.675 INFO 17660 --- [ main]\ncom.netflix.discovery.DiscoveryClient : Client configured to neither register\nnor query for data.\n2021-03-07 13:33:16.686 INFO 17660 --- [ main]\ncom.netflix.discovery.DiscoveryClient : Discovery Client initialized at\ntimestamp 1615104196685 with initial instances count: 0\n...\n2021-03-07 13:33:16.873 INFO 17660 --- [ Thread-10]\ne.s.EurekaServerInitializerConfiguration : Started Eureka Server\n2021-03-07 13:33:18.609 INFO 17660 --- [ main]\nc.t.RestaurantServiceRegistry : Started RestaurantServiceRegistry in\n15.219 seconds (JVM running for 16.068)\n" }, { "code": null, "e": 6898, "s": 6731, "text": "As we see from the above logs, the Eureka registry has been set up. We also get a dashboard for Eureka (see the following image) which is hosted on the server URL." }, { "code": null, "e": 7112, "s": 6898, "text": "Now, we will set up the service instances which would register to the Eureka server. 
For setting up Eureka Client, we will use a separate Maven project and update the POM file to contain the following dependency −" }, { "code": null, "e": 7439, "s": 7112, "text": "<dependencies>\n <dependency>\n <groupId>org.springframework.cloud</groupId>\n <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>\n </dependency>\n <dependency>\n <groupId>org.springframework.boot</groupId>\n <artifactId>spring-boot-starter-web</artifactId>\n </dependency>\n</dependencies>" }, { "code": null, "e": 7544, "s": 7439, "text": "And then, annotate our Spring application class with the correct annotation, i.e.,@EnableDiscoveryClient" }, { "code": null, "e": 7970, "s": 7544, "text": "package com.tutorialspoint;\nimport org.springframework.boot.SpringApplication;\nimport org.springframework.boot.autoconfigure.SpringBootApplication;\nimport org.springframework.cloud.client.discovery.EnableDiscoveryClient;\n@SpringBootApplication\n@EnableDiscoveryClient\npublic class RestaurantCustomerService{\n public static void main(String[] args) {\n SpringApplication.run(RestaurantCustomerService.class, args);\n }\n}" }, { "code": null, "e": 8103, "s": 7970, "text": "We also need a properties file if we want to configure the client and change its default values. Here are the changes we will make −" }, { "code": null, "e": 8163, "s": 8103, "text": "We will provide the port at runtime while jar at execution." }, { "code": null, "e": 8223, "s": 8163, "text": "We will provide the port at runtime while jar at execution." }, { "code": null, "e": 8282, "s": 8223, "text": "We will specify the URL at which Eureka server is running." }, { "code": null, "e": 8341, "s": 8282, "text": "We will specify the URL at which Eureka server is running." }, { "code": null, "e": 8397, "s": 8341, "text": "So, this is how our application.yml file will look like" }, { "code": null, "e": 8567, "s": 8397, "text": "spring:\n application:\n name: customer-service\nserver:\n port: ${app_port}\neureka:\n client:\n serviceURL:\n defaultZone: http://localhost:8900/eureka" }, { "code": null, "e": 8717, "s": 8567, "text": "For execution, we will have two service instances running. 
To do that, let's open up two shells and then execute the following command on one shell −" }, { "code": null, "e": 8788, "s": 8717, "text": "java -Dapp_port=8081 -jar .\\target\\spring-cloud-eureka-client-1.0.jar\n" }, { "code": null, "e": 8835, "s": 8788, "text": "And execute the following on the other shell −" }, { "code": null, "e": 8906, "s": 8835, "text": "java -Dapp_port=8082 -jar .\\target\\spring-cloud-eureka-client-1.0.jar\n" }, { "code": null, "e": 8947, "s": 8906, "text": "Now we can see the logs in the console −" }, { "code": null, "e": 10398, "s": 8947, "text": "...\n2021-03-07 15:22:22.474 INFO 16920 --- [ main]\ncom.netflix.discovery.DiscoveryClient : Starting heartbeat executor: renew\ninterval is: 30\n2021-03-07 15:22:22.482 INFO 16920 --- [ main]\nc.n.discovery.InstanceInfoReplicator : InstanceInfoReplicator onDemand\nupdate allowed rate per min is 4\n2021-03-07 15:22:22.490 INFO 16920 --- [ main]\ncom.netflix.discovery.DiscoveryClient : Discovery Client initialized at\ntimestamp 1615110742488 with initial instances count: 0\n2021-03-07 15:22:22.492 INFO 16920 --- [ main]\no.s.c.n.e.s.EurekaServiceRegistry : Registering application CUSTOMERSERVICE\nwith eureka with status UP\n2021-03-07 15:22:22.494 INFO 16920 --- [ main]\ncom.netflix.discovery.DiscoveryClient : Saw local status change event\nStatusChangeEvent [timestamp=1615110742494, current=UP, previous=STARTING]\n2021-03-07 15:22:22.500 INFO 16920 --- [nfoReplicator-0]\ncom.netflix.discovery.DiscoveryClient : DiscoveryClient_CUSTOMERSERVICE/\nlocalhost:customer-service:8081: registering service...\n2021-03-07 15:22:22.588 INFO 16920 --- [ main]\no.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8081\n(http) with context path ''\n2021-03-07 15:22:22.591 INFO 16920 --- [ main]\n.s.c.n.e.s.EurekaAutoServiceRegistration : Updating port to 8081\n2021-03-07 15:22:22.705 INFO 16920 --- [nfoReplicator-0]\ncom.netflix.discovery.DiscoveryClient : DiscoveryClient_CUSTOMERSERVICE/\nlocalhost:customer-service:8081 - registration status: 204\n...\n" }, { "code": null, "e": 10631, "s": 10398, "text": "As we see from above logs that the client instance has been setup. We can also look at the Eureka Server dashboard we saw earlier. As we see, there are two instances of “CUSTOMER-SERVICE” running that the Eureka server is aware of −" }, { "code": null, "e": 10829, "s": 10631, "text": "Our Eureka server has got the registered client instances of the “Customer-Service” setup. We can now setup the Consumer which can ask the Eureka Server the address of the “Customer-Service” nodes." }, { "code": null, "e": 11074, "s": 10829, "text": "For this purpose, let us add a controller which can get the information from the Eureka Registry. This controller will be added to our earlier Eureka Client itself, i.e., “Customer Service”. Let us create the following controller to the client." 
}, { "code": null, "e": 11607, "s": 11074, "text": "package com.tutorialspoint;\nimport java.util.List;\nimport org.springframework.beans.factory.annotation.Autowired;\nimport org.springframework.cloud.client.ServiceInstance;\nimport org.springframework.cloud.client.discovery.DiscoveryClient;\nimport org.springframework.web.bind.annotation.RequestMapping;\nimport org.springframework.web.bind.annotation.RestController;\n@RestController\nclass RestaurantCustomerInstancesController {\n @Autowired\n private DiscoveryClient eurekaConsumer;\n @RequestMapping(\"/customer_service_instances\")" }, { "code": null, "e": 11709, "s": 11607, "text": "Note the annotation @DiscoveryClient which is what Spring framework provides to talk to the registry." }, { "code": null, "e": 11900, "s": 11709, "text": "Let us now recompile our Eureka clients. For execution, we will have two service instances running. To do that, let's open up two shells and then execute the following command on one shell −" }, { "code": null, "e": 11971, "s": 11900, "text": "java -Dapp_port=8081 -jar .\\target\\spring-cloud-eureka-client-1.0.jar\n" }, { "code": null, "e": 12018, "s": 11971, "text": "And execute the following on the other shell −" }, { "code": null, "e": 12089, "s": 12018, "text": "java -Dapp_port=8082 -jar .\\target\\spring-cloud-eureka-client-1.0.jar\n" }, { "code": null, "e": 12297, "s": 12089, "text": "Once the client on both shells have started, let us now hit the http://localhost:8081/customer_service_instances we created in the controller. This URL displays complete information about both the instances." }, { "code": null, "e": 15531, "s": 12297, "text": "[\n {\n \"scheme\": \"http\",\n \"host\": \"localhost\",\n \"port\": 8081,\n \"metadata\": {\n \"management.port\": \"8081\"\n },\n \"secure\": false,\n \"instanceInfo\": {\n \"instanceId\": \"localhost:customer-service:8081\",\n \"app\": \"CUSTOMER-SERVICE\",\n \"appGroupName\": null,\n \"ipAddr\": \"10.0.75.1\",\n \"sid\": \"na\",\n \"homePageUrl\": \"http://localhost:8081/\",\n \"statusPageUrl\": \"http://localhost:8081/actuator/info\",\n \"healthCheckUrl\": \"http://localhost:8081/actuator/health\",\n \"secureHealthCheckUrl\": null,\n \"vipAddress\": \"customer-service\",\n \"secureVipAddress\": \"customer-service\",\n \"countryId\": 1,\n \"dataCenterInfo\": {\n \"@class\": \"com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo\",\n \"name\": \"MyOwn\"\n },\n \"hostName\": \"localhost\",\n \"status\": \"UP\",\n \"overriddenStatus\": \"UNKNOWN\",\n \"leaseInfo\": {\n \"renewalIntervalInSecs\": 30,\n \"durationInSecs\": 90,\n \"registrationTimestamp\": 1616667914313,\n \"lastRenewalTimestamp\": 1616667914313,\n \"evictionTimestamp\": 0,\n \"serviceUpTimestamp\": 1616667914313\n },\n \"isCoordinatingDiscoveryServer\": false,\n \"metadata\": {\n \"management.port\": \"8081\"\n },\n \"lastUpdatedTimestamp\": 1616667914313,\n \"lastDirtyTimestamp\": 1616667914162,\n \"actionType\": \"ADDED\",\n \"asgName\": null\n },\n \"instanceId\": \"localhost:customer-service:8081\",\n \"serviceId\": \"CUSTOMER-SERVICE\",\n \"uri\": \"http://localhost:8081\"\n },\n {\n \"scheme\": \"http\",\n \"host\": \"localhost\",\n \"port\": 8082,\n \"metadata\": {\n \"management.port\": \"8082\"\n },\n \"secure\": false,\n \"instanceInfo\": {\n \"instanceId\": \"localhost:customer-service:8082\",\n \"app\": \"CUSTOMER-SERVICE\",\n \"appGroupName\": null,\n \"ipAddr\": \"10.0.75.1\",\n \"sid\": \"na\",\n \"homePageUrl\": \"http://localhost:8082/\",\n \"statusPageUrl\": 
\"http://localhost:8082/actuator/info\",\n \"healthCheckUrl\": \"http://localhost:8082/actuator/health\",\n \"secureHealthCheckUrl\": null,\n \"vipAddress\": \"customer-service\",\n \"secureVipAddress\": \"customer-service\",\n \"countryId\": 1,\n \"dataCenterInfo\": {\n \"@class\": \"com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo\",\n \"name\": \"MyOwn\"\n },\n \"hostName\": \"localhost\",\n \"status\": \"UP\",\n \"overriddenStatus\": \"UNKNOWN\",\n \"leaseInfo\": {\n \"renewalIntervalInSecs\": 30,\n \"durationInSecs\": 90,\n \"registrationTimestamp\": 1616667913690,\n \"lastRenewalTimestamp\": 1616667913690,\n \"evictionTimestamp\": 0,\n \"serviceUpTimestamp\": 1616667913690\n },\n \"isCoordinatingDiscoveryServer\": false,\n \"metadata\": {\n \"management.port\": \"8082\"\n },\n \"lastUpdatedTimestamp\": 1616667913690,\n \"lastDirtyTimestamp\": 1616667913505,\n \"actionType\": \"ADDED\",\n \"asgName\": null\n },\n \"instanceId\": \"localhost:customer-service:8082\",\n \"serviceId\": \"CUSTOMER-SERVICE\",\n \"uri\": \"http://localhost:8082\"\n }\n]" }, { "code": null, "e": 15840, "s": 15531, "text": "Eureka Server provides various APIs for the client instances or the services to talk to. A lot of these APIs are abstracted and can be used directly with @DiscoveryClient we defined and used earlier. Just to note, their HTTP counterparts also exist and can be useful for Non-Spring framework usage of Eureka." }, { "code": null, "e": 16067, "s": 15840, "text": "In fact, the API that we used earlier, i.e., to get the information about the client running “Customer_Service” can also be invoked via the browser using http://localhost:8900/eureka/apps/customer-service as can be seen here −" }, { "code": null, "e": 19521, "s": 16067, "text": "<application slick-uniqueid=\"3\">\n <div>\n <a id=\"slick_uniqueid\"/>\n </div>\n <name>CUSTOMER-SERVICE</name>\n <instance>\n <instanceId>localhost:customer-service:8082</instanceId>\n <hostName>localhost</hostName>\n <app>CUSTOMER-SERVICE</app>\n <ipAddr>10.0.75.1</ipAddr>\n <status>UP</status>\n <overriddenstatus>UNKNOWN</overriddenstatus>\n <port enabled=\"true\">8082</port>\n <securePort enabled=\"false\">443</securePort>\n <countryId>1</countryId>\n <dataCenterInfo\nclass=\"com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo\">\n <name>MyOwn</name>\n </dataCenterInfo>\n <leaseInfo>\n <renewalIntervalInSecs>30</renewalIntervalInSecs>\n <durationInSecs>90</durationInSecs>\n <registrationTimestamp>1616667913690</registrationTimestamp>\n <lastRenewalTimestamp>1616668273546</lastRenewalTimestamp>\n <evictionTimestamp>0</evictionTimestamp>\n <serviceUpTimestamp>1616667913690</serviceUpTimestamp>\n </leaseInfo>\n <metadata>\n <management.port>8082</management.port>\n </metadata>\n <homePageUrl>http://localhost:8082/</homePageUrl>\n <statusPageUrl>http://localhost:8082/actuator/info</statusPageUrl>\n <healthCheckUrl>http://localhost:8082/actuator/health</healthCheckUrl>\n <vipAddress>customer-service</vipAddress>\n <secureVipAddress>customer-service</secureVipAddress>\n <isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>\n <lastUpdatedTimestamp>1616667913690</lastUpdatedTimestamp>\n <lastDirtyTimestamp>1616667913505</lastDirtyTimestamp>\n <actionType>ADDED</actionType>\n </instance>\n <instance>\n <instanceId>localhost:customer-service:8081</instanceId>\n <hostName>localhost</hostName>\n <app>CUSTOMER-SERVICE</app>\n <ipAddr>10.0.75.1</ipAddr>\n <status>UP</status>\n <overriddenstatus>UNKNOWN</overriddenstatus>\n <port 
enabled=\"true\">8081</port>\n <securePort enabled=\"false\">443</securePort>\n <countryId>1</countryId>\n <dataCenterInfo\nclass=\"com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo\">\n <name>MyOwn</name>\n </dataCenterInfo>\n <leaseInfo>\n <renewalIntervalInSecs>30</renewalIntervalInSecs>\n <durationInSecs>90</durationInSecs>\n <registrationTimestamp>1616667914313</registrationTimestamp>\n <lastRenewalTimestamp>1616668274227</lastRenewalTimestamp>\n <evictionTimestamp>0</evictionTimestamp>\n <serviceUpTimestamp>1616667914313</serviceUpTimestamp>\n </leaseInfo>\n <metadata>\n <management.port>8081</management.port>\n </metadata>\n <homePageUrl>http://localhost:8081/</homePageUrl>\n <statusPageUrl>http://localhost:8081/actuator/info</statusPageUrl>\n <healthCheckUrl>http://localhost:8081/actuator/health</healthCheckUrl>\n <vipAddress>customer-service</vipAddress>\n <secureVipAddress>customer-service</secureVipAddress>\n <isCoordinatingDiscoveryServer>false</isCoordinatingDiscoveryServer>\n <lastUpdatedTimestamp>1616667914313</lastUpdatedTimestamp>\n <lastDirtyTimestamp>1616667914162</lastDirtyTimestamp>\n <actionType>ADDED</actionType>\n </instance>\n</application>" }, { "code": null, "e": 19549, "s": 19521, "text": "Few other useful APIs are −" }, { "code": null, "e": 19681, "s": 19549, "text": "More details about the programmatic API can be found here https://javadoc.io/doc/com.netflix.eureka/eureka-client/latest/index.html" }, { "code": null, "e": 19954, "s": 19681, "text": "We have been using Eureka server in standalone mode. However, in a Production environment, we should ideally have more than one instance of the Eureka server running. This ensures that even if one machine goes down, the machine with another Eureka server keeps on running." }, { "code": null, "e": 20141, "s": 19954, "text": "Let us try to setup Eureka server in high-availability mode. For our example, we will use two instances.For this, we will use the following application-ha.yml to start the Eureka server." }, { "code": null, "e": 20158, "s": 20141, "text": "Points to note −" }, { "code": null, "e": 20257, "s": 20158, "text": "We have parameterized the port so that we can start multiple instances using same the config file." }, { "code": null, "e": 20356, "s": 20257, "text": "We have parameterized the port so that we can start multiple instances using same the config file." }, { "code": null, "e": 20435, "s": 20356, "text": "We have added address, again parameterized, to pass the Eureka server address." }, { "code": null, "e": 20514, "s": 20435, "text": "We have added address, again parameterized, to pass the Eureka server address." }, { "code": null, "e": 20556, "s": 20514, "text": "We are naming the app as “Eureka-Server”." }, { "code": null, "e": 20598, "s": 20556, "text": "We are naming the app as “Eureka-Server”." }, { "code": null, "e": 20763, "s": 20598, "text": "spring:\n application:\n name: eureka-server\nserver:\n port: ${app_port}\neureka:\n client:\n serviceURL:\n defaultZone: ${eureka_other_server_url}" }, { "code": null, "e": 20958, "s": 20763, "text": "Let us now recompile our Eureka server project. For execution, we will have two service instances running. 
To do that, let's open two shells and then execute the following command on one shell −" }, { "code": null, "e": 21141, "s": 20958, "text": "java -Dapp_port=8900 '-Deureka_other_server_url=http://localhost:8901/eureka' -\njar .\\target\\spring-cloud-eureka-server-1.0.jar --\nspring.config.location=classpath:application-ha.yml" }, { "code": null, "e": 21188, "s": 21141, "text": "And execute the following on the other shell −" }, { "code": null, "e": 21371, "s": 21188, "text": "java -Dapp_port=8901 '-Deureka_other_server_url=http://localhost:8900/eureka' -\njar .\\target\\spring-cloud-eureka-server-1.0.jar --\nspring.config.location=classpath:application-ha.yml" }, { "code": null, "e": 21532, "s": 21371, "text": "We can verify that the servers are up and running in high-availability mode by looking at the dashboard. For example, here is the dashboard on Eureka server 1 −" }, { "code": null, "e": 21579, "s": 21532, "text": "And here is the dashboard of Eureka server 2 −" }, { "code": null, "e": 21713, "s": 21579, "text": "So, as we see, we have two Eureka servers running and in sync. Even if one server goes down, the other server would keep functioning." }, { "code": null, "e": 21851, "s": 21713, "text": "We can also update the service instance application to have addresses for both Eureka servers by having comma-separated server addresses." }, { "code": null, "e": 22051, "s": 21851, "text": "spring:\n application:\n name: customer-service\nserver:\n port: ${app_port}\neureka:\n client:\n serviceURL:\n defaultZone: http://localhost:8900/eureka,\nhttp://localhost:8901/eureka" }, { "code": null, "e": 22568, "s": 22051, "text": "Eureka also supports the concept of zone awareness. Zone awareness as a concept is very useful when we have a cluster across different geographies. Say, we get an incoming request for a service and we need to choose the server which should service the request. Instead of sending and processing that request on a server which is located far, it is more fruitful to choose a server which is in the same zone. This is because, network bottleneck is very common in a distributed application and thus we should avoid it." }, { "code": null, "e": 22679, "s": 22568, "text": "Let us now try to setup Eureka clients and make them Zone aware. For doing that, let us add application-za.yml" }, { "code": null, "e": 22908, "s": 22679, "text": "spring:\n application:\n name: customer-service\nserver:\n port: ${app_port}\neureka:\n instance:\n metadataMap:\n zone: ${zoneName}\n client:\n serviceURL:\n defaultZone: http://localhost:8900/eureka" }, { "code": null, "e": 23103, "s": 22908, "text": "Let us now recompile our Eureka client project. For execution, we will have two service instances running. To do that, let's open two shells and then execute the following command on one shell −" }, { "code": null, "e": 23245, "s": 23103, "text": "java -Dapp_port=8080 -Dzone_name=USA -jar .\\target\\spring-cloud-eureka-client-\n1.0.jar --spring.config.location=classpath:application-za.yml\n" }, { "code": null, "e": 23292, "s": 23245, "text": "And execute the following on the other shell −" }, { "code": null, "e": 23433, "s": 23292, "text": "java -Dapp_port=8081 -Dzone_name=EU -jar .\\target\\spring-cloud-eureka-client-\n1.0.jar --spring.config.location=classpath:application-za.yml\n" }, { "code": null, "e": 23647, "s": 23433, "text": "We can go back to the dashboard to verify that the Eureka Server registers the zone of the services. 
As seen in the following image, we have two availability zones instead of 1, which we have been seeing till now." }, { "code": null, "e": 23841, "s": 23647, "text": "Now, any client can look at the zone it is present in. Say the client is located in USA, it would prefer the service instance of USA. And it can get the zone information from the Eureka Server." } ]
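An aside on the Eureka REST surface mentioned above: the table of "other useful APIs" is empty in this copy, so the sketches below list the standard operations documented on the public Eureka wiki (an assumption, since the lost table may have listed different ones). They target the standalone registry on port 8900 and the instance ID Eureka logged earlier.

# Query all registered applications
curl http://localhost:8900/eureka/apps

# Query all instances of one application (as the tutorial did in a browser)
curl http://localhost:8900/eureka/apps/CUSTOMER-SERVICE

# Query a specific instance
curl http://localhost:8900/eureka/apps/CUSTOMER-SERVICE/localhost:customer-service:8081

# De-register an instance
curl -X DELETE http://localhost:8900/eureka/apps/CUSTOMER-SERVICE/localhost:customer-service:8081

# Take an instance out of service without de-registering it
curl -X PUT "http://localhost:8900/eureka/apps/CUSTOMER-SERVICE/localhost:customer-service:8081/status?value=OUT_OF_SERVICE"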
How to float three div side by side using CSS? - GeeksforGeeks
12 Feb, 2019 Three or more divs can be placed side by side using CSS. Use the CSS height and width properties to size each div, and use the float or display property to place the divs in a side-by-side format. float:left; This property is used for those elements (div) that will float on the left side. float:right; This property is used for those elements (div) that will float on the right side. Example 1: This example places three divs side by side using the float property. <!DOCTYPE html> <html> <head> <!-- CSS property to place div side by side --> <style> #leftbox { float:left; background:Red; width:25%; height:280px; } #middlebox{ float:left; background:Green; width:50%; height:280px; } #rightbox{ float:right; background:blue; width:25%; height:280px; } h1{ color:green; text-align:center; } </style> </head> <body> <div id = "boxes"> <h1>GeeksforGeeks</h1> <div id = "leftbox"> <h2>Learn:</h2> It is a good platform to learn programming. It is an educational website. Prepare for the Recruitment drive of product based companies like Microsoft, Amazon, Adobe etc with a free online placement preparation course. </div> <div id = "middlebox"> <h2>GeeksforGeeks:</h2> The course focuses on various MCQ's & Coding question likely to be asked in the interviews & make your upcoming placement season efficient and successful. </div> <div id = "rightbox"> <h2>Contribute:</h2> Any geeks can help other geeks by writing articles on the GeeksforGeeks, publishing articles follow few steps that are Articles that need little modification/improvement from reviewers are published first. </div> </div> </body> </html> Output: Example 2: This example places three divs side by side using the float property. <!DOCTYPE html> <html> <head> <!-- CSS style to put div side by side --> <style type="text/css"> .container { width:600px; height:190px; background-color:green; padding-top:20px; padding-left:15px; padding-right:15px; } #st-box { float:left; width:180px; height:160px; background-color:white; border:solid black; } #nd-box { float:left; width:180px; height:160px; background-color:white; border:solid black; margin-left:20px; } #rd-box { float:right; width:180px; height:160px; background-color:white; border:solid black; } h1 { color:Green; } </style> </head> <body> <center> <h1>GeeksforGeeks</h1> <div class="container"> <div id="st-box"> <img src="https://media.geeksforgeeks.org/wp-content/uploads/download-49.png "style="width:180px; height:160px;"> </div> <div id="nd-box"> <p> We can create as many divs as we want side by side, with the same height or with different heights. </p> </div> <div id="rd-box"> <img src="https://media.geeksforgeeks.org/wp-content/uploads/images-31.png "style="width:180px; height:160px;"> </div> </div> </center> </body></html> Output: Example 3: Another way to place three divs side by side is by using the display property. display:table; This property is used for elements (div) which behave like a table. display:table-cell; This property is used for elements (div) which behave like td. display:table-row; This property is used for elements (div) which behave like tr. 
<!DOCTYPE html> <html> <head> <!-- CSS style to place three div side by side --> <style> .container .box { width:540px; margin:50px; display:table; } .container .box .box-row { display:table-row; } .container .box .box-cell { display:table-cell; width:33%; padding:10px; } .container .box .box-cell.box1 { background:green; color:white; text-align:justify; } .container .box .box-cell.box2 { background:lightgreen; text-align:justify } .container .box .box-cell.box3 { background:lime; text-align:justify; } </style> </head> <body> <center> <h1 style = "color:green;">GeeksforGeeks</h1> <div class="container"> <div class="box"> <div class="box-row"> <div class="box-cell box1"> It is a good platform to learn programming. It is an educational website. Prepare for the Recruitment drive of product based companies like Microsoft, Amazon, Adobe etc with a free online placement preparation course. </div> <div class="box-cell box2"> The course focuses on various MCQ's & Coding question likely to be asked in the interviews & make your upcoming placement season efficient and successful. </div> <div class="box-cell box3"> Any geeks can help other geeks by writing articles on the GeeksforGeeks, publishing articles follow few steps that are Articles that need little modification/improvement from reviewers are published first. </div> </div> </div> </div> </center> </body> </html> Output:
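For reference, the same three-column layout can also be built with CSS flexbox, a newer technique the article does not cover. This sketch is an addition, not one of the original examples; the class names are arbitrary.

<!DOCTYPE html>
<html>
<head>
    <style>
        /* Flexbox lays the boxes out on one row; "gap" replaces the
           manual margins used in Example 2. */
        .flex-container {
            display: flex;
            gap: 20px;
        }
        .flex-container div {
            flex: 1;            /* each box takes an equal share of the row */
            height: 160px;
            background: lightgreen;
            border: solid black;
        }
    </style>
</head>
<body>
    <div class="flex-container">
        <div>First</div>
        <div>Second</div>
        <div>Third</div>
    </div>
</body>
</html>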
[ { "code": null, "e": 24816, "s": 24788, "text": "\n12 Feb, 2019" }, { "code": null, "e": 24993, "s": 24816, "text": "Three or more different div can be put side-by-side using CSS. Use CSS property to set the height and width of div and use display property to place div in side-by-side format." }, { "code": null, "e": 25081, "s": 24993, "text": "float:left; This property is used for those elements(div) that will float on left side." }, { "code": null, "e": 25171, "s": 25081, "text": "float:right; This property is used for those elements(div) that will float on right side." }, { "code": null, "e": 25246, "s": 25171, "text": "Example 1: This example place three div side by side using float property." }, { "code": "<!DOCTYPE html> <html> <head> <!-- CSS property to place div side by side --> <style> #leftbox { float:left; background:Red; width:25%; height:280px; } #middlebox{ float:left; background:Green; width:50%; height:280px; } #rightbox{ float:right; background:blue; width:25%; height:280px; } h1{ color:green; text-align:center; } </style> </head> <body> <div id = \"boxes\"> <h1>GeeksforGeeks</h1> <div id = \"leftbox\"> <h2>Learn:</h2> It is a good platform to learn programming. It is an educational website. Prepare for the Recruitment drive of product based companies like Microsoft, Amazon, Adobe etc with a free online placement preparation course. </div> <div id = \"middlebox\"> <h2>GeeksforGeeks:</h2> The course focuses on various MCQ's & Coding question likely to be asked in the interviews & make your upcoming placement season efficient and successful. </div> <div id = \"rightbox\"> <h2>Contribute:</h2> Any geeks can help other geeks by writing articles on the GeeksforGeeks, publishing articles follow few steps that are Articles that need little modification/improvement from reviewers are published first. </div> </div> </body> </html> ", "e": 27184, "s": 25246, "text": null }, { "code": null, "e": 27192, "s": 27184, "text": "Output:" }, { "code": null, "e": 27267, "s": 27192, "text": "Example 2: This example place three div side by side using float property." }, { "code": "<!DOCTYPE html> <html> <head> <!-- CSS style to put div side by side --> <style type=\"text/css\"> .container { width:600px; height:190px; background-color:green; padding-top:20px; padding-left:15px; padding-right:15px; } #st-box { float:left; width:180px; height:160px; background-color:white; border:solid black; } #nd-box { float:left; width:180px; height:160px; background-color:white; border:solid black; margin-left:20px; } #rd-box { float:right; width:180px; height:160px; background-color:white; border:solid black; } h1 { color:Green; } </style> </head> <body> <center> <h1>GeeksforGeeks</h1> <div class=\"container\"> <div id=\"st-box\"> <img src=\"https://media.geeksforgeeks.org/wp-content/uploads/download-49.png \"style=\"width:180px; height:160px;\"> </div> <div id=\"nd-box\"> <p> We can create as many div as many we want side by side with the same height and also with the different heights. </p> </div> <div id=\"rd-box\"> <img src=\"https://media.geeksforgeeks.org/wp-content/uploads/images-31.png \"style=\"width:180px; height:160px;\"> </div> </div> </center> </body></html> ", "e": 29028, "s": 27267, "text": null }, { "code": null, "e": 29036, "s": 29028, "text": "Output:" }, { "code": null, "e": 29116, "s": 29036, "text": "Example 3: Another way to put three div side by side by using display property." 
}, { "code": null, "e": 29198, "s": 29116, "text": "display:table; This property is used for elements (div) which behaves like table." }, { "code": null, "e": 29281, "s": 29198, "text": "display:table-cell;This property is used for elements (div) which behaves like td." }, { "code": null, "e": 29363, "s": 29281, "text": "display:table-row;This property is used for elements (div) which behaves like tr." }, { "code": "<!DOCTYPE html> <html> <head> <!-- CSS style to place three div side by side --> <style> .container .box { width:540px; margin:50px; display:table; } .container .box .box-row { display:table-row; } .container .box .box-cell { display:table-cell; width:33%; padding:10px; } .container .box .box-cell.box1 { background:green; color:white; text-align:justify; } .container .box .box-cell.box2 { background:lightgreen; text-align:justify } .container .box .box-cell.box3 { background:lime; text-align:justify; } </style> </head> <body> <center> <h1 style = \"color:green;\">GeeksforGeeks</h1> <div class=\"container\"> <div class=\"box\"> <div class=\"box-row\"> <div class=\"box-cell box1\"> It is a good platform to learn programming. It is an educational website. Prepare for the Recruitment drive of product based companies like Microsoft, Amazon, Adobe etc with a free online placement preparation course. </div> <div class=\"box-cell box2\"> The course focuses on various MCQ's & Coding question likely to be asked in the interviews & make your upcoming placement season efficient and successful. </div> <div class=\"box-cell box3\"> Any geeks can help other geeks by writing articles on the GeeksforGeeks, publishing articles follow few steps that are Articles that need little modification/improvement from reviewers are published first. </div> </div> </div> </div> </center> </body> </html> ", "e": 31800, "s": 29363, "text": null }, { "code": null, "e": 31808, "s": 31800, "text": "Output:" }, { "code": null, "e": 31817, "s": 31808, "text": "CSS-Misc" }, { "code": null, "e": 31824, "s": 31817, "text": "Picked" }, { "code": null, "e": 31837, "s": 31824, "text": "Web-Programs" }, { "code": null, "e": 31854, "s": 31837, "text": "Web Technologies" }, { "code": null, "e": 31952, "s": 31854, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 31961, "s": 31952, "text": "Comments" }, { "code": null, "e": 31974, "s": 31961, "text": "Old Comments" }, { "code": null, "e": 32016, "s": 31974, "text": "Roadmap to Become a Web Developer in 2022" }, { "code": null, "e": 32049, "s": 32016, "text": "Installation of Node.js on Linux" }, { "code": null, "e": 32111, "s": 32049, "text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills" }, { "code": null, "e": 32154, "s": 32111, "text": "How to fetch data from an API in ReactJS ?" }, { "code": null, "e": 32204, "s": 32154, "text": "How to insert spaces/tabs in text using HTML/CSS?" }, { "code": null, "e": 32248, "s": 32204, "text": "Top 10 Angular Libraries For Web Developers" }, { "code": null, "e": 32293, "s": 32248, "text": "Convert a string to an integer in JavaScript" }, { "code": null, "e": 32362, "s": 32293, "text": "How to calculate the number of days between two dates in javascript?" }, { "code": null, "e": 32423, "s": 32362, "text": "Difference between var, let and const keywords in JavaScript" } ]
Find Unique pair in an array with pairs of numbers - GeeksforGeeks
29 Apr, 2021 Given an array where every element appears twice except a pair (two elements). Find the elements of this unique pair. Examples: Input : 6, 1, 3, 5, 1, 3, 7, 6 Output : 5 7 All elements appear twice except 5 and 7 Input : 1 3 4 1 Output : 3 4 The idea is based on the post Find Two Missing Numbers | Set 2 (XOR based solution). 1. XOR each element of the array and you will be left with the XOR of the two different elements, which are going to be our result. Let this XOR be “XOR” 2. Now find a set bit in XOR. 3. Now divide array elements in two groups. One group that has the bit found in step 2 as set and the other group that has the bit as 0. 4. XOR of elements present in the first group would be our first element. And XOR of elements present in the second group would be our second element. C++ Java Python 3 C# PHP Javascript // C program to find a unique pair in an array// of pairs.#include <stdio.h> void findUniquePair(int arr[], int n){ // XOR each element and get XOR of two unique // elements(ans) int XOR = arr[0]; for (int i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing elements. Any set // bit in it must be set in one missing and unset in // other missing number // Get a set bit of XOR (We get the rightmost set bit) int set_bit_no = XOR & ~(XOR-1); // Now divide elements in two sets by comparing rightmost // set bit of XOR with bit at same position in each element. int x = 0, y = 0; // Initialize missing numbers for (int i = 0; i < n; i++) { if (arr[i] & set_bit_no) x = x ^ arr[i]; /*XOR of first set in arr[] */ else y = y ^ arr[i]; /*XOR of second set in arr[] */ } printf("The unique pair is (%d, %d)", x, y); } // Driver codeint main(){ int a[] = { 6, 1, 3, 5, 1, 3, 7, 6 }; int n = sizeof(a)/sizeof(a[0]); findUniquePair(a, n); return 0;} // Java program to find a unique pair// in an array of pairs.class GFG{ static void findUniquePair(int[] arr, int n) { // XOR each element and get XOR of two // unique elements(ans) int XOR = arr[0]; for (int i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing elements. // Any set bit in it must be set in one // missing and unset in other missing number // Get a set bit of XOR (We get the // rightmost set bit) int set_bit_no = XOR & ~(XOR-1); // Now divide elements in two sets by // comparing rightmost set bit of XOR with // bit at same position in each element. // Initialize missing numbers int x = 0, y = 0; for (int i = 0; i < n; i++) { if ((arr[i] & set_bit_no)>0) /*XOR of first set in arr[] */ x = x ^ arr[i]; else /*XOR of second set in arr[] */ y = y ^ arr[i]; } System.out.println("The unique pair is (" + x + "," + y + ")"); } // Driver code public static void main (String[] args) { int[] a = { 6, 1, 3, 5, 1, 3, 7, 6 }; int n = a.length; findUniquePair(a, n); } } /* This code is contributed by Mr. Somesh Awasthi */ # Python 3 program to find a unique# pair in an array of pairs.def findUniquePair(arr, n): # XOR each element and get XOR # of two unique elements(ans) XOR = arr[0] for i in range(1, n): XOR = XOR ^ arr[i] # Now XOR has XOR of two missing # elements. Any set bit in it # must be set in one missing and # unset in other missing number # Get a set bit of XOR (We get # the rightmost set bit) set_bit_no = XOR & ~(XOR - 1) # Now divide elements in two sets # by comparing rightmost set bit # of XOR with bit at same position # in each element. 
x = 0 y = 0 # Initialize missing numbers for i in range(0, n): if (arr[i] & set_bit_no): # XOR of first set in # arr[] x = x ^ arr[i] else: # XOR of second set # in arr[] y = y ^ arr[i] print("The unique pair is (", x, ", ", y, ")", sep = "") # Driver codea = [6, 1, 3, 5, 1, 3, 7, 6 ]n = len(a)findUniquePair(a, n) # This code is contributed by Smitha. // C# program to find a unique pair// in an array of pairs.using System; class GFG { static void findUniquePair(int[] arr, int n) { // XOR each element and get XOR of two // unique elements(ans) int XOR = arr[0]; for (int i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing // elements. Any set bit in it must // be set in one missing and unset // in other missing number // Get a set bit of XOR (We get the // rightmost set bit) int set_bit_no = XOR & ~(XOR - 1); // Now divide elements in two sets by // comparing rightmost set bit of XOR // with bit at same position in each // element. Initialize missing numbers int x = 0, y = 0; for (int i = 0; i < n; i++) { if ((arr[i] & set_bit_no) > 0) /*XOR of first set in arr[] */ x = x ^ arr[i]; else /*XOR of second set in arr[] */ y = y ^ arr[i]; } Console.WriteLine("The unique pair is (" + x + ", " + y + ")"); } // Driver code public static void Main () { int[] a = { 6, 1, 3, 5, 1, 3, 7, 6 }; int n = a.Length; findUniquePair(a, n); }} // This code is contributed by vt_m. <?php// PHP program to find a// unique pair in an array// of pairs. function findUniquePair($arr, $n){ // XOR each element and // get XOR of two unique // elements(ans) $XOR = $arr[0]; for ($i = 1; $i < $n; $i++) $XOR = $XOR ^ $arr[$i]; // Now XOR has XOR of two // missing elements. Any set // bit in it must be set in // one missing and unset in // other missing number // Get a set bit of XOR // (We get the rightmost set bit) $set_bit_no = $XOR & ~($XOR-1); // Now divide elements in two // sets by comparing rightmost // set bit of XOR with bit at // same position in each element. // Initialize missing numbers $x = 0; $y = 0; for ($i = 0; $i < $n; $i++) { if ($arr[$i] & $set_bit_no) // XOR of first set in arr[] $x = $x ^ $arr[$i]; else // XOR of second set in arr[] $y = $y ^ $arr[$i]; } echo"The unique pair is ", "(",$x," ", $y,")"; } // Driver code $a = array(6, 1, 3, 5, 1, 3, 7, 6); $n = count($a); findUniquePair($a, $n); // This code is contributed by anuj_67.?> <script>// Javascript program to find a unique pair// in an array of pairs. function findUniquePair(arr, n) { // XOR each element and get XOR of two // unique elements(ans) let XOR = arr[0]; for (let i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing elements. // Any set bit in it must be set in one // missing and unset in other missing number // Get a set bit of XOR (We get the // rightmost set bit) let set_bit_no = XOR & ~(XOR-1); // Now divide elements in two sets by // comparing rightmost set bit of XOR with // bit at same position in each element. // Initialize missing numbers let x = 0, y = 0; for (let i = 0; i < n; i++) { if ((arr[i] & set_bit_no)>0) /*XOR of first set in arr[] */ x = x ^ arr[i]; else /*XOR of second set in arr[] */ y = y ^ arr[i]; } document.write("The unique pair is (" + x + "," + y + ")" + "<br/>"); } // driver function let a = [ 6, 1, 3, 5, 1, 3, 7, 6 ]; let n = a.length; findUniquePair(a, n); </script> Output: The unique pair is (7, 5) This article is contributed by Dhiman Mayank. 
If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
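To make the XOR partitioning concrete, here is a short Python trace on the sample input. It is a verification sketch added here, not part of the original article; it uses x & -x, which isolates the rightmost set bit just like the article's XOR & ~(XOR-1).

# Trace of the algorithm on the sample array; the unique pair is 5 and 7.
a = [6, 1, 3, 5, 1, 3, 7, 6]

xor_all = 0
for v in a:
    xor_all ^= v                 # pairs cancel: result is 5 ^ 7 = 0b010 = 2

set_bit = xor_all & -xor_all     # rightmost set bit of 2 is 2 itself
x = y = 0
for v in a:
    if v & set_bit:
        x ^= v                   # group with the bit set: 6, 3, 3, 7, 6 -> 7
    else:
        y ^= v                   # group with the bit clear: 1, 5, 1 -> 5

print(x, y)                      # prints: 7 5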
[ { "code": null, "e": 24742, "s": 24714, "text": "\n29 Apr, 2021" }, { "code": null, "e": 24871, "s": 24742, "text": "Given an array where every element appears twice except a pair (two elements). Find the elements of this unique pair.Examples: " }, { "code": null, "e": 24988, "s": 24871, "text": "Input : 6, 1, 3, 5, 1, 3, 7, 6\nOutput : 5 7\nAll elements appear twice except 5 and 7\n\nInput : 1 3 4 1\nOutput : 3 4" }, { "code": null, "e": 25529, "s": 24990, "text": "The idea is based on below post.Find Two Missing Numbers | Set 2 (XOR based solution)1. XOR each element of the array and you will left with the XOR of two different elements which are going to be our result. Let this XOR be “XOR” 2. Now find a set bit in XOR. 3. Now divide array elements in two groups. One group that has the bit found in step 2 as set and other group that has the bit as 0. 4. XOR of elements present in first group would be our first element. And XOR of elements present in second group would be our second element. " }, { "code": null, "e": 25533, "s": 25529, "text": "C++" }, { "code": null, "e": 25538, "s": 25533, "text": "Java" }, { "code": null, "e": 25547, "s": 25538, "text": "Python 3" }, { "code": null, "e": 25550, "s": 25547, "text": "C#" }, { "code": null, "e": 25554, "s": 25550, "text": "PHP" }, { "code": null, "e": 25565, "s": 25554, "text": "Javascript" }, { "code": "// C program to find a unique pair in an array// of pairs.#include <stdio.h> void findUniquePair(int arr[], int n){ // XOR each element and get XOR of two unique // elements(ans) int XOR = arr[0]; for (int i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing elements. Any set // bit in it must be set in one missing and unset in // other missing number // Get a set bit of XOR (We get the rightmost set bit) int set_bit_no = XOR & ~(XOR-1); // Now divide elements in two sets by comparing rightmost // set bit of XOR with bit at same position in each element. int x = 0, y = 0; // Initialize missing numbers for (int i = 0; i < n; i++) { if (arr[i] & set_bit_no) x = x ^ arr[i]; /*XOR of first set in arr[] */ else y = y ^ arr[i]; /*XOR of second set in arr[] */ } printf(\"The unique pair is (%d, %d)\", x, y); } // Driver codeint main(){ int a[] = { 6, 1, 3, 5, 1, 3, 7, 6 }; int n = sizeof(a)/sizeof(a[0]); findUniquePair(a, n); return 0;}", "e": 26642, "s": 25565, "text": null }, { "code": "// Java program to find a unique pair// in an array of pairs.class GFG{ static void findUniquePair(int[] arr, int n) { // XOR each element and get XOR of two // unique elements(ans) int XOR = arr[0]; for (int i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing elements. // Any set bit in it must be set in one // missing and unset in other missing number // Get a set bit of XOR (We get the // rightmost set bit) int set_bit_no = XOR & ~(XOR-1); // Now divide elements in two sets by // comparing rightmost set bit of XOR with // bit at same position in each element. // Initialize missing numbers int x = 0, y = 0; for (int i = 0; i < n; i++) { if ((arr[i] & set_bit_no)>0) /*XOR of first set in arr[] */ x = x ^ arr[i]; else /*XOR of second set in arr[] */ y = y ^ arr[i]; } System.out.println(\"The unique pair is (\" + x + \",\" + y + \")\"); } // Driver code public static void main (String[] args) { int[] a = { 6, 1, 3, 5, 1, 3, 7, 6 }; int n = a.length; findUniquePair(a, n); } } /* This code is contributed by Mr. 
Somesh Awasthi */", "e": 28019, "s": 26642, "text": null }, { "code": "# Python 3 program to find a unique# pair in an array of pairs.def findUniquePair(arr, n): # XOR each element and get XOR # of two unique elements(ans) XOR = arr[0] for i in range(1, n): XOR = XOR ^ arr[i] # Now XOR has XOR of two missing # elements. Any set bit in it # must be set in one missing and # unset in other missing number # Get a set bit of XOR (We get # the rightmost set bit) set_bit_no = XOR & ~(XOR - 1) # Now divide elements in two sets # by comparing rightmost set bit # of XOR with bit at same position # in each element. x = 0 y = 0 # Initialize missing numbers for i in range(0, n): if (arr[i] & set_bit_no): # XOR of first set in # arr[] x = x ^ arr[i] else: # XOR of second set # in arr[] y = y ^ arr[i] print(\"The unique pair is (\", x, \", \", y, \")\", sep = \"\") # Driver codea = [6, 1, 3, 5, 1, 3, 7, 6 ]n = len(a)findUniquePair(a, n) # This code is contributed by Smitha.", "e": 29119, "s": 28019, "text": null }, { "code": "// C# program to find a unique pair// in an array of pairs.using System; class GFG { static void findUniquePair(int[] arr, int n) { // XOR each element and get XOR of two // unique elements(ans) int XOR = arr[0]; for (int i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing // elements. Any set bit in it must // be set in one missing and unset // in other missing number // Get a set bit of XOR (We get the // rightmost set bit) int set_bit_no = XOR & ~(XOR - 1); // Now divide elements in two sets by // comparing rightmost set bit of XOR // with bit at same position in each // element. Initialize missing numbers int x = 0, y = 0; for (int i = 0; i < n; i++) { if ((arr[i] & set_bit_no) > 0) /*XOR of first set in arr[] */ x = x ^ arr[i]; else /*XOR of second set in arr[] */ y = y ^ arr[i]; } Console.WriteLine(\"The unique pair is (\" + x + \", \" + y + \")\"); } // Driver code public static void Main () { int[] a = { 6, 1, 3, 5, 1, 3, 7, 6 }; int n = a.Length; findUniquePair(a, n); }} // This code is contributed by vt_m.", "e": 30539, "s": 29119, "text": null }, { "code": "<?php// PHP program to find a// unique pair in an array// of pairs. function findUniquePair($arr, $n){ // XOR each element and // get XOR of two unique // elements(ans) $XOR = $arr[0]; for ($i = 1; $i < $n; $i++) $XOR = $XOR ^ $arr[$i]; // Now XOR has XOR of two // missing elements. Any set // bit in it must be set in // one missing and unset in // other missing number // Get a set bit of XOR // (We get the rightmost set bit) $set_bit_no = $XOR & ~($XOR-1); // Now divide elements in two // sets by comparing rightmost // set bit of XOR with bit at // same position in each element. // Initialize missing numbers $x = 0; $y = 0; for ($i = 0; $i < $n; $i++) { if ($arr[$i] & $set_bit_no) // XOR of first set in arr[] $x = $x ^ $arr[$i]; else // XOR of second set in arr[] $y = $y ^ $arr[$i]; } echo\"The unique pair is \", \"(\",$x,\" \", $y,\")\"; } // Driver code $a = array(6, 1, 3, 5, 1, 3, 7, 6); $n = count($a); findUniquePair($a, $n); // This code is contributed by anuj_67.?>", "e": 31699, "s": 30539, "text": null }, { "code": "<script>// Javascript program to find a unique pair// in an array of pairs. function findUniquePair(arr, n) { // XOR each element and get XOR of two // unique elements(ans) let XOR = arr[0]; for (let i = 1; i < n; i++) XOR = XOR ^ arr[i]; // Now XOR has XOR of two missing elements. 
// Any set bit in it must be set in one // missing and unset in other missing number // Get a set bit of XOR (We get the // rightmost set bit) let set_bit_no = XOR & ~(XOR-1); // Now divide elements in two sets by // comparing rightmost set bit of XOR with // bit at same position in each element. // Initialize missing numbers let x = 0, y = 0; for (let i = 0; i < n; i++) { if ((arr[i] & set_bit_no)>0) /*XOR of first set in arr[] */ x = x ^ arr[i]; else /*XOR of second set in arr[] */ y = y ^ arr[i]; } document.write(\"The unique pair is (\" + x + \",\" + y + \")\" + \"<br/>\"); } // driver function let a = [ 6, 1, 3, 5, 1, 3, 7, 6 ]; let n = a.length; findUniquePair(a, n); </script> ", "e": 33001, "s": 31699, "text": null }, { "code": null, "e": 33011, "s": 33001, "text": "Output: " }, { "code": null, "e": 33037, "s": 33011, "text": "The unique pair is (7, 5)" }, { "code": null, "e": 33459, "s": 33037, "text": "This article is contributed by Dhiman Mayank. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to review-team@geeksforgeeks.org. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above. " }, { "code": null, "e": 33464, "s": 33459, "text": "vt_m" }, { "code": null, "e": 33485, "s": 33464, "text": "Smitha Dinesh Semwal" }, { "code": null, "e": 33498, "s": 33485, "text": "Akanksha_Rai" }, { "code": null, "e": 33508, "s": 33498, "text": "sanjoy_62" }, { "code": null, "e": 33520, "s": 33508, "text": "Bitwise-XOR" }, { "code": null, "e": 33527, "s": 33520, "text": "Arrays" }, { "code": null, "e": 33537, "s": 33527, "text": "Bit Magic" }, { "code": null, "e": 33544, "s": 33537, "text": "Arrays" }, { "code": null, "e": 33554, "s": 33544, "text": "Bit Magic" }, { "code": null, "e": 33652, "s": 33554, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 33661, "s": 33652, "text": "Comments" }, { "code": null, "e": 33674, "s": 33661, "text": "Old Comments" }, { "code": null, "e": 33699, "s": 33674, "text": "Window Sliding Technique" }, { "code": null, "e": 33748, "s": 33699, "text": "Program to find sum of elements in a given array" }, { "code": null, "e": 33786, "s": 33748, "text": "Reversal algorithm for array rotation" }, { "code": null, "e": 33844, "s": 33786, "text": "Find duplicates in O(n) time and O(1) extra space | Set 1" }, { "code": null, "e": 33864, "s": 33844, "text": "Trapping Rain Water" }, { "code": null, "e": 33891, "s": 33864, "text": "Bitwise Operators in C/C++" }, { "code": null, "e": 33937, "s": 33891, "text": "Left Shift and Right Shift Operators in C/C++" }, { "code": null, "e": 34005, "s": 33937, "text": "Travelling Salesman Problem | Set 1 (Naive and Dynamic Programming)" }, { "code": null, "e": 34051, "s": 34005, "text": "Cyclic Redundancy Check and Modulo-2 Division" } ]
Improving Plotly’s Polar Bar Charts | by Ray Heberer | Towards Data Science
Polar Bar Charts can be great ways to display categorical information if you want to draw more attention to the categories with higher values. They were used to great effect in the Wall Street Journal’s article on Amazon’s HQ2 Shortlist. But even with the powerful visualization tools at our fingertips, producing charts like that takes a bit of legwork. I’m here today to provide you with the pieces of geometrical drudgery needed to turn Plotly’s polar bar chart into something visually acceptable. By the end, you can throw these annoying bits of housekeeping code into a function and never think about it again. This is the plot produced by Plotly’s tutorial code. While I understand that they were demonstrating how to create the Matplotlib logo in fewer lines of code, the example is not useful for practical applications. Below, I will be addressing the main issues I have with this example in a reproducible way. Those issues being: The slices of the plot and of the background are different in number and not aligned. The gaps between the plot slices are not uniform. It’s not clear from the example how to make a legend. As usual, you can skip to the end for the full code snippet. Let’s start by framing our problem. We have a set of values, and we’d like to map each value to the radius of a circle slice. Perhaps we also have labels for each value. vals = [3, 2, 5, 1, 2]labels = ["Strength", "Intelligence", "Dexterity", "Wisdom", "Stealth"] From these values alone, we’d like to determine the positioning and alignment of both the slices and the background. The radius, position, and width of each slice are determined by the r, theta, and width arguments of the Plotly barpolar function respectively. Note that theta corresponds to the angle of the center of each slice. If we wish to have slices with no space between them, and of equal width, we can infer these parameters from the length of our original values. num_slices = len(vals)theta = [(i + 1.5) * 360 / num_slices for i in range(num_slices)]width = [360 / num_slices for _ in range(num_slices)] Here the “+ 1.5” will ensure that the right edge of our first slice will be at zero degrees. Something else I like to do is produce a color sequence for my slices by taking steps through a palette (e.g. Pastel): color_seq = px.colors.qualitative.Pastelcolor_indices = range(0, len(color_seq), len(color_seq) // num_slices)colors = [color_seq[i] for i in color_indices] Let’s take a look at our progress: fig = go.Figure(go.Barpolar(r=vals, theta=theta, width=width, marker_color=colors))fig.show() It’s getting better! As you can see, the slices start from zero degrees and are evenly spaced. But the background is still completely unaligned. Aligning the background is a matter of adjusting the polar_angularaxis_tickvals layout option of the figure. We’ll infer the sequence of angles similarly to the way we did with the slices. angular_tickvals = [(i + 1) * 360 / num_slices for i in range(num_slices)]fig.update_layout(polar_angularaxis_tickvals=angular_tickvals)fig.show() Good, now the background is aligned with the slices. There are many more options you can adjust with update_layout that will make the plot more aesthetic. I’ll leave you with a few of them at the end. Now, how to add a legend that associates a label with each slice? If you just use the name argument, you run into an issue: Plotly treats all of our slices as a single trace, which can only be associated with a single name. How might we associate each individual slice with its own name? 
This isn’t ideal, but at the moment I’ve found that making a separate barpolar object for each slice is the best way to do this. I’m hopeful that there exists a better way, or at least that one will be developed soon. barpolar_plots = [go.Barpolar(r=[r], theta=[t], width=[w], name=n, marker_color=[c])for r, t, w, n, c in zip(vals, theta, width, labels, colors)]fig = go.Figure(barpolar_plots)angular_tickvals = [(i + 1) * 360 / num_slices for i in range(num_slices)]fig.update_layout(polar_angularaxis_tickvals=angular_tickvals)fig.show() Since we inferred all the angle parameters just from the raw values, it makes sense to wrap all of that effort into a function. I’ve demonstrated its usage with a few extra layout options. You can find more at the Plotly Figure Reference. At the moment, it takes a little work to get from Plotly’s tutorial polar bar chart to something usable. In particular, some angle munging is required to align the slices and the background. In addition, adding a legend is not as straightforward as it could be. Here, I’ve gone through the steps required to make such adjustments, and packaged them all up in a reusable function. I hope you find it helpful.
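The closing code snippet (an embedded gist containing the wrapped-up function) did not survive in this copy, so below is a minimal reconstruction assembled from the steps shown above. The function name barpolar_chart and the palette-stepping expression are my choices; the Plotly calls themselves (go.Barpolar, and update_layout with polar_angularaxis_tickvals) are exactly the ones used in the walkthrough.

import plotly.graph_objects as go
import plotly.express as px

def barpolar_chart(vals, labels, palette=px.colors.qualitative.Pastel):
    # Infer slice centers and widths so the slices tile the circle evenly.
    n = len(vals)
    theta = [(i + 1.5) * 360 / n for i in range(n)]
    width = [360 / n] * n
    colors = [palette[i * len(palette) // n] for i in range(n)]

    # One Barpolar trace per slice so each slice gets its own legend entry.
    fig = go.Figure([
        go.Barpolar(r=[r], theta=[t], width=[w], name=lbl, marker_color=[c])
        for r, t, w, lbl, c in zip(vals, theta, width, labels, colors)
    ])

    # Align the angular grid lines with the slice edges.
    tickvals = [(i + 1) * 360 / n for i in range(n)]
    fig.update_layout(polar_angularaxis_tickvals=tickvals)
    return fig

fig = barpolar_chart([3, 2, 5, 1, 2],
                     ["Strength", "Intelligence", "Dexterity", "Wisdom", "Stealth"])
fig.show()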
How to remove the mapped network drive using PowerShell?
You can remove mapped drives on Windows systems using both PowerShell and cmd commands.

You can use the net use command to remove a single mapped drive by specifying the drive letter, or all mapped drives by using the wildcard character (*).

To remove a single mapped drive:

net use K: /delete

PS C:\WINDOWS\system32> net use K: /delete
K: was deleted successfully.

To remove multiple mapped drives together:

net use * /delete

You need to confirm removing multiple mapped drives together.

PS C:\WINDOWS\system32> net use * /delete
You have these remote connections:

   K:   \\remoteshare\shared
   M:   \\remoteshare\shared folder

Continuing will cancel the connections.

Do you want to continue this operation? (Y/N) [N]: Y
The command completed successfully.

To remove a mapped network drive with the PowerShell Remove-PSDrive cmdlet, provide the drive letter. If you want to remove multiple drives together, separate the letters with a comma (,).

Remove-PSDrive K,M -Force -Verbose

PS C:\WINDOWS\system32> Remove-PSDrive K,M -Force -Verbose
VERBOSE: Performing the operation "Remove Drive" on target "Name: K Provider: Microsoft.PowerShell.Core\FileSystem Root: K:\".
VERBOSE: Performing the operation "Remove Drive" on target "Name: M Provider: Microsoft.PowerShell.Core\FileSystem Root: M:\".

Alternatively, you can get the mapped drives with Get-PSDrive and pipe them to Remove-PSDrive to remove them.

Get-PSDrive K,M | Remove-PSDrive -Force -Verbose

PS C:\WINDOWS\system32> Get-PSDrive K,M | Remove-PSDrive -Force -Verbose
VERBOSE: Performing the operation "Remove Drive" on target "Name: K Provider: Microsoft.PowerShell.Core\FileSystem Root: K:\".
VERBOSE: Performing the operation "Remove Drive" on target "Name: M Provider: Microsoft.PowerShell.Core\FileSystem Root: M:\".
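Both approaches above require you to know the drive letters. To remove every mapped network drive in one go, you can filter on the DisplayRoot property, which is populated only for network-mapped drives. This pattern is an addition to the steps above rather than part of them; test it with -WhatIf first.

# Remove all network-mapped drives (DisplayRoot is set only for network drives)
Get-PSDrive -PSProvider FileSystem |
   Where-Object { $_.DisplayRoot -like "\\*" } |
   Remove-PSDrive -Force -Verbose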
Image Processing with Python. Bytes, sampling, and filters — an... | by James Briggs | Towards Data Science
Image processing is often a reasonably infrequent task. Of course, this will depend heavily on our roles — but even in heavy analytical roles such as data science or machine learning — image manipulation is pretty common.

In machine learning, we may need to take a number of images and classify them based on their contents — we’ll likely need to downsample the images — taking them from an original 2, 4, 8K resolution image to a more manageable size.

There are many other disciplines that require a small amount of image processing, such as web development — or significantly more, as for image processing engineers.

We will touch upon the basics of how image data is organized in a computer, and then explore a few different methods for image manipulation:

1. Images as Arrays - intro to images in code and their representation in bits and bytes
2. Resize and Scaling - resizing our images, saving on compute with downsampling
3. Filters - how to build an overly complicated Photoshop with significantly fewer features

We will use Numpy throughout the article — for quick and easy image manipulation, I would suggest another library called PIL.

PIL code will be included too, but we won’t focus on it — as it abstracts everything to a level where we can’t focus on the actual processes.

Throughout the article, we’ll work with the article cover image. In the original format, this image consists of 7207 x 4801 pixels.

We will be representing this image as an array. In this array, we will have three ‘channels’ (layers) — representing red, green, and blue.

All of that gives us a total of 7207 * 4801 * 3 = 103,802,421 numeric values contained within our image array.

Now, let’s write some code to convert our image into a Numpy array:

from PIL import Image
import numpy as np

img = Image.open(PATH)
img.load()
img_array = np.asarray(img, dtype='int32')

We can also take a look at the values that make up our array:

All values in our array vary from zero, the minimum — to 255, the maximum. This range is used because each of our colors is an 8-bit byte. Each bit can be either 0 or 1. That gives us a total of 2^8 = 256 possible combinations, a range of 0–255. Where —

0 = 00000000
1 = 00000001
2 = 00000010
3 = 00000011
...
254 = 11111110
255 = 11111111

As we can see, 0 shows all bits to be off, whereas for 255 all bits are on — this makes sense as 0 corresponds to the minimum of that respective color in the image, and 255 the maximum. Let’s try turning our colors off-and-on.

import matplotlib.pyplot as plt

red1 = np.copy(img_array)
red1[:, :, 0] = 255  # this is on (top row below)
plt.imshow(red1)

blue0 = np.copy(img_array)
blue0[:, :, 2] = 0  # this is off (bottom row below)
plt.imshow(blue0)

There are two major reasons to do this in machine learning applications:

1. Downsampling to save on compute — training an image classifier with 8K resolution images will take an impressive setup — 360p is a little more realistic.
2. Increasing the size of our dataset — (we have to stick to the same image size in classification, but that doesn’t mean we can’t use images with different resolutions)

Both reasons require the same approach: we take our original array and compress groups of pixels into single pixels in a way that maximizes the information retained.
Here, we will downsample our image by taking the average value of multiple pixels to create one new pixel.

We use skimage.measure.block_reduce to perform this operation and np.mean to specify the reduction operation using averages.

To see a clear difference, we downsample our image by a factor of 20.

import skimage.measure

downsample = 20

# first, change to 0-1
ds_array = img_array / 255

r = skimage.measure.block_reduce(ds_array[:, :, 0],
                                 (downsample, downsample), np.mean)
g = skimage.measure.block_reduce(ds_array[:, :, 1],
                                 (downsample, downsample), np.mean)
b = skimage.measure.block_reduce(ds_array[:, :, 2],
                                 (downsample, downsample), np.mean)

ds_array = np.stack((r, g, b), axis=-1)

We can compare the original and downsampled images using imshow, which gives us:

Again — speaking from an ML/data science perspective — it is impossible (for any normal computer) to cope with training a classifier on big sets of 100M+ value arrays. But after downsampling by a factor of 20, our array has a total of just 241 * 361 * 3 = 261,003 values — an easily manageable size.

This is the fun part — we’re going to write some code to modify our picture — like Instagram filters.

The vignette is essentially a darkening of pixels towards the outer edges of the image — it looks like this:

A while back, this was the go-to effect for edgy teenagers trying to cover up their lackluster photography skills. It’s a fun effect to get started with. All we need is a function that we can apply to our array that will reduce values towards the outer edges.

To achieve this effect, we start with a Gaussian function — we will feed it a single value for every integer row-wise and column-wise. This produces two Gaussian distributions, one across the rows and one across the columns.

We then normalize these distributions and apply them to our image. Visualizing just the two distributions applied to a white square gives us this:

This effect can be applied to an array easily. Applying it to our Unsplash image gives us that edgy teen/hipster Instagram effect:

Another cool thing we can do is modify the color layers in our image individually. Simple balancing is easily achieved by accessing each color layer individually and increasing/decreasing layer values.

As we do this, we will inadvertently create numbers outside of our range of 0–255. To deal with these we use a simple clamp function to squeeze our array back into the correct range (NumPy's np.clip does exactly this in one call).

Playing around with the r, g, b values in col_bal can produce some interesting effects:

We use the exact same approach to modify image brightness. All we need to do is modify our RGB values by the exact same value.

Finally, we can put all of these together. Some color-balancing with the vignette effect can build some cool effects really easily.

One thing that we have avoided throughout this article is the use of the PIL library. PIL is the go-to for image processing in Python — so this article wouldn’t be complete without mentioning it.

PIL is an excellent library, purpose-made for image processing in Python. With it, we can compress what would take us several lines of Numpy code — into a single function.

However, PIL abstracts everything to a point where understanding the mechanics is not possible — hence our heavy use of Numpy.

We’ve covered the basics behind images in bits/bytes, how we can downsample images, and how to edit images with vignettes and color-balancing.

If you’re interested in learning more advanced concepts, I would highly recommend this course on Datacamp (the first part is free).
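As a compact reference, here is a minimal sketch of the vignette described above. It is not the author's original snippet; the Gaussian width and the function names are assumptions, but the mechanics (two per-axis Gaussians combined into a normalized 2D mask, then clamped) follow the text.

import numpy as np

def gaussian(x, mu, sigma):
    # Unnormalized Gaussian: equals 1.0 at x == mu and decays outwards
    return np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))

def vignette(img_array, sigma_scale=0.5):
    rows, cols = img_array.shape[:2]
    # One Gaussian per axis, centered on the middle of the image
    row_weights = gaussian(np.arange(rows), rows / 2, rows * sigma_scale)
    col_weights = gaussian(np.arange(cols), cols / 2, cols * sigma_scale)
    # Outer product combines them into a 2D mask, normalized to a max of 1
    mask = np.outer(row_weights, col_weights)
    mask /= mask.max()
    # Darken every color channel towards the edges, then clamp to 0-255
    out = img_array * mask[:, :, np.newaxis]
    return np.clip(out, 0, 255).astype('uint8')

vignetted = vignette(img_array)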
I hope you have learned something from the article. If you have any questions or suggestions, get in touch on Twitter or in the comments below!

Thanks for reading!
Bootstrap Confidence Interval with R Programming - GeeksforGeeks
10 Jul, 2021

Bootstrapping is a statistical method for inference about a population using sample data. It can be used to estimate the confidence interval (CI) by drawing samples with replacement from sample data. Bootstrapping can be used to assign CIs to various statistics that have no closed-form or complicated solutions. Suppose we want to obtain a 95% confidence interval using bootstrap resampling; the steps are as follows:

1. Sample n elements with replacement from the original sample data.
2. For every sample, calculate the desired statistic, e.g. mean, median, etc.
3. Repeat steps 1 and 2 m times and save the calculated stats.
4. Plot the calculated stats, which form the bootstrap distribution.
5. Using the bootstrap distribution of the desired stat, we can calculate the 95% CI.

Illustration of the bootstrap distribution generation from sample:

In R programming, the boot package allows a user to easily generate bootstrap samples of virtually any statistic that we can calculate. With it we can generate estimates of bias, bootstrap confidence intervals, or plots of the bootstrap distribution.

For demonstration purposes, we are going to use the iris dataset due to its simplicity and availability as one of the built-in datasets in R. The data set consists of 50 samples from each of the three species of Iris (Iris setosa, Iris virginica, and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters. We can view the iris dataset using the head command and note the features of interest.

# View the first row
# of the iris dataset
head(iris, 1)

Output:

Sepal.Length Sepal.Width Petal.Length Petal.Width Species
         5.1         3.5          1.4         0.2  setosa

We want to estimate the correlation between Petal Length and Petal Width.

Steps to Compute the Bootstrap CI in R:

1. Import the boot library for calculation of the bootstrap CI and ggplot2 for plotting.

# Import library for bootstrap methods
library(boot)

# Import library for plotting
library(ggplot2)

2. Create a function that computes the statistic we want to use, such as mean, median, correlation, etc.

# Custom function to find correlation
# between the Petal Length and Width
corr.fun <- function(data, idx)
{
   df <- data[idx, ]

   # Find the spearman correlation between
   # the 3rd and 4th columns of dataset
   c(cor(df[, 3], df[, 4], method = 'spearman'))
}

3. Use the boot function to find the bootstrap of the statistic.

# Setting the seed for
# reproducibility of results
set.seed(42)

# Calling the boot function with the dataset,
# our function and no. of rounds
bootstrap <- boot(iris, corr.fun, R = 1000)

# Display the result of boot function
bootstrap

Output:

ORDINARY NONPARAMETRIC BOOTSTRAP

Call:
boot(data = iris, statistic = corr.fun, R = 1000)

Bootstrap Statistics :
     original        bias    std. error
t1*  0.9376668  -0.002717295  0.009436212

4. We can plot the generated bootstrap sampling distribution using the plot command on the calculated bootstrap object.

# Plot the bootstrap sampling
# distribution
plot(bootstrap)

Output:

5. Using the boot.ci() function to get the confidence intervals.
# Function to find the
# bootstrap Confidence Intervals
boot.ci(boot.out = bootstrap,
        type = c("norm", "basic", "perc", "bca"))

Output:

BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
Based on 1000 bootstrap replicates

CALL :
boot.ci(boot.out = bootstrap, type = c("norm", "basic", "perc", "bca"))

Intervals :
Level      Normal              Basic
95%   ( 0.9219,  0.9589 )   ( 0.9235,  0.9611 )

Level     Percentile            BCa
95%   ( 0.9142,  0.9519 )   ( 0.9178,  0.9535 )
Calculations and Intervals on Original Scale

Inference for the Bootstrap CI from the output:

Looking at the Normal method interval of (0.9219, 0.9589), we can be 95% confident that the actual correlation between petal length and width lies in this interval. As we have seen, the output consists of multiple CIs computed by different methods according to the type parameter of boot.ci. The computed intervals correspond to the types ("norm", "basic", "perc", "bca"), i.e. Normal, Basic, Percentile, and BCa, which give different intervals for the same 95% level. The specific method to use for any variable depends on various factors such as its distribution, homoscedasticity, bias, etc.

The 5 methods that the boot package provides for bootstrap confidence intervals are summarized below:

1. Normal bootstrap, or standard confidence limits, uses the standard deviation for calculation of the CI. Use when the statistic:
   - is unbiased;
   - is normally distributed.
2. Basic bootstrap, or Hall's (second percentile) method, uses percentiles to calculate the upper and lower limits of the test statistic. Use when:
   - the statistic is unbiased and homoscedastic;
   - the bootstrap statistic can be transformed to a standard normal distribution.
3. Percentile bootstrap, or quantile-based or approximate intervals, uses quantiles, e.g. 2.5%, 5%, etc., to calculate the CI. Use when:
   - the statistic is unbiased and homoscedastic;
   - the standard error of your bootstrap statistic and sample statistics are the same.
4. BCa bootstrap, or bias-corrected accelerated, uses percentile limits with bias correction and estimates an acceleration coefficient to correct the limits and find the CI. Use when:
   - the bootstrap statistic can be transformed to a normal distribution;
   - the normal-transformed statistic has a constant bias.
5. Studentized bootstrap resamples the bootstrap sample to find a second-stage bootstrap statistic and uses it to calculate the CI. Use when:
   - the statistic is homoscedastic;
   - the standard error of the bootstrap statistic can be estimated by second-stage resampling.
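As a quick sanity check, the percentile interval can also be computed by hand from the replicates that boot stores in bootstrap$t. This snippet is an addition to the tutorial; its result closely (though not exactly) matches the "perc" interval above, because boot.ci interpolates its quantiles.

# Percentile CI computed directly from the bootstrap replicates
quantile(bootstrap$t[, 1], probs = c(0.025, 0.975))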
ExpressJS - RESTFul APIs
An API is always needed to create mobile applications, single page applications, use AJAX calls and provide data to clients. A popular architectural style for structuring and naming these APIs and their endpoints is called REST (Representational State Transfer). HTTP 1.1 was designed keeping REST principles in mind. REST was introduced by Roy Fielding in 2000 in his doctoral dissertation.

RESTful URIs and methods provide us with almost all the information we need to process a request. The table given below summarizes how the various verbs should be used and how URIs should be named. We will be creating a movies API towards the end; let us now discuss how it will be structured.

Method   URI            Action
GET      /movies        Get the list of all movies
GET      /movies/:id    Get the movie with the given id
POST     /movies        Create a new movie
PUT      /movies/:id    Update the movie with the given id, creating it if it does not exist
DELETE   /movies/:id    Delete the movie with the given id

Let us now create this API in Express. We will be using JSON as our transport data format as it is easy to work with in JavaScript and has other benefits. Replace your index.js file with the following program.

var express = require('express');
var bodyParser = require('body-parser');
var cookieParser = require('cookie-parser');
var multer = require('multer');
var upload = multer();

var app = express();

app.use(cookieParser());
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: true }));
app.use(upload.array());

//Require the Router we defined in movies.js
var movies = require('./movies.js');

//Use the Router on the sub route /movies
app.use('/movies', movies);

app.listen(3000);

Now that we have our application set up, let us concentrate on creating the API.

Start by setting up the movies.js file. We are not using a database to store the movies but are storing them in memory; so every time the server restarts, the movies added by us will vanish. This can easily be mimicked using a database or a file (using the node fs module).

Once you import Express, create a Router and export it using module.exports −

var express = require('express');
var router = express.Router();

var movies = [
   {id: 101, name: "Fight Club", year: 1999, rating: 8.1},
   {id: 102, name: "Inception", year: 2010, rating: 8.7},
   {id: 103, name: "The Dark Knight", year: 2008, rating: 9},
   {id: 104, name: "12 Angry Men", year: 1957, rating: 8.9}
];

//Routes will go here

module.exports = router;

Let us define the GET route for getting all the movies −

router.get('/', function(req, res){
   res.json(movies);
});

To test out if this is working fine, run your app, then open your terminal and enter −

curl -i -H "Accept: application/json" -H "Content-Type: application/json" -X GET localhost:3000/movies

The following response will be displayed −

[{"id":101,"name":"Fight Club","year":1999,"rating":8.1},
{"id":102,"name":"Inception","year":2010,"rating":8.7},
{"id":103,"name":"The Dark Knight","year":2008,"rating":9},
{"id":104,"name":"12 Angry Men","year":1957,"rating":8.9}]

We have a route to get all the movies. Let us now create a route to get a specific movie by its id.

router.get('/:id([0-9]{3,})', function(req, res){
   var currMovie = movies.filter(function(movie){
      if(movie.id == req.params.id){
         return true;
      }
   });
   if(currMovie.length == 1){
      res.json(currMovie[0])
   } else {
      res.status(404); //Set status to 404 as movie was not found
      res.json({message: "Not Found"});
   }
});

This will get us the movies according to the id that we provided.
To check the output, use the following command in your terminal −

curl -i -H "Accept: application/json" -H "Content-Type: application/json" -X GET localhost:3000/movies/101

You'll get the following response −

{"id":101,"name":"Fight Club","year":1999,"rating":8.1}

If you visit an invalid route, it will produce a cannot GET error, while if you visit a valid route with an id that doesn't exist, it will produce a 404 error.

We are done with the GET routes; let us now move on to the POST route. Use the following route to handle the POSTed data −

router.post('/', function(req, res){
   //Check if all fields are provided and are valid:
   if(!req.body.name ||
      !req.body.year.toString().match(/^[0-9]{4}$/g) ||
      !req.body.rating.toString().match(/^[0-9]\.[0-9]$/g)){

      res.status(400);
      res.json({message: "Bad Request"});
   } else {
      var newId = movies[movies.length-1].id + 1;
      movies.push({
         id: newId,
         name: req.body.name,
         year: req.body.year,
         rating: req.body.rating
      });
      res.json({message: "New movie created.", location: "/movies/" + newId});
   }
});

This will create a new movie and store it in the movies variable. To check this route, enter the following code in your terminal −

curl -X POST --data "name=Toy%20story&year=1995&rating=8.5" http://localhost:3000/movies

The following response will be displayed −

{"message":"New movie created.","location":"/movies/105"}

To test if this was added to the movies object, run the GET request for /movies/105 again. The following response will be displayed −

{"id":105,"name":"Toy story","year":"1995","rating":"8.5"}

Let us move on to create the PUT and DELETE routes.

The PUT route is almost the same as the POST route. We will be specifying the id for the object that'll be updated/created. Create the route in the following way.

router.put('/:id', function(req, res){
   //Check if all fields are provided and are valid:
   if(!req.body.name ||
      !req.body.year.toString().match(/^[0-9]{4}$/g) ||
      !req.body.rating.toString().match(/^[0-9]\.[0-9]$/g) ||
      !req.params.id.toString().match(/^[0-9]{3,}$/g)){

      res.status(400);
      res.json({message: "Bad Request"});
   } else {
      //Gets us the index of movie with given id.
      var updateIndex = movies.map(function(movie){
         return movie.id;
      }).indexOf(parseInt(req.params.id));

      if(updateIndex === -1){
         //Movie not found, create new
         movies.push({
            id: req.params.id,
            name: req.body.name,
            year: req.body.year,
            rating: req.body.rating
         });
         res.json({message: "New movie created.", location: "/movies/" + req.params.id});
      } else {
         //Update existing movie
         movies[updateIndex] = {
            id: req.params.id,
            name: req.body.name,
            year: req.body.year,
            rating: req.body.rating
         };
         res.json({message: "Movie id " + req.params.id + " updated.",
            location: "/movies/" + req.params.id});
      }
   }
});

This route will perform the function specified in the above table. It will update the object with new details if it exists. If it doesn't exist, it will create a new object. To check the route, use the following curl command. This will update an existing movie. To create a new movie, just change the id to a non-existing id.

curl -X PUT --data "name=Toy%20story&year=1995&rating=8.5" http://localhost:3000/movies/101

Response

{"message":"Movie id 101 updated.","location":"/movies/101"}

Use the following code to create a delete route −

router.delete('/:id', function(req, res){
   //Gets us the index of movie with given id; parseInt so the
   //string parameter matches the numeric movie ids.
   var removeIndex = movies.map(function(movie){
      return movie.id;
   }).indexOf(parseInt(req.params.id));

   if(removeIndex === -1){
      res.json({message: "Not found"});
   } else {
      movies.splice(removeIndex, 1);
      res.send({message: "Movie id " + req.params.id + " removed."});
   }
});

Check the route in the same way as we checked the other routes. On successful deletion (for example id 105), you will get the following output −

{message: "Movie id 105 removed."}

Finally, our movies.js file will look like the following.

var express = require('express');
var router = express.Router();

var movies = [
   {id: 101, name: "Fight Club", year: 1999, rating: 8.1},
   {id: 102, name: "Inception", year: 2010, rating: 8.7},
   {id: 103, name: "The Dark Knight", year: 2008, rating: 9},
   {id: 104, name: "12 Angry Men", year: 1957, rating: 8.9}
];

//Get the list of all movies
router.get('/', function(req, res){
   res.json(movies);
});

router.get('/:id([0-9]{3,})', function(req, res){
   var currMovie = movies.filter(function(movie){
      if(movie.id == req.params.id){
         return true;
      }
   });
   if(currMovie.length == 1){
      res.json(currMovie[0])
   } else {
      res.status(404); //Set status to 404 as movie was not found
      res.json({message: "Not Found"});
   }
});

router.post('/', function(req, res){
   //Check if all fields are provided and are valid:
   if(!req.body.name ||
      !req.body.year.toString().match(/^[0-9]{4}$/g) ||
      !req.body.rating.toString().match(/^[0-9]\.[0-9]$/g)){

      res.status(400);
      res.json({message: "Bad Request"});
   } else {
      var newId = movies[movies.length-1].id + 1;
      movies.push({
         id: newId,
         name: req.body.name,
         year: req.body.year,
         rating: req.body.rating
      });
      res.json({message: "New movie created.", location: "/movies/" + newId});
   }
});

router.put('/:id', function(req, res){
   //Check if all fields are provided and are valid:
   if(!req.body.name ||
      !req.body.year.toString().match(/^[0-9]{4}$/g) ||
      !req.body.rating.toString().match(/^[0-9]\.[0-9]$/g) ||
      !req.params.id.toString().match(/^[0-9]{3,}$/g)){

      res.status(400);
      res.json({message: "Bad Request"});
   } else {
      //Gets us the index of movie with given id.
      var updateIndex = movies.map(function(movie){
         return movie.id;
      }).indexOf(parseInt(req.params.id));

      if(updateIndex === -1){
         //Movie not found, create new
         movies.push({
            id: req.params.id,
            name: req.body.name,
            year: req.body.year,
            rating: req.body.rating
         });
         res.json({message: "New movie created.", location: "/movies/" + req.params.id});
      } else {
         //Update existing movie
         movies[updateIndex] = {
            id: req.params.id,
            name: req.body.name,
            year: req.body.year,
            rating: req.body.rating
         };
         res.json({message: "Movie id " + req.params.id + " updated.",
            location: "/movies/" + req.params.id});
      }
   }
});

router.delete('/:id', function(req, res){
   //parseInt so the string parameter matches the numeric movie ids.
   var removeIndex = movies.map(function(movie){
      return movie.id;
   }).indexOf(parseInt(req.params.id)); //Gets us the index of movie with given id.

   if(removeIndex === -1){
      res.json({message: "Not found"});
   } else {
      movies.splice(removeIndex, 1);
      res.send({message: "Movie id " + req.params.id + " removed."});
   }
});

module.exports = router;

This completes our REST API. Now you can create much more complex applications using this simple architectural style and Express.
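The tutorial leaves checking the DELETE route as an exercise. A curl call in the same style as the earlier checks might look like this (the id used is illustrative):

curl -X DELETE http://localhost:3000/movies/105

On success, the server responds with the message shown above, e.g. {"message":"Movie id 105 removed."}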
Create custom gym environments from scratch — A stock market example | by Adam King | Towards Data Science
OpenAI's gym is an awesome package that allows you to create custom reinforcement learning agents. It comes with quite a few pre-built environments like CartPole, MountainCar, and a ton of free Atari games to experiment with.

These environments are great for learning, but eventually you'll want to set up an agent to solve a custom problem. To do this, you'll need to create a custom environment, specific to your problem domain. Later, we will create a custom stock market environment for simulating stock trades. All of the code for this article will be available on my GitHub.

First, let's learn about what exactly an environment is. An environment contains all the necessary functionality to run an agent and allow it to learn. Each environment must implement the following gym interface:

import gym
import numpy as np
from gym import spaces

class CustomEnv(gym.Env):
    """Custom Environment that follows gym interface"""
    metadata = {'render.modes': ['human']}

    def __init__(self, arg1, arg2, ...):
        super(CustomEnv, self).__init__()
        # Define action and observation space
        # They must be gym.spaces objects
        # Example when using discrete actions:
        self.action_space = spaces.Discrete(N_DISCRETE_ACTIONS)
        # Example for using image as input:
        self.observation_space = spaces.Box(low=0, high=255,
            shape=(HEIGHT, WIDTH, N_CHANNELS), dtype=np.uint8)

    def step(self, action):
        # Execute one time step within the environment
        ...

    def reset(self):
        # Reset the state of the environment to an initial state
        ...

    def render(self, mode='human', close=False):
        # Render the environment to the screen
        ...

In the constructor, we first define the type and shape of our action_space, which will contain all of the actions possible for an agent to take in the environment. Similarly, we'll define the observation_space, which contains all of the environment's data to be observed by the agent.

Our reset method will be called to periodically reset the environment to an initial state. This is followed by many steps through the environment, in which an action will be provided by the model and must be executed, and the next observation returned. This is also where rewards are calculated, more on this later.

Finally, the render method may be called periodically to print a rendition of the environment. This could be as simple as a print statement, or as complicated as rendering a 3D environment using OpenGL. For this example, we will stick with print statements.

To demonstrate how this all works, we are going to create a stock trading environment. We will then train our agent to become a profitable trader within the environment. Let's get started!

The first thing we'll need to consider is how a human trader would perceive their environment. What observations would they make before deciding to make a trade?

A trader would most likely look at some charts of a stock's price action, perhaps overlaid with a couple technical indicators. From there, they would combine this visual information with their prior knowledge of similar price action to make an informed decision of which direction the stock is likely to move.

So let's translate this into how our agent should perceive its environment.

Our observation_space contains all of the input variables we want our agent to consider before making, or not making a trade. In this example, we want our agent to "see" the stock data points (open price, high, low, close, and daily volume) for the last five days, as well as a couple other data points like its account balance, current stock positions, and current profit.

The intuition here is that for each time step, we want our agent to consider the price action leading up to the current price, as well as their own portfolio's status in order to make an informed decision for the next action.

Once a trader has perceived their environment, they need to take an action. In our agent's case, its action_space will consist of three possibilities: buy a stock, sell a stock, or do nothing.

But this isn't enough; we need to know the amount of a given stock to buy or sell each time. Using gym's Box space, we can create an action space that has a discrete number of action types (buy, sell, and hold), as well as a continuous spectrum of amounts to buy/sell (0-100% of the account balance/position size respectively).

You'll notice the amount is not necessary for the hold action, but will be provided anyway. Our agent does not initially know this, but over time should learn that the amount is extraneous for this action.
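To make this encoding concrete, here is a minimal sketch of how such a hybrid action space can be declared and sampled with gym's Box; the bounds match the environment built below, and the sampled values shown are illustrative:

import numpy as np
from gym import spaces

# action[0] picks the action type: [0, 1) buy, [1, 2) sell, [2, 3] hold
# action[1] is the fraction of the balance/position to trade
action_space = spaces.Box(low=np.array([0, 0]), high=np.array([3, 1]), dtype=np.float16)

print(action_space.sample())  # e.g. array([1.74, 0.25], dtype=float16)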
The last thing to consider before implementing our environment is the reward. We want to incentivize profit that is sustained over long periods of time. At each step, we will set the reward to the account balance multiplied by some fraction of the number of time steps so far.

The purpose of this is to delay rewarding the agent too fast in the early stages and allow it to explore sufficiently before optimizing a single strategy too deeply. It will also reward agents that maintain a higher balance for longer, rather than those who rapidly gain money using unsustainable strategies.

Now that we've defined our observation space, action space, and rewards, it's time to implement our environment. First, we need to define the action_space and observation_space in the environment's constructor. The environment expects a pandas data frame to be passed in containing the stock data to be learned from. An example is provided in the Github repo.

class StockTradingEnv(gym.Env):
    """A stock trading environment for OpenAI gym"""
    metadata = {'render.modes': ['human']}

    def __init__(self, df):
        super(StockTradingEnv, self).__init__()

        self.df = df
        self.reward_range = (0, MAX_ACCOUNT_BALANCE)

        # Actions of the format Buy x%, Sell x%, Hold, etc.
        self.action_space = spaces.Box(
            low=np.array([0, 0]), high=np.array([3, 1]), dtype=np.float16)

        # Prices contains the OHCL values for the last five prices
        self.observation_space = spaces.Box(
            low=0, high=1, shape=(6, 6), dtype=np.float16)
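The snippets also reference a handful of module-level constants that the article never defines. Placeholder values along these lines make the code runnable; treat the exact numbers as assumptions rather than part of the original design:

# Assumed constants (not specified in the article)
MAX_ACCOUNT_BALANCE = 2147483647
MAX_NUM_SHARES = 2147483647
MAX_SHARE_PRICE = 5000
MAX_STEPS = 20000
INITIAL_ACCOUNT_BALANCE = 10000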
Next, we'll write the reset method, which is called any time a new environment is created or to reset an existing environment's state. It's here where we'll set the starting balance of each agent and initialize its open positions to an empty list.

def reset(self):
    # Reset the state of the environment to an initial state
    self.balance = INITIAL_ACCOUNT_BALANCE
    self.net_worth = INITIAL_ACCOUNT_BALANCE
    self.max_net_worth = INITIAL_ACCOUNT_BALANCE
    self.shares_held = 0
    self.cost_basis = 0
    self.total_shares_sold = 0
    self.total_sales_value = 0

    # Set the current step to a random point within the data frame
    self.current_step = random.randint(0, len(self.df.loc[:, 'Open'].values) - 6)

    return self._next_observation()

We set the current step to a random point within the data frame, because it essentially gives our agents more unique experiences from the same data set. The _next_observation method compiles the stock data for the last five time steps, appends the agent's account information, and scales all the values to between 0 and 1.

def _next_observation(self):
    # Get the data points for the last 5 days and scale to between 0-1
    frame = np.array([
        self.df.loc[self.current_step: self.current_step + 5, 'Open'].values / MAX_SHARE_PRICE,
        self.df.loc[self.current_step: self.current_step + 5, 'High'].values / MAX_SHARE_PRICE,
        self.df.loc[self.current_step: self.current_step + 5, 'Low'].values / MAX_SHARE_PRICE,
        self.df.loc[self.current_step: self.current_step + 5, 'Close'].values / MAX_SHARE_PRICE,
        self.df.loc[self.current_step: self.current_step + 5, 'Volume'].values / MAX_NUM_SHARES,
    ])

    # Append additional data and scale each value to between 0-1
    obs = np.append(frame, [[
        self.balance / MAX_ACCOUNT_BALANCE,
        self.max_net_worth / MAX_ACCOUNT_BALANCE,
        self.shares_held / MAX_NUM_SHARES,
        self.cost_basis / MAX_SHARE_PRICE,
        self.total_shares_sold / MAX_NUM_SHARES,
        self.total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE),
    ]], axis=0)

    return obs

Next, our environment needs to be able to take a step. At each step we will take the specified action (chosen by our model), calculate the reward, and return the next observation.

def step(self, action):
    # Execute one time step within the environment
    self._take_action(action)

    self.current_step += 1

    if self.current_step > len(self.df.loc[:, 'Open'].values) - 6:
        self.current_step = 0

    delay_modifier = (self.current_step / MAX_STEPS)

    reward = self.balance * delay_modifier
    done = self.net_worth <= 0

    obs = self._next_observation()

    return obs, reward, done, {}

Now, our _take_action method needs to take the action provided by the model and either buy, sell, or hold the stock.

def _take_action(self, action):
    # Set the current price to a random price within the time step
    current_price = random.uniform(
        self.df.loc[self.current_step, "Open"],
        self.df.loc[self.current_step, "Close"])

    action_type = action[0]
    amount = action[1]

    if action_type < 1:
        # Buy amount % of balance in shares
        total_possible = self.balance / current_price
        shares_bought = total_possible * amount
        prev_cost = self.cost_basis * self.shares_held
        additional_cost = shares_bought * current_price

        self.balance -= additional_cost
        self.cost_basis = (prev_cost + additional_cost) / (self.shares_held + shares_bought)
        self.shares_held += shares_bought

    elif action_type < 2:
        # Sell amount % of shares held
        shares_sold = self.shares_held * amount

        self.balance += shares_sold * current_price
        self.shares_held -= shares_sold
        self.total_shares_sold += shares_sold
        self.total_sales_value += shares_sold * current_price

    self.net_worth = self.balance + self.shares_held * current_price

    if self.net_worth > self.max_net_worth:
        self.max_net_worth = self.net_worth

    if self.shares_held == 0:
        self.cost_basis = 0

The only thing left to do now is render the environment to the screen. For simplicity's sake, we will just render the profit made so far and a couple other interesting metrics.

def render(self, mode='human', close=False):
    # Render the environment to the screen
    profit = self.net_worth - INITIAL_ACCOUNT_BALANCE

    print(f'Step: {self.current_step}')
    print(f'Balance: {self.balance}')
    print(f'Shares held: {self.shares_held} (Total sold: {self.total_shares_sold})')
    print(f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})')
    print(f'Net worth: {self.net_worth} (Max net worth: {self.max_net_worth})')
    print(f'Profit: {profit}')

Our environment is complete.
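Before wiring the environment up to a learning algorithm, a quick sanity check with random actions can confirm that the plumbing works end to end. A minimal sketch, assuming df is the price data frame loaded in the next snippet and the module imports (gym, numpy, random) are in place:

env = StockTradingEnv(df)
obs = env.reset()

for _ in range(5):
    obs, reward, done, info = env.step(env.action_space.sample())  # random action
    print(reward)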
We can now instantiate a StockTradingEnv environment with a data frame and test it with a model from stable-baselines.

import gym
import json
import datetime as dt

from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2

from env.StockTradingEnv import StockTradingEnv

import pandas as pd

df = pd.read_csv('./data/AAPL.csv')
df = df.sort_values('Date')

# The algorithms require a vectorized environment to run
env = DummyVecEnv([lambda: StockTradingEnv(df)])

model = PPO2(MlpPolicy, env, verbose=1)
model.learn(total_timesteps=20000)

obs = env.reset()
for i in range(2000):
    action, _states = model.predict(obs)
    obs, rewards, done, info = env.step(action)
    env.render()

Now of course, this was all just for fun to test out creating an interesting, custom gym environment with some semi-complex actions, observations, and reward spaces. It's going to take a lot more time and effort if we really want to get rich with deep learning in the stock market...

Stay tuned for next week's article where we'll learn to create simple, yet elegant visualizations of our environments!

Thanks for reading! As always, all of the code for this tutorial can be found on my GitHub. Leave a comment below if you have any questions or feedback, I'd love to hear from you! I can also be reached on Twitter at @notadamking. You can also sponsor me on Github Sponsors or Patreon.
C program to explain the goto statement
The C program evaluates the square root for five numbers. The variable count stores the count of numbers read. When count is less than or equal to 5, the goto read statement directs the control to the label read. Otherwise, the program prints a message and stops.

The goto statement alters the normal sequence of program execution by transferring control to some other part of the program. In modern C, a counting loop like this would normally be written with a for or while loop; goto is used here purely to illustrate the mechanism.

Following is the C program for usage of the goto statement −

#include <stdio.h>
#include <math.h>
int main(){
   double x, y;
   int count;
   count = 1;
   printf("Enter FIVE real values in a LINE \n");
   read:
   scanf("%lf", &x);
   printf("\n");
   if (x < 0)
      printf("Value - %d is negative\n", count);
   else{
      y = sqrt(x);
      printf("%lf\t %lf\n", x, y);
   }
   count = count + 1;
   if (count <= 5)
      goto read;
   printf("\nEnd of computation");
   return 0;
}

When the above program is executed, it produces the following result −

Enter FIVE real values in a LINE
2.3 -4.5 2 6.8 -44.7
2.300000    1.516575
Value - 2 is negative
2.000000    1.414214
6.800000    2.607681
Value - 5 is negative
End of computation
Find nth term of a given recurrence relation - GeeksforGeeks
08 Apr, 2021

Let a_n be a sequence of numbers, which is defined by the recurrence relation a_1 = 1 and a_(n+1) / a_n = 2^n. The task is to find the value of log2(a_n) for a given n.

Examples:

Input: 5
Output: 10
Explanation:
log2(a_n) = (n * (n - 1)) / 2
= (5 * (5 - 1)) / 2
= 10

Input: 100
Output: 4950

Approach: Writing out the recurrence for successive terms gives

a_2 / a_1 = 2^1
a_3 / a_2 = 2^2
...
a_n / a_(n-1) = 2^(n - 1)

We multiply all of the above in order to reach

a_n / a_1 = 2^(1 + 2 + ... + (n - 1)) = 2^(n * (n - 1) / 2)

Since a_1 = 1, we get a_n = 2^(n * (n - 1) / 2). Then

log2(a_n) = (n * (n - 1)) / 2

Below is the implementation of the above approach.

C++

// C++ program to find nth term of
// a given recurrence relation
#include <bits/stdc++.h>
using namespace std;

// function to return required value
int sum(int n)
{
    // Get the answer
    int ans = (n * (n - 1)) / 2;

    // Return the answer
    return ans;
}

// Driver program
int main()
{
    // Get the value of n
    int n = 5;

    // function call to print result
    cout << sum(n);

    return 0;
}

Java

// Java program to find nth term
// of a given recurrence relation
import java.util.*;

class solution
{
    static int sum(int n)
    {
        // Get the answer
        int ans = (n * (n - 1)) / 2;

        // Return the answer
        return ans;
    }

    // Driver code
    public static void main(String arr[])
    {
        // Get the value of n
        int n = 5;

        // function call to print result
        System.out.println(sum(n));
    }
}
// This code is contributed by Surendra_Gangwar

Python3

# Python3 program to find nth
# term of a given recurrence
# relation

# function to return
# required value
def sum(n):

    # Get the answer
    ans = (n * (n - 1)) / 2

    # Return the answer
    return ans

# Driver Code

# Get the value of n
n = 5

# function call to print result
print(int(sum(n)))

# This code is contributed by Raj

C#

// C# program to find nth term
// of a given recurrence relation
using System;

class GFG
{
    static int sum(int n)
    {
        // Get the answer
        int ans = (n * (n - 1)) / 2;

        // Return the answer
        return ans;
    }

    // Driver code
    public static void Main()
    {
        // Get the value of n
        int n = 5;

        // function call to print result
        Console.WriteLine(sum(n));
    }
}

// This code is contributed by
// inder_verma

PHP

<?php
// PHP program to find nth term of
// a given recurrence relation

// function to return required value
function sum($n)
{
    // Get the answer
    $ans = ($n * ($n - 1)) / 2;

    // Return the answer
    return $ans;
}

// Driver Code

// Get the value of n
$n = 5;

// function call to print result
echo sum($n);

// This code is contributed by
// inder_verma
?>

Javascript

<script>
// Javascript program to find nth term of
// a given recurrence relation

// function to return required value
function sum(n)
{
    // Get the answer
    let ans = parseInt((n * (n - 1)) / 2);

    // Return the answer
    return ans;
}

// Driver program

// Get the value of n
let n = 5;

// function call to print result
document.write(sum(n));

// This code is contributed by subham348.
</script>

Output:
10

Time Complexity: O(1)
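As a quick cross-check, the closed form can be verified against the recurrence by multiplying out the ratios directly. A minimal Python sketch:

# Build a_n from the recurrence a_1 = 1, a_(k+1) = a_k * 2^k,
# then read off log2(a_n) exactly via the bit length.
def log2_an(n):
    a = 1
    for k in range(1, n):
        a *= 2 ** k
    return a.bit_length() - 1  # exact, since a is a power of two

for n in range(1, 20):
    assert log2_an(n) == n * (n - 1) // 2  # matches the closed form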
Parsing Fixed Width Text Files with Pandas | by Amy Rask | Towards Data Science
A fixed width file is similar to a csv file, but rather than using a delimiter, each field has a set number of characters. This creates files with all the data tidily lined up with an appearance similar to a spreadsheet when opened in a text editor. This is convenient if you're looking at raw data files in a text editor, but less ideal when you need to programmatically work with the data.

Fixed width files have a few common quirks to keep in mind:

- When values don't consume the total character count for a field, a padding character is used to bring the character count up to the total for that field.
- Any character can be used as a padding character as long as it is consistent throughout the file. White space is a common padding character.
- Values can be left or right aligned in a field and alignment must be consistent for all fields in the file.

A thorough description of a fixed width file is available here.

Note: All fields in a fixed width file do not need to have the same character count. For example: in a file with three fields, the first field could be 6 characters, the second 20, and the last 9.

Upon initial examination, a fixed width file can look like a tab separated file when white space is used as the padding character. If you're trying to read a fixed width file as a csv or tsv and getting mangled results, try opening it in a text editor. If the data all line up tidily, it's probably a fixed width file. Many text editors also give character counts for cursor placement, which makes it easier to spot a pattern in the character counts.

If your file is too large to easily open in a text editor, there are various ways to sample portions of it into a separate, smaller file on the command line. An easy method on a Unix/Linux system is the head command. The example below uses head with -n 50 to read the first 50 lines of large_file.txt and then copy them into a new file called first_50_rows.txt.

head -n 50 large_file.txt > first_50_rows.txt

The UniProt Knowledgebase (UniProtKB) is a freely accessible and comprehensive database for protein sequence and annotation data available under a CC-BY (4.0) license. The Swiss-Prot branch of the UniProtKB has manually annotated and reviewed information about proteins for various organisms. Complete datasets from UniProt data can be downloaded from ftp.uniprot.org. The data for human proteins are contained in a set of fixed width text files: humchr01.txt - humchr22.txt, humchrx.txt, and humchry.txt.

We don't need all 24 files for this example, so here's the link to the first file in the set:

https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/docs/humchr01.txt

A quick glance at the file in a text editor shows a substantial header that we don't need leading into 6 fields of data.

Fixed width files don't seem to be as common as many other data file formats and they can look like tab separated files at first glance. Visual inspection of a text file in a good text editor before trying to read a file with Pandas can substantially reduce frustration and help highlight formatting patterns.

Note: All code for this example was written for Python 3.6 and Pandas 1.2.0.

The documentation for pandas.read_fwf() lists 5 parameters:

filepath_or_buffer, colspecs, widths, infer_nrows, and **kwds
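For reference, the corresponding signature in Pandas 1.2 looks roughly like this (defaults shown; check the documentation for your installed version):

pandas.read_fwf(filepath_or_buffer, colspecs='infer', widths=None, infer_nrows=100, **kwds)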
Two of the pandas.read_fwf() parameters, colspecs and infer_nrows, have default values that work to infer the columns based on a sampling of initial rows.

Let's utilize the default settings for pandas.read_fwf() to get our tidy DataFrame. We'll leave the colspecs parameter to its default value of 'infer', which in turn utilizes the default value (100) of the infer_nrows parameter. These two defaults attempt to find a pattern in the first 100 rows of data (after any skipped rows) and use that pattern to split the data into columns.

There are several rows of file header that precede the tabular info in our example file. We need to skip them when we read the file.

None of the parameters seem ideal for skipping rows when reading the file. So how do we do it? We utilize the **kwds parameter.

Conveniently, pandas.read_fwf() uses the same TextFileReader context manager as pandas.read_table(). This combined with the **kwds parameter allows us to use parameters for pandas.read_table() with pandas.read_fwf(). So we can use the skiprows parameter to skip the first 35 rows in the example file. Similarly, we can use the skipfooter parameter to skip the last 5 rows of the example file that contain a footer that isn't part of the tabular data.

pandas.read_fwf('humchr01.txt', skiprows=35, skipfooter=5)

The above attempt leaves the DataFrame a bit of a mess 😔:

Note: Since we're using the default values for colspecs and infer_nrows we don't have to declare them.

Part of the issue here is that the default colspecs parameter is trying to infer the column widths based on the first 100 rows, but the row right before the tabular data (row 36 in the file and shown in the column names above) doesn't actually follow the character count patterns in the data table, so the inferred column widths are getting mangled.

If we'd set skiprows to 36 instead of 35, we'd have ended up with the first row of data pushed into the column names, which also mangles the inferred column widths. There's no winning here without some additional cleanup. Let's settle the column names issue with the names parameter and see if that helps.

Note: Using the names parameter means we are not allocating a row in the file to column names, so we as users have to make sure to account for the fact that skiprows must start at the first data row. So skiprows is set to 36 in the next example but it was 35 in previous examples when we didn't use the names parameter.

pandas.read_fwf('humchr01.txt', skiprows=36, skipfooter=5,
                names=['gene_name', 'chromosomal_position', 'uniprot',
                       'entry_name', 'mtm_code', 'description'])

That's better, but still a bit of a mess. Pandas inferred the column splits correctly, but pushed the first two fields to the index. Let's fix the index issue by setting index_col=False.

pandas.read_fwf('humchr01.txt', skiprows=36, skipfooter=5, index_col=False,
                names=['gene_name', 'chromosomal_position', 'uniprot',
                       'entry_name', 'mtm_code', 'description'])

That looks good! The columns are split correctly, the column names make sense and the first row of data in the DataFrame matches the first row in the example file.

We relied on the default settings for two of the pandas.read_fwf() specific parameters to get our tidy DataFrame. The colspecs parameter was left to its default value of 'infer', which in turn utilizes the default value of the infer_nrows parameter and finds a pattern in the first 100 rows of data (after the skipped rows) and uses that to split the data into columns.
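As an aside, the widths parameter offers a third option: instead of boundary tuples it takes a list of field widths for contiguous fields. A sketch equivalent to the split above (the widths are derived from the colspecs used in the next section, and the final width of 100 is a generous guess for the free-text description field):

pandas.read_fwf('humchr01.txt', skiprows=36, skipfooter=5, index_col=False,
                widths=[14, 16, 11, 12, 7, 100],
                names=['gene_name', 'chromosomal_position', 'uniprot',
                       'entry_name', 'mtm_code', 'description'])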
The default parameters worked well for this example file, but we could also specify the colspecs parameter instead of letting pandas infer the columns.

Just like with the example above, we need to start with some basic cleanup. We'll drop the header and footer in the file and set the column names just like before. The next step is to build a list of tuples with the intervals of each field. The list below fits the example file.

colspecs = [(0, 14), (14, 30), (30, 41), (41, 53), (53, 60), (60, -1)]

Note the last tuple: (60, -1). We can use -1 to indicate the last index value. Alternatively, we could use None instead of -1 to indicate the last index value.

Note: When using colspecs the tuples don't have to be mutually exclusive! Columns can be set to tuples that overlap if that is desired. For example, if you want the first field duplicated: colspecs = [(0, 14), (0, 14), ...

pandas.read_fwf('humchr01.txt', skiprows=36, skipfooter=5, colspecs=colspecs,
                names=['gene_name', 'chromosomal_position', 'uniprot',
                       'entry_name', 'mtm_code', 'description'])

Once more we've attained a tidy DataFrame. This time we explicitly declared our field start and stop positions using the colspecs parameter rather than letting pandas infer the fields.

Reading fixed width text files with Pandas is easy and accessible. The default parameters for pandas.read_fwf() work in most cases and the customization options are well documented. The Pandas library has many functions to read a variety of file types and pandas.read_fwf() is one more useful Pandas tool to keep in mind.
Reading fixed width text files with Pandas is easy and accessible. The default parameters for pandas.read_fwf() work in most cases and the customization options are well documented. The Pandas library has many functions to read a variety of file types, and pandas.read_fwf() is one more useful Pandas tool to keep in mind.
The Trolley Problem Isn’t Theoretical Anymore | by Jessie J. Smith | Towards Data Science
You are the conductor of a runaway trolley that is hurtling down its track at 85 miles an hour, heading straight for a group of young boys playing on the tracks, blissfully unaware of their impending doom. You realize that you can pull a lever to switch the trolley to an alternate track, saving the lives of these boys. Before you pull the lever, though, you see there is a young girl playing on the tracks of the alternate route. Pulling this lever would mean ending her life. You have ten seconds until it is too late to decide...

What do you do?

The trolley problem was a thought experiment first introduced by Philippa Foot in 1967. In 1984, this problem was reintroduced in an academic paper by Dr. JJ Thomson. It has been cited over 1300 times.

The good news is that discussions about ethics are becoming more common in computer science classrooms at universities. Engineers are finally beginning to discuss problems about values and fairness when it comes to digital systems and algorithms. What is discussed far less often, though, are the consequences — intended or not — of discriminatory systems and biased algorithms that are already in effect and being used by humans every day.

The trolley problem is already being played out by companies like Tesla, Google, Uber, Lyft, Argo, Embark, and General Motors. The problem goes like this:

If a self-driving car finds itself in a situation where it has to swerve to save its driver, but swerving left means hitting a child crossing the street, and swerving right means hitting two elderly women crossing the road — which direction should it swerve?

Previously, Google chose the values of deontology: always hit the smallest object, no matter what (there was no difference between a trashcan and a baby in a stroller)*. Tesla opted out of accountability: crowd-source human driving data and mimic human driving behaviors. This includes speeding, swerving, and (sometimes) breaking the law.

Why are CS classrooms discussing algorithms and AI theoretically? The technology is here. It isn’t theoretical anymore. It is time to assess the algorithms that already exist in the growing digital landscape: the ones that make decisions that could negatively or positively impact society.

But first, we must discuss the moral frameworks that these systems are built upon.

Before we can ethically assess algorithms and machine learning models, we must first discuss the values that are encoded into them. Although there are many frameworks for ethics and moral philosophy, I’m only going to review the most commonly occurring ones.

The first of these, utilitarianism, is a numbers game. It focuses on the consequences of an action. According to utilitarianism, an action is ethical if it causes the most good/pleasure and the least pain/suffering.

A utilitarian would be okay with harvesting one unsuspecting person’s organs if it meant saving the lives of five people who needed transplants. When it comes to the trolley problem, a utilitarian would always choose to hit the smallest number of people on the road — no matter who they were.

This ethical framework is the easiest for digital systems to adopt, because it’s easy to turn a numbers game into code. There isn’t any room for granularity.
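To make that concrete, here is a deliberately toy sketch (not taken from any real autonomous-driving system) of what a purely utilitarian decision rule reduces to once it is written down: pick whichever option minimizes the body count, with no room for context.

def utilitarian_choice(paths):
    """Toy utilitarian rule: return the maneuver that harms the fewest people."""
    # paths maps each available maneuver to the number of people in its way;
    # who those people are never enters the calculation
    return min(paths, key=paths.get)

print(utilitarian_choice({'swerve left': 1, 'swerve right': 2}))  # -> 'swerve left'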
Next comes deontology. Deontological theory was born from the famous philosopher Immanuel Kant. This theory focuses less on the consequences and more on the actions themselves. In deontology, one rule is chosen that becomes the universal law. The ends never justify the means.

In the case of the trolley problem, this would mean that the conductor must choose one metric for fairness that they never break. This could mean they choose to always save the most lives, the youngest lives, the oldest lives, etc. No matter what — their metric must always be followed.

In the following image, the deontological rule for a self-driving car might be to “always save the most lives that will contribute to the most overall good.”

This is similar to how laws are created: one rule covers all cases of a specific action. But, just as is the case with policy, there is a major flaw in deontology: all things in life are contextual. Sometimes following the same rule will result in fair decisions for some and unfair decisions for others. How would our deontological rule account for the following scenario?

Finally, virtue ethics. This moral philosophy focuses less on actions or consequences and instead places all of the pressure on the moral character of the person who performs the action. In other words, the motivations behind an action are the focus.

If the trolley conductor saves the lives of 5 boys, but only so he can swerve the trolley into his ex-girlfriend (who recently broke up with him), his actions aren’t virtuous. Even though he saved five lives, his motivations weren’t pure.

This gives humans greater agency to break rules and perform actions that might be controversial for some, as long as those actions come from virtuous motivations. This does lead to a big problem, though: what is a virtuous motivation?

It turns out that the answer to this question varies widely between people, cultures, and geographic locations.

Now that we all understand the basics of ethical and moral philosophy, we can apply these concepts to the digital systems that are heavily impacting society today.

A tool that can help us assess the underlying ethical implications of digital systems is the framework of Privacy as Contextual Integrity, created by Helen Nissenbaum. Although this framework was originally intended to help assess digital privacy, it can easily be applied to all digital innovations. Utilizing some of the techniques from Nissenbaum’s framework, I propose a framework to identify and modify unethical technology. In order to make this framework approachable for everyone, I’ll introduce it as a decision tree.

To make sure we fully understand this framework, let’s put it to the test with a use case: assessing a defendant’s risk of returning to crime. AKA: assigning a recidivism risk score.

Let’s go back in time to the 1700s, 1800s, and 1900s. In this scenario, the non-digital alternative for assessing someone’s recidivism risk in court was often just a judge’s opinion. Evidence could be brought to light about past behaviors that might influence a defendant’s likelihood of returning to crime, but someone’s ‘risk assessment’ was an educated guess, at best.

In the past, before statistics and technology were more widely adopted in court, a criminologist, judge, or jury member could simply mark someone as ‘high risk for recidivism’ because they didn’t like their demeanor. Or worse, because they didn’t like their race.

Now fast forward to 1990, when a new digital alternative enters the scene: COMPAS, a piece of software that predicts recidivism risk scores for defendants. It became widely used in some US states.

“Scores like this — known as risk assessments — are increasingly common in courtrooms across the nation.
They are used to inform decisions about who can be set free at every stage of the criminal justice system, from assigning bond amounts, to even more fundamental decisions about defendants’ freedom.” ~ Machine Bias, ProPublica

Unfortunately for COMPAS (and those who suffered the consequences of their software), it turns out their algorithm gave disproportionately higher risk scores to black defendants than it did to white defendants. Their algorithm was deontological, but their rules for assessing risk were unfair to anyone who wasn’t white. The software was unethical. [1][2][3]

Now, let’s imagine a do-over. Let’s pretend that it is 1989 and we are the developers of the COMPAS algorithm. In order not to repeat the past, we decide that before we even begin to select which features we want to use in our training data sets, we are going to focus on the ethics of our algorithm. We go to our handy ETHItechniCAL framework.

The players:
System: COMPAS software
Non-digital alternative: judge/jury opinions

If it had been up to me in 2013, here’s how our COMPAS software would have measured up:

Or, for the optimistically minded, at the very minimum, our COMPAS software would have mapped to here:

Here’s my point: in the past, I doubt that the designers of the COMPAS algorithm had a conversation about deontology. I doubt that they researched what their algorithm might do if they had instead chosen a utilitarian or values-based approach. Honestly, it doesn’t seem like the developers of this software had a conversation about ethics at all. Or if they did, it must have been long after their system became widely used.

COMPAS is not alone. What other systems in place today could have caused much less societal harm by utilizing something like the ETHItechniCAL framework?

Now that we finally understand the ethics and values that our systems are upholding, there are two important steps that should be taken:

1. Make sure the intended values of the system match its reported values (ask users if they think the system is fair or not).
2. Explain these values to all stakeholders involved. This includes the users that are negatively/positively impacted by the system, the engineers who built the system, and anyone else who may interact with it.

In the case of the COMPAS algorithm, defendants had a right to know what information about them was causing a high or low risk score. They weren’t given this information. Even worse, judges weren’t given this information either. Everyone was blindly trusting an algorithm that was just as racist as the hundreds of years of crime data that were fed into it.

Transparency could have helped fix this.

If you are involved in the creation of any kind of technology, or if you interact with any kind of technology, you should be actively concerned about these issues. Ideally, you should be having conversations about algorithmic ethics in your workplace or in your school. If your classes feel highly theoretical, ask your teachers if you can start using current case studies to drive the conversation.

I understand that these problems aren’t easily solved, but change has to start somewhere. It will likely take a very interdisciplinary approach to assess algorithms and machine learning models for their values.

There’s also a major obstacle that stands in the way: companies aren’t willing to assess the ethics of their own algorithms.

Even as I write this, I recognize that the legal departments of large tech companies might laugh at my words.
Infamous algorithmic scandals [1][2][3][4][5] in the past few years have proved that the tech industry tends to focus on ethics only when it becomes a legal issue. This is why I propose an alternative approach to ethical assessment: use ethics as a business metric. If users are aware of the values that the systems they are using promote, they will understand the system better. When users aren’t blindsided by values that they weren’t aware of, they will trust a system more.

Trust ties directly into user retention.

Companies are afraid to assess or audit their algorithms for fear of discovering that something is wrong. If they find out they are unethical, they must spend the money and time to fix that problem. This is why most legal teams at companies advise engineers to avoid algorithmic audits whenever possible, unless there is a lawsuit at play or a legal policy that requires compliance.

If we could utilize fairness and ethics as a business metric, maybe companies would be more willing to check themselves. Maybe legal teams would let up.

It’s not a guarantee, but it’s a great start.

We’ve debated ethical and moral philosophy for thousands of years. Philosophy is a theoretical field because there are no universal metrics for ‘good’ and ‘bad’ that satisfy everyone. When people with opposing beliefs debate morals and virtue, they are often left with more questions than answers.

With the advent of technology, there is now an opportunity to create systems and artifacts with values built into them. If we don’t explicitly define which ethical frameworks or values we are selecting for a system, we run the risk of unintended consequences that may be ‘unfair’ for many.

By creating easier-to-understand guidelines about the values that are selected for a system’s creation, engineers will more easily understand the societal implications of their work. This will make it much easier to explain this information to the users that these systems impact.

Explainability builds transparency. Transparency builds trust.

My goal isn’t to punish tech companies. Rather, my goal is to motivate them to want to be ethical and to want to hold themselves accountable, by utilizing the wonderful ethics conversations that are starting in the classroom. Let’s take these discussions and apply them to problems that already exist.

Together, we can help code with greater intentionality.

Intentional systems reduce the risk of unintended societal harm.

In the trolley problem, we aren’t all going to agree on who we should save or kill. We have different values and different opinions. When it comes to self-driving cars, it doesn’t matter who agrees or doesn’t agree; the choice of who to save has already been made for us.

It is time to understand the consequences of these choices. It is time to be transparent about algorithmic design. This isn’t theory anymore. This is reality. When we raise our voice, we have a choice.

Footnote: *My information about Google’s self-driving cars is based on a dated article. If anyone works for Google or Waymo and would like to share the current objective function for your self-driving car in the case of unavoidable collisions, your help and transparency would be greatly appreciated!
8 Popular SQL Window Functions Replicated In Python | by AnBento | Towards Data Science
Update: Many of you contacted me asking for valuable resources to learn Python for Data Science or Data Analysis while transitioning from SQL. Below I share 4 courses/platforms that I strongly recommend to keep learning:

SQL & Python Advanced Coding Problems (StrataScratch) → Best platform I found to prepare SQL coding interviews so far! Much cheaper than LeetCode.
Programming For Data Science With Python → Very high quality content.
Data Manipulation With Python: A Pandas Crash-Course
SQL Summary Stats & Window Functions

Hope you’ll find them useful too! Now enjoy the article :D This post includes affiliate links for which I may earn a small commission at no extra cost to you, should you make a purchase.

Any data analyst who has worked on building their company’s KPIs or created reports that analyze business performance over time knows that SQL window functions are a powerful tool. The real advantage of using them is that a number of metrics can be calculated over different time frames without affecting the granularity of the original dataset. This in turn means that more can be achieved without the need for multiple self joins or CTEs, saving a lot of lines of code.

If so far you have mainly used SQL to build metrics and extract insights and are in the process of learning Python, you are probably wondering how to replicate your beloved window functions in Pandas. In the tutorial that follows, I will show how you can take advantage of window functions in Python to make your code even more compact and efficient.

For the first part of the tutorial, we will be working with a mock customer orders dataset that can be downloaded via GitHub. The dataset originally includes seven columns (Order Date, Order ID, Customer ID, Item ID, Item Price, Quantity and Amount Paid (£)), but we will also add Order Month as shown below:

We found that the dataset includes 50 distinct Order IDs and 8 columns, but if we wanted to explore it further, we could analyze the distribution of orders and amount paid by customer by running the following commands:

As we can see, there are a total of four customers, with customer_1 being the one that has completed the most orders (18) and spent the highest amount (£1291.75). Note how the DataFrame.groupby() method can be used in Python to achieve the same result we would achieve in SQL by grouping by that column at the end of our query. But there is more, as this method is also used to replicate the over(partition by ...) clause typical of window functions, as we will learn below.

#1. Row Number() → Rank(method=’first’)

The SQL ROW_NUMBER() function assigns a sequential integer to each row within the partition of a dataset. It is often used in practice to create an auxiliary column that ranks a field based on the specified partition and order. The column can then be used to more effectively filter or join the dataset.

Let’s suppose that we wanted to rank orders by customer based on the order date, starting from the least recent one. To achieve this result in SQL, we can write:

row_number() over(partition by customer_id order by order_date)

With pandas, the same result can be achieved by applying the .rank(method=’first’) function to a GroupBy object filtered by Order Date to create the Row Num column:
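A minimal sketch, assuming the orders DataFrame and the column names shown above (the gist in the original post may name things slightly differently):

# rank each customer's orders by date; ties are broken by order of appearance
orders['Row Num'] = orders.groupby('Customer ID')['Order Date'].rank(method='first')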
We can verify that customer_1 has indeed completed 18 orders, the first of which on 2019-02-13 (Row Num = 1) and the last on 2020-04-25 (Row Num = 18). Because we partitioned and ordered by Customer ID, once all 18 orders have been ranked, the function starts ranking orders for customer_2.

QUESTION 1: What if we wanted to assign the row number in descending order (from most recent to oldest order)?

This would be straightforward because, although it wasn’t shown in the code, the pandas rank(method=...) function has an ascending parameter that is set to True by default. By setting it to False we invert the way the rank is assigned:

orders.groupby(['Customer ID'])['Order Date'].rank(method='first', ascending=False)

QUESTION 2: What if we wanted to partition by multiple columns (like Customer ID and Order Month)?

In this case we just need to add the preferred fields to the GroupBy object:

#SQL syntax
row_number() over(partition by customer_id, order_month order by order_date)

#Python syntax
orders.groupby(['Customer ID', 'Order Month'])['Order Date'].rank(method='first')

#2. Rank() → Rank(method=’min’)

The SQL RANK() function assigns a rank to each row within a partition of a result set. Unlike ROW_NUMBER(), the rank is not sequential, meaning that rows within a partition that share the same values will receive the same rank. Using the same example as above, the SQL syntax would be:

rank() over(partition by customer_id order by order_date)

Its counterpart in Python is the same GroupBy call as before, this time passing method=’min’ to rank(). Because customer_1 executed two orders on 2019-02-21, both of them have been assigned rank = 3 and the following order has been assigned rank = 5, completely skipping rank = 4. This is exactly the type of ranking we would get if we used the SQL rank() window function.

#3. Dense_rank() → Rank(method=’dense’)

If we wanted to avoid gaps in ranking values, we should use the SQL dense_rank() function instead. In effect, unlike the rank() function, dense_rank() returns consecutive rank values. In our case, the SQL syntax would be:

dense_rank() over(partition by customer_id order by order_date)

In Python, we just need to replace method=’min’ with method=’dense’ in the rank() function. As expected, the order executed by customer_1 on 2019-02-23 has now been assigned rank = 4, despite being the customer’s 5th order in the period of analysis.

To summarize what we have learnt so far: despite SQL having 3 distinct functions to compute numerical data ranks, in pandas we just need to use the rank() function with the method (‘first’, ‘min’ or ‘dense’) and ascending (True or False) parameters to obtain the desired result.

#4. Sum(...) over(partition by ... order by ... rows unbounded preceding) → cumsum()

We now wish to compute the cumulative sum of the amount paid by each customer, in each month, sorted by order date. This calculation is also known as a running total and it’s probably one of the most used metrics in business analytics. One way to achieve this in SQL is:

sum(amount_paid) over(partition by customer_id, order_month order by order_date rows unbounded preceding)

The syntax above is not exactly the most efficient and intuitive: there is quite a lot of code to write, and if we wish to sort by order_date, we must also include the rows ... clause to compute a cumulative sum instead of a total sum over the chosen window. This is one of the cases where Python shines for its brevity, as to achieve the same result as above, we just need to write:
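A minimal sketch, again assuming the column names used so far:

# cumsum() accumulates in row order, so sorting by date first plays
# the role of the SQL order by clause
orders = orders.sort_values(['Customer ID', 'Order Month', 'Order Date'])
orders['Run Tot (£)'] = orders.groupby(['Customer ID', 'Order Month'])['Amount Paid (£)'].cumsum()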
As expected, the Run Tot (£) column displays a progressive sum of the Amount Paid (£) by each customer in a month, with the row corresponding to the last in-month order eventually showing the total amount spent by the customer in that month.

#5. Avg(...) over(partition by ...) → transform(np.mean)

In a similar fashion, we may also wish to compute the average amount spent by each customer per month. This time the SQL syntax is quite intuitive:

avg(amount_paid) over(partition by customer_id, order_month)

whereas it’s worthwhile elaborating on the Python solution, which applies transform(np.mean) to the same kind of GroupBy object used above. The reason why we use the pandas DataFrame.transform() function is that it allows us to keep the length of the Series equal to the length of the original dataset after computing the mean on the GroupBy object. This approach leads us to the desired result.

In the second part of this tutorial, we are going to pull AAPL and AMZN stocks’ Adj. Close Prices from Yahoo Finance, using the yfinance package. We should already have imported it at the very beginning, but if that is not the case, we can do it now by running the following commands:

!pip install yfinance # <-- to install the package
import yfinance as yf

aapl = yf.download("AAPL", start="2020-04-20", end="2020-04-30").resample('D').ffill()
amzn = yf.download("AMZN", start="2020-04-20", end="2020-04-30").resample('D').ffill()

With the code above, we have also created two new datasets (aapl and amzn) for the period 2020-04-20 to 2020-04-30 using the yf.download() method. In order to include weekends and holidays, the data has been resampled and the missing stock prices replaced with the prior most recent Adj. Close Price observation. Additional manipulation has been applied to the datasets (full code available in GitHub), which have eventually been combined in the stocks DataFrame:

#6. Lead/Lag(return_value, offset) → Shift(n)

Using the stocks dataset, we now wish to compute the DoD and WoW Adj. Close Price % Change, and to do that while keeping the DataFrame length unchanged, we need a function to access rows at a specific physical offset before the current row. In SQL this window function is named lag(), and in our example its syntax would be:

#Retrieves the stock price located 1 row back from the current row
lag(adj_close, 1) over(partition by symbol order by date)

#Retrieves the stock price located 7 rows back from the current row
lag(adj_close, 7) over(partition by symbol order by date)

IMPORTANT: Because we have resampled it, our dataset now has a daily granularity, meaning that to get yesterday’s stock price we simply need to move back one row, whereas to get the stock price recorded on the same day last week, we just need to move back 7 rows. If we had excluded weekends and holidays, these calculations would not have been so straightforward.

In Python, we can achieve the same result by applying the shift() function on a GroupBy object (grouped by Symbol and filtered by Adj. Close) to create the Lag1 and Lag7 columns. Note that because we wish to move back by 1 and 7 rows respectively (and our dataset is in descending order), the shift() function takes a negative integer as an argument:
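A minimal sketch of those two columns, assuming the stocks DataFrame holds Symbol and Adj Close columns and is sorted from most recent to oldest date:

# with dates sorted descending, the previous day's price sits one row below,
# so a negative shift pulls it up to the current row
stocks['Lag1'] = stocks.groupby('Symbol')['Adj Close'].shift(-1)
stocks['Lag7'] = stocks.groupby('Symbol')['Adj Close'].shift(-7)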
We can see that the shift(n) function has been applied correctly to each stock, so that on the first observed date (2020-04-20), where no prior price is available, a NaN value is displayed. In the same fashion, the first valid value displayed in the Lag7 column appears 7 days after the first observed date (on 2020-04-27).

So far we have only managed to retrieve lagged stock prices, not to compute the % changes. In order to do that, we could either apply the formula:

[(Adj Close / Lag1) - 1] x 100 <-- DoD % Change
[(Adj Close / Lag7) - 1] x 100 <-- WoW % Change

or use the pct_change(n) function, which computes the shifted stock prices under the hood and returns their % change directly (passing the same negative offsets as above).

For the sake of brevity, in this tutorial we won’t show an equivalent example using the lead() function, but it’s easy to guess that the only real change would be the sign of the integer passed to the shift() and pct_change() functions. Bear in mind that the way the dataset is sorted will affect the sign that should be passed to the functions to get the expected result.

#7. First/Last Value() → Rank(method=’first’, ascending=True/False) == 1

The first_value() and last_value() window functions are used to retrieve the value of the first or last row in an ordered partition of a dataset. Let’s pretend we wanted to find the first and the last available Adj. Close Prices in each month for the AAPL stock. The first 10 rows of the modified aapl dataset we will use are displayed below, and the way we will tackle the query in SQL is:

#The aapl dataset includes prices for one stock only, therefore the
#partition is just applied on order_month
first_value(adj_close) over(partition by order_month order by date)
last_value(adj_close) over(partition by order_month order by date rows between unbounded preceding and unbounded following)

Note the frame clause on last_value(): with the default frame, which ends at the current row, last_value() would simply return the current row’s price.

In Python, a function that works exactly like first_value()/last_value() does not exist, but it is possible to achieve a similar result by using the pandas rank(method=’first’) function to rank the values in a partition and then return just the one with rank = 1. Depending on how we set the ascending parameter in the rank() function, the value with rank = 1 will match either the first or the last value in the partition. While not particularly challenging, this process is a little bit of a hack, as we eventually need to back fill or forward fill the first or the last value onto the entire partition to obtain the desired result:
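A minimal sketch of that hack for the first value, assuming the modified aapl dataset has Date, Order Month and Adj Close columns (the last value works the same way, with ascending=False and a backfill):

# sort ascending so the fill propagates forward through each month
aapl = aapl.sort_values('Date')
aapl['Rank Asc'] = aapl.groupby('Order Month')['Date'].rank(method='first')
# keep Adj Close only on the rank-1 row, then spread it onto the whole partition
aapl['First Value'] = aapl['Adj Close'].where(aapl['Rank Asc'] == 1)
aapl['First Value'] = aapl.groupby('Order Month')['First Value'].ffill()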
In effect, for the period 2020-03-16 to 2020-04-14, the first (or least recent) available Adj. Close Price for AAPL in March is $242.2 (Rank Asc = 1) and the last (or most recent) is $254.3 (Rank Desc = 1). Similarly, in April the first available Adj. Close Price for AAPL is $240.9 and the last is $287.

#8. Avg(...) over(partition by ... order by ... rows between n preceding and current row) → rolling(window=...).mean()

The last window function we are going to replicate in this tutorial is a moving average. The moving average is a special case of the more general avg(return_value) over() function: to compute it in SQL, we need to declare a rows ... clause (like we did for the running total). For example, if we wanted to compute a 7-day moving average for the AAPL Adj. Close Price in SQL (assuming the dataset was in ascending order), we would write:

avg(adj_close) over(order by date rows between 6 preceding and current row)

In the rows ... clause we are simply stating that we wish to include the most recent observation while computing the average, therefore we just need the 6 preceding records to get a 7-day window. This concept may seem trivial for more experienced analysts, but it can cause confusion among newbies.

Fortunately, computing moving averages with Pandas is a real delight, as we just need to create a rolling object (specifying the desired window) and then apply the mean() function to it. Alternatively, we may also wish to compute an expanding mean, which is itself a special case of the moving average. In practice, the difference between the two metrics is that when we apply a function to a rolling object, the window size remains constant, whereas with an expanding object the window keeps growing. The logic behind using an expanding window is that with every day that passes, we observe a new price that can be added to our mean calculation. That is new information that we may wish to include in our calculated metrics. Both are sketched below.
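A minimal sketch of the two calculations, assuming an aapl frame sorted by ascending date:

# constant 7-day window vs. a window that grows with every new observation
aapl['7D MA'] = aapl['Adj Close'].rolling(window=7).mean()
aapl['Expanding MA'] = aapl['Adj Close'].expanding().mean()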
In this article, we have learnt how to replicate 8 popular SQL window functions in Python using Pandas. If you are working in analytics, you should now be well equipped to build KPIs to monitor performance using SQL or Python interchangeably. In both languages there are, of course, multiple ways to achieve the same result, so feel free to share your experience in the response section.

Hope you have enjoyed this tutorial and stay tuned for the next one!
We should already have imported it at the very beginning, but if it is not the case we can do it now by running the following commands:" }, { "code": null, "e": 8357, "s": 8113, "text": "!pip install yfinance # <-- to install the packageimport yfinance as yfaapl = yf.download(“AAPL”, start=”2020–04–20\", end=”2020–04–30\").resample(‘D’).ffill()amzn = yf.download(“AMZN”, start=”2020–04–20\", end=”2020–04–30\").resample(‘D’).ffill()" }, { "code": null, "e": 8815, "s": 8357, "text": "With the code above, we have also created two new datasets (aapl and amzn) for the period 2020–04–20 to 2020–04–30 using the yf.download() method. In order to include weekends and holidays, data has been resampled and the missing stock prices replaced with the prior most recent Adj. Close Price observation. Additional manipulation has been applied to the datasets (full code available in GitHub) that have been eventually combined in the stocks DataFrame:" }, { "code": null, "e": 8860, "s": 8815, "text": "#6 Lead/lag(return_value, offset) → Shift(n)" }, { "code": null, "e": 9192, "s": 8860, "text": "Using the stocks dataset, we now wish to compute the DoD and WoW Adj. Close Price % Change and to do that by keeping the DataFrame length unchanged, we need a function to access rows at a specific physical offset which comes before the current row. In SQL this window function is named lag() and in our example its syntax would be:" }, { "code": null, "e": 9440, "s": 9192, "text": "#Retrieves the stock price located 1 row back from the current rowlag(adj_close, 1) over(partition by symbol order by date)#Retrieves the stock price located 7 rows back from the current rowlag(adj_close, 7) over(partition by symbol order by date)" }, { "code": null, "e": 9797, "s": 9440, "text": "IMPORTANT: Because we have resampled it, our dataset has now a daily granularity, meaning that to get yesterday stock price, we simply need to move back one row, whereas to get the stock price recorded same day last week, we just need to move back 7 rows. If we had excluded weekends and holidays, these calculations would not have been so straightforward." }, { "code": null, "e": 10148, "s": 9797, "text": "In Python we can achieve the same result by applying the shift() function on a GroupBy object (grouped by Symbol and filtered by Adj. Close) to create the Lag1 and Lag7 columns. Note that because we wish to move back by 1 and 7 rows respectively (and our dataset has a descending order), the shift() functions takes a negative integer as an argument:" }, { "code": null, "e": 10469, "s": 10148, "text": "We can see that the shift(n) function has been correctly applied on each stock, so that on the first observed date (2020–04–20), where no prior price is available, a NaN value is displayed. In the same fashion, the first valid value displayed in the Lag7 column appears 7 days after (2020–04–27) the first observed date." }, { "code": null, "e": 10611, "s": 10469, "text": "So far, we just managed to retrieve lagged stock prices, not to compute the % changes. 
In order to do that we could either apply the formula:" }, { "code": null, "e": 10705, "s": 10611, "text": "[(Adj Close / Lag1) - 1] x 100 <-- DoD % Change[(Adj Close / Lag7) -1] x 100 <-- WoW % Change" }, { "code": null, "e": 10821, "s": 10705, "text": "Or use the pct_change(n) function that computes the shifted stock prices under the hood and returns their % change:" }, { "code": null, "e": 11196, "s": 10821, "text": "For the sake of brevity, in this tutorial we won’t show an equivalent example using the lead() function, but its easy to guess that the only real change would be the sign of the integer assigned to the shift() and pct_change() function. Bear in mind that, the way the dataset is sorted, will affect the sign that should be passed to the functions to get the expected result." }, { "code": null, "e": 11272, "s": 11196, "text": "#7. first/last value() → rank(method =’first’, ascending = True/False) == 1" }, { "code": null, "e": 11616, "s": 11272, "text": "The first value() and last value() window functions are used to retrieve the value of the first or last row, in an ordered partition of a dataset. Let’s pretend we wanted to find the first and the last available Adj. Close Prices in each month for the AAPL stock. The first 10 rows of the modified aapl dataset we will use are displayed below:" }, { "code": null, "e": 11664, "s": 11616, "text": "and the way we will tackle the query in SQL is:" }, { "code": null, "e": 11907, "s": 11664, "text": "#The aapl dataset includes prices for one stock only, therefore the #partition is just applied on order_monthfirst value(adj_close) over(partition by order_month order by date)last value(adj_close) over(partition by order_month order by date)" }, { "code": null, "e": 12324, "s": 11907, "text": "In Python, a function that works exactly like first/last value() does not exist but it is possible to achieve a similar result by using the Pandas rank(method =’first’) function to rank the values in a partition and then return just the one with rank = 1. Depending on how we set the ascending parameter in the rank() function, the value with rank = 1 will match with either the first or last value in the partition." }, { "code": null, "e": 12540, "s": 12324, "text": "Despite not particularly challenging, this process is a little bit of a hack, as we will eventually need to back fill or forward fill the first or the last value on the entire partition to obtain the desired result:" }, { "code": null, "e": 12843, "s": 12540, "text": "In effect for the period 2020–03–16 to 2020–04–14, the first (or less recent) available Adj. Close Price for AAPL in March is $242.2 (Rank Asc = 1) and the last (or most recent) is $254.3 (Rank Desc = 1). Similarly in April, the first available Adj. Close Price for AAPL is $240.9 and the last is $287." }, { "code": null, "e": 12963, "s": 12843, "text": "#8. Avg(...) over(partition by ... order by .. rows between n preceding and current row) → rolling(window = ...).mean()" }, { "code": null, "e": 13119, "s": 12963, "text": "The last window function we are going to replicate in this tutorial is a moving average. The moving average is a special case of the more general function:" }, { "code": null, "e": 13144, "s": 13119, "text": "avg(return_value) over()" }, { "code": null, "e": 13403, "s": 13144, "text": "as to compute it in SQL, we need to declare a rows ... clause (like we did for the running total). For example, if we wanted to compute a 7 days moving average for the AAPL Adj. 
Close Price in SQL (assuming the dataset was in ascending order) we would write:" }, { "code": null, "e": 13479, "s": 13403, "text": "avg(adj_close) over(order by date rows between 6 preceding and current row)" }, { "code": null, "e": 13779, "s": 13479, "text": "In the rows ... clause we are simply stating that we wish to include the most recent observation while computing the average, therefore we just need the 6 preceding records to get a 7 days window. This concepts may seem trivial for more experienced analysts, but could cause confusion among newbies." }, { "code": null, "e": 14080, "s": 13779, "text": "Fortunately, computing moving averages with Pandas it’s a real delight, as we just need to create a rolling object (specifying the desired window) and then apply the mean() function on it. Alternatively, we may also wish to compute and expanding mean, that is itself a special case of moving average:" }, { "code": null, "e": 14504, "s": 14080, "text": "In practice, the difference between the two metrics is that when we apply a function on a rolling object, the window size remains constant, whereas with an expanding object, the window keeps growing. The logic behind using an expanding window is that with every day that passes, we observe a new price that can be added to our mean calculation. That is new information that we may wish to include in our calculated metrics." }, { "code": null, "e": 14966, "s": 14504, "text": "In this article we have learnt how to replicate 8 popular SQL Window Functions in Python using Pandas. If you are working in analytics, you should now be well equipped to build KPIs to monitor performance using SQL or Python interchangeably. In both languages, there are of course multiple ways to achieve the same result, therefore feel free to share your experience in the response section. Hope you have enjoyed this tutorial and stay tuned for the next one!" } ]
The 6 Steps of a SQL Select Statement Process | by Wendy Navarrete | Towards Data Science
In this article, I will describe step by step the logical process phases during the execution of a query in a relational database. For doing that, I am going to use two simple unnormalized tables: Citizen and City. They are described as follows:

Citizen is the table that contains data of distinguished citizens and the identification number of the city they live in, and City is the table with city names and their respective identification numbers.

Let's say that we want to know the names of only two cities, excluding San Bruno, in which two or more citizens live. We also want the result ordered alphabetically.

This is the query to get the required information.

SELECT city.city_name AS "City"
FROM citizen
JOIN city ON citizen.city_id = city.city_id
WHERE city.city_name != 'San Bruno'
GROUP BY city.city_name
HAVING COUNT(*) >= 2
ORDER BY city.city_name ASC
LIMIT 2

1. Getting Data (From, Join)
2. Row Filter (Where)
3. Grouping (Group by)
4. Group Filter (Having)
5. Return Expressions (Select)
6. Order & Paging (Order by & Limit / Offset)

FROM citizen
JOIN city

The first step in the process is the execution of the statements in the From clause, followed by the Join clause. The result of these operations is a cartesian product of our two tables. Once From and Join have been executed, the processor gets the qualified rows based on the On condition.

ON citizen.city_id = city.city_id

After getting the qualified rows, they are passed on to the Where clause. This evaluates every row using conditional expressions. Rows that do not evaluate to true are removed from the set.

WHERE city.city_name != 'San Bruno'

The next step is to execute the Group by clause: it groups rows that have the same values into summary rows. After this point, all Select expressions will be evaluated per group, instead of being evaluated per row.

GROUP BY city.city_name

The Having clause consists of a logical predicate. It is processed after the Group by, and it can no longer refer to individual rows, only to groups of rows.

HAVING COUNT(*) >= 2

The result of executing this operation keeps the set as in the figure above. This is because there are two or more elements in every group.

During this step, the processor evaluates what will be printed as a result of the query, and whether there are some functions to run on data, like Distinct, Max, Sqrt, Date, Lower, etc. In this case, the Select clause just prints the city names and aliases the city_name column with the identifier "City".

SELECT city.city_name AS "City"

The final processing steps of the query deal with presentation ordering and the ability to limit the size of the result set. In our example, it is required to present a maximum of two records ordered alphabetically.

ORDER BY city.city_name ASC
LIMIT 2

With a better understanding of how things work, better results can be obtained. Having a fundamental understanding of how a Select statement is executed makes it easier to get the expected result set. Hopefully, this article is helpful for you as well.
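To make this evaluation order tangible, here is a minimal pandas sketch, added for illustration on hypothetical rows (it is not part of the original article), in which each of the six phases becomes one explicit step:

import pandas as pd

# Hypothetical rows standing in for the Citizen and City tables
citizen = pd.DataFrame({
    "citizen_name": ["Ada", "Alan", "Grace", "Edsger", "Donald"],
    "city_id": [1, 1, 2, 2, 3],
})
city = pd.DataFrame({
    "city_id": [1, 2, 3],
    "city_name": ["Palo Alto", "San Mateo", "San Bruno"],
})

# 1. FROM + JOIN: build the joined row set
joined = citizen.merge(city, on="city_id")

# 2. WHERE: filter individual rows
filtered = joined[joined["city_name"] != "San Bruno"]

# 3. GROUP BY: collapse rows into groups
grouped = filtered.groupby("city_name").size().reset_index(name="citizens")

# 4. HAVING: filter whole groups
kept = grouped[grouped["citizens"] >= 2]

# 5. SELECT: choose and alias the output expressions
selected = kept[["city_name"]].rename(columns={"city_name": "City"})

# 6. ORDER BY + LIMIT: sort the result and cap its size
result = selected.sort_values("City").head(2)
print(result)

Reading the pipeline top to bottom mirrors the logical order in which the database engine processes the clauses, even though the SQL text lists Select first.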
[ { "code": null, "e": 426, "s": 172, "text": "In this article, I will describe step by step the logical process phases during the execution of a query into a relational database. For doing that, I am going to use two simple unnormalized form tables: Citizen and City. They are described as followed:" }, { "code": null, "e": 618, "s": 426, "text": "Citizen table contains data of distinguished citizens and the identification number of the city they live on, and City is the table with city names and their respective identification number." }, { "code": null, "e": 790, "s": 618, "text": "Let’s say that we want to know the name of only two city names, except San Bruno, where two or more citizens are living on. We also want the result ordered alphabetically." }, { "code": null, "e": 841, "s": 790, "text": "This is the query to get the required information." }, { "code": null, "e": 1041, "s": 841, "text": "SELECT city.city_name AS \"City\"FROM citizenJOIN city ON citizen.city_id = city.city_id WHERE city.city_name != 'San Bruno'GROUP BY city.city_nameHAVING COUNT(*) >= 2ORDER BY city.city_name ASCLIMIT 2" }, { "code": null, "e": 1217, "s": 1041, "text": "1. Getting Data (From, Join) 2. Row Filter (Where) 3. Grouping (Group by) 4. Group Filter (Having) 5. Return Expressions (Select) 6. Order & Paging (Order by & Limit / Offset)" }, { "code": null, "e": 1240, "s": 1217, "text": "FROM citizenJOIN city " }, { "code": null, "e": 1430, "s": 1240, "text": "The first step in the process is the execution of the statements in From clause followed by the Join clause. The result of these operations is getting a cartesian product of our two tables." }, { "code": null, "e": 1537, "s": 1430, "text": "Once the From and Join were executed, the processor will get the qualified rows based on the condition On." }, { "code": null, "e": 1571, "s": 1537, "text": "ON citizen.city_id = city.city_id" }, { "code": null, "e": 1764, "s": 1571, "text": "After getting qualified rows, it is passed on to the Where clause. This evaluates every row using conditional expressions. When rows do not evaluate to true, they will be removed from the set." }, { "code": null, "e": 1800, "s": 1764, "text": "WHERE city.city_name != 'San Bruno'" }, { "code": null, "e": 2015, "s": 1800, "text": "The next step is to execute Group by clause, it will group rows that have the same values into summary rows. After this point, all Select expressions will be evaluated per group, instead of being evaluated per row." }, { "code": null, "e": 2039, "s": 2015, "text": "GROUP BY city.city_name" }, { "code": null, "e": 2196, "s": 2039, "text": "The Having clause consists of a logical predicate, it is processed after the Group by and it can no longer refer to individual rows, only to groups of rows." }, { "code": null, "e": 2217, "s": 2196, "text": "HAVING COUNT(*) >= 2" }, { "code": null, "e": 2358, "s": 2217, "text": "The result of executing this operation will keep the set as the figure above. This is because there are two or more elements in every group." }, { "code": null, "e": 2656, "s": 2358, "text": "During this step, the processor evaluates what will be printed as a result of the query, and if there are some functions to run on data like Distinct, Max, Sqrt, Date, Lower, etc. In this case, the select clause just prints the city names and alias the city_name column with the identifier “City”." 
}, { "code": null, "e": 2688, "s": 2656, "text": "SELECT city.city_name AS \"City\"" }, { "code": null, "e": 2904, "s": 2688, "text": "The final processing steps of the query deal with presentation ordering and the ability to limit the size of the result set. In our example, it is required to present a maximum of two records ordered alphabetically." }, { "code": null, "e": 2939, "s": 2904, "text": "ORDER BY city.city_name ASCLIMIT 2" }, { "code": null, "e": 3139, "s": 2939, "text": "A better understanding of how things work, better results can be obtained. Having fundamental understandings of how a Select statement is executed, it will make easier to get the expected result-set." } ]
Program to find area of largest square of 1s in a given matrix in python
Suppose we have a binary matrix; we have to find the largest square of 1s in that given matrix.

So, if the input is like (the 6 x 7 binary matrix used in the driver code below) then the output will be 16.

To solve this, we will follow these steps −

res := 0
for i in range 0 to size of matrix, do
   res := maximum of res and matrix[i, 0]
for i in range 0 to size of matrix[0], do
   res := maximum of res and matrix[0, i]
for i in range 1 to row count of matrix, do
   for j in range 1 to column count of matrix, do
      if matrix[i, j] is same as 1, then
         matrix[i, j] := minimum of (matrix[i - 1, j], matrix[i - 1, j - 1] and matrix[i, j - 1]) + 1
         res := maximum of res and matrix[i, j]
return res^2

Let us see the following implementation to get a better understanding −

class Solution:
   def solve(self, matrix):
      res = 0
      for i in range(len(matrix)):
         res = max(res, matrix[i][0])
      for i in range(len(matrix[0])):
         res = max(res, matrix[0][i])

      for i in range(1, len(matrix)):
         for j in range(1, len(matrix[0])):
            if matrix[i][j] == 1:
               matrix[i][j] = min(matrix[i - 1][j], matrix[i - 1][j - 1], matrix[i][j - 1]) + 1
               res = max(res, matrix[i][j])

      return res * res

ob = Solution()
matrix = [
   [1, 0, 0, 0, 0, 1, 1],
   [0, 0, 0, 0, 0, 1, 1],
   [0, 1, 1, 1, 1, 0, 0],
   [0, 1, 1, 1, 1, 0, 0],
   [0, 1, 1, 1, 1, 0, 0],
   [0, 1, 1, 1, 1, 0, 0]
]
print(ob.solve(matrix))

Input

matrix = [
   [1, 0, 0, 0, 0, 1, 1],
   [0, 0, 0, 0, 0, 1, 1],
   [0, 1, 1, 1, 1, 0, 0],
   [0, 1, 1, 1, 1, 0, 0],
   [0, 1, 1, 1, 1, 0, 0],
   [0, 1, 1, 1, 1, 0, 0]
]

Output

16
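To see why the recurrence minimum of (up, up-left, left) + 1 works, here is a quick hand-check on a hypothetical all-ones 3 x 3 input, reusing the solve method defined above:

# Each 1 gets rewritten, in place, into the side length of the
# largest square of 1s whose bottom-right corner is that cell
# (the first row and column keep their original values):
#
# [[1, 1, 1],        [[1, 1, 1],
#  [1, 1, 1],  --->   [1, 2, 2],
#  [1, 1, 1]]         [1, 2, 3]]
#
# matrix[2][2] = min(2, 2, 2) + 1 = 3, so the area is 3 * 3 = 9
print(Solution().solve([[1, 1, 1], [1, 1, 1], [1, 1, 1]]))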
[ { "code": null, "e": 1154, "s": 1062, "text": "Suppose we have a binary matrix, we have to find largest square of 1s in that given matrix." }, { "code": null, "e": 1179, "s": 1154, "text": "So, if the input is like" }, { "code": null, "e": 1207, "s": 1179, "text": "then the output will be 16." }, { "code": null, "e": 1251, "s": 1207, "text": "To solve this, we will follow these steps −" }, { "code": null, "e": 1260, "s": 1251, "text": "res := 0" }, { "code": null, "e": 1337, "s": 1260, "text": "for i in range 0 to size of matrix, dores := maximum of res and matrix[i, 0]" }, { "code": null, "e": 1376, "s": 1337, "text": "res := maximum of res and matrix[i, 0]" }, { "code": null, "e": 1456, "s": 1376, "text": "for i in range 0 to size of matrix[0], dores := maximum of res and matrix[0, i]" }, { "code": null, "e": 1495, "s": 1456, "text": "res := maximum of res and matrix[0, i]" }, { "code": null, "e": 1747, "s": 1495, "text": "for i in range 1 to row count of matrix, dofor j in range 1 to column count of matrix, doif matrix[i, j] is same as 1, thenmatrix[i, j] = minimum of (matrix[i - 1, j], matrix[i - 1, j - 1] and matrix[i, j - 1]) + 1res = maximum of res and matrix[i, j]" }, { "code": null, "e": 1956, "s": 1747, "text": "for j in range 1 to column count of matrix, doif matrix[i, j] is same as 1, thenmatrix[i, j] = minimum of (matrix[i - 1, j], matrix[i - 1, j - 1] and matrix[i, j - 1]) + 1res = maximum of res and matrix[i, j]" }, { "code": null, "e": 2082, "s": 1956, "text": "if matrix[i, j] is same as 1, thenmatrix[i, j] = minimum of (matrix[i - 1, j], matrix[i - 1, j - 1] and matrix[i, j - 1]) + 1" }, { "code": null, "e": 2174, "s": 2082, "text": "matrix[i, j] = minimum of (matrix[i - 1, j], matrix[i - 1, j - 1] and matrix[i, j - 1]) + 1" }, { "code": null, "e": 2212, "s": 2174, "text": "res = maximum of res and matrix[i, j]" }, { "code": null, "e": 2225, "s": 2212, "text": "return res^2" }, { "code": null, "e": 2295, "s": 2225, "text": "Let us see the following implementation to get better understanding −" }, { "code": null, "e": 2305, "s": 2295, "text": "Live Demo" }, { "code": null, "e": 3003, "s": 2305, "text": "class Solution:\n def solve(self, matrix):\n res = 0\n for i in range(len(matrix)):\n res = max(res, matrix[i][0])\n for i in range(len(matrix[0])):\n res = max(res, matrix[0][i])\n\n for i in range(1, len(matrix)):\n for j in range(1, len(matrix[0])):\n if matrix[i][j] == 1:\n matrix[i][j] = min(matrix[i - 1][j], matrix[i - 1][j - 1], matrix[i][j - 1]) + 1\n\n res = max(res, matrix[i][j])\n\n return res * res\n\nob = Solution()\nmatrix = [\n [1, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1],\n [0, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 0, 0]\n]\nprint(ob.solve(matrix))" }, { "code": null, "e": 3165, "s": 3003, "text": "matrix = [ \n[1, 0, 0, 0, 0, 1, 1], \n[0, 0, 0, 0, 0, 1, 1], \n[0, 1, 1, 1, 1, 0, 0], \n[0, 1, 1, 1, 1, 0, 0], \n[0, 1, 1, 1, 1, 0, 0], \n[0, 1, 1, 1, 1, 0, 0] ]" }, { "code": null, "e": 3168, "s": 3165, "text": "16" } ]
Python 3 - time localtime() Method
The method localtime() is similar to gmtime(), but it converts the number of seconds to local time. If secs is not provided or is None, the current time as returned by time() is used. The dst flag is set to 1 when DST applies to the given time.

Following is the syntax for the localtime() method −

time.localtime([ sec ])

sec − The number of seconds to be converted into a struct_time representation.

This method returns a time.struct_time object.

The following example shows the usage of the localtime() method.

#!/usr/bin/python3
import time

print("time.localtime() : %s" % time.localtime())

When we run the above program, it produces the following result:

time.localtime() : time.struct_time(tm_year = 2016, tm_mon = 2, tm_mday = 15,
   tm_hour = 10, tm_min = 13, tm_sec = 50, tm_wday = 0, tm_yday = 46, tm_isdst = 0)
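As a further illustration (not part of the original example), the fields of the returned struct_time can be accessed by name, and the struct can be formatted with time.strftime():

#!/usr/bin/python3
import time

now = time.localtime()

# Individual fields are available as named attributes
print("Year :", now.tm_year)
print("Month:", now.tm_mon)
print("DST  :", now.tm_isdst)

# The struct can also be rendered as a formatted string
print(time.strftime("%Y-%m-%d %H:%M:%S", now))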
[ { "code": null, "e": 2577, "s": 2340, "text": "The method localtime() is similar to gmtime() but it converts number of seconds to local time. If secs is not provided or None, the current time as returned by time() is used. The dst flag is set to 1 when DST applies to the given time." }, { "code": null, "e": 2626, "s": 2577, "text": "Following is the syntax for localtime() method −" }, { "code": null, "e": 2651, "s": 2626, "text": "time.localtime([ sec ])\n" }, { "code": null, "e": 2749, "s": 2651, "text": "sec − These are the number of seconds to be converted into structure struct_time representation. " }, { "code": null, "e": 2788, "s": 2749, "text": "This method does not return any value." }, { "code": null, "e": 2849, "s": 2788, "text": "The following example shows the usage of localtime() method." }, { "code": null, "e": 2932, "s": 2849, "text": "#!/usr/bin/python3\nimport time\n\nprint (\"time.localtime() : %s\" , time.localtime())" }, { "code": null, "e": 2997, "s": 2932, "text": "When we run the above program, it produces the following result:" }, { "code": null, "e": 3161, "s": 2997, "text": "time.localtime() : time.struct_time(tm_year = 2016, tm_mon = 2, tm_mday = 15, \n tm_hour = 10, tm_min = 13, tm_sec = 50, tm_wday = 0, tm_yday = 46, tm_isdst = 0)\n" }, { "code": null, "e": 3198, "s": 3161, "text": "\n 187 Lectures \n 17.5 hours \n" }, { "code": null, "e": 3214, "s": 3198, "text": " Malhar Lathkar" }, { "code": null, "e": 3247, "s": 3214, "text": "\n 55 Lectures \n 8 hours \n" }, { "code": null, "e": 3266, "s": 3247, "text": " Arnab Chakraborty" }, { "code": null, "e": 3301, "s": 3266, "text": "\n 136 Lectures \n 11 hours \n" }, { "code": null, "e": 3323, "s": 3301, "text": " In28Minutes Official" }, { "code": null, "e": 3357, "s": 3323, "text": "\n 75 Lectures \n 13 hours \n" }, { "code": null, "e": 3385, "s": 3357, "text": " Eduonix Learning Solutions" }, { "code": null, "e": 3420, "s": 3385, "text": "\n 70 Lectures \n 8.5 hours \n" }, { "code": null, "e": 3434, "s": 3420, "text": " Lets Kode It" }, { "code": null, "e": 3467, "s": 3434, "text": "\n 63 Lectures \n 6 hours \n" }, { "code": null, "e": 3484, "s": 3467, "text": " Abhilash Nelson" }, { "code": null, "e": 3491, "s": 3484, "text": " Print" }, { "code": null, "e": 3502, "s": 3491, "text": " Add Notes" } ]
Maximum number of overlapping string - GeeksforGeeks
15 Oct, 2020

Given two strings S and T, the task is to count the number of overlapping occurrences of the string T in the string S.
Note: If some matched subsequence cannot be completed into the string T, then print -1.

Examples:

Input: S = "chirpchirp", T = "chirp"
Output: 0
Explanation: There are no overlapping strings of "chirp".

Input: S = "chcirphirp", T = "chirp"
Output: 2
There are two overlapping strings of T:
First "chirp" can be chcirphirp.
Second "chirp" can be chcirphirp.

Approach: The idea is to iterate over the string S and increase the overlapping count whenever the first character of the string T occurs. If at any instant the current character is equal to the last character of T, then decrement the overlapping count. Meanwhile, update the maximum overlapping count whenever the current count reaches 2 or more. Finally, to check that every matched subsequence forms a complete copy of the string T, check whether the overlapping count has returned to zero. If it has, return the maximum overlap count; otherwise return -1.

Below is the implementation of the above approach:

C++

// C++ implementation to find the
// maximum number of occurrences of
// the overlapping count

#include <bits/stdc++.h>
using namespace std;

// Function to find the maximum
// overlapping strings
int maxOverlap(string S, string T)
{
    string str = T;
    vector<int> count(T.length(), 0);
    int overlap = 0;
    int max_overlap = 0;

    for (int i = 0; i < (int)S.length(); i++) {

        // Get the current character
        int index = str.find(S[i]);

        // Condition to check if the current
        // character is the first character
        // of the string T then increment the
        // overlapping count
        if (index == 0) {
            overlap++;
            if (overlap >= 2)
                max_overlap = max(overlap,
                                  max_overlap);
            count[index]++;
        }
        else {

            // Condition to check
            // previous character is also
            // occurred
            if (count[index - 1] <= 0)
                return -1;

            // Update count of previous
            // and current character
            count[index]++;
            count[index - 1]--;
        }

        // Condition to check the current
        // character is the last character
        // of the string T
        if (index == (int)T.length() - 1)
            overlap--;
    }

    // Condition to check the every
    // subsequence is a valid string T
    if (overlap == 0)
        return max_overlap;
    else
        return -1;
}

// Driver Code
int main()
{
    string S = "chcirphirp";
    string T = "chirp";

    // Function Call
    cout << maxOverlap(S, T);
    return 0;
}

Java

// Java implementation to find the
// maximum number of occurrences of
// the overlapping count
import java.util.*;

class GFG{

// Function to find the maximum
// overlapping Strings
static int maxOverlap(String S, String T)
{
    String str = T;
    int count[] = new int[T.length()];
    int overlap = 0;
    int max_overlap = 0;

    for(int i = 0; i < S.length(); i++)
    {

        // Get the current character
        int index = str.indexOf(S.charAt(i));

        // Condition to check if the current
        // character is the first character
        // of the String T then increment the
        // overlapping count
        if (index == 0)
        {
            overlap++;
            if (overlap >= 2)
                max_overlap = Math.max(overlap,
                                       max_overlap);
            count[index]++;
        }
        else
        {

            // Condition to check
            // previous character is also
            // occurred
            if (count[index - 1] <= 0)
                return -1;

            // Update count of previous
            // and current character
            count[index]++;
            count[index - 1]--;
        }

        // Condition to check the current
        // character is the last character
        // of the String T
        if (index == T.length() - 1)
            overlap--;
    }

    // Condition to check the every
    // subsequence is a valid String T
    if (overlap == 0)
        return max_overlap;
    else
        return -1;
}

// Driver code
public static void main(String[] args)
{
    String S = "chcirphirp";
    String T = "chirp";

    // Function call
    System.out.print(maxOverlap(S, T));
}
}

// This code is contributed by Princi Singh

Python3

# Python3 implementation to find the
# maximum number of occurrences of
# the overlapping count

# Function to find the maximum
# overlapping strings
def maxOverlap(S, T):

    str = T
    count = [0 for i in range(len(T))]
    overlap = 0
    max_overlap = 0

    for i in range(0, len(S)):

        # Get the current character
        index = str.find(S[i])

        # Condition to check if
        # the current character is
        # the first character of the
        # string T then increment the
        # overlapping count
        if(index == 0):
            overlap += 1

            if(overlap >= 2):
                max_overlap = max(overlap, max_overlap)

            count[index] += 1
        else:

            # Condition to check
            # previous character is also
            # occurred
            if(count[index - 1] <= 0):
                return -1

            # Update count of previous
            # and current character
            count[index] += 1
            count[index - 1] -= 1

        # Condition to check the current
        # character is the last character
        # of the string T
        if(index == len(T) - 1):
            overlap -= 1

    # Condition to check the every
    # subsequence is a valid string T
    if(overlap == 0):
        return max_overlap
    else:
        return -1

# Driver Code
S = "chcirphirp"
T = "chirp"

# Function Call
print(maxOverlap(S, T))

# This code is contributed by avanitrachhadiya2155

C#

// C# implementation to find the
// maximum number of occurrences of
// the overlapping count
using System;

class GFG{

// Function to find the maximum
// overlapping Strings
static int maxOverlap(String S, String T)
{
    String str = T;
    int []count = new int[T.Length];
    int overlap = 0;
    int max_overlap = 0;

    for(int i = 0; i < S.Length; i++)
    {

        // Get the current character
        int index = str.IndexOf(S[i]);

        // Condition to check if the current
        // character is the first character
        // of the String T then increment the
        // overlapping count
        if (index == 0)
        {
            overlap++;
            if (overlap >= 2)
            {
                max_overlap = Math.Max(overlap,
                                       max_overlap);
            }
            count[index]++;
        }
        else
        {

            // Condition to check
            // previous character is also
            // occurred
            if (count[index - 1] <= 0)
                return -1;

            // Update count of previous
            // and current character
            count[index]++;
            count[index - 1]--;
        }

        // Condition to check the current
        // character is the last character
        // of the String T
        if (index == T.Length - 1)
            overlap--;
    }

    // Condition to check the every
    // subsequence is a valid String T
    if (overlap == 0)
        return max_overlap;
    else
        return -1;
}

// Driver code
public static void Main(String[] args)
{
    String S = "chcirphirp";
    String T = "chirp";

    // Function call
    Console.Write(maxOverlap(S, T));
}
}

// This code is contributed by sapnasingh4991

Output:
2
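A quick hand-check of the three possible outcomes, using the Python maxOverlap defined above (the third input is a hypothetical extra, not from the article):

# 0  -> S is valid, but the two copies of T never overlap
print(maxOverlap("chirpchirp", "chirp"))

# 2  -> two copies of T overlap, as in the example above
print(maxOverlap("chcirphirp", "chirp"))

# -1 -> the second 'r' cannot continue any partially matched copy
print(maxOverlap("chirrp", "chirp"))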
[ { "code": null, "e": 24301, "s": 24273, "text": "\n15 Oct, 2020" }, { "code": null, "e": 24522, "s": 24301, "text": "Given two strings S and T, the task is to count the number of the overlapping string T in the string S.Note: If there are some mismatch words as a subsequence which do not matche to the string T, then print -1. Examples:" }, { "code": null, "e": 24780, "s": 24522, "text": "Input: S = “chirpchirp”, T = “chirp” Output: 0 Explanation: There are no overlapping strings of “chirp”.Input: S = “chcirphirp”, T = “chirp” Output: 2 There are two overlapping string of T: First “chirp” can be chcirphirp. Second “chirp” can be chcirphirp. " }, { "code": null, "e": 25316, "s": 24780, "text": "Approach: The idea is to iterate over the string S and increase the overlapping count at an instant when the first character of the string T occurs If at any instant the current character is equal to the last character then decrement the overlapping count. Meanwhile, update the maximum overlapping count if it is greater than 2. Finally, to check that every subsequence matches to the string T check overlapping count is equal to zero or not. If yes return the maximum overlap count.Below is the implementation of the above approach: " }, { "code": null, "e": 25320, "s": 25316, "text": "C++" }, { "code": null, "e": 25325, "s": 25320, "text": "Java" }, { "code": null, "e": 25333, "s": 25325, "text": "Python3" }, { "code": null, "e": 25336, "s": 25333, "text": "C#" }, { "code": "// C++ implementation to find the// maximum number of occurrence of// the overlapping count #include <bits/stdc++.h> using namespace std; // Function to find the maximum// overlapping stringsint maxOverlap(string S, string T){ string str = T; int count[T.length()] = { 0 }; int overlap = 0; int max_overlap = 0; for (int i = 0; i <= S.length(); i++) { // Get the current character int index = str.find(S[i]); // Condition to check if the current // character is the first character // of the string T then increment the // overlapping count if (index == 0) { overlap++; if (overlap >= 2) max_overlap = max(overlap, max_overlap); count[index]++; } else { // Condition to check // previous character is also // occurred if (count[index - 1] <= 0) return -1; // Update count of previous // and current character count[index]++; count[index - 1]--; } // Condition to check the current // character is the last character // of the string T if (index == 4) overlap--; } // Condition to check the every // subsequence is a valid string T if (overlap == 0) return max_overlap; else return -1;} // Driver Codeint main(){ string S = \"chcirphirp\"; string T = \"chirp\"; // Function Call cout << maxOverlap(S, T); return 0;}", "e": 26892, "s": 25336, "text": null }, { "code": "// Java implementation to find the// maximum number of occurrence of// the overlapping countimport java.util.*; class GFG{ // Function to find the maximum// overlapping Stringsstatic int maxOverlap(String S, String T){ String str = T; int count[] = new int[T.length()]; int overlap = 0; int max_overlap = 0; for(int i = 0; i < S.length(); i++) { // Get the current character int index = str.indexOf(S.charAt(i)); // Condition to check if the current // character is the first character // of the String T then increment the // overlapping count if (index == 0) { overlap++; if (overlap >= 2) max_overlap = Math.max(overlap, max_overlap); count[index]++; } else { // Condition to check // previous character is also // occurred if (count[index - 1] <= 0) return -1; // Update count of previous // and 
current character count[index]++; count[index - 1]--; } // Condition to check the current // character is the last character // of the String T if (index == 4) overlap--; } // Condition to check the every // subsequence is a valid String T if (overlap == 0) return max_overlap; else return -1;} // Driver codepublic static void main(String[] args){ String S = \"chcirphirp\"; String T = \"chirp\"; // Function call System.out.print(maxOverlap(S, T));}} // This code is contributed by Princi Singh", "e": 28610, "s": 26892, "text": null }, { "code": "# Python3 implementation to find the# maximum number of occurrence of# the overlapping count # Function to find the maximum# overlapping stringsdef maxOverlap(S, T): str = T count = [0 for i in range(len(T))] overlap = 0 max_overlap = 0 for i in range(0, len(S)): # Get the current character index = str.find(S[i]) # Condition to check if # the current character is # the first character of the # string T then increment the # overlapping count if(index == 0): overlap += 1 if(overlap >= 2): max_overlap = max(overlap, max_overlap) count[index] += 1 else: # Condition to check # previous character is also # occurred if(count[index - 1] <= 0): return -1 # Update count of previous # and current character count[index] += 1 count[index - 1] -= 1 # Condition to check the current # character is the last character # of the string T if(index == 4): overlap -= 1 # Condition to check the every # subsequence is a valid string T if(overlap == 0): return max_overlap else: return -1 # Driver CodeS = \"chcirphirp\"T = \"chirp\" # Function Callprint(maxOverlap(S, T)) # This code is contributed by avanitrachhadiya2155", "e": 30086, "s": 28610, "text": null }, { "code": "// C# implementation to find the// maximum number of occurrence of// the overlapping countusing System; class GFG{ // Function to find the maximum// overlapping Stringsstatic int maxOverlap(String S, String T){ String str = T; int []count = new int[T.Length]; int overlap = 0; int max_overlap = 0; for(int i = 0; i < S.Length; i++) { // Get the current character int index = str.IndexOf(S[i]); // Condition to check if the current // character is the first character // of the String T then increment the // overlapping count if (index == 0) { overlap++; if (overlap >= 2) { max_overlap = Math.Max(overlap, max_overlap); } count[index]++; } else { // Condition to check // previous character is also // occurred if (count[index - 1] <= 0) return -1; // Update count of previous // and current character count[index]++; count[index - 1]--; } // Condition to check the current // character is the last character // of the String T if (index == 4) overlap--; } // Condition to check the every // subsequence is a valid String T if (overlap == 0) return max_overlap; else return -1;} // Driver codepublic static void Main(String[] args){ String S = \"chcirphirp\"; String T = \"chirp\"; // Function call Console.Write(maxOverlap(S, T));}} // This code is contributed by sapnasingh4991", "e": 31799, "s": 30086, "text": null }, { "code": null, "e": 31803, "s": 31799, "text": "2\n\n" }, { "code": null, "e": 31818, "s": 31805, "text": "princi singh" }, { "code": null, "e": 31833, "s": 31818, "text": "sapnasingh4991" }, { "code": null, "e": 31844, "s": 31833, "text": "nidhi_biet" }, { "code": null, "e": 31865, "s": 31844, "text": "avanitrachhadiya2155" }, { "code": null, "e": 31876, "s": 31865, "text": "Algorithms" }, { "code": null, "e": 31884, "s": 31876, "text": "Strings" }, { "code": null, "e": 31892, "s": 31884, "text": "Strings" }, 
{ "code": null, "e": 31903, "s": 31892, "text": "Algorithms" }, { "code": null, "e": 32001, "s": 31903, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 32010, "s": 32001, "text": "Comments" }, { "code": null, "e": 32023, "s": 32010, "text": "Old Comments" }, { "code": null, "e": 32048, "s": 32023, "text": "DSA Sheet by Love Babbar" }, { "code": null, "e": 32077, "s": 32048, "text": "Quadratic Probing in Hashing" }, { "code": null, "e": 32133, "s": 32077, "text": "Difference between Informed and Uninformed Search in AI" }, { "code": null, "e": 32167, "s": 32133, "text": "K means Clustering - Introduction" }, { "code": null, "e": 32210, "s": 32167, "text": "SCAN (Elevator) Disk Scheduling Algorithms" }, { "code": null, "e": 32235, "s": 32210, "text": "Reverse a string in Java" }, { "code": null, "e": 32269, "s": 32235, "text": "Longest Common Subsequence | DP-4" }, { "code": null, "e": 32315, "s": 32269, "text": "Write a program to reverse an array or string" }, { "code": null, "e": 32375, "s": 32315, "text": "Write a program to print all permutations of a given string" } ]
How to create Latex tables directly from Python code | by Joseph Early | Towards Data Science
Creating tables of results plays a major part in communicating the outcomes of experiments in data science. Various solutions exist for producing tables in Python and drawing them in the console output, but how do you transfer them to a written report? Usually, this requires some time-consuming and mind-numbing copying and pasting, but there are some better solutions.

A few libraries exist for Python that allow you to draw tables. In this article I'm going to focus on two in particular: tabulate and texttable. Both of them fulfil the same goal — the production of nicely formatted tables with as little hassle as possible. Tabulate is the more popular library, with over 10 million downloads last month. In comparison, texttable comes in at just under 2 million (according to PyPi Stats at the end of June 2020).

Both libraries allow you to format the tables in a particular style. Texttable gives complete autonomy to the user in terms of choosing their table design; it's possible to customise column alignment, header alignment, table decoration, column types and even the characters used to draw the lines between the rows and columns. Tabulate chooses to focus instead on pre-designed formats, but the user is still able to override these if they choose to do so.

My personal preference is to use texttable. In all likelihood, I use it because it was the first library that I came across for creating tables, but it hasn't let me down yet.

The tabulate formats are mostly geared towards use in other platforms or to emulate their designs, e.g. github, html, psql. One of these formats allows the creation of Latex code, providing you with text that you can directly copy and paste into your Latex document. Job done — that was easy.

Texttable, being more lightweight than tabulate, offers no such solution. However, as a personal side project, I put together my first Python library that acts as a wrapper around texttable to create Latex tables. I creatively called it latextable, and it's currently available on PyPi. It provides a similar output to the tabulate Latex format, as well as providing extra functionality such as matching the formatting of the texttable console output and allowing the user to add table captions and labels.

Using either library is incredibly straightforward; they both do exactly what they're designed for and have pretty similar usages. For a list of rows containing your table data, the tables are created as such:

from tabulate import tabulate
from texttable import Texttable

import latextable

print('Tabulate Table:')
print(tabulate(rows, headers='firstrow'))

table = Texttable()
table.set_cols_align(["c"] * 4)
table.set_deco(Texttable.HEADER | Texttable.VLINES)
table.add_rows(rows)
print('\nTexttable Table:')
print(table.draw())

Outputting the Latex code is equally easy:

print('\nTabulate Latex:')
print(tabulate(rows, headers='firstrow', tablefmt='latex'))
print('\nTexttable Latex:')
print(latextable.draw_latex(table, caption="A comparison of rocket features."))

And it gives the following Latex code:

Both Latex outputs can be copied directly into a Latex document, and the latextable output has the advantage of being automatically indented. A comparison of the console output with the rendered Latex is given below:

I've provided the complete source code for this example in a Colab Notebook. It's likely that you'll still have to make some alterations to the Latex code to get it exactly how you want it, but by using one of these tools at least you don't have to waste time copying and pasting stuff across. It also gives the benefit that you won't make any mistakes when copying across!
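For completeness, rows in the snippets above is simply a list of lists whose first entry is the header row. A hypothetical stand-in matching the four-column, rocket-themed setup might look like this (illustrative values, not the article's data):

rows = [['Rocket', 'Organisation', 'LEO Payload (kg)', 'Maiden Flight'],
        ['Falcon 9', 'SpaceX', '22,800', '2010'],
        ['Saturn V', 'NASA', '140,000', '1967'],
        ['Ariane 5', 'ESA', '21,000', '1996']]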
[ { "code": null, "e": 543, "s": 172, "text": "Creating tables of results plays a major part in communicating the outcomes of experiments in data science. Various solutions exist for producing tables in Python and drawing them in the console output, but how do you transfer them to a written report? Usually, this requires some time-consuming and mind-numbing copying and pasting, but there are some better solutions." }, { "code": null, "e": 991, "s": 543, "text": "A few libraries exist for Python that allow you to draw tables. In this article I’m going to focus on two in particular: tabulate and texttable. Both of them fulfil the same goal — the production of nicely formatted tables with as little hassle as possible. Tabulate is the more popular library, with over 10 million downloads last month. In comparison, texttable comes in at just under 2 million (according to PyPi Stats at the end of June 2020):" }, { "code": null, "e": 1447, "s": 991, "text": "Both libraries allow you to format the tables in a particular style. Texttable gives complete autonomy to the user in terms of choosing their table design; it’s possible to customise column alignment, header alignment, table decoration, column types and even the characters used to draw the lines between the rows and columns. Tabulate chooses to focus instead on pre-designed formats, but the user is still able to override these if they choose to do so." }, { "code": null, "e": 1623, "s": 1447, "text": "My personal preference is to use texttable. In all likelihood, I use it because it was the first library that I came across for creating tables, but it hasn’t let me down yet." }, { "code": null, "e": 1916, "s": 1623, "text": "The tabulate formats are mostly geared towards use in other platforms or to emulate their designs, e.g. github, html, psql. One of these formats allows the creation of latex code, providing you with text that you can directly copy and paste into your Latex document. Job done — that was easy." }, { "code": null, "e": 2423, "s": 1916, "text": "Texttable, being more lightweight than tabulate, offers no such solution. However, as a personal side project, I put together my first Python library that acts as a wrapper around texttable to create Latex tables. I creatively called it latextable, and it’s currently available on PyPi. It provides a similar output to the tabulate Latex format, as well as providing extra functionality such as matching the formatting of the texttable console output and allowing the user to add table captions and labels." }, { "code": null, "e": 2633, "s": 2423, "text": "Using either library is incredibly straightforward; they both do exactly what they’re designed for and have pretty similar usages. 
For a list of rows containing your table data, the tables are created as such:" }, { "code": null, "e": 2846, "s": 2633, "text": "print('Tabulate Table:')print(tabulate(rows, headers='firstrow'))table = Texttable()table.set_cols_align([\"c\"] * 4)table.set_deco(Texttable.HEADER | Texttable.VLINES)print('\\nTexttable Table:')print(table.draw())" }, { "code": null, "e": 2889, "s": 2846, "text": "Outputting the Latex code is equally easy:" }, { "code": null, "e": 3081, "s": 2889, "text": "print(‘\\nTabulate Latex:’)print(tabulate(rows, headers=’firstrow’, tablefmt=’latex’))print(‘\\nTexttable Latex:’)print(latextable.draw_latex(table, caption=”A comparison of rocket features.”))" }, { "code": null, "e": 3120, "s": 3081, "text": "And it gives the following Latex code:" }, { "code": null, "e": 3337, "s": 3120, "text": "Both Latex outputs can be copied directly into a Latex document, and the latextable output has the advantage of being automatically indented. A comparison of the console output with the rendered Latex is given below:" } ]
Understanding NumPy sum. If you are not clear on what NumPy is... | by Kshitij Bajracharya | Towards Data Science
If you are not clear on what NumPy is or how it works, see this post first.

towardsdatascience.com

In that post on introduction to NumPy, I did a row-wise addition on a NumPy array. Then one of the readers of the post responded by saying that what I had done was a column-wise addition, not row-wise. Indeed, when I was learning it, I felt the same: that this is not how it should work. It is the opposite of how it should work. So, I looked at the docs, but all it said was

we can sum each row of an array, in which case we operate along columns, or axis 1

It didn't help. I was still confused. I kept looking and then I found this post by Aerin Kim and it changed the way I looked at summing in NumPy arrays. So using her post as the base, this is my take on NumPy array sums.

If you're not clear what the problem is, let's define it formally. We have a 5x5 NumPy array like the following:

a = array([[ 12., -22., -20., -19.,  -3.],
           [-23.,  21., -17., -11.,  -1.],
           [ -4.,  -5.,  16.,  -9., -14.],
           [-10.,  -6., -18.,  15.,  -8.],
           [-25.,  -2., -13.,  -7.,  24.]])

We do a row-wise sum on the above array using

sum_matrix = a.sum(axis=1)

And we end up having the following array in sum_matrix:

array([-52., -31., -16., -27., -23.])

And that seems to be correct if axis=1 meant row-wise addition. No issues. Except that axis=0 should have actually meant rows and not axis=1. This will be clearer as we see how a NumPy array is formed. Let's take another NumPy array.

arr = np.arange(12).reshape(4,3)

What we get back in arr is

array([[ 0,  1,  2],
       [ 3,  4,  5],
       [ 6,  7,  8],
       [ 9, 10, 11]])

Clearly, it's 4 rows and 3 columns. So, the first axis, axis=0, should denote rows and the second axis, axis=1, should denote columns.

This will also be confirmed if you look at pandas. Let's take a DataFrame example.

df = pd.DataFrame(data=np.arange(12).reshape(4,3),
                  index=['row1','row2','row3','row4'],
                  columns=['col1','col2','col3'])

df looks like this (rendered table omitted).

If you now do df.drop(labels=['row1'], axis=0), what you'll get back is the DataFrame without row1. And if you do df.drop(labels=['col2'], axis=1), you'll get the DataFrame without col2.

Clearly, axis=0 means rows and axis=1 means columns. Then, why is it that NumPy sum does it differently?

To quote Aerin Kim, in her post, she wrote

The way to understand the "axis" of numpy sum is it collapses the specified axis. So when it collapses the axis 0 (row), it becomes just one row and column-wise sum.

Let's see what that means. Now, it can get a little confusing in 2D, so let's understand this first in a higher dimension and then we'll step it down into 2D; much like what she did in her post.

So, let's take a 3D array with a shape of (4,3,2).

three_d_array = np.arange(24).reshape(4,3,2)

three_d_array now becomes equal to

array([[[ 0,  1],
        [ 2,  3],
        [ 4,  5]],

       [[ 6,  7],
        [ 8,  9],
        [10, 11]],

       [[12, 13],
        [14, 15],
        [16, 17]],

       [[18, 19],
        [20, 21],
        [22, 23]]])

Now, let us look at elements along the first axis, axis=0. We have 4 elements along axis=0. These can be viewed with three_d_array[0], three_d_array[1], three_d_array[2], and three_d_array[3].

Each of these is a 2D array of shape (3,2). Now, if we add all of the above 2D arrays,

three_d_array[0] + three_d_array[1] + three_d_array[2] + three_d_array[3]

after element by element addition, we get back a 3x2 array as follows:

array([[36, 40],
       [44, 48],
       [52, 56]])

This is exactly what we get when we do three_d_array.sum(axis=0). We are taking all the elements along axis=0. And then we sum them, element by element. Originally, we had a 3D array of shape (4,3,2). After summing, we have a 2D array of shape (3,2). So, we lost the first axis 4 and retained the remaining two (3,2). That is probably what Kim meant when she said "it collapses the axis".

Now, let's look at axis=1. This time we keep the first axis fixed, and sum along the second axis, axis=1. Here, we have 12 elements, 3 along each from the first axis 0. Adding the first group of 3 gives us one row of the result; we do the same for the remaining three groups. Combining all 4, we get an array as below:

array([[ 6,  9],
       [24, 27],
       [42, 45],
       [60, 63]])

This is exactly what we get when we do three_d_array.sum(axis=1); performing element by element addition along axis=1. Again, the shape of the sum matrix is (4,2), which shows that we got rid of the second axis 3 from the original (4,3,2).

For the final axis 2, we do the same thing. This time we have all 24 elements and we sum along axis=2 by keeping the first two axes fixed. Looking at only the first 6 elements just gives us one row of the sum array. We do the same for the rest and in the end we have the following array,

array([[ 1,  5,  9],
       [13, 17, 21],
       [25, 29, 33],
       [37, 41, 45]])

which is exactly what we get on three_d_array.sum(axis=2). Again the shape of the sum array is (4,3), where we lose the last axis 2.

Back to 2D, the array we have is

a = array([[ 12., -22., -20., -19.,  -3.],
           [-23.,  21., -17., -11.,  -1.],
           [ -4.,  -5.,  16.,  -9., -14.],
           [-10.,  -6., -18.,  15.,  -8.],
           [-25.,  -2., -13.,  -7.,  24.]])

Now, when we take elements along axis=0, we get the five rows a[0] through a[4], and element by element addition of these elements gives us

array([-50., -14., -52., -31.,  -2.])

And that is equivalent to a.sum(axis=0).

And similarly, when we take elements along axis=1, if we sum all a[0]s, all a[1]s, all a[2]s, all a[3]s and all a[4]s separately, we get

array([-52., -31., -16., -27., -23.])

which is what we get with a.sum(axis=1). If we check the shape, we get (5,). This may not be clear if we look at only the 2D array, but as we saw earlier, this just collapses the axis 1 and returns the shape for the remaining axis 0.

Now if we look back at the statement from the docs, "we can sum each row of an array, in which case we operate along columns, or axis 1", I think it makes a lot more sense. So, although we calculated the sum of each row, technically it is a column-wise addition rather than a row-wise addition, as axis=0 is row and axis=1 is column. Why did NumPy not make it straightforward as in the case of pandas? Well, to be honest, I don't have an answer to that. But that's the way the cookie crumbles.
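As a closing sanity check, here is a minimal snippet (an illustration, not from the original post) confirming that summing collapses the chosen axis; NumPy's keepdims option can preserve the axis with size 1 when the original dimensionality matters:

import numpy as np

three_d_array = np.arange(24).reshape(4, 3, 2)

# Summing along an axis removes that axis from the shape
print(three_d_array.sum(axis=0).shape)  # (3, 2)
print(three_d_array.sum(axis=1).shape)  # (4, 2)
print(three_d_array.sum(axis=2).shape)  # (4, 3)

# keepdims=True keeps the collapsed axis with size 1
print(three_d_array.sum(axis=1, keepdims=True).shape)  # (4, 1, 2)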
So, we lost the first axis 4 and retained the remaining two (3,2). That is probably what Kim meant when she said “it collapses the axis”. Now, let’s look at axis=1. This time we keep the first axis fixed, and sum along the second axis, axis=1. Here, we have 12 elements, 3 along each from the first axis 0. Adding all of these, we get back Similarly, we add for the remaining three Combine all 4 and we get an array as below: array([[ 6, 9], [24, 27], [42, 45], [60, 63]]) This is exactly what we get when we do three_d_array.sum(axis=1); performing element by element addition along axis=1. Again, the shape of the sum matrix is (4,2), which shows that we got rid of the second axis 3 from the original (4,3,2). For the final axis 2, we do the same thing. This time we have all 24 elements and we sum along axis=2 by keeping the first two axes fixed. Here, we look at only the first 6 elements. This just gives us one row of the sum array. We do the same for the rest and in the end we have the following array, array([[ 1, 5, 9], [13, 17, 21], [25, 29, 33], [37, 41, 45]]) which is exactly what we get on three_d_array.sum(axis=2). Again the shape of the sum array is (4,3) where we lose the last axis 2. The array we have is a = array([[ 12., -22., -20., -19., -3.], [-23., 21., -17., -11., -1.], [ -4., -5., 16., -9., -14.], [-10., -6., -18., 15., -8.], [-25., -2., -13., -7., 24.]]) Now, when we take elements along axis=0, we get And element by element addition of these elements give us And that is equivalent to a.sum(axis=0). And similarly when we take elements along axis=1, Now, if we sum all a[0]s, all a[1]s, all a[2]s, all a[3]s and all a[4]s separately, we get which is what we get with a.sum(axis=1). If we check the shape, we get (5,). This may not be clear if we look at only the 2D array, but as we saw earlier, this just collapses the axis 1 and returns the shape for the remaining axis 0. Now if we look back at the statement from the docs, “we can sum each row of an array, in which case we operate along columns, or axis 1”, I think it makes a lot more sense. So, although we calculated the sum of each row, technically it is a column-wise addition rather than a row-wise addition as axis=0 is row and axis=1 is column. Why did NumPy not make it straightforward as in case of pandas? Well, to be honest, I don’t have an answer to that. But that’s the way the cookie crumbles.
[ { "code": null, "e": 248, "s": 172, "text": "If you are not clear on what NumPy is or how it works, see this post first." }, { "code": null, "e": 271, "s": 248, "text": "towardsdatascience.com" }, { "code": null, "e": 646, "s": 271, "text": "In that post on introduction to NumPy, I did a row-wise addition on a NumPy array. Then one of the readers of the post responded by saying that what I had done was a column-wise addition, not row-wise. Indeed, when I was learning it, I felt the same that this is not how it should work. It is the opposite of how it should work. So, I looked at the docs, but all it said was" }, { "code": null, "e": 729, "s": 646, "text": "we can sum each row of an array, in which case we operate along columns, or axis 1" }, { "code": null, "e": 950, "s": 729, "text": "It didn’t help. I was still confused. I kept looking and then I found this post by Aerin Kim and it changed the way I looked at summing in NumPy arrays. So using her post as the base, this is my take on NumPy array sums." }, { "code": null, "e": 1017, "s": 950, "text": "If you’re not clear what the problem is, let’s define it formally." }, { "code": null, "e": 1062, "s": 1017, "text": "We have a 5x5 NumPy array like the following" }, { "code": null, "e": 1258, "s": 1062, "text": "a = array([[ 12., -22., -20., -19., -3.], [-23., 21., -17., -11., -1.], [ -4., -5., 16., -9., -14.], [-10., -6., -18., 15., -8.], [-25., -2., -13., -7., 24.]])" }, { "code": null, "e": 1304, "s": 1258, "text": "We do a row-wise sum on the above array using" }, { "code": null, "e": 1331, "s": 1304, "text": "sum_matrix = a.sum(axis=1)" }, { "code": null, "e": 1386, "s": 1331, "text": "And we end up having the following array in sum_matrix" }, { "code": null, "e": 1424, "s": 1386, "text": "array([-52., -31., -16., -27., -23.])" }, { "code": null, "e": 1659, "s": 1424, "text": "And that seems to be correct if axis=1 meant row-wise addition. No issues. Except that, axis=0 should have actually meant rows and not axis=1. This will be clearer as we see how a NumPy array is formed. Let’s take another NumPy array." }, { "code": null, "e": 1692, "s": 1659, "text": "arr = np.arange(12).reshape(4,3)" }, { "code": null, "e": 1719, "s": 1692, "text": "What we get back in arr is" }, { "code": null, "e": 1801, "s": 1719, "text": "array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]])" }, { "code": null, "e": 1936, "s": 1801, "text": "Clearly, it’s 4 rows and 3 columns. So, the first axis, axis=0, should denote rows and the second axis, axis=1, should denote columns." }, { "code": null, "e": 2019, "s": 1936, "text": "This will also be confirmed if you look at pandas. Let’s take a DataFrame example." }, { "code": null, "e": 2137, "s": 2019, "text": "df = pd.DataFrame(data=np.arange(12).reshape(4,3),index=['row1','row2','row3','row4'],columns=['col1','col2','col3'])" }, { "code": null, "e": 2156, "s": 2137, "text": "df looks like this" }, { "code": null, "e": 2228, "s": 2156, "text": "If you now do df.drop(labels=[‘row1’], axis=0), what you’ll get back is" }, { "code": null, "e": 2287, "s": 2228, "text": "And if you do df.drop(labels=[‘col2’], axis=1), you’ll get" }, { "code": null, "e": 2392, "s": 2287, "text": "Clearly, axis=0 means rows and axis=1 means columns. Then, why is it that NumPy sum does it differently?" }, { "code": null, "e": 2435, "s": 2392, "text": "To quote Aerin Kim, in her post, she wrote" }, { "code": null, "e": 2601, "s": 2435, "text": "The way to understand the “axis” of numpy sum is it collapses the specified axis. 
So when it collapses the axis 0 (row), it becomes just one row and column-wise sum." }, { "code": null, "e": 2796, "s": 2601, "text": "Let’s see what that means. Now, it can get a little confusing in 2D, so let’s understand this first in a higher dimension and then we’ll step it down into 2D; much like what she did in her post." }, { "code": null, "e": 2847, "s": 2796, "text": "So, let’s take a 3D array with a shape of (4,3,2)." }, { "code": null, "e": 2892, "s": 2847, "text": "three_d_array = np.arange(24).reshape(4,3,2)" }, { "code": null, "e": 2927, "s": 2892, "text": "three_d_array now becomes equal to" }, { "code": null, "e": 3137, "s": 2927, "text": "array([[[ 0, 1], [ 2, 3], [ 4, 5]], [[ 6, 7], [ 8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]], [[18, 19], [20, 21], [22, 23]]])" }, { "code": null, "e": 3328, "s": 3137, "text": "Now, let us look at elements along the first axis, axis=0. We have 4 elements along axis=0. These can be viewed withthree_d_array[0], three_d_array[1], three_d_array[2], and three_d_array[3]" }, { "code": null, "e": 3415, "s": 3328, "text": "Each of these is a 2D array of shape (3,2). Now, if we add all of the above 2D arrays," }, { "code": null, "e": 3483, "s": 3415, "text": "three_d_array[0]+three_d_array[1]+three_d_array[2]+three_d_array[3]" }, { "code": null, "e": 3554, "s": 3483, "text": "after element by element addition, we get back a 3x2 array as follows:" }, { "code": null, "e": 3604, "s": 3554, "text": "array([[36, 40], [44, 48], [52, 56]])" }, { "code": null, "e": 3993, "s": 3604, "text": "This is exactly what we get when we do three_d_array.sum(axis=0). We are taking all the elements along axis=0. And then we sum them, element by element. Originally, we had a 3D array of shape (4,3,2). After summing, we have a 2D array of shape (3,2). So, we lost the first axis 4 and retained the remaining two (3,2). That is probably what Kim meant when she said “it collapses the axis”." }, { "code": null, "e": 4162, "s": 3993, "text": "Now, let’s look at axis=1. This time we keep the first axis fixed, and sum along the second axis, axis=1. Here, we have 12 elements, 3 along each from the first axis 0." }, { "code": null, "e": 4195, "s": 4162, "text": "Adding all of these, we get back" }, { "code": null, "e": 4237, "s": 4195, "text": "Similarly, we add for the remaining three" }, { "code": null, "e": 4281, "s": 4237, "text": "Combine all 4 and we get an array as below:" }, { "code": null, "e": 4347, "s": 4281, "text": "array([[ 6, 9], [24, 27], [42, 45], [60, 63]])" }, { "code": null, "e": 4587, "s": 4347, "text": "This is exactly what we get when we do three_d_array.sum(axis=1); performing element by element addition along axis=1. Again, the shape of the sum matrix is (4,2), which shows that we got rid of the second axis 3 from the original (4,3,2)." }, { "code": null, "e": 4770, "s": 4587, "text": "For the final axis 2, we do the same thing. This time we have all 24 elements and we sum along axis=2 by keeping the first two axes fixed. Here, we look at only the first 6 elements." }, { "code": null, "e": 4887, "s": 4770, "text": "This just gives us one row of the sum array. We do the same for the rest and in the end we have the following array," }, { "code": null, "e": 4969, "s": 4887, "text": "array([[ 1, 5, 9], [13, 17, 21], [25, 29, 33], [37, 41, 45]])" }, { "code": null, "e": 5101, "s": 4969, "text": "which is exactly what we get on three_d_array.sum(axis=2). Again the shape of the sum array is (4,3) where we lose the last axis 2." 
}, { "code": null, "e": 5122, "s": 5101, "text": "The array we have is" }, { "code": null, "e": 5318, "s": 5122, "text": "a = array([[ 12., -22., -20., -19., -3.], [-23., 21., -17., -11., -1.], [ -4., -5., 16., -9., -14.], [-10., -6., -18., 15., -8.], [-25., -2., -13., -7., 24.]])" }, { "code": null, "e": 5366, "s": 5318, "text": "Now, when we take elements along axis=0, we get" }, { "code": null, "e": 5424, "s": 5366, "text": "And element by element addition of these elements give us" }, { "code": null, "e": 5465, "s": 5424, "text": "And that is equivalent to a.sum(axis=0)." }, { "code": null, "e": 5515, "s": 5465, "text": "And similarly when we take elements along axis=1," }, { "code": null, "e": 5606, "s": 5515, "text": "Now, if we sum all a[0]s, all a[1]s, all a[2]s, all a[3]s and all a[4]s separately, we get" }, { "code": null, "e": 5840, "s": 5606, "text": "which is what we get with a.sum(axis=1). If we check the shape, we get (5,). This may not be clear if we look at only the 2D array, but as we saw earlier, this just collapses the axis 1 and returns the shape for the remaining axis 0." }, { "code": null, "e": 6173, "s": 5840, "text": "Now if we look back at the statement from the docs, “we can sum each row of an array, in which case we operate along columns, or axis 1”, I think it makes a lot more sense. So, although we calculated the sum of each row, technically it is a column-wise addition rather than a row-wise addition as axis=0 is row and axis=1 is column." } ]
SQLite - Python
In this chapter, you will learn how to use SQLite in Python programs.

SQLite3 can be integrated with Python using the sqlite3 module, which was written by Gerhard Haring. It provides an SQL interface compliant with the DB-API 2.0 specification described by PEP 249. You do not need to install this module separately because it is shipped by default along with Python version 2.5.x onwards.

To use the sqlite3 module, you must first create a connection object that represents the database, and then optionally you can create a cursor object, which will help you in executing all the SQL statements.

Following are the important sqlite3 module routines, which should suffice for working with SQLite databases from your Python program. If you are looking for a more sophisticated application, then you can look into the Python sqlite3 module's official documentation.

sqlite3.connect(database [,timeout ,other optional arguments])

This API opens a connection to the SQLite database file. You can use ":memory:" to open a database connection to a database that resides in RAM instead of on disk. If the database is opened successfully, it returns a connection object.

When a database is accessed by multiple connections, and one of the processes modifies the database, the SQLite database is locked until that transaction is committed. The timeout parameter specifies how long the connection should wait for the lock to go away before raising an exception. The default for the timeout parameter is 5.0 (five seconds).

If the given database name does not exist, then this call will create the database. You can specify the filename with the required path as well, if you want to create a database anywhere other than the current directory.

connection.cursor([cursorClass])

This routine creates a cursor which will be used throughout your database programming with Python. This method accepts a single optional parameter cursorClass. If supplied, this must be a custom cursor class that extends sqlite3.Cursor.

cursor.execute(sql [, optional parameters])

This routine executes an SQL statement. The SQL statement may be parameterized (i.e. use placeholders instead of SQL literals). The sqlite3 module supports two kinds of placeholders: question marks (qmark style) and named placeholders (named style).

For example − cursor.execute("insert into people values (?, ?)", (who, age))

connection.execute(sql [, optional parameters])

This routine is a shortcut of the above execute method provided by the cursor object; it creates an intermediate cursor object by calling the cursor method, then calls the cursor's execute method with the parameters given.

cursor.executemany(sql, seq_of_parameters)

This routine executes an SQL command against all parameter sequences or mappings found in the sequence seq_of_parameters.

connection.executemany(sql[, parameters])

This routine is a shortcut that creates an intermediate cursor object by calling the cursor method, then calls the cursor's executemany method with the parameters given.

cursor.executescript(sql_script)

This routine executes multiple SQL statements at once, provided in the form of a script. It issues a COMMIT statement first, then executes the SQL script it gets as a parameter. All the SQL statements should be separated by a semicolon (;).

connection.executescript(sql_script)

This routine is a shortcut that creates an intermediate cursor object by calling the cursor method, then calls the cursor's executescript method with the parameters given.
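Before moving on to the transaction and fetch routines, here is a small self-contained sketch of the execute family in action. It is my own illustration rather than part of the original tutorial; the people table and its columns are made up for the example.

import sqlite3

# An in-memory database, as mentioned above, so nothing touches disk.
conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE people (name TEXT, age INT)")

# qmark style placeholders
cur.execute("INSERT INTO people VALUES (?, ?)", ("Paul", 32))

# named style placeholders
cur.execute("INSERT INTO people VALUES (:name, :age)", {"name": "Allen", "age": 25})

# executemany runs the same statement once per parameter sequence
cur.executemany("INSERT INTO people VALUES (?, ?)", [("Teddy", 23), ("Mark", 25)])

# executescript runs several semicolon-separated statements at once
cur.executescript("UPDATE people SET age = age + 1; DELETE FROM people WHERE age > 60;")

conn.commit()
print(cur.execute("SELECT COUNT(*) FROM people").fetchone())   # (4,)
conn.close()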
connection.total_changes

This read-only attribute holds the total number of database rows that have been modified, inserted, or deleted since the database connection was opened.

connection.commit()

This method commits the current transaction. If you don't call this method, anything you did since the last call to commit() is not visible from other database connections.

connection.rollback()

This method rolls back any changes to the database since the last call to commit().

connection.close()

This method closes the database connection. Note that this does not automatically call commit(). If you just close your database connection without calling commit() first, your changes will be lost!

cursor.fetchone()

This method fetches the next row of a query result set, returning a single sequence, or None when no more data is available.

cursor.fetchmany([size = cursor.arraysize])

This routine fetches the next set of rows of a query result, returning a list. An empty list is returned when no more rows are available. The method tries to fetch as many rows as indicated by the size parameter.

cursor.fetchall()

This routine fetches all (remaining) rows of a query result, returning a list. An empty list is returned when no rows are available.

The following Python code shows how to connect to an existing database. If the database does not exist, then it will be created, and finally a database object will be returned.

#!/usr/bin/python

import sqlite3

conn = sqlite3.connect('test.db')

print "Opened database successfully";

Here, you can also supply the database name as the special name :memory: to create a database in RAM. Now, let's run the above program to create our database test.db in the current directory. You can change your path as per your requirement. Keep the above code in a sqlite.py file and execute it as shown below. If the database is successfully created, then it will display the following message.

$chmod +x sqlite.py
$./sqlite.py
Opened database successfully

The following Python program will be used to create a table in the previously created database.

#!/usr/bin/python

import sqlite3

conn = sqlite3.connect('test.db')
print "Opened database successfully";

conn.execute('''CREATE TABLE COMPANY
   (ID INT PRIMARY KEY NOT NULL,
   NAME TEXT NOT NULL,
   AGE INT NOT NULL,
   ADDRESS CHAR(50),
   SALARY REAL);''')
print "Table created successfully";

conn.close()

When the above program is executed, it will create the COMPANY table in your test.db and it will display the following messages −

Opened database successfully
Table created successfully

The following Python program shows how to create records in the COMPANY table created in the above example.
#!/usr/bin/python

import sqlite3

conn = sqlite3.connect('test.db')
print "Opened database successfully";

conn.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
      VALUES (1, 'Paul', 32, 'California', 20000.00 )");

conn.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
      VALUES (2, 'Allen', 25, 'Texas', 15000.00 )");

conn.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
      VALUES (3, 'Teddy', 23, 'Norway', 20000.00 )");

conn.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
      VALUES (4, 'Mark', 25, 'Rich-Mond ', 65000.00 )");

conn.commit()
print "Records created successfully";
conn.close()

When the above program is executed, it will create the given records in the COMPANY table and it will display the following two lines −

Opened database successfully
Records created successfully

Following Python program shows how to fetch and display records from the COMPANY table created in the above example.

#!/usr/bin/python

import sqlite3

conn = sqlite3.connect('test.db')
print "Opened database successfully";

cursor = conn.execute("SELECT id, name, address, salary from COMPANY")
for row in cursor:
   print "ID = ", row[0]
   print "NAME = ", row[1]
   print "ADDRESS = ", row[2]
   print "SALARY = ", row[3], "\n"

print "Operation done successfully";
conn.close()

When the above program is executed, it will produce the following result.

Opened database successfully
ID = 1
NAME = Paul
ADDRESS = California
SALARY = 20000.0

ID = 2
NAME = Allen
ADDRESS = Texas
SALARY = 15000.0

ID = 3
NAME = Teddy
ADDRESS = Norway
SALARY = 20000.0

ID = 4
NAME = Mark
ADDRESS = Rich-Mond
SALARY = 65000.0

Operation done successfully

Following Python code shows how to use UPDATE statement to update any record and then fetch and display the updated records from the COMPANY table.

#!/usr/bin/python

import sqlite3

conn = sqlite3.connect('test.db')
print "Opened database successfully";

conn.execute("UPDATE COMPANY set SALARY = 25000.00 where ID = 1")
conn.commit()
print "Total number of rows updated :", conn.total_changes

cursor = conn.execute("SELECT id, name, address, salary from COMPANY")
for row in cursor:
   print "ID = ", row[0]
   print "NAME = ", row[1]
   print "ADDRESS = ", row[2]
   print "SALARY = ", row[3], "\n"

print "Operation done successfully";
conn.close()

When the above program is executed, it will produce the following result.

Opened database successfully
Total number of rows updated : 1
ID = 1
NAME = Paul
ADDRESS = California
SALARY = 25000.0

ID = 2
NAME = Allen
ADDRESS = Texas
SALARY = 15000.0

ID = 3
NAME = Teddy
ADDRESS = Norway
SALARY = 20000.0

ID = 4
NAME = Mark
ADDRESS = Rich-Mond
SALARY = 65000.0

Operation done successfully

Following Python code shows how to use DELETE statement to delete any record and then fetch and display the remaining records from the COMPANY table.

#!/usr/bin/python

import sqlite3

conn = sqlite3.connect('test.db')
print "Opened database successfully";

conn.execute("DELETE from COMPANY where ID = 2;")
conn.commit()
print "Total number of rows deleted :", conn.total_changes

cursor = conn.execute("SELECT id, name, address, salary from COMPANY")
for row in cursor:
   print "ID = ", row[0]
   print "NAME = ", row[1]
   print "ADDRESS = ", row[2]
   print "SALARY = ", row[3], "\n"

print "Operation done successfully";
conn.close()

When the above program is executed, it will produce the following result.
Opened database successfully
Total number of rows deleted : 1
ID = 1
NAME = Paul
ADDRESS = California
SALARY = 20000.0

ID = 3
NAME = Teddy
ADDRESS = Norway
SALARY = 20000.0

ID = 4
NAME = Mark
ADDRESS = Rich-Mond
SALARY = 65000.0

Operation done successfully
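To round off the CRUD examples, here is a short sketch of the fetch methods described earlier together with the connection's context-manager behaviour. This is my own addition rather than part of the original tutorial, and it assumes the COMPANY table from the examples above already exists.

import sqlite3

conn = sqlite3.connect('test.db')

cursor = conn.execute("SELECT id, name FROM COMPANY ORDER BY id")
print(cursor.fetchone())     # the first row as a tuple, or None if empty
print(cursor.fetchmany(2))   # up to the next two rows, as a list
print(cursor.fetchall())     # whatever rows remain, as a list

# The connection also works as a context manager: the transaction is
# committed if the block succeeds and rolled back if it raises.
with conn:
    conn.execute("UPDATE COMPANY SET SALARY = SALARY + 500 WHERE ID = 1")

conn.close()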
[ { "code": null, "e": 2708, "s": 2638, "text": "In this chapter, you will learn how to use SQLite in Python programs." }, { "code": null, "e": 3024, "s": 2708, "text": "SQLite3 can be integrated with Python using sqlite3 module, which was written by Gerhard Haring. It provides an SQL interface compliant with the DB-API 2.0 specification described by PEP 249. You do not need to install this module separately because it is shipped by default along with Python version 2.5.x onwards." }, { "code": null, "e": 3227, "s": 3024, "text": "To use sqlite3 module, you must first create a connection object that represents the database and then optionally you can create a cursor object, which will help you in executing all the SQL statements." }, { "code": null, "e": 3494, "s": 3227, "text": "Following are important sqlite3 module routines, which can suffice your requirement to work with SQLite database from your Python program. If you are looking for a more sophisticated application, then you can look into Python sqlite3 module's official documentation." }, { "code": null, "e": 3557, "s": 3494, "text": "sqlite3.connect(database [,timeout ,other optional arguments])" }, { "code": null, "e": 3789, "s": 3557, "text": "This API opens a connection to the SQLite database file. You can use \":memory:\" to open a database connection to a database that resides in RAM instead of on disk. If database is opened successfully, it returns a connection object." }, { "code": null, "e": 4138, "s": 3789, "text": "When a database is accessed by multiple connections, and one of the processes modifies the database, the SQLite database is locked until that transaction is committed. The timeout parameter specifies how long the connection should wait for the lock to go away until raising an exception. The default for the timeout parameter is 5.0 (five seconds)." }, { "code": null, "e": 4357, "s": 4138, "text": "If the given database name does not exist then this call will create the database. You can specify filename with the required path as well if you want to create a database anywhere else except in the current directory." }, { "code": null, "e": 4390, "s": 4357, "text": "connection.cursor([cursorClass])" }, { "code": null, "e": 4630, "s": 4390, "text": "This routine creates a cursor which will be used throughout of your database programming with Python. This method accepts a single optional parameter cursorClass. If supplied, this must be a custom cursor class that extends sqlite3.Cursor." }, { "code": null, "e": 4674, "s": 4630, "text": "cursor.execute(sql [, optional parameters])" }, { "code": null, "e": 4907, "s": 4674, "text": "This routine executes an SQL statement. The SQL statement may be parameterized (i. e. placeholders instead of SQL literals). The sqlite3 module supports two kinds of placeholders: question marks and named placeholders (named style)." }, { "code": null, "e": 4984, "s": 4907, "text": "For example − cursor.execute(\"insert into people values (?, ?)\", (who, age))" }, { "code": null, "e": 5032, "s": 4984, "text": "connection.execute(sql [, optional parameters])" }, { "code": null, "e": 5258, "s": 5032, "text": "This routine is a shortcut of the above execute method provided by the cursor object and it creates an intermediate cursor object by calling the cursor method, then calls the cursor's execute method with the parameters given." 
}, { "code": null, "e": 5301, "s": 5258, "text": "cursor.executemany(sql, seq_of_parameters)" }, { "code": null, "e": 5409, "s": 5301, "text": "This routine executes an SQL command against all parameter sequences or mappings found in the sequence sql." }, { "code": null, "e": 5451, "s": 5409, "text": "connection.executemany(sql[, parameters])" }, { "code": null, "e": 5621, "s": 5451, "text": "This routine is a shortcut that creates an intermediate cursor object by calling the cursor method, then calls the cursor.s executemany method with the parameters given." }, { "code": null, "e": 5654, "s": 5621, "text": "cursor.executescript(sql_script)" }, { "code": null, "e": 5893, "s": 5654, "text": "This routine executes multiple SQL statements at once provided in the form of script. It issues a COMMIT statement first, then executes the SQL script it gets as a parameter. All the SQL statements should be separated by a semi colon (;)." }, { "code": null, "e": 5930, "s": 5893, "text": "connection.executescript(sql_script)" }, { "code": null, "e": 6102, "s": 5930, "text": "This routine is a shortcut that creates an intermediate cursor object by calling the cursor method, then calls the cursor's executescript method with the parameters given." }, { "code": null, "e": 6129, "s": 6102, "text": "connection.total_changes()" }, { "code": null, "e": 6272, "s": 6129, "text": "This routine returns the total number of database rows that have been modified, inserted, or deleted since the database connection was opened." }, { "code": null, "e": 6292, "s": 6272, "text": "connection.commit()" }, { "code": null, "e": 6465, "s": 6292, "text": "This method commits the current transaction. If you don't call this method, anything you did since the last call to commit() is not visible from other database connections." }, { "code": null, "e": 6487, "s": 6465, "text": "connection.rollback()" }, { "code": null, "e": 6571, "s": 6487, "text": "This method rolls back any changes to the database since the last call to commit()." }, { "code": null, "e": 6590, "s": 6571, "text": "connection.close()" }, { "code": null, "e": 6789, "s": 6590, "text": "This method closes the database connection. Note that this does not automatically call commit(). If you just close your database connection without calling commit() first, your changes will be lost!" }, { "code": null, "e": 6807, "s": 6789, "text": "cursor.fetchone()" }, { "code": null, "e": 6932, "s": 6807, "text": "This method fetches the next row of a query result set, returning a single sequence, or None when no more data is available." }, { "code": null, "e": 6976, "s": 6932, "text": "cursor.fetchmany([size = cursor.arraysize])" }, { "code": null, "e": 7189, "s": 6976, "text": "This routine fetches the next set of rows of a query result, returning a list. An empty list is returned when no more rows are available. The method tries to fetch as many rows as indicated by the size parameter." }, { "code": null, "e": 7207, "s": 7189, "text": "cursor.fetchall()" }, { "code": null, "e": 7340, "s": 7207, "text": "This routine fetches all (remaining) rows of a query result, returning a list. An empty list is returned when no rows are available." }, { "code": null, "e": 7512, "s": 7340, "text": "Following Python code shows how to connect to an existing database. If the database does not exist, then it will be created and finally a database object will be returned." 
}, { "code": null, "e": 7620, "s": 7512, "text": "#!/usr/bin/python\n\nimport sqlite3\n\nconn = sqlite3.connect('test.db')\n\nprint \"Opened database successfully\";" }, { "code": null, "e": 8012, "s": 7620, "text": "Here, you can also supply database name as the special name :memory: to create a database in RAM. Now, let's run the above program to create our database test.db in the current directory. You can change your path as per your requirement. Keep the above code in sqlite.py file and execute it as shown below. If the database is successfully created, then it will display the following message." }, { "code": null, "e": 8073, "s": 8012, "text": "$chmod +x sqlite.py\n$./sqlite.py\nOpen database successfully\n" }, { "code": null, "e": 8165, "s": 8073, "text": "Following Python program will be used to create a table in the previously created database." }, { "code": null, "e": 8556, "s": 8165, "text": "#!/usr/bin/python\n\nimport sqlite3\n\nconn = sqlite3.connect('test.db')\nprint \"Opened database successfully\";\n\nconn.execute('''CREATE TABLE COMPANY\n (ID INT PRIMARY KEY NOT NULL,\n NAME TEXT NOT NULL,\n AGE INT NOT NULL,\n ADDRESS CHAR(50),\n SALARY REAL);''')\nprint \"Table created successfully\";\n\nconn.close()" }, { "code": null, "e": 8686, "s": 8556, "text": "When the above program is executed, it will create the COMPANY table in your test.db and it will display the following messages −" }, { "code": null, "e": 8743, "s": 8686, "text": "Opened database successfully\nTable created successfully\n" }, { "code": null, "e": 8847, "s": 8743, "text": "Following Python program shows how to create records in the COMPANY table created in the above example." }, { "code": null, "e": 9505, "s": 8847, "text": "#!/usr/bin/python\n\nimport sqlite3\n\nconn = sqlite3.connect('test.db')\nprint \"Opened database successfully\";\n\nconn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (1, 'Paul', 32, 'California', 20000.00 )\");\n\nconn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (2, 'Allen', 25, 'Texas', 15000.00 )\");\n\nconn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (3, 'Teddy', 23, 'Norway', 20000.00 )\");\n\nconn.execute(\"INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \\\n VALUES (4, 'Mark', 25, 'Rich-Mond ', 65000.00 )\");\n\nconn.commit()\nprint \"Records created successfully\";\nconn.close()" }, { "code": null, "e": 9641, "s": 9505, "text": "When the above program is executed, it will create the given records in the COMPANY table and it will display the following two lines −" }, { "code": null, "e": 9700, "s": 9641, "text": "Opened database successfully\nRecords created successfully\n" }, { "code": null, "e": 9817, "s": 9700, "text": "Following Python program shows how to fetch and display records from the COMPANY table created in the above example." }, { "code": null, "e": 10183, "s": 9817, "text": "#!/usr/bin/python\n\nimport sqlite3\n\nconn = sqlite3.connect('test.db')\nprint \"Opened database successfully\";\n\ncursor = conn.execute(\"SELECT id, name, address, salary from COMPANY\")\nfor row in cursor:\n print \"ID = \", row[0]\n print \"NAME = \", row[1]\n print \"ADDRESS = \", row[2]\n print \"SALARY = \", row[3], \"\\n\"\n\nprint \"Operation done successfully\";\nconn.close()" }, { "code": null, "e": 10257, "s": 10183, "text": "When the above program is executed, it will produce the following result." 
}, { "code": null, "e": 10539, "s": 10257, "text": "Opened database successfully\nID = 1\nNAME = Paul\nADDRESS = California\nSALARY = 20000.0\n\nID = 2\nNAME = Allen\nADDRESS = Texas\nSALARY = 15000.0\n\nID = 3\nNAME = Teddy\nADDRESS = Norway\nSALARY = 20000.0\n\nID = 4\nNAME = Mark\nADDRESS = Rich-Mond\nSALARY = 65000.0\n\nOperation done successfully\n" }, { "code": null, "e": 10687, "s": 10539, "text": "Following Python code shows how to use UPDATE statement to update any record and then fetch and display the updated records from the COMPANY table." }, { "code": null, "e": 11193, "s": 10687, "text": "#!/usr/bin/python\n\nimport sqlite3\n\nconn = sqlite3.connect('test.db')\nprint \"Opened database successfully\";\n\nconn.execute(\"UPDATE COMPANY set SALARY = 25000.00 where ID = 1\")\nconn.commit()\nprint \"Total number of rows updated :\", conn.total_changes\n\ncursor = conn.execute(\"SELECT id, name, address, salary from COMPANY\")\nfor row in cursor:\n print \"ID = \", row[0]\n print \"NAME = \", row[1]\n print \"ADDRESS = \", row[2]\n print \"SALARY = \", row[3], \"\\n\"\n\nprint \"Operation done successfully\";\nconn.close()" }, { "code": null, "e": 11267, "s": 11193, "text": "When the above program is executed, it will produce the following result." }, { "code": null, "e": 11582, "s": 11267, "text": "Opened database successfully\nTotal number of rows updated : 1\nID = 1\nNAME = Paul\nADDRESS = California\nSALARY = 25000.0\n\nID = 2\nNAME = Allen\nADDRESS = Texas\nSALARY = 15000.0\n\nID = 3\nNAME = Teddy\nADDRESS = Norway\nSALARY = 20000.0\n\nID = 4\nNAME = Mark\nADDRESS = Rich-Mond\nSALARY = 65000.0\n\nOperation done successfully\n" }, { "code": null, "e": 11732, "s": 11582, "text": "Following Python code shows how to use DELETE statement to delete any record and then fetch and display the remaining records from the COMPANY table." }, { "code": null, "e": 12222, "s": 11732, "text": "#!/usr/bin/python\n\nimport sqlite3\n\nconn = sqlite3.connect('test.db')\nprint \"Opened database successfully\";\n\nconn.execute(\"DELETE from COMPANY where ID = 2;\")\nconn.commit()\nprint \"Total number of rows deleted :\", conn.total_changes\n\ncursor = conn.execute(\"SELECT id, name, address, salary from COMPANY\")\nfor row in cursor:\n print \"ID = \", row[0]\n print \"NAME = \", row[1]\n print \"ADDRESS = \", row[2]\n print \"SALARY = \", row[3], \"\\n\"\n\nprint \"Operation done successfully\";\nconn.close()" }, { "code": null, "e": 12296, "s": 12222, "text": "When the above program is executed, it will produce the following result." }, { "code": null, "e": 12557, "s": 12296, "text": "Opened database successfully\nTotal number of rows deleted : 1\nID = 1\nNAME = Paul\nADDRESS = California\nSALARY = 20000.0\n\nID = 3\nNAME = Teddy\nADDRESS = Norway\nSALARY = 20000.0\n\nID = 4\nNAME = Mark\nADDRESS = Rich-Mond\nSALARY = 65000.0\n\nOperation done successfully\n" }, { "code": null, "e": 12592, "s": 12557, "text": "\n 25 Lectures \n 4.5 hours \n" }, { "code": null, "e": 12613, "s": 12592, "text": " Sandip Bhattacharya" }, { "code": null, "e": 12646, "s": 12613, "text": "\n 17 Lectures \n 1 hours \n" }, { "code": null, "e": 12663, "s": 12646, "text": " Laurence Svekis" }, { "code": null, "e": 12694, "s": 12663, "text": "\n 5 Lectures \n 51 mins\n" }, { "code": null, "e": 12707, "s": 12694, "text": " Vinay Kumar" }, { "code": null, "e": 12714, "s": 12707, "text": " Print" }, { "code": null, "e": 12725, "s": 12714, "text": " Add Notes" } ]
How to get the name of a test method that was run in a TestNG teardown method?
TestNG supports native dependency injection. It allows you to declare additional parameters in methods; at runtime, TestNG automatically fills these parameters with the correct values. Here's the set of native dependencies in TestNG:

ITestContext
XmlTest
Method
ITestResult

These dependencies help to retrieve the name of a Test method. The name of a Test method can be retrieved before or after the execution of the test.

If the user wants to get the name of a Test method prior to its execution, then @BeforeMethod can be used to retrieve it.

On the other hand, if the user wants to know which Test method has just executed, then @AfterMethod can be used. The actual code can be written in either of these methods to retrieve the name of a Test method.

@BeforeMethod and @AfterMethod support all of these native dependencies.

In this article, we will use the Method dependency to show how to retrieve the name of a Test method. However, any of these dependencies can be used in @BeforeMethod or @AfterMethod; the only change will be in the import part, where the corresponding library should be imported as per the native dependency used.

Suppose the user wants to retrieve the name of a Test method after its execution. In this case, the code will be written inside @AfterMethod to retrieve the name of the Test method. As @AfterMethod executes each time after the @Test method, the name of the Test method will be printed after its execution.

Step 1 − Create a TestNG class NewTestngClass and write the @AfterMethod method.

Step 2 − Write the following code inside @AfterMethod

public void tearDown(Method method) {
   System.out.println("Test name: " + method.getName());
}

Note − Instead of the parameter Method, any of the remaining three native dependencies can be used, for example ITestContext, XmlTest or ITestResult.

Step 3 − Write two different @Test methods in the class NewTestngClass.

Step 4 − Create the testNG.xml as given below to run the TestNG classes.

Step 5 − Finally, run the testNG.xml or run the TestNG class directly in the IDE, or compile and run it using the command line.

Use the following code for the common TestNG class, NewTestngClass −

import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
import java.lang.reflect.Method;
public class NewTestngClass {
   @Test
   public void testCase1() {
      System.out.println("in test case 1 of NewTestngClass");
   }
   @Test
   public void testCase2() {
      System.out.println("in test case 2 of NewTestngClass");
   }
   @AfterMethod
   public void tearDown(Method method) {
      System.out.println("Test name: " + method.getName());
   }
}

This is a configuration file that is used to organize and run the TestNG test cases. It is very handy when limited tests need to be executed rather than the full suite.
<?xml version = "1.0" encoding = "UTF-8"?> <!DOCTYPE suite SYSTEM "http://testng.org/testng-1.0.dtd" > <suite name = "Suite1"> <test name = "test1"> <classes> <class name = "NewTestngClass"/> </classes> </test> </suite> in test case 1 of NewTestngClass Test name: testCase1 in test case 2 of NewTestngClass Test name: testCase2 =============================================== Suite1 Total tests run: 2, Passes: 2, Failures: 0, Skips: 0 ===============================================
[ { "code": null, "e": 1290, "s": 1062, "text": "TestNG supports native dependency injection. It allows to declare additional parameters in methods. At runtime, TestNG automatically fills these parameters with the correct values. Here's a set of native dependencies in TestNG:" }, { "code": null, "e": 1303, "s": 1290, "text": "ITestContext" }, { "code": null, "e": 1311, "s": 1303, "text": "XmlTest" }, { "code": null, "e": 1318, "s": 1311, "text": "Method" }, { "code": null, "e": 1330, "s": 1318, "text": "ITestResult" }, { "code": null, "e": 1477, "s": 1330, "text": "These dependencies help to retrieve the name of Test method. The name of a Test method can be retrieved before or after the execution of the test." }, { "code": null, "e": 1601, "s": 1477, "text": "If the user wants to get the name of a Test method prior to its execution, then @BeforeMethod can be useful to retrieve it." }, { "code": null, "e": 1725, "s": 1601, "text": "If the user wants to get the name of a Test method prior to its execution, then @BeforeMethod can be useful to retrieve it." }, { "code": null, "e": 1934, "s": 1725, "text": "On the other hand, if the user wants to know which Test method is just executed, then @AfterMethod can be used. The actual code can be written in either of these methods to retrieve the name of a Test method." }, { "code": null, "e": 2143, "s": 1934, "text": "On the other hand, if the user wants to know which Test method is just executed, then @AfterMethod can be used. The actual code can be written in either of these methods to retrieve the name of a Test method." }, { "code": null, "e": 2268, "s": 2143, "text": "@BeforeMethod and @AfterMethod support all these native dependencies. The full access of these dependencies is given below −" }, { "code": null, "e": 2577, "s": 2268, "text": "In this article, we will use Method dependency to show how to retrieve the name of a Test method. However, any of these dependencies can be used for @BeforeMethod or @AfterMethod. The only change will be in the import part where the corresponding library should be imported as per the native dependency used." }, { "code": null, "e": 2883, "s": 2577, "text": "Suppose the user wants to retrieve the name of a Test method after its execution. In this case, the code will be written inside @AfterMethod to retrieve the name of the Test method. As @AfterMethod executes each time after the @Test method, the name of the Test method will be printed after its execution." }, { "code": null, "e": 2964, "s": 2883, "text": "Step 1 − Create a TestNG class NewTestngClass and write the @AfterMethod method." }, { "code": null, "e": 3045, "s": 2964, "text": "Step 1 − Create a TestNG class NewTestngClass and write the @AfterMethod method." }, { "code": null, "e": 3099, "s": 3045, "text": "Step 2 − Write the following code inside @AfterMethod" }, { "code": null, "e": 3153, "s": 3099, "text": "Step 2 − Write the following code inside @AfterMethod" }, { "code": null, "e": 3250, "s": 3153, "text": "public void tearDown(Method method) {\n System.out.println(\"Test name: \" + method.getName());\n}" }, { "code": null, "e": 3395, "s": 3250, "text": "Note − Instead of parameter Method, any of remaining three native dependencies can be used. For example, ITestContext or XmlTest or ITestResult." }, { "code": null, "e": 3467, "s": 3395, "text": "Step 3 − Write two different @Test methods in the class NewTestngClass." }, { "code": null, "e": 3539, "s": 3467, "text": "Step 3 − Write two different @Test methods in the class NewTestngClass." 
}, { "code": null, "e": 3612, "s": 3539, "text": "Step 4 − Create the testNG.xml as given below to run the TestNG classes." }, { "code": null, "e": 3685, "s": 3612, "text": "Step 4 − Create the testNG.xml as given below to run the TestNG classes." }, { "code": null, "e": 3796, "s": 3685, "text": "Step 5 − Finally, run the testNG.xml or directly testNG class in IDE or compile and run it using command line." }, { "code": null, "e": 3907, "s": 3796, "text": "Step 5 − Finally, run the testNG.xml or directly testNG class in IDE or compile and run it using command line." }, { "code": null, "e": 3975, "s": 3907, "text": "Use the following code for the common TestNG class, NewTestngClass−" }, { "code": null, "e": 4451, "s": 3975, "text": "import org.testng.annotations.AfterMethod;\nimport org.testng.annotations.Test;\nimport java.lang.reflect.Method;\npublic class NewTestngClass {\n @Test\n public void testCase1() {\n System.out.println(\"in test case 1 of NewTestngClass\");\n }\n @Test\n public void testCase2() {\n System.out.println(\"in test case 2 of NewTestngClass\");\n }\n @AfterMethod\n public void tearDown(Method method) {\n System.out.println(\"Test name: \" + method.getName());\n }\n}" }, { "code": null, "e": 4622, "s": 4451, "text": "This is a configuration file that is used to organize and run the TestNG test cases. It is very handy when limited tests are needed to execute rather than the full suite." }, { "code": null, "e": 4870, "s": 4622, "text": "<?xml version = \"1.0\" encoding = \"UTF-8\"?>\n<!DOCTYPE suite SYSTEM \"http://testng.org/testng-1.0.dtd\" >\n\n<suite name = \"Suite1\">\n <test name = \"test1\">\n <classes>\n <class name = \"NewTestngClass\"/>\n </classes>\n </test>\n</suite>" }, { "code": null, "e": 5134, "s": 4870, "text": "in test case 1 of NewTestngClass\nTest name: testCase1\nin test case 2 of NewTestngClass\nTest name: testCase2\n===============================================\nSuite1\nTotal tests run: 2, Passes: 2, Failures: 0, Skips: 0\n===============================================" } ]
What is a Metapackage in C# ASP.NET Core?
The Microsoft.AspNetCore package is one of the packages added by many ASP.NET Core templates; it is regularly included as one of the usual project dependencies when a new ASP.NET Core project is created. It delivers many of the crucial packages needed to set up a basic ASP.NET Core application.

However, this package does not contain any actual DLLs or code itself; it merely lists a series of dependencies on other packages. By adding this one package to your project, you bring in all of those packages along with their DLLs, which is why it is called a metapackage. Specifically, the packages it lists are −

Microsoft.AspNetCore.Diagnostics
Microsoft.AspNetCore.Hosting
Microsoft.AspNetCore.Routing
Microsoft.AspNetCore.Server.IISIntegration
Microsoft.AspNetCore.Server.Kestrel
Microsoft.Extensions.Configuration.EnvironmentVariables
Microsoft.Extensions.Configuration.FileExtensions
Microsoft.Extensions.Configuration.Json
Microsoft.Extensions.Logging
Microsoft.Extensions.Logging.Console
Microsoft.Extensions.Options.ConfigurationExtensions
NETStandard.Library

The versions of these packages you receive depend on which version of the Microsoft.AspNetCore package you install. These dependencies deliver the basic libraries for setting up an ASP.NET Core server that uses the Kestrel web server and includes IIS integration.

In terms of the application itself, with this package alone you can load application settings and environment variables into configuration, use the IOptions interface, and configure logging to the console. For middleware, only the Microsoft.AspNetCore.Diagnostics package is included, which allows adding middleware such as the ExceptionHandlerMiddleware, the DeveloperExceptionPageMiddleware and the StatusCodePagesMiddleware.

The metapackage alone is usually not enough to complete an application, because it does not provide sufficient building blocks. For example, you would add the Microsoft.AspNetCore.Mvc or Microsoft.AspNetCore.MvcCore package to give the application MVC capabilities, and some other packages may be needed as well.

The idea of a metapackage is to bundle a set of packages that apply to many applications, so that you do not have to list each dependency yourself; but it cannot fully achieve that, since real applications still require additional packages. And as the number of bundled packages grows, so does the dependency graph, which limits the practical benefit of the metapackage. For example, one of the dependencies of Microsoft.AspNetCore is the NETStandard.Library package, which is itself a metapackage, so the dependencies grow further.
[ { "code": null, "e": 1169, "s": 1062, "text": "It is known that Microsoft.AspNetCore package is one of the packages added to\nmany ASP.NET Core templates." }, { "code": null, "e": 1397, "s": 1169, "text": "The Microsoft.AspNetCore package is repeatedly included as one of the usual project\ndependencies when opening a new ASP.NET Core project. It delivers many of the\ncrucial packages to position up a basic ASP.NET Core application." }, { "code": null, "e": 1688, "s": 1397, "text": "Though, this package does not contain any actual dlls or code itself, it merely contains\na series of dependencies on additional packages. By adding this package to your\nproject, you bring in all the relevant packages along with their dlls on which it\ndepends and it is called a metapackage." }, { "code": null, "e": 1730, "s": 1688, "text": "Specifically, the packages it lists are −" }, { "code": null, "e": 2185, "s": 1730, "text": "Microsoft.AspNetCore.Diagnostics\nMicrosoft.AspNetCore.Hosting\nMicrosoft.AspNetCore.Routing\nMicrosoft.AspNetCore.Server.IISIntegration\nMicrosoft.AspNetCore.Server.Kestrel\nMicrosoft.Extensions.Configuration.EnvironmentVariables\nMicrosoft.Extensions.Configuration.FileExtensions\nMicrosoft.Extensions.Configuration.Json\nMicrosoft.Extensions.Logging\nMicrosoft.Extensions.Logging.Console\nMicrosoft.Extensions.Options.ConfigurationExtensions\nNETStandard.Library" }, { "code": null, "e": 2307, "s": 2185, "text": "The versions of these packages you will receive depends on which version of the Microsoft.AspNetCore package you install." }, { "code": null, "e": 2468, "s": 2307, "text": "These dependencies deliver the primary basic libraries for setting up a basic\nASP.NET Core server that uses the Kestrel web server and includes IIS Integration." }, { "code": null, "e": 2686, "s": 2468, "text": "In terms of the application itself, with the help of this package alone you can load\napplication settings and environment variables into configuration, use the IOptions\ninterface, and configure logging to the console." }, { "code": null, "e": 2913, "s": 2686, "text": "For middleware, only the Microsoft.AspNetCore.Diagnostics package is included,\nwhich would allow adding middleware such as the ExceptionHandlerMiddleware, the\nDeveloperExceptionPageMiddleware and the StatusCodePagesMiddleware." }, { "code": null, "e": 3190, "s": 2913, "text": "To complete an application, we cannot use only Metapackage because it does not\nprovide sufficient controls but we can use Microsoft.AspNetCore.Mvc or\nMicrosoft.AspNetCore.MvcCore package to add MVC capabilities to our application,\nand also some other packages would be needed." }, { "code": null, "e": 3704, "s": 3190, "text": "The metapackage just try to use a number of packages that can be applied to many\napplications so that we don’t need to load more dependencies but it actual does not do\nthat because it requires other packages as well. Thus, if the number of packages is\nlarge then the dependencies increases which impacts the real use of metapackage. For\nexample, one of the dependencies on which the Microsoft.AspNetCore depends is the\nNETStandard.Library package, which is also a metapackage and hence the\ndependencies increases." } ]
Java Program to validate if a String contains only numbers
To validate if a String has only numbers, you can try the following code. We have used the matches() method in Java here to check whether a string contains only digits. Note that this example additionally requires the string to be longer than two characters.

public class Demo {
   public static void main(String []args) {
      String str = "978";
      System.out.println("Checking for string that has only numbers...");
      System.out.println("String: "+str);
      if(str.matches("[0-9]+") && str.length() > 2)
         System.out.println("String has only numbers!");
      else
         System.out.println("String consist of characters as well!");
   }
}

Checking for string that has only numbers...
String: 978
String has only numbers!

Let us see another example, wherein our string has numbers as well as characters.

public class Demo {
   public static void main(String []args) {
      String str = "s987jyg";
      System.out.println("Checking for string that has only numbers...");
      System.out.println("String: "+str);
      if(str.matches("[0-9]+") && str.length() > 2)
         System.out.println("String has only numbers!");
      else
         System.out.println("String consist of characters as well!");
   }
}

Checking for string that has only numbers...
String: s987jyg
String consist of characters as well!
[ { "code": null, "e": 1217, "s": 1062, "text": "To validate if a String has only numbers, you can try the following codes. We have used the matches() method in Java here to check for number in a string." }, { "code": null, "e": 1228, "s": 1217, "text": " Live Demo" }, { "code": null, "e": 1625, "s": 1228, "text": "public class Demo {\n public static void main(String []args) {\n String str = \"978\";\n System.out.println(\"Checking for string that has only numbers...\");\n System.out.println(\"String: \"+str);\n if(str.matches(\"[0-9]+\") && str.length() > 2)\n System.out.println(\"String has only numbers!\");\n else\n System.out.println(\"String consist of characters as well!\");\n }\n}" }, { "code": null, "e": 1707, "s": 1625, "text": "Checking for string that has only numbers...\nString: 978\nString has only numbers!" }, { "code": null, "e": 1789, "s": 1707, "text": "Let us see another example, wherein our string has numbers as well as characters." }, { "code": null, "e": 1800, "s": 1789, "text": " Live Demo" }, { "code": null, "e": 2201, "s": 1800, "text": "public class Demo {\n public static void main(String []args) {\n String str = \"s987jyg\";\n System.out.println(\"Checking for string that has only numbers...\");\n System.out.println(\"String: \"+str);\n if(str.matches(\"[0-9]+\") && str.length() > 2)\n System.out.println(\"String has only numbers!\");\n else\n System.out.println(\"String consist of characters as well!\");\n }\n}" }, { "code": null, "e": 2300, "s": 2201, "text": "Checking for string that has only numbers...\nString: s987jyg\nString consist of characters as well!" } ]
Perl - Sending Email
If you are working on a Linux/Unix machine, then you can simply use the sendmail utility inside your Perl program to send email. Here is a sample script that can send an email to a given email ID. Just make sure the given path for the sendmail utility is correct. This may be different for your Linux/Unix machine.

#!/usr/bin/perl

$to = 'abcd@gmail.com';
$from = 'webmaster@yourdomain.com';
$subject = 'Test Email';
$message = 'This is test email sent by Perl Script';

open(MAIL, "|/usr/sbin/sendmail -t");

# Email Header
print MAIL "To: $to\n";
print MAIL "From: $from\n";
print MAIL "Subject: $subject\n\n";
# Email Body
print MAIL $message;

close(MAIL);
print "Email Sent Successfully\n";

Actually, the above script is a client email script, which drafts the email and submits it to the email server running locally on your Linux/Unix machine. This script is not responsible for delivering the email to the actual destination, so you have to make sure an email server is properly configured and running on your machine to send email to the given email ID.

If you want to send HTML formatted email using sendmail, then you simply need to add the Content-type: text/html\n line in the header part of the email, before the blank line that ends the headers, as follows −

#!/usr/bin/perl

$to = 'abcd@gmail.com';
$from = 'webmaster@yourdomain.com';
$subject = 'Test Email';
$message = '<h1>This is test email sent by Perl Script</h1>';

open(MAIL, "|/usr/sbin/sendmail -t");

# Email Header
print MAIL "To: $to\n";
print MAIL "From: $from\n";
print MAIL "Subject: $subject\n";
print MAIL "Content-type: text/html\n\n";
# Email Body
print MAIL $message;

close(MAIL);
print "Email Sent Successfully\n";

If you are working on a Windows machine, then you will not have access to the sendmail utility. In that case, you have an alternative: write your own email client using the MIME::Lite Perl module. You can download this module from MIME-Lite-3.01.tar.gz and install it on either machine, Windows or Linux/Unix. To install it, follow these simple steps −

$tar xvfz MIME-Lite-3.01.tar.gz
$cd MIME-Lite-3.01
$perl Makefile.PL
$make
$make install

That's it, and you will have the MIME::Lite module installed on your machine. Now you are ready to send your email with the simple scripts explained below.

Now, following is a script which will take care of sending email to the given email ID −

#!/usr/bin/perl
use MIME::Lite;

$to = 'abcd@gmail.com';
$cc = 'efgh@mail.com';
$from = 'webmaster@yourdomain.com';
$subject = 'Test Email';
$message = 'This is test email sent by Perl Script';

$msg = MIME::Lite->new(
   From => $from,
   To => $to,
   Cc => $cc,
   Subject => $subject,
   Data => $message
);

$msg->send;
print "Email Sent Successfully\n";

If you want to send an HTML formatted email using MIME::Lite, you simply need to set the content type of the message to text/html.
Following is the script which will take care of sending HTML formatted email −

#!/usr/bin/perl
use MIME::Lite;

$to = 'abcd@gmail.com';
$cc = 'efgh@mail.com';
$from = 'webmaster@yourdomain.com';
$subject = 'Test Email';
$message = '<h1>This is test email sent by Perl Script</h1>';

$msg = MIME::Lite->new(
   From => $from,
   To => $to,
   Cc => $cc,
   Subject => $subject,
   Data => $message
);

$msg->attr("content-type" => "text/html");
$msg->send;
print "Email Sent Successfully\n";

If you want to send an attachment, then the following script serves the purpose −

#!/usr/bin/perl
use MIME::Lite;

$to = 'abcd@gmail.com';
$cc = 'efgh@mail.com';
$from = 'webmaster@yourdomain.com';
$subject = 'Test Email';
$message = 'This is test email sent by Perl Script';

$msg = MIME::Lite->new(
   From => $from,
   To => $to,
   Cc => $cc,
   Subject => $subject,
   Type => 'multipart/mixed'
);

# Add your text message.
$msg->attach(Type => 'text',
   Data => $message
);

# Specify your file as attachment.
$msg->attach(Type => 'image/gif',
   Path => '/tmp/logo.gif',
   Filename => 'logo.gif',
   Disposition => 'attachment'
);
$msg->send;
print "Email Sent Successfully\n";

You can attach as many files as you like in your email using the attach() method.

If your machine is not running an email server, then you can use any other email server available at a remote location. But to use another email server, you will need to have an id, its password, its URL, etc. Once you have all the required information, you simply need to provide it in the send() method as follows −

$msg->send('smtp', "smtp.myisp.net", AuthUser=>"id", AuthPass=>"password" );

You can contact your email server administrator for the information used above, and if a user id and password are not already available, your administrator can create them in minutes.
[ { "code": null, "e": 2524, "s": 2220, "text": "If you are working on Linux/Unix machine then you can simply use sendmail utility inside your Perl program to send email. Here is a sample script that can send an email to a given email ID. Just make sure the given path for sendmail utility is correct. This may be different for your Linux/Unix machine." }, { "code": null, "e": 2908, "s": 2524, "text": "#!/usr/bin/perl\n \n$to = 'abcd@gmail.com';\n$from = 'webmaster@yourdomain.com';\n$subject = 'Test Email';\n$message = 'This is test email sent by Perl Script';\n \nopen(MAIL, \"|/usr/sbin/sendmail -t\");\n \n# Email Header\nprint MAIL \"To: $to\\n\";\nprint MAIL \"From: $from\\n\";\nprint MAIL \"Subject: $subject\\n\\n\";\n# Email Body\nprint MAIL $message;\n\nclose(MAIL);\nprint \"Email Sent Successfully\\n\";" }, { "code": null, "e": 3256, "s": 2908, "text": "Actually, the above script is a client email script, which will draft email and submit to the server running locally on your Linux/Unix machine. This script will not be responsible for sending email to actual destination. So you have to make sure email server is properly configured and running on your machine to send email to the given email ID." }, { "code": null, "e": 3412, "s": 3256, "text": "If you want to send HTML formatted email using sendmail, then you simply need to add Content-type: text/html\\n in the header part of the email as follows −" }, { "code": null, "e": 3845, "s": 3412, "text": "#!/usr/bin/perl\n \n$to = 'abcd@gmail.com';\n$from = 'webmaster@yourdomain.com';\n$subject = 'Test Email';\n$message = '<h1>This is test email sent by Perl Script</h1>';\n \nopen(MAIL, \"|/usr/sbin/sendmail -t\");\n \n# Email Header\nprint MAIL \"To: $to\\n\";\nprint MAIL \"From: $from\\n\";\nprint MAIL \"Subject: $subject\\n\\n\";\nprint MAIL \"Content-type: text/html\\n\";\n# Email Body\nprint MAIL $message;\n\nclose(MAIL);\nprint \"Email Sent Successfully\\n\";" }, { "code": null, "e": 4175, "s": 3845, "text": "If you are working on windows machine, then you will not have access on sendmail utility. But you have alternate to write your own email client using MIME:Lite perl module. You can download this module from MIME-Lite-3.01.tar.gz and install it on your either machine Windows or Linux/Unix. To install it follow the simple steps −" }, { "code": null, "e": 4264, "s": 4175, "text": "$tar xvfz MIME-Lite-3.01.tar.gz\n$cd MIME-Lite-3.01\n$perl Makefile.PL\n$make\n$make install" }, { "code": null, "e": 4411, "s": 4264, "text": "That's it and you will have MIME::Lite module installed on your machine. Now you are ready to send your email with simple scripts explained below." }, { "code": null, "e": 4499, "s": 4411, "text": "Now following is a script which will take care of sending email to the given email ID −" }, { "code": null, "e": 4985, "s": 4499, "text": "#!/usr/bin/perl\nuse MIME::Lite;\n \n$to = 'abcd@gmail.com';\n$cc = 'efgh@mail.com';\n$from = 'webmaster@yourdomain.com';\n$subject = 'Test Email';\n$message = 'This is test email sent by Perl Script';\n\n$msg = MIME::Lite->new(\n From => $from,\n To => $to,\n Cc => $cc,\n Subject => $subject,\n Data => $message\n );\n \n$msg->send;\nprint \"Email Sent Successfully\\n\";" }, { "code": null, "e": 5209, "s": 4985, "text": "If you want to send HTML formatted email using sendmail, then you simply need to add Content-type: text/html\\n in the header part of the email. 
Following is the script, which will take care of sending HTML formatted email −"
  },
  {
    "code": "#!/usr/bin/perl\nuse MIME::Lite;\n \n$to = 'abcd@gmail.com';\n$cc = 'efgh@mail.com';\n$from = 'webmaster@yourdomain.com';\n$subject = 'Test Email';\n$message = '<h1>This is test email sent by Perl Script</h1>';\n\n$msg = MIME::Lite->new(\n From => $from,\n To => $to,\n Cc => $cc,\n Subject => $subject,\n Data => $message\n );\n \n$msg->attr(\"content-type\" => \"text/html\"); \n$msg->send;\nprint \"Email Sent Successfully\\n\";",
    "e": 5756,
    "s": 5209,
    "text": null
  },
  {
    "code": null,
    "e": 5834,
    "s": 5756,
    "text": "If you want to send an attachment, then the following script serves the purpose −"
  },
  {
    "code": "#!/usr/bin/perl\nuse MIME::Lite;\n \n$to = 'abcd@gmail.com';\n$cc = 'efgh@mail.com';\n$from = 'webmaster@yourdomain.com';\n$subject = 'Test Email';\n$message = 'This is test email sent by Perl Script';\n\n$msg = MIME::Lite->new(\n From => $from,\n To => $to,\n Cc => $cc,\n Subject => $subject,\n Type => 'multipart/mixed'\n );\n \n# Add your text message.\n$msg->attach(Type => 'text',\n Data => $message\n );\n \n# Specify your file as attachment.\n$msg->attach(Type => 'image/gif',\n Path => '/tmp/logo.gif',\n Filename => 'logo.gif',\n Disposition => 'attachment'\n ); \n$msg->send;\nprint \"Email Sent Successfully\\n\";",
    "e": 6687,
    "s": 5834,
    "text": null
  },
  {
    "code": null,
    "e": 6765,
    "s": 6687,
    "text": "You can attach as many files as you like in your email using the attach() method."
  },
  {
    "code": null,
    "e": 7091,
    "s": 6765,
    "text": "If your machine is not running an email server, then you can use any other email server available at a remote location. But to use any other email server you will need to have an id, its password, URL, etc. Once you have all the required information, you simply need to provide that information in the send() method as follows −"
  },
  {
    "code": null,
    "e": 7168,
    "s": 7091,
    "text": "$msg->send('smtp', \"smtp.myisp.net\", AuthUser=>\"id\", AuthPass=>\"password\" );"
  },
  {
    "code": null,
    "e": 7356,
    "s": 7168,
    "text": "You can contact your email server administrator for the information used above, and if a user id and password are not already available, your administrator can create them in minutes."
  }
]
How to make a Google Translation API using Python? - GeeksforGeeks
07 Aug, 2020

Google Translate is a free multilingual translation service, based on statistical and neural machine translation, developed by Google. It is widely used to translate complete websites or webpages from one language to another.

We will be creating a Python terminal application which will take the source language, target language and a phrase to translate, and return the translated text. We will be implementing unit testing and web scraping techniques with Selenium in Python. Web scraping is the technique of capturing required data from a website. Selenium is an industry-grade library used for web scraping and unit testing of various software. As a prerequisite, we will be requiring the following tools to be installed in our system.

Python 3.x: A version of Python 3.0 or above should be installed.

Selenium library: A Python library required for scraping websites. Copy the following statement to install Selenium on your system. Installation: python3 -m pip install selenium

Webdriver: An instance of a web browser required by Selenium to open webpages. Download the latest version of Chrome Webdriver from the link below and save it in the same folder in which your main program is. Link: https://chromedriver.chromium.org/downloads

We will divide the code section into three portions:

Setting up the Selenium and Chrome webdriver tool.
Taking input and testing for errors in input.
Translating using Google Translate.

Part 1: Setting up the Selenium tool and webdriver settings.

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import JavascriptException

# browser options
from selenium.webdriver.chrome.options import Options as ChromeOptions
chrome_op = ChromeOptions()
chrome_op.add_argument('--headless')
browser = webdriver.Chrome(executable_path ='chromedriver', options = chrome_op)

Importing the webdriver object to connect to the Chrome browser instance.
Importing the Keys library to connect basic keyboard commands to the browser instance.
Importing exception handlers for the browser instance.
Importing browser options and setting the '--headless' property to run the browser instance in the background. Comment out the "chrome_op.add_argument('--headless')" statement to bring the webdriver to the foreground.

Part 2: Taking input and testing for errors in input.

def takeInput():
    languages = {"English": 'en', "French": 'fr', "Spanish": 'es',
                 "German": 'de', "Italian": 'it'}
    print("Select a source and target language (enter codes)")
    print("Language", "    ", "Code")
    for x in languages:
        print(x, "     ", languages[x])
    print("\n\nSource: ", end ="")
    src = input()
    sflag = 0
    for x in languages:
        if(languages[x] == src and not sflag):
            sflag = 1
            break
    if(not sflag):
        print("Source code not from the list, Exiting....")
        exit()
    print("Target: ", end ="")
    trg = input()
    tflag = 0
    for x in languages:
        if(languages[x] == trg and not tflag):
            tflag = 1
            break
    if(not tflag):
        print("Target code not from the list, Exiting....")
        exit()
    if(src == trg):
        print("Source and Target cannot be same, Exiting...")
        exit()
    print("Enter the phrase: ", end ="")
    phrase = input()
    return src, trg, phrase

This is demo code, so the language codes are limited to {English, Spanish, German, Italian, French}. You can add more languages and their codes later.
Taking input for the source language and target language codes.
Checking if the codes entered are supported or not.
The source and target language codes must not be the same.
Part 3: Translating using Google Translate:

def makeCall(url, script, default):
    response = default
    try:
        browser.get(url)
        while(response == default):
            response = browser.execute_script(script)
    except JavascriptException:
        print(JavascriptException.args)
    except NoSuchElementException:
        print(NoSuchElementException.args)
    if(response != default):
        return response
    else:
        return 'Not Available'

def googleTranslate(src, trg, phrase):
    url = 'https://translate.google.co.in/#view=home&op=translate&sl=' + \
        src + '&tl=' + trg + '&text=' + phrase
    script = 'return document.getElementsByClassName("tlid-translation")[0].textContent'
    return makeCall(url, script, None)

The googleTranslate() function receives three parameters, i.e. the source code, target code and phrase. It generates the URL for the browser to request.
script contains a JavaScript statement which searches for an HTML element with class = "tlid-translation" and returns its text contents.
The makeCall() function makes a request with the URL created, executes the script when the webpage is ready and returns the fetched text.

Combining the above three parts:

if __name__ == "__main__":
    src, trg, phrase = takeInput()
    print("\nResult: ", googleTranslate(src, trg, phrase))

Paste all the parts shown above in a single .py file and execute it using Python3.

Execution: python3 <filename.py>

Output:

Input section:

If you have commented out the '--headless' property statement, then a browser window like the one below will appear:

The result will appear on the terminal window like below:

Note: This is a demo project, so the supported languages are limited. You can increase the language support by adding more language codes in the declaration.
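As noted above, the script can be taught more languages just by growing the languages dictionary inside takeInput(); nothing else changes. A minimal sketch (the three extra entries are hypothetical additions for illustration, using standard ISO 639-1 codes that Google Translate accepts):

languages = {"English": 'en', "French": 'fr', "Spanish": 'es',
             "German": 'de', "Italian": 'it',
             # hypothetical additions -- any supported ISO 639-1 code works
             "Hindi": 'hi', "Japanese": 'ja', "Portuguese": 'pt'}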
[ { "code": null, "e": 24828, "s": 24800, "text": "\n07 Aug, 2020" }, { "code": null, "e": 25056, "s": 24828, "text": "Google Translate is a free multilingual translation service, based on statistical and neural machine translation, developed by Google. It is widely used to translate complete websites or webpages from one languages to another. " }, { "code": null, "e": 25563, "s": 25056, "text": "We will be creating a python terminal application which will take the source language, target language, a phrase to translate and return translated text. We will be implementing unit testing and web scraping techniques with selenium in python. Web scrapping is a concept of capturing the required data from a website. Selenium is an industry grade library used for web-scraping and unit testing of various software. As a prerequisite, we will be requiring the following tools to be installed in our system." }, { "code": null, "e": 25629, "s": 25563, "text": "Python 3.x: A version of python 3.0 or above should be installed." }, { "code": null, "e": 25811, "s": 25629, "text": "Selenium library: A python library required for scrapping the websites. Copy the following statement to install selenium on your system.Installation: python3 -m pip install selenium" }, { "code": null, "e": 26070, "s": 25811, "text": "Webdriver: An instance of a web browser required by selenium to open webpages. Download the latest version of Chrome Webdriver from the link below and save it in the same folder in which your main program is. Link: https://chromedriver.chromium.org/downloads" }, { "code": null, "e": 26123, "s": 26070, "text": "We will divide the code section into three portions:" }, { "code": null, "e": 26174, "s": 26123, "text": "Setting up the selenium and chrome webdriver tool." }, { "code": null, "e": 26219, "s": 26174, "text": "Taking input and testing for error in input." }, { "code": null, "e": 26255, "s": 26219, "text": "Translating using Google Translate." }, { "code": null, "e": 26313, "s": 26255, "text": "Part 1: Setting the selenium tool and webdriver settings." }, { "code": "from selenium import webdriverfrom selenium.webdriver.common.keys import Keysfrom selenium.common.exceptions import NoSuchElementExceptionfrom selenium.common.exceptions import JavascriptException # local variablesfrom selenium.webdriver.chrome.options import Options as ChromeOptionschrome_op = ChromeOptions()chrome_op.add_argument('--headless')browser = webdriver.Chrome(executable_path ='chromedriver', options = chrome_op)", "e": 26742, "s": 26313, "text": null }, { "code": null, "e": 26812, "s": 26742, "text": "Importing webdriver object to connect to the chrome browser instance." }, { "code": null, "e": 26899, "s": 26812, "text": "Importing keys library to connect the basic keyboard commands to the browser instance." }, { "code": null, "e": 26950, "s": 26899, "text": "Importing exception handlers for browser instance." }, { "code": null, "e": 27153, "s": 26950, "text": "Import browser options and set ‘–headless’ property to run the browser instance in background. Comment the ”chrome_op.add_argument(‘–headless’)” statement to bring the webdriver to foreground processes." }, { "code": null, "e": 27201, "s": 27153, "text": "Part 2: Taking input and testing for in input." 
}, { "code": "def takeInput(): languages = {\"English\": 'en', \"French\": 'fr', \"Spanish\": 'es', \"German\": 'de', \"Italian\": 'it'} print(\"Select a source and target language (enter codes)\") print(\"Language\", \" \", \"Code\") for x in languages: print(x, \" \", languages[x]) print(\"\\n\\nSource: \", end =\"\") src = input() sflag = 0 for x in languages: if(languages[x] == src and not sflag): sflag = 1 break if(not sflag): print(\"Source code not from the list, Exiting....\") exit() print(\"Target: \", end =\"\") trg = input() tflag = 0 for x in languages: if(languages[x] == trg and not tflag): tflag = 1 break if(not tflag): print(\"Target code not from the list, Exiting....\") exit() if(src == trg): print(\"Source and Target cannot be same, Exiting...\") exit() print(\"Enter the phrase: \", end =\"\") phrase = input() return src, trg, phrase", "e": 28212, "s": 27201, "text": null }, { "code": null, "e": 28369, "s": 28212, "text": "This is a demo code so the languages code are kept limited to {English, Spanish, German, Italian, French}. You can add more languages and their codes later." }, { "code": null, "e": 28428, "s": 28369, "text": "Taking input for source language and target language code." }, { "code": null, "e": 28480, "s": 28428, "text": "Checking if the codes entered are supported or not." }, { "code": null, "e": 28542, "s": 28480, "text": "Source language and target language code should not be same. " }, { "code": null, "e": 28586, "s": 28542, "text": "Part 3: Translating using Google Translate:" }, { "code": "def makeCall(url, script, default): response = default try: browser.get(url) while(response == default): response = browser.execute_script(script) except JavascriptException: print(JavascriptException.args) except NoSuchElementException: print(NoSuchElementException.args) if(response != default): return response else: return 'Not Available' def googleTranslate(src, trg, phrase): url = 'https://translate.google.co.in/# view = home&op = translate&sl =' + \\ src + '&tl =' + trg+'&text ='+phrase script = 'return document.getElementsByClassName(\"tlid-translation\")[0].textContent' return makeCall(url, script, None)", "e": 29298, "s": 28586, "text": null }, { "code": null, "e": 29451, "s": 29298, "text": "googleTranslate() function receives the three parameters i.e. source code, target code and phrase. It generates the URL for the browser to request for. " }, { "code": null, "e": 29589, "s": 29451, "text": "script contains a javascript statement which searches for an HTML element with class = “tlid-translation” and returns it’s text contents." }, { "code": null, "e": 29723, "s": 29589, "text": "makeCall() function makes a request with the URL created, executes the script when the webpage is ready and returns the fetched text." }, { "code": null, "e": 29756, "s": 29723, "text": "Combining the above three parts." }, { "code": "if __name__ == \"__main__\": src, trg, phrase = takeInput() print(\"\\nResult: \", googleTranslate(src, trg, phrase))", "e": 29875, "s": 29756, "text": null }, { "code": null, "e": 29958, "s": 29875, "text": "Paste all the parts shown above in a single .py file and execute it using Python3." 
},
{ "code": null, "e": 29991, "s": 29958, "text": "Execution: python3 <filename.py>" },
{ "code": null, "e": 30000, "s": 29991, "text": "Output: " },
{ "code": null, "e": 30016, "s": 30000, "text": "Input section:" },
{ "code": null, "e": 30121, "s": 30016, "text": "If you have commented out the '--headless' property statement, then a browser window like the one below will appear:" },
{ "code": null, "e": 30179, "s": 30121, "text": "The result will appear on the terminal window like below:" },
{ "code": null, "e": 30330, "s": 30179, "text": "Note: This is a demo project, so the supported languages are limited. You can increase the language support by adding more language codes in the declaration." } ]
Training on a TPU in parallel using PyTorch XLA | by Abhishek Swain | Towards Data Science
Taken from the Kaggle TPU documentation:

TPUs are now available on Kaggle, for free. TPUs are hardware accelerators specialized in deep learning tasks. They are supported in Tensorflow 2.1 both through the Keras high-level API and, at a lower level, in models using a custom training loop.

You can use up to 30 hours per week of TPUs and up to 3h at a time in a single session.

The Kaggle documentation only mentions how to train a model on a TPU with Tensorflow, but I wanted to do it using PyTorch. PyTorch has XLA, which is what we are going to use to run our code on a TPU. Anyway, the problem I faced was that there was no single source of information about how to do it. It was all scattered all over the place!

I did quite a bit of research and found this amazing kernel by Abhishek Thakur. He explained how to train on a TPU in parallel on all of its 8 cores. He even has a YouTube video that explains training on a TPU. Check it out here: https://www.youtube.com/watch?v=2oWf4v6QEV8.

Okay, so let's begin!

First, we need to install torch xla; for that, all you need to do is copy and paste these two lines on Colab or Kaggle and run them (they also appear at the top of the kernel further below):

!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev

Next are the important imports (the ones the training code below relies on):

import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torch_xla.distributed.xla_multiprocessing as xmp

So, I used the TPU to train my model for a Kaggle competition. It's a simple one called Plant Pathology 2020. You can check it out. I am going to skip over the data preprocessing and modeling code, as that is a topic for another blog. Here, we are only concerned with running the model on a TPU. I will attach the link to the complete IPython notebook for you.

So jumping straight to the training code, I will highlight the things needed for running the model in parallel. The first important thing is a distributed sampler for our DataLoader (taken verbatim from the kernel below):

train_sampler = torch.utils.data.distributed.DistributedSampler(
    train_dataset,
    num_replicas=xm.xrt_world_size(),
    rank=xm.get_ordinal(),
    shuffle=True)

xm.xrt_world_size() retrieves the number of devices that are taking part in the replication (basically the number of cores).

xm.get_ordinal() retrieves the replication ordinal of the current process. The ordinals range from 0 to xrt_world_size()-1.

The next thing is to train the model in parallel: the traditional DataLoader has to be made into a ParallelLoader object and then passed into the training function. For this we do pl.ParallelLoader(<your-dataloader>, [device]).

The device here is device = xm.xla_device(). We are simply specifying where to send our model to run. In this case, it is a TPU, or as PyTorch likes to call it, an XLA device (if you're a PyTorch user then you can think of it as similar to torch.device('cuda') used to send the tensors to a GPU).
For parellel training we first define the distributed train & valid sampler, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object as I explained above. While passing it to training and validation function we specify this para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a parelleloader and not a dataloader (for parellel training only). With all of the specifiactions in place now you’re ready to train your model on a TPU in kaggle, not just kaggle but on colab too. I know it doesn’t quite make complete sense. But this is just a preliminary explanation for you where you can come and refer the specifics. Once you look at the complete code you will understand everything. Best of luck! as promised here is my complete Kernel for Kaggle :) Hey there, This is a kernel to teach you how to train on a tpu in kaggle on all cores parellely. This a work in progress kernel as I will keep it updating as I learn new things! # for TPU !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py !python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev !pip install efficientnet_pytorch > /dev/null !pip install albumentations > /dev/null import warnings import torch_xla import torch_xla.debug.metrics as met import torch_xla.distributed.data_parallel as dp import torch_xla.distributed.parallel_loader as pl import torch_xla.utils.utils as xu import torch_xla.core.xla_model as xm import torch_xla.distributed.xla_multiprocessing as xmp import torch_xla.test.test_utils as test_utils import warnings warnings.filterwarnings("ignore") import torch import torchvision import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, Dataset import numpy as np import pandas as pd import cv2 import matplotlib.pyplot as plt import operator from PIL import Image from sklearn.model_selection import StratifiedKFold, train_test_split from sklearn.metrics import roc_auc_score from torchvision.transforms import ToTensor, RandomHorizontalFlip, Resize from efficientnet_pytorch import EfficientNet from transformers import AdamW, get_cosine_schedule_with_warmup from albumentations import * from albumentations.pytorch import ToTensor from tqdm import tqdm import json import time BASE_DIR = '../input/plant-pathology-2020-fgvc7/' train_df = pd.read_csv(BASE_DIR +'train.csv') train_df.head() train_df['image_id'] = BASE_DIR + 'images/' + train_df['image_id'] + '.jpg' train_df['label'] = [np.argmax(label) for label in train_df[['healthy','multiple_diseases','rust','scab']].values] train_df.head() In this part let's try simple pytorch model and train it for 20 epochs straight without CV. The model of my choice is: 'EfficientNet-b5'. Parellely training on all 8 cores In this part let's try simple pytorch model and train it for 20 epochs straight without CV. The model of my choice is: 'EfficientNet-b5'. 
Training in parallel on all 8 cores

class SimpleDataset(Dataset):
    def __init__(self, image_ids_df, labels_df, transform=None):
        self.image_ids = image_ids_df
        self.labels = labels_df
        self.transform = transform

    def __getitem__(self, idx):
        image = cv2.imread(self.image_ids.values[idx])
        label = self.labels.values[idx]

        sample = {
            'image': image,
            'label': label
        }

        if self.transform:
            sample = self.transform(**sample)

        image, label = sample['image'], sample['label']

        return image, label

    def __len__(self):
        return len(self.image_ids)

image_ids = train_df['image_id']
labels = train_df['label']

I split the dataset simply using sklearn's train_test_split().

X_train, X_test, y_train, y_test = train_test_split(image_ids, labels, test_size=0.25, random_state=42)

train_transform = Compose(
    [
        Resize(224, 224),
        HorizontalFlip(p=0.5),
        VerticalFlip(p=0.5),
#         ShiftScaleRotate(rotate_limit=25.0, p=0.7),
#         OneOf(
#             [
#                 IAAEmboss(p=1),
#                 IAASharpen(p=1),
#                 Blur(p=1)
#             ],
#             p=0.5
#         ),
#         IAAPiecewiseAffine(p=0.5),
        Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True),
        ToTensor()
    ]
)

model = EfficientNet.from_pretrained('efficientnet-b5', num_classes=4)

torch.xla has its own specific requirements. You can't simply make a device using xm.xla_device() and pass the model to it. With that:

The optimizer has to be stepped with xm.optimizer_step(optimizer).
You have to save the model with xm.save(model.state_dict(), '<your-model-name>').
You have to use xm.master_print(...) to print. This you can try for yourself below.
Try to change the xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}') in the training function (train_fn()) to a simple print(f'Batch: {batch_idx}, loss: {loss.item()}'). You will see it does not get printed.

For parallel training we first define the distributed train & valid samplers, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object.

While passing it to the training and validation functions we specify para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a ParallelLoader and not a DataLoader (for parallel training only).

def _run(model):

    def train_fn(epoch, train_dataloader, optimizer, criterion, scheduler, device):

        running_loss = 0
        total = 0
        model.train()

        for batch_idx, (images, labels) in enumerate(train_dataloader, 1):

            optimizer.zero_grad()
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)

            loss = criterion(outputs, labels)

            xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}')

            loss.backward()
            xm.optimizer_step(optimizer)

            lr_scheduler.step()

    def valid_fn(epoch, valid_dataloader, criterion, device):

        running_loss = 0
        total = 0
        preds_acc = []
        labels_acc = []

        model.eval()

        for batch_idx, (images, labels) in enumerate(valid_dataloader, 1):

            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)

            loss = criterion(outputs, labels)

            xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}')

            running_loss += loss.item()

    EPOCHS = 20
    BATCH_SIZE = 64

    train_dataset = SimpleDataset(X_train, y_train, transform=train_transform)
    valid_dataset = SimpleDataset(X_test, y_test, transform=train_transform)

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset,
        num_replicas=xm.xrt_world_size(),
        rank=xm.get_ordinal(),
        shuffle=True)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(
        valid_dataset,
        num_replicas=xm.xrt_world_size(),
        rank=xm.get_ordinal(),
        shuffle=False)

    train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, sampler=train_sampler, num_workers=1)
    valid_dataloader = DataLoader(valid_dataset, batch_size=32, sampler=valid_sampler, num_workers=1)

    device = xm.xla_device()
    model = model.to(device)

    lr = 0.4 * 1e-5 * xm.xrt_world_size()
    criterion = nn.CrossEntropyLoss()

    optimizer = AdamW(model.parameters(), lr=lr)
    num_train_steps = int(len(train_dataset) / BATCH_SIZE / xm.xrt_world_size() * EPOCHS)
    xm.master_print(f'num_train_steps = {num_train_steps}, world_size={xm.xrt_world_size()}')
    num_train_steps = int(len(train_dataset) / BATCH_SIZE * EPOCHS)
    lr_scheduler = get_cosine_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=num_train_steps
    )

    train_loss = []
    valid_loss = []
    best_loss = 1

    train_begin = time.time()
    for epoch in range(EPOCHS):

        para_loader = pl.ParallelLoader(train_dataloader, [device])

        start = time.time()
        print('*'*15)
        print(f'EPOCH: {epoch+1}')
        print('*'*15)

        print('Training.....')
        train_fn(epoch=epoch+1,
                 train_dataloader=para_loader.per_device_loader(device),
                 optimizer=optimizer,
                 criterion=criterion,
                 scheduler=lr_scheduler,
                 device=device)

        with torch.no_grad():

            para_loader = pl.ParallelLoader(valid_dataloader, [device])

            print('Validating....')
            valid_fn(epoch=epoch+1,
                     valid_dataloader=para_loader.per_device_loader(device),
                     criterion=criterion,
                     device=device)
        xm.save(
            model.state_dict(),
            f'efficientnet-b0-bs-8.pt'
        )

        print(f'Epoch completed in {(time.time() - start)/60} minutes')
    print(f'Training completed in {(time.time() - train_begin)/60} minutes')

# Start training processes
def _mp_fn(rank, flags):
    torch.set_default_tensor_type('torch.FloatTensor')
    a = _run(model)

FLAGS={}
xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=8, start_method='fork')

Running in parallel on all 8 cores, my training time was 15 minutes (20 epochs) with a batch size of 64 for training and 32 for validation, as opposed to 1 hr on my local device (which has a GTX 1050 with 4 GB memory) with a batch size of 8 for both training and validation. Okay, I get it, it's not a fair comparison as we have a more powerful accelerator on Kaggle, but I am guessing you get what I am trying to say. :P

This kernel was for me to keep as a future reference, but I want to share it with all of you. You are the ones from whom I learn so much.

About the accuracy and inference: I haven't done that yet and am currently working on it. I just ran it once to see how long it takes. :P

You're free to correct me, make suggestions and tell me what I can improve on. :) Lastly, if you found it at all useful, please consider upvoting :)

Also, for more information: Torch XLA documentation

Also, here is my Kaggle kernel link: https://www.kaggle.com/abhiswain/pytorch-tpu-efficientnet-b5-tutorial-reference. If you found this useful you can upvote it! You can also go there and directly run it and see for yourself the magic. Let me tell you something, I made this to keep as a reference for myself but decided to share it. Let this be a stop for you in your journey of deep learning :)

Torch XLA documentation: Torch XLA
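One small thing worth doing before spawning all eight processes is a sanity check that XLA can actually see the accelerator. A minimal sketch, separate from the kernel above (the tensor shape and values are arbitrary, chosen only for illustration):

import torch
import torch_xla.core.xla_model as xm

dev = xm.xla_device()              # resolves to a TPU core when one is attached
t = torch.ones(2, 2, device=dev)   # allocate a tensor directly on the XLA device
print(t.device)                    # prints something like xla:1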
[ { "code": null, "e": 212, "s": 171, "text": "Taken from the Kaggle TPU documentation:" }, { "code": null, "e": 461, "s": 212, "text": "TPUs are now available on Kaggle, for free. TPUs are hardware accelerators specialized in deep learning tasks. They are supported in Tensorflow 2.1 both through the Keras high-level API and, at a lower level, in models using a custom training loop." }, { "code": null, "e": 549, "s": 461, "text": "You can use up to 30 hours per week of TPUs and up to 3h at a time in a single session." }, { "code": null, "e": 878, "s": 549, "text": "The Kaggle documentation only mentions how to train a model on a TPU with Tensorflow, but I wanted to do it using PyTorch. PyTorch has XLA which is what we are gonna use to run our code on TPU. Anyway, the problem I faced was there was no single source of information about how to do it. It was all scattered all over the place!" }, { "code": null, "e": 1149, "s": 878, "text": "I did quite a bit of research and found this amazing kernel by Abhishek Thakur. He explained how to train on a TPU parallelly on all it’s 8 cores. He even has a youtube video that explains training on a TPU. Check it out here https://www.youtube.com/watch?v=2oWf4v6QEV8." }, { "code": null, "e": 1171, "s": 1149, "text": "Okay, so let’s begin!" }, { "code": null, "e": 1300, "s": 1171, "text": "First, we need to install torch xla, for that all you need to do is copy, paste these two lines on colab or kaggle and run them:" }, { "code": null, "e": 1328, "s": 1300, "text": "Next are important imports:" }, { "code": null, "e": 1684, "s": 1328, "text": "So, I used the TPU to train my model for a Kaggle competition. It’s a simple one called: Plant Pathology 2020. You can check it out. I am going to skip over the data preprocessing, modeling code as that is a topic for another blog. Here, we are only concerned with running the model on TPU. I will attach the link to the complete Ipython notebook for you." }, { "code": null, "e": 1866, "s": 1684, "text": "So jumping straight to the training code, I will highlight the things needed for running the model parallelly. The first important thing is a distributed sampler for our Dataloader:" }, { "code": null, "e": 1991, "s": 1866, "text": "xm.xrt_world_size() retrieves the number of devices that are taking part in the replication. (basically the number of cores)" }, { "code": null, "e": 2114, "s": 1991, "text": "xm.get_ordinal() retrieves the replication ordinal of the current process. The ordinals range from 0 to xrt_world_size()-1" }, { "code": null, "e": 2336, "s": 2114, "text": "The next thing is to train the model parallelly, traditional DataLoader has to be made into a ParallelLoaderobject and then passed into the training function. For this we do, pl.ParallelLoader(<your-dataloader>, [device])" }, { "code": null, "e": 2631, "s": 2336, "text": "The device here is device = xm.xla_device() . We are simply, specifying were to send our model to run. In this case, it is a TPU or as PyTorch likes to call it and XLA device(If your’e a PyTorch user then you can think of it as similar to torch.device('cuda') used to send the tensors to a GPU)" }, { "code": null, "e": 2754, "s": 2631, "text": "Torch.xla has it’s own specific requirements. U can’t simply make a device using xm.xla_device() and pass the model to it." 
}, { "code": null, "e": 2765, "s": 2754, "text": "With that:" }, { "code": null, "e": 3444, "s": 2765, "text": "Optimizer has to stepped with xm.optimizer_step(optimizer).You have to save the model with xm.save(model.state_dict(), '<your-model-name>)You have to use xm.master_print(...) to print.For parellel training we first define the distributed train & valid sampler, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object as I explained above.While passing it to training and validation function we specify this para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a parelleloader and not a dataloader (for parellel training only)." }, { "code": null, "e": 3504, "s": 3444, "text": "Optimizer has to stepped with xm.optimizer_step(optimizer)." }, { "code": null, "e": 3584, "s": 3504, "text": "You have to save the model with xm.save(model.state_dict(), '<your-model-name>)" }, { "code": null, "e": 3631, "s": 3584, "text": "You have to use xm.master_print(...) to print." }, { "code": null, "e": 3878, "s": 3631, "text": "For parellel training we first define the distributed train & valid sampler, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object as I explained above." }, { "code": null, "e": 4127, "s": 3878, "text": "While passing it to training and validation function we specify this para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a parelleloader and not a dataloader (for parellel training only)." }, { "code": null, "e": 4532, "s": 4127, "text": "With all of the specifiactions in place now you’re ready to train your model on a TPU in kaggle, not just kaggle but on colab too. I know it doesn’t quite make complete sense. But this is just a preliminary explanation for you where you can come and refer the specifics. Once you look at the complete code you will understand everything. Best of luck! as promised here is my complete Kernel for Kaggle :)" }, { "code": null, "e": 4710, "s": 4532, "text": "Hey there, This is a kernel to teach you how to train on a tpu in kaggle on all cores parellely. This a work in progress kernel as I will keep it updating as I learn new things!" 
}, { "code": null, "e": 4909, "s": 4710, "text": "# for TPU\n!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py\n!python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev\n" }, { "code": null, "e": 4996, "s": 4909, "text": "!pip install efficientnet_pytorch > /dev/null\n!pip install albumentations > /dev/null\n" }, { "code": null, "e": 5395, "s": 4996, "text": "import warnings\nimport torch_xla\nimport torch_xla.debug.metrics as met\nimport torch_xla.distributed.data_parallel as dp\nimport torch_xla.distributed.parallel_loader as pl\nimport torch_xla.utils.utils as xu\nimport torch_xla.core.xla_model as xm\nimport torch_xla.distributed.xla_multiprocessing as xmp\nimport torch_xla.test.test_utils as test_utils\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n" }, { "code": null, "e": 6067, "s": 5395, "text": "import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, Dataset\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport matplotlib.pyplot as plt\nimport operator\nfrom PIL import Image \nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nfrom sklearn.metrics import roc_auc_score\nfrom torchvision.transforms import ToTensor, RandomHorizontalFlip, Resize\nfrom efficientnet_pytorch import EfficientNet\nfrom transformers import AdamW, get_cosine_schedule_with_warmup\nfrom albumentations import *\nfrom albumentations.pytorch import ToTensor\nfrom tqdm import tqdm\nimport json\nimport time\n" }, { "code": null, "e": 6118, "s": 6067, "text": "BASE_DIR = '../input/plant-pathology-2020-fgvc7/'\n" }, { "code": null, "e": 6165, "s": 6118, "text": "train_df = pd.read_csv(BASE_DIR +'train.csv')\n" }, { "code": null, "e": 6182, "s": 6165, "text": "train_df.head()\n" }, { "code": null, "e": 6259, "s": 6182, "text": "train_df['image_id'] = BASE_DIR + 'images/' + train_df['image_id'] + '.jpg'\n" }, { "code": null, "e": 6375, "s": 6259, "text": "train_df['label'] = [np.argmax(label) for label in train_df[['healthy','multiple_diseases','rust','scab']].values]\n" }, { "code": null, "e": 6392, "s": 6375, "text": "train_df.head()\n" }, { "code": null, "e": 6566, "s": 6392, "text": "\nIn this part let's try simple pytorch model and train it for 20 epochs straight without CV.\nThe model of my choice is: 'EfficientNet-b5'.\nParellely training on all 8 cores\n" }, { "code": null, "e": 6658, "s": 6566, "text": "In this part let's try simple pytorch model and train it for 20 epochs straight without CV." }, { "code": null, "e": 6704, "s": 6658, "text": "The model of my choice is: 'EfficientNet-b5'." 
}, { "code": null, "e": 6738, "s": 6704, "text": "Parellely training on all 8 cores" }, { "code": null, "e": 7430, "s": 6738, "text": "class SimpleDataset(Dataset):\n def __init__(self, image_ids_df, labels_df, transform=None):\n self.image_ids = image_ids_df\n self.labels = labels_df\n self.transform = transform\n \n def __getitem__(self, idx):\n image = cv2.imread(self.image_ids.values[idx])\n label = self.labels.values[idx]\n \n sample = {\n 'image': image,\n 'label': label\n }\n \n if self.transform:\n sample = self.transform(**sample)\n \n image, label = sample['image'], sample['label']\n \n return image, label\n \n def __len__(self):\n return len(self.image_ids)\n \n\n \n" }, { "code": null, "e": 7491, "s": 7430, "text": "image_ids = train_df['image_id']\nlabels = train_df['label']\n" }, { "code": null, "e": 7552, "s": 7491, "text": "I split the datset simply using sklearn's train_test_split()" }, { "code": null, "e": 7657, "s": 7552, "text": "X_train, X_test, y_train, y_test = train_test_split(image_ids, labels, test_size=0.25, random_state=42)\n" }, { "code": null, "e": 8160, "s": 7657, "text": "train_transform = Compose(\n [\n Resize(224, 224),\n HorizontalFlip(p=0.5),\n VerticalFlip(p=0.5),\n# ShiftScaleRotate(rotate_limit=25.0, p=0.7),\n# OneOf(\n# [\n# IAAEmboss(p=1),\n# IAASharpen(p=1),\n# Blur(p=1)\n# ], \n# p=0.5\n# ),\n# IAAPiecewiseAffine(p=0.5),\n Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), always_apply=True),\n ToTensor()\n ]\n)\n" }, { "code": null, "e": 8232, "s": 8160, "text": "model = EfficientNet.from_pretrained('efficientnet-b5', num_classes=4)\n" }, { "code": null, "e": 9287, "s": 8232, "text": "\ntorch.xla has it's own specific requirements. U can't simply make a device using xm.xla_device() and pass the model to it. \n\nWith that:\nOptimizer has to stepped with xm.optimizer_step(optimizer).\nYou have to save the model with xm.save(model.state_dict(), '<your-model-name>)\nYou have to use xm.master_print(...) to print. This you can try for yourself below. Try to change \nthe xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}') in the training function(train_fn()) to simple \nprint(f'Batch: {batch_idx}, loss: {loss.item()}'). You will see it deos not get printed.\nFor parellel training we first define the distributed train & valid sampler, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object \nWhile passing it to training and validation function we specify this para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a parelleloader and not a dataloader (for parellel training only).\n\n\n" }, { "code": null, "e": 10340, "s": 9287, "text": "torch.xla has it's own specific requirements. U can't simply make a device using xm.xla_device() and pass the model to it. \n\nWith that:\nOptimizer has to stepped with xm.optimizer_step(optimizer).\nYou have to save the model with xm.save(model.state_dict(), '<your-model-name>)\nYou have to use xm.master_print(...) to print. This you can try for yourself below. Try to change \nthe xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}') in the training function(train_fn()) to simple \nprint(f'Batch: {batch_idx}, loss: {loss.item()}'). 
You will see it deos not get printed.\nFor parellel training we first define the distributed train & valid sampler, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object \nWhile passing it to training and validation function we specify this para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a parelleloader and not a dataloader (for parellel training only).\n\n" }, { "code": null, "e": 11257, "s": 10340, "text": "\nOptimizer has to stepped with xm.optimizer_step(optimizer).\nYou have to save the model with xm.save(model.state_dict(), '<your-model-name>)\nYou have to use xm.master_print(...) to print. This you can try for yourself below. Try to change \nthe xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}') in the training function(train_fn()) to simple \nprint(f'Batch: {batch_idx}, loss: {loss.item()}'). You will see it deos not get printed.\nFor parellel training we first define the distributed train & valid sampler, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object \nWhile passing it to training and validation function we specify this para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a parelleloader and not a dataloader (for parellel training only).\n" }, { "code": null, "e": 11317, "s": 11257, "text": "Optimizer has to stepped with xm.optimizer_step(optimizer)." }, { "code": null, "e": 11397, "s": 11317, "text": "You have to save the model with xm.save(model.state_dict(), '<your-model-name>)" }, { "code": null, "e": 11697, "s": 11397, "text": "You have to use xm.master_print(...) to print. This you can try for yourself below. Try to change \nthe xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}') in the training function(train_fn()) to simple \nprint(f'Batch: {batch_idx}, loss: {loss.item()}'). You will see it deos not get printed." }, { "code": null, "e": 11923, "s": 11697, "text": "For parellel training we first define the distributed train & valid sampler, then we wrap the dataloaders in torch_xla.distributed.parallel_loader(<your-data-loader>) and create a torch_xla.distributed.parallel_loader object " }, { "code": null, "e": 12172, "s": 11923, "text": "While passing it to training and validation function we specify this para_loader.per_device_loader(device). This is what you will iterate over in the training function, i.e. we pass a parelleloader and not a dataloader (for parellel training only)." 
}, { "code": null, "e": 16131, "s": 12172, "text": "def _run(model):\n \n def train_fn(epoch, train_dataloader, optimizer, criterion, scheduler, device):\n\n running_loss = 0\n total = 0\n model.train()\n\n for batch_idx, (images, labels) in enumerate(train_dataloader, 1):\n\n optimizer.zero_grad()\n images = images.to(device)\n labels = labels.to(device)\n\n outputs = model(images)\n\n loss = criterion(outputs, labels)\n\n xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}')\n\n loss.backward()\n xm.optimizer_step(optimizer)\n\n lr_scheduler.step()\n\n def valid_fn(epoch, valid_dataloader, criterion, device):\n\n running_loss = 0\n total = 0\n preds_acc = []\n labels_acc = []\n\n model.eval()\n\n for batch_idx, (images, labels) in enumerate(valid_dataloader, 1):\n\n images = images.to(device)\n labels = labels.to(device)\n\n outputs = model(images)\n\n loss = criterion(outputs, labels)\n \n xm.master_print(f'Batch: {batch_idx}, loss: {loss.item()}')\n\n running_loss += loss.item()\n \n \n EPOCHS = 20\n BATCH_SIZE = 64\n \n train_dataset = SimpleDataset(X_train, y_train, transform=train_transform)\n valid_dataset = SimpleDataset(X_test, y_test, transform=train_transform)\n \n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=True)\n valid_sampler = torch.utils.data.distributed.DistributedSampler(\n valid_dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=False)\n\n train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, sampler=train_sampler, num_workers=1)\n valid_dataloader = DataLoader(valid_dataset, batch_size=32, sampler=valid_sampler, num_workers=1)\n \n device = xm.xla_device()\n model = model.to(device)\n \n lr = 0.4 * 1e-5 * xm.xrt_world_size()\n criterion = nn.CrossEntropyLoss()\n \n optimizer = AdamW(model.parameters(), lr=lr)\n num_train_steps = int(len(train_dataset) / BATCH_SIZE / xm.xrt_world_size() * EPOCHS)\n xm.master_print(f'num_train_steps = {num_train_steps}, world_size={xm.xrt_world_size()}')\n num_train_steps = int(len(train_dataset) / BATCH_SIZE * EPOCHS)\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=num_train_steps\n )\n \n train_loss = []\n valid_loss = []\n best_loss = 1\n \n train_begin = time.time()\n for epoch in range(EPOCHS):\n \n para_loader = pl.ParallelLoader(train_dataloader, [device])\n\n start = time.time()\n print('*'*15)\n print(f'EPOCH: {epoch+1}')\n print('*'*15)\n\n print('Training.....')\n train_fn(epoch=epoch+1, \n train_dataloader=para_loader.per_device_loader(device), \n optimizer=optimizer, \n criterion=criterion,\n scheduler=lr_scheduler,\n device=device)\n\n\n \n with torch.no_grad():\n \n para_loader = pl.ParallelLoader(valid_dataloader, [device])\n \n print('Validating....')\n valid_fn(epoch=epoch+1, \n valid_dataloader=para_loader.per_device_loader(device), \n criterion=criterion, \n device=device)\n xm.save(\n model.state_dict(),\n f'efficientnet-b0-bs-8.pt'\n )\n \n print(f'Epoch completed in {(time.time() - start)/60} minutes')\n print(f'Training completed in {(time.time() - train_begin)/60} minutes')\n" }, { "code": null, "e": 16333, "s": 16131, "text": "# Start training processes\ndef _mp_fn(rank, flags):\n torch.set_default_tensor_type('torch.FloatTensor')\n a = _run(model)\n\nFLAGS={}\nxmp.spawn(_mp_fn, args=(FLAGS,), nprocs=8, start_method='fork')\n" }, { "code": null, "e": 17023, "s": 16333, "text": "\nWith parellely 
running on all 8 cores, my training time was 15 minutes (20 epochs) with a batch size of 64 for training and 32 for validation, as opposed to 1 hr on my local device (which has a gtx 1050 with 4 gb memory) with a batch size of 8 for both training and validation. Okay I get it, it's not a fair comparison as we have a more powerful gpu on kaggle, but I am guessing you get what I am trying to say. :P \nThis kernel was for me to keep as a future reference, but I want to share it with all of you. You are the ones from whom I learn so much.\nAbout the accuracy and inference that I haven't done and I am currently working on. I just ran it once to see how long it takes. :P\n" }, { "code": null, "e": 17441, "s": 17023, "text": "With parellely running on all 8 cores, my training time was 15 minutes (20 epochs) with a batch size of 64 for training and 32 for validation, as opposed to 1 hr on my local device (which has a gtx 1050 with 4 gb memory) with a batch size of 8 for both training and validation. Okay I get it, it's not a fair comparison as we have a more powerful gpu on kaggle, but I am guessing you get what I am trying to say. :P " }, { "code": null, "e": 17579, "s": 17441, "text": "This kernel was for me to keep as a future reference, but I want to share it with all of you. You are the ones from whom I learn so much." }, { "code": null, "e": 17711, "s": 17579, "text": "About the accuracy and inference that I haven't done and I am currently working on. I just ran it once to see how long it takes. :P" }, { "code": null, "e": 17858, "s": 17711, "text": "You're free to correct me, make suggestions and tell me on what I can improve on. :) \nLastly if u found it any useful please consider to upvote :)" }, { "code": null, "e": 17909, "s": 17858, "text": "Also for more information: Torch XLA documentation" }, { "code": null, "e": 18146, "s": 17912, "text": "Also here is my Kaggle kernel link https://www.kaggle.com/abhiswain/pytorch-tpu-efficientnet-b5-tutorial-reference. If you found this useful you can upvote it! You can also go there and directly run it and see for yourself the magic." }, { "code": null, "e": 18307, "s": 18146, "text": "Let me tell you something, I made this to keep as a reference for myself but decided to share it. Let this be a stop for you in your journey of deep learning :)" } ]
GATE | GATE-CS-2015 (Set 2) | Question 65 - GeeksforGeeks
29 Sep, 2021

A half adder is implemented with XOR and AND gates. A full adder is implemented with two half adders and one OR gate. The propagation delay of an XOR gate is twice that of an AND/OR gate. The propagation delay of an AND/OR gate is 1.2 microseconds. A 4-bit ripple-carry binary adder is implemented by using full adders. The total propagation time of this 4-bit binary adder in microseconds is:

(A) 19.2 microseconds

Answer: (A)

Explanation: A ripple-carry adder allows adding two n-bit numbers. It uses half and full adders. (The original page shows a diagram of a ripple adder built from full adders here.)

Let us first calculate the propagation delay of a single 1-bit full adder.

The propagation delay of an n-bit full adder is (2n + 2) gate delays.

Here n = 1, so the total delay of a 1-bit full adder
is (2 + 2)*1.2 = 4.8 microseconds.

The delay of 4 full adders is = 4 * 4.8 = 19.2 microseconds.
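The same arithmetic generalizes to any adder width. Below is a minimal sketch that follows the explanation's model exactly (the function name and structure are ours, added only for illustration; the constants come from the question):

GATE_DELAY_US = 1.2  # propagation delay of one AND/OR gate, from the question

def ripple_adder_delay_us(bits):
    # Per the explanation: one 1-bit full adder costs (2*1 + 2) = 4 gate delays,
    # and the carry must ripple through each of the `bits` full adders in turn.
    per_full_adder_us = (2 * 1 + 2) * GATE_DELAY_US  # 4.8 microseconds
    return bits * per_full_adder_us

print(ripple_adder_delay_us(4))  # 19.2 -> matches option (A)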
[
{ "code": null, "e": 24049, "s": 24021, "text": "\n29 Sep, 2021" },
{ "code": null, "e": 24628, "s": 24049, "text": "A half adder is implemented with XOR and AND gates. A full adder is implemented with two half adders and one OR gate. The propagation delay of an XOR gate is twice that of an AND/OR gate. The propagation delay of an AND/OR gate is 1.2 microseconds. A 4-bit ripple-carry binary adder is implemented by using full adders. The total propagation time of this 4-bit binary adder in microseconds is (A) 19.2 microseconds. Answer: (A). Explanation: A ripple-carry adder allows adding two n-bit numbers. It uses half and full adders. The original page shows a diagram of a ripple adder built from full adders." },
{ "code": null, "e": 24911, "s": 24628, "text": "\nLet us first calculate the propagation delay of a single\n1-bit full adder.\n\nThe propagation delay of an n-bit full adder is (2n + 2)\ngate delays.\n\nHere n = 1, so the total delay of a 1-bit full adder\nis (2 + 2)*1.2 = 4.8 microseconds\n\nThe delay of 4 full adders is = 4 * 4.8 = 19.2 microseconds" } ]
ES6 - while loop
The while loop executes the instructions each time the condition specified evaluates to true. In other words, the loop evaluates the condition before the block of code is executed.

Following is the syntax for the while loop.

while (expression) {
   Statement(s) to be executed if expression is true
}

var num = 5;
var factorial = 1;
while(num >= 1) {
   factorial = factorial * num;
   num--;
}
console.log("The factorial is " + factorial);

The above code uses a while loop to calculate the factorial of the value in the variable num.

The following output is displayed on successful execution of the code.

The factorial is 120
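Because the condition is evaluated before the body, a while loop may run zero times. A small illustrative example (hypothetical values) −

var count = 0;
while(count > 0) {          // false on the very first check
   console.log("never printed");
   count--;
}
console.log("count is still " + count);   // count is still 0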
[ { "code": null, "e": 2458, "s": 2277, "text": "The while loop executes the instructions each time the condition specified evaluates to true. In other words, the loop evaluates the condition before the block of code is executed." }, { "code": null, "e": 2502, "s": 2458, "text": "Following is the syntax for the while loop." }, { "code": null, "e": 2579, "s": 2502, "text": "while (expression) {\n Statement(s) to be executed if expression is true\n}\n" }, { "code": null, "e": 2717, "s": 2579, "text": "var num = 5;\nvar factorial = 1;\nwhile(num >=1) {\n factorial = factorial * num;\n num--;\n}\nconsole.log(\"The factorial is \"+factorial);\n" }, { "code": null, "e": 2811, "s": 2717, "text": "The above code uses a while loop to calculate the factorial of the value in the variable num." }, { "code": null, "e": 2882, "s": 2811, "text": "The following output is displayed on successful execution of the code." }, { "code": null, "e": 2904, "s": 2882, "text": "The factorial is 120\n" }, { "code": null, "e": 2939, "s": 2904, "text": "\n 32 Lectures \n 3.5 hours \n" }, { "code": null, "e": 2953, "s": 2939, "text": " Sharad Kumar" }, { "code": null, "e": 2986, "s": 2953, "text": "\n 40 Lectures \n 5 hours \n" }, { "code": null, "e": 3004, "s": 2986, "text": " Richa Maheshwari" }, { "code": null, "e": 3037, "s": 3004, "text": "\n 16 Lectures \n 1 hours \n" }, { "code": null, "e": 3051, "s": 3037, "text": " Anadi Sharma" }, { "code": null, "e": 3086, "s": 3051, "text": "\n 50 Lectures \n 6.5 hours \n" }, { "code": null, "e": 3103, "s": 3086, "text": " Gowthami Swarna" }, { "code": null, "e": 3136, "s": 3103, "text": "\n 14 Lectures \n 1 hours \n" }, { "code": null, "e": 3152, "s": 3136, "text": " Deepti Trivedi" }, { "code": null, "e": 3187, "s": 3152, "text": "\n 31 Lectures \n 1.5 hours \n" }, { "code": null, "e": 3195, "s": 3187, "text": " Shweta" }, { "code": null, "e": 3202, "s": 3195, "text": " Print" }, { "code": null, "e": 3213, "s": 3202, "text": " Add Notes" } ]
Explain the difference between a table, view and synonym in SQL
Let us understand what a table, a view and a synonym in the structured query language (SQL) are.

A table is a repository of data; it is a physical entity. A table resides physically in the database.

A view is not a part of the database's physical representation. It is precompiled, so that data retrieval is faster, and it also provides a secure accessibility mechanism.

A synonym is an alternate name assigned to a table, view, sequence or program unit.

Create table employee (empID integer primary key, name varchar2(30), skill varchar2(30), salary number(20), DOB datetime).

Let's say there is a scenario where the salary must not be shown to a group of users; a view may be created to display only the allowable information.

Create view emp_some_details as (select empID,name,skill,DOB from employee);

Given below is a program for creating a table and a view in SQL for the above mentioned example (note that the date literals are quoted, so they are stored as dates rather than evaluated as arithmetic expressions) −

create table employee (empID integer primary key, name varchar2(30), skill varchar2(30), salary number(20), DOB datetime);
insert into employee values(100,'AAA','JAVA',30000,'2020-08-20');
insert into employee values(101,'BBB','PHP',35000,'2020-07-02');
select * from employee;

create view [salary_hide] as select empID,name,skill,DOB from employee;
select * from salary_hide;

You will get the following output −

100|AAA|JAVA|30000|2020-08-20
101|BBB|PHP|35000|2020-07-02
100|AAA|JAVA|2020-08-20
101|BBB|PHP|2020-07-02

The advantages of using a view are as follows −

It may access data from a table, multiple tables, views, multiple views or combinations of these.

A view connects to the data of its base table(s).

It provides a secure mechanism of data accessibility.

A view can be updated with the CREATE OR REPLACE VIEW statement. The syntax is as follows −

CREATE OR REPLACE VIEW view-name AS
SELECT column1, column2, ...
FROM table_name
WHERE condition;

To delete a view, we can use the DROP VIEW command −

DROP VIEW view-name;

Synonym is used as an alternate name assigned to a table or view. It may be used to shadow the original name and owner of the actual entity.
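The example above only covers the table and the view; a short illustrative sketch of synonym usage follows (Oracle-style syntax; note that SQLite, which apparently produced the pipe-separated output above, does not support synonyms) −

CREATE SYNONYM emp FOR employee;
SELECT * FROM emp;   -- returns the same rows as employee
DROP SYNONYM emp;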
[ { "code": null, "e": 1153, "s": 1062, "text": "Let us understand what table, view and synonym in the structured query language (SQL) are." }, { "code": null, "e": 1274, "s": 1153, "text": "A table is a repository of data, where in the table it is a physical entity. A table resides physically in the database." }, { "code": null, "e": 1447, "s": 1274, "text": "A view is not a part of the database’s physical representation. It is precompiled, so that data retrieval behaves faster and also provides a secure accessibility mechanism." }, { "code": null, "e": 1531, "s": 1447, "text": "A synonym is an alternate name assigned to a table, view, sequence or program unit." }, { "code": null, "e": 1654, "s": 1531, "text": "Create table employee (empID integer primary key, name varchar2(30), skill varchar2(30), salary number(20), DOB datetime)." }, { "code": null, "e": 1787, "s": 1654, "text": "Let’s say there is a scenario where salary is not shown to a group of users, a view may be created to display allowable information." }, { "code": null, "e": 1864, "s": 1787, "text": "Create view emp_some_details as (select empID,name,skill,DOB from employee);" }, { "code": null, "e": 1974, "s": 1864, "text": "Given below is an program for creating a table, view and synonym in the SQL for the above mentioned example −" }, { "code": null, "e": 2349, "s": 1974, "text": "create table employee (empID integer primary key, name varchar2(30), skill varchar2(30), salary number(20), DOB datetime);\ninsert into employee values(100,'AAA','JAVA',30000,2020-08-20);\ninsert into employee values(101,'BBB','PHP',35000,2020-07-02);\nselect * from employee;\n\ncreate view [salary_hide] as select empID,name,skill,DOB from employee ;\nselect * from salary_hide;" }, { "code": null, "e": 2385, "s": 2349, "text": "You will get the following output −" }, { "code": null, "e": 2467, "s": 2385, "text": "100|AAA|JAVA|30000|1992\n101|BBB|PHP|35000|2011\n100|AAA|JAVA|1992\n101|BBB|PHP|2011" }, { "code": null, "e": 2515, "s": 2467, "text": "The advantages of using a view are as follows −" }, { "code": null, "e": 2612, "s": 2515, "text": "It may access data from a table, multiple tables, view, multiple views or combinations of these." }, { "code": null, "e": 2709, "s": 2612, "text": "It may access data from a table, multiple tables, view, multiple views or combinations of these." }, { "code": null, "e": 2759, "s": 2709, "text": "A view connects to the data of its base table(s)." }, { "code": null, "e": 2809, "s": 2759, "text": "A view connects to the data of its base table(s)." }, { "code": null, "e": 2860, "s": 2809, "text": "Provides a secure mechanism of data accessibility." }, { "code": null, "e": 2911, "s": 2860, "text": "Provides a secure mechanism of data accessibility." }, { "code": null, "e": 2976, "s": 2911, "text": "A view can be updated with the CREATE OR REPLACE VIEW statement." }, { "code": null, "e": 3003, "s": 2976, "text": "The syntax is as follows −" }, { "code": null, "e": 3105, "s": 3003, "text": "sql create or replace view view-name As\nSELECT column1, column2, ...\nFROM table_name\nWHERE condition;" }, { "code": null, "e": 3152, "s": 3105, "text": "To delete view, we can use drop view command −" }, { "code": null, "e": 3173, "s": 3152, "text": "DROP view view-name;" }, { "code": null, "e": 3388, "s": 3173, "text": "Synonym is used as an alternate name assigned to a table or view. It may be used to shadow the original name and owner of the actual entity. 
It extends the reach of tables, by allowing public access to the synonym." } ]
WebSockets - Functionalities
Web Socket represents a major upgrade in the history of web communications. Before its existence, all communication between the web clients and the servers relied only on HTTP.

Web Socket enables a dynamic flow of connections that are persistent and full duplex. Full duplex refers to communication from both ends with considerably fast speed.

It is termed as a game changer because of its efficiency in overcoming all the drawbacks of existing protocols.

Importance of Web Socket for developers and architects −

Web Socket is an independent TCP-based protocol, but it is designed to support any other protocol that would traditionally run only on top of a pure TCP connection.

Web Socket is a transport layer on top of which any other protocol can run. The Web Socket API supports the ability to define sub-protocols: protocol libraries that can interpret specific protocols.

Examples of such protocols include XMPP, STOMP, and AMQP. The developers no longer have to think in terms of the HTTP request-response paradigm.

The only requirement on the browser side is to run a JavaScript library that can interpret the Web Socket handshake, and establish and maintain a Web Socket connection.

On the server side, the industry standard is to use existing protocol libraries that run on top of TCP and leverage a Web Socket Gateway.

The following diagram describes the functionalities of Web Sockets −

Web Socket connections are initiated via HTTP; HTTP servers typically interpret Web Socket handshakes as an Upgrade request.

Web Sockets can both be a complementary add-on to an existing HTTP environment and provide the required infrastructure to add web functionality. It relies on more advanced, full-duplex protocols that allow data to flow in both directions between client and server.

Web Sockets provide a connection between the web server and a client such that both the parties can start sending data.

The steps for establishing the connection of Web Socket are as follows −

The client establishes a connection through a process known as the Web Socket handshake.

The process begins with the client sending a regular HTTP request to the server.

An Upgrade header is requested. In this request, it informs the server that the request is for a Web Socket connection.

Web Socket URLs use the ws scheme. For secure Web Socket connections there is also the wss scheme, which is the equivalent of HTTPS.
A simple example of initial request headers is as follows −

GET ws://websocket.example.com/ HTTP/1.1
Origin: http://example.com
Connection: Upgrade
Host: websocket.example.com
Upgrade: websocket
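If the server supports Web Sockets, it replies with HTTP status 101 and switches protocols. A typical response would look as follows (the Sec-WebSocket-Accept value is the illustrative example from RFC 6455) −

HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=

Once the handshake succeeds, the browser exposes the connection through the standard WebSocket JavaScript API. A minimal client sketch (the URL is a placeholder) −

var socket = new WebSocket("ws://websocket.example.com/");
socket.onopen = function() {
   socket.send("Hello from the client");
};
socket.onmessage = function(event) {
   console.log("Received: " + event.data);
};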
[ { "code": null, "e": 2296, "s": 2119, "text": "Web Socket represents a major upgrade in the history of web communications. Before its existence, all communication between the web clients and the servers relied only on HTTP." }, { "code": null, "e": 2470, "s": 2296, "text": "Web Socket helps in dynamic flow of the connections that are persistent full duplex. Full duplex refers to the communication from both the ends with considerable fast speed." }, { "code": null, "e": 2582, "s": 2470, "text": "It is termed as a game changer because of its efficiency of overcoming all the drawbacks of existing protocols." }, { "code": null, "e": 2639, "s": 2582, "text": "Importance of Web Socket for developers and architects −" }, { "code": null, "e": 2804, "s": 2639, "text": "Web Socket is an independent TCP-based protocol, but it is designed to support any other protocol that would traditionally run only on top of a pure TCP connection." }, { "code": null, "e": 2969, "s": 2804, "text": "Web Socket is an independent TCP-based protocol, but it is designed to support any other protocol that would traditionally run only on top of a pure TCP connection." }, { "code": null, "e": 3168, "s": 2969, "text": "Web Socket is a transport layer on top of which any other protocol can run. The Web Socket API supports the ability to define sub-protocols: protocol libraries that can interpret specific protocols." }, { "code": null, "e": 3367, "s": 3168, "text": "Web Socket is a transport layer on top of which any other protocol can run. The Web Socket API supports the ability to define sub-protocols: protocol libraries that can interpret specific protocols." }, { "code": null, "e": 3512, "s": 3367, "text": "Examples of such protocols include XMPP, STOMP, and AMQP. The developers no longer have to think in terms of the HTTP request-response paradigm." }, { "code": null, "e": 3657, "s": 3512, "text": "Examples of such protocols include XMPP, STOMP, and AMQP. The developers no longer have to think in terms of the HTTP request-response paradigm." }, { "code": null, "e": 3822, "s": 3657, "text": "The only requirement on the browser-side is to run a JavaScript library that can interpret the Web Socket handshake, establish and maintain a Web Socket connection." }, { "code": null, "e": 3987, "s": 3822, "text": "The only requirement on the browser-side is to run a JavaScript library that can interpret the Web Socket handshake, establish and maintain a Web Socket connection." }, { "code": null, "e": 4125, "s": 3987, "text": "On the server side, the industry standard is to use existing protocol libraries that run on top of TCP and leverage a Web Socket Gateway." }, { "code": null, "e": 4263, "s": 4125, "text": "On the server side, the industry standard is to use existing protocol libraries that run on top of TCP and leverage a Web Socket Gateway." }, { "code": null, "e": 4332, "s": 4263, "text": "The following diagram describes the functionalities of Web Sockets −" }, { "code": null, "e": 4457, "s": 4332, "text": "Web Socket connections are initiated via HTTP; HTTP servers typically interpret Web Socket handshakes as an Upgrade request." }, { "code": null, "e": 4726, "s": 4457, "text": "Web Sockets can both be a complementary add-on to an existing HTTP environment and can provide the required infrastructure to add web functionality. It relies on more advanced, full duplex protocols that allow data to flow in both directions between client and server." 
}, { "code": null, "e": 4850, "s": 4726, "text": "Web Sockets provide a connection between the web server and a client such that both the parties can start sending the data." }, { "code": null, "e": 4923, "s": 4850, "text": "The steps for establishing the connection of Web Socket are as follows −" }, { "code": null, "e": 5008, "s": 4923, "text": "The client establishes a connection through a process known as Web Socket handshake." }, { "code": null, "e": 5093, "s": 5008, "text": "The client establishes a connection through a process known as Web Socket handshake." }, { "code": null, "e": 5174, "s": 5093, "text": "The process begins with the client sending a regular HTTP request to the server." }, { "code": null, "e": 5255, "s": 5174, "text": "The process begins with the client sending a regular HTTP request to the server." }, { "code": null, "e": 5369, "s": 5255, "text": "An Upgrade header is requested. In this request, it informs the server that request is for Web Socket connection." }, { "code": null, "e": 5483, "s": 5369, "text": "An Upgrade header is requested. In this request, it informs the server that request is for Web Socket connection." }, { "code": null, "e": 5607, "s": 5483, "text": "Web Socket URLs use the ws scheme. They are also used for secure Web Socket connections, which are the equivalent to HTTPs." }, { "code": null, "e": 5731, "s": 5607, "text": "Web Socket URLs use the ws scheme. They are also used for secure Web Socket connections, which are the equivalent to HTTPs." }, { "code": null, "e": 5791, "s": 5731, "text": "A simple example of initial request headers is as follows −" }, { "code": null, "e": 5926, "s": 5791, "text": "GET ws://websocket.example.com/ HTTP/1.1\nOrigin: http://example.com\nConnection: Upgrade\nHost: websocket.example.com\nUpgrade: websocket" }, { "code": null, "e": 5933, "s": 5926, "text": " Print" }, { "code": null, "e": 5944, "s": 5933, "text": " Add Notes" } ]
Regularization in R Programming - GeeksforGeeks
23 Dec, 2021

Regularization is a form of regression technique that shrinks or constrains the coefficient estimates towards zero. In this technique, a penalty is added to the various parameters of the model in order to reduce the freedom of the given model. The concept of Regularization can be broadly classified into:

Ridge Regression
Lasso Regression
Elastic Net Regression

In the R language, to perform Regularization we need a handful of packages to be installed before we start working on them. The required packages are:

glmnet package for ridge regression and lasso regression
dplyr package for data cleaning
psych package in order to compute the trace of a matrix
caret package

To install these packages we have to use install.packages() in the R console. After installing the packages successfully, we include these packages in our R script using the library() command. To implement Regularization we follow one of the three regularization techniques.

The Ridge Regression is a modified version of linear regression and is also known as L2 Regularization. Unlike linear regression, the loss function is modified in order to minimize the model's complexity, and this is done by adding a penalty term that is equivalent to the square of the value or magnitude of the coefficients. To implement Ridge Regression in R we are going to use the "glmnet" package, and the cv.glmnet() function will be used to cross-validate the ridge regression.

Example:

In this example, we will implement the ridge regression technique on the mtcars dataset for a better illustration. Our task is to predict the miles per gallon on the basis of other characteristics of the cars. We are going to use the set.seed() function to set the seed for reproducibility.
We are going to set the value of lambda in three ways:

by performing 10-fold cross-validation
based on information criteria (AIC/BIC)
as the optimal lambda according to both criteria

R

# Regularization
# Ridge Regression in R
# Load libraries, get data & set
# seed for reproducibility

set.seed(123)
library(glmnet)
library(dplyr)
library(psych)

data("mtcars")
# Center y, X will be standardized
# in the modelling function
y <- mtcars %>% select(mpg) %>% scale(center = TRUE, scale = FALSE) %>% as.matrix()
X <- mtcars %>% select(-mpg) %>% as.matrix()

# Perform 10-fold cross-validation to select lambda
lambdas_to_try <- 10^seq(-3, 5, length.out = 100)

# Setting alpha = 0 implements ridge regression
ridge_cv <- cv.glmnet(X, y, alpha = 0, lambda = lambdas_to_try,
                      standardize = TRUE, nfolds = 10)

# Plot cross-validation results
plot(ridge_cv)

# Best cross-validated lambda
lambda_cv <- ridge_cv$lambda.min

# Fit final model, get its sum of squared
# residuals and multiple R-squared
model_cv <- glmnet(X, y, alpha = 0, lambda = lambda_cv, standardize = TRUE)
y_hat_cv <- predict(model_cv, X)
ssr_cv <- t(y - y_hat_cv) %*% (y - y_hat_cv)
rsq_ridge_cv <- cor(y, y_hat_cv)^2

# selecting lambda based on the information criteria
X_scaled <- scale(X)
aic <- c()
bic <- c()
for (lambda in seq(lambdas_to_try)) {
  # Run model
  model <- glmnet(X, y, alpha = 0, lambda = lambdas_to_try[lambda], standardize = TRUE)
  # Extract coefficients and residuals (remove first
  # row for the intercept)
  betas <- as.vector((as.matrix(coef(model))[-1, ]))
  resid <- y - (X_scaled %*% betas)
  # Compute hat-matrix and degrees of freedom
  ld <- lambdas_to_try[lambda] * diag(ncol(X_scaled))
  H <- X_scaled %*% solve(t(X_scaled) %*% X_scaled + ld) %*% t(X_scaled)
  df <- tr(H)
  # Compute information criteria
  aic[lambda] <- nrow(X_scaled) * log(t(resid) %*% resid) + 2 * df
  bic[lambda] <- nrow(X_scaled) * log(t(resid) %*% resid) + 2 * df * log(nrow(X_scaled))
}

# Plot information criteria against tried values of lambdas
plot(log(lambdas_to_try), aic, col = "orange", type = "l",
     ylim = c(190, 260), ylab = "Information Criterion")
lines(log(lambdas_to_try), bic, col = "skyblue3")
legend("bottomright", lwd = 1, col = c("orange", "skyblue3"),
       legend = c("AIC", "BIC"))

# Optimal lambdas according to both criteria
lambda_aic <- lambdas_to_try[which.min(aic)]
lambda_bic <- lambdas_to_try[which.min(bic)]

# Fit final models, get their sum of
# squared residuals and multiple R-squared
model_aic <- glmnet(X, y, alpha = 0, lambda = lambda_aic, standardize = TRUE)
y_hat_aic <- predict(model_aic, X)
ssr_aic <- t(y - y_hat_aic) %*% (y - y_hat_aic)
rsq_ridge_aic <- cor(y, y_hat_aic)^2

model_bic <- glmnet(X, y, alpha = 0, lambda = lambda_bic, standardize = TRUE)
y_hat_bic <- predict(model_bic, X)
ssr_bic <- t(y - y_hat_bic) %*% (y - y_hat_bic)
rsq_ridge_bic <- cor(y, y_hat_bic)^2

# The higher the lambda, the more the
# coefficients are shrunk towards zero.
res <- glmnet(X, y, alpha = 0, lambda = lambdas_to_try, standardize = FALSE)
plot(res, xvar = "lambda")
legend("bottomright", lwd = 1, col = 1:6, legend = colnames(X), cex = .7)

Output:

Moving forward to Lasso Regression. It is also known as L1 Regression, Selection Operator, and Least Absolute Shrinkage. It is also a modified version of Linear Regression where again the loss function is modified in order to minimize the model's complexity. This is done by limiting the summation of the absolute values of the coefficients of the model. In R, we can implement the lasso regression using the same "glmnet" package as for ridge regression.
Example:

Again in this example, we are using the mtcars dataset. Here also we are going to set the lambda value like in the previous example.

R

# Regularization
# Lasso Regression
# Load libraries, get data & set
# seed for reproducibility

set.seed(123)
library(glmnet)
library(dplyr)
library(psych)

data("mtcars")
# Center y, X will be standardized in the modelling function
y <- mtcars %>% select(mpg) %>% scale(center = TRUE, scale = FALSE) %>% as.matrix()
X <- mtcars %>% select(-mpg) %>% as.matrix()

# Perform 10-fold cross-validation to select lambda
lambdas_to_try <- 10^seq(-3, 5, length.out = 100)

# Setting alpha = 1 implements lasso regression
lasso_cv <- cv.glmnet(X, y, alpha = 1, lambda = lambdas_to_try,
                      standardize = TRUE, nfolds = 10)

# Plot cross-validation results
plot(lasso_cv)

# Best cross-validated lambda
lambda_cv <- lasso_cv$lambda.min

# Fit final model, get its sum of squared
# residuals and multiple R-squared
model_cv <- glmnet(X, y, alpha = 1, lambda = lambda_cv, standardize = TRUE)
y_hat_cv <- predict(model_cv, X)
ssr_cv <- t(y - y_hat_cv) %*% (y - y_hat_cv)
rsq_lasso_cv <- cor(y, y_hat_cv)^2

# The higher the lambda, the more the
# coefficients are shrunk towards zero.
res <- glmnet(X, y, alpha = 1, lambda = lambdas_to_try, standardize = FALSE)
plot(res, xvar = "lambda")
legend("bottomright", lwd = 1, col = 1:6, legend = colnames(X), cex = .7)

Output:

If we compare the Lasso and Ridge Regression techniques we will notice that both techniques are more or less the same. But there are a few characteristics where they differ from each other.

Unlike Ridge, Lasso can set some of its parameters to zero.

In ridge, the coefficients of correlated predictors are similar, while in lasso only one of the correlated predictors' coefficients stays large and the rest tend to zero.

Ridge works well if there are many large parameters of about the same value, while lasso works well if there are only a few significant parameters and the rest are close to zero.

We shall now move on to Elastic Net Regression. Elastic Net Regression can be stated as the convex combination of lasso and ridge regression. We could work with the glmnet package here as well, but now we shall see how the package caret can be used to implement the Elastic Net Regression.
Example:

R

# Regularization
# Elastic Net Regression
library(caret)

# Set training control
train_control <- trainControl(method = "repeatedcv",
                              number = 5,
                              repeats = 5,
                              search = "random",
                              verboseIter = TRUE)

# Train the model
elastic_net_model <- train(mpg ~ .,
                           data = cbind(y, X),
                           method = "glmnet",
                           preProcess = c("center", "scale"),
                           tuneLength = 25,
                           trControl = train_control)

# Check multiple R-squared
y_hat_enet <- predict(elastic_net_model, X)
rsq_enet <- cor(y, y_hat_enet)^2

print(y_hat_enet)
print(rsq_enet)

Output:

> print(y_hat_enet)
          Mazda RX4       Mazda RX4 Wag          Datsun 710      Hornet 4 Drive   Hornet Sportabout             Valiant
         2.13185747          1.76214273          6.07598463          0.50410531         -3.15668592          0.08734383
         Duster 360           Merc 240D            Merc 230            Merc 280           Merc 280C          Merc 450SE
        -5.23690809          2.82725225          2.85570982         -0.19421572         -0.16329225         -4.37306992
         Merc 450SL         Merc 450SLC  Cadillac Fleetwood Lincoln Continental   Chrysler Imperial            Fiat 128
        -3.83132657         -3.88886320         -8.00151118         -8.29125966         -8.08243188          6.98344302
        Honda Civic      Toyota Corolla       Toyota Corona    Dodge Challenger         AMC Javelin          Camaro Z28
         8.30013895          7.74742320          3.93737683         -3.13404917         -2.56900144         -5.17326892
   Pontiac Firebird           Fiat X1-9       Porsche 914-2        Lotus Europa      Ford Pantera L        Ferrari Dino
        -4.02993835          7.36692700          5.87750517          6.69642869         -2.02711333          0.06597788
      Maserati Bora          Volvo 142E
        -5.90030273          4.83362156
> print(rsq_enet)
         [,1]
mpg 0.8485501
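Since the random search tunes both alpha and lambda, it can be worth inspecting which combination won. A small follow-up sketch (the exact values depend on the random search):

# Inspect the alpha/lambda combination picked by caret's random search
print(elastic_net_model$bestTune)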
[ { "code": null, "e": 24851, "s": 24823, "text": "\n23 Dec, 2021" }, { "code": null, "e": 25180, "s": 24851, "text": "Regularization is a form of regression technique that shrinks or regularizes or constraints the coefficient estimates towards 0 (or zero). In this technique, a penalty is added to the various parameters of the model in order to reduce the freedom of the given model. The concept of Regularization can be broadly classified into:" }, { "code": null, "e": 25197, "s": 25180, "text": "Ridge Regression" }, { "code": null, "e": 25214, "s": 25197, "text": "Lasso Regression" }, { "code": null, "e": 25237, "s": 25214, "text": "Elastic Net Regression" }, { "code": null, "e": 25388, "s": 25237, "text": "In the R language, to perform Regularization we need a handful of packages to be installed before we start working on them. The required packages are " }, { "code": null, "e": 25445, "s": 25388, "text": "glmnet package for ridge regression and lasso regression" }, { "code": null, "e": 25477, "s": 25445, "text": "dplyr package for data cleaning" }, { "code": null, "e": 25553, "s": 25477, "text": "psych package in order to perform or compute the trace function of a matrix" }, { "code": null, "e": 25567, "s": 25553, "text": "caret package" }, { "code": null, "e": 25891, "s": 25567, "text": "To install these packages we have to use the install.packages() in the R Console. After installing the packages successfully, we include these packages in our R Script using the library() command. To implement the Regularization regression technique we need to follow either of the three types of regularization techniques." }, { "code": null, "e": 26387, "s": 25891, "text": "The Ridge Regression is a modified version of linear regression and is also known as L2 Regularization. Unlike linear regression, the loss function is modified in order to minimize the model’s complexity and this is done by adding some penalty parameter which is equivalent to the square of the value or magnitude of the coefficient. Basically, to implement Ridge Regression in R we are going to use the “glmnet” package. The cv.glmnet() function will be used to determine the ridge regression. " }, { "code": null, "e": 26396, "s": 26387, "text": "Example:" }, { "code": null, "e": 26738, "s": 26396, "text": "In this example, we will implement the ridge regression technique on the mtcars dataset for a better illustration. Our task is to predict the miles per gallon on the basis of other characteristics of the cars. We are going to use the set.seed() function to set seed for reproducibility. 
We are going to set the value of lambda in three ways:" }, { "code": null, "e": 26777, "s": 26738, "text": "by performing 10 fold cross-validation" }, { "code": null, "e": 26810, "s": 26777, "text": "based on the information derived" }, { "code": null, "e": 26852, "s": 26810, "text": "optimal lambda based on both the criteria" }, { "code": null, "e": 26854, "s": 26852, "text": "R" }, { "code": "# Regularization# Ridge Regression in R# Load libraries, get data & set# seed for reproducibility set.seed(123) library(glmnet) library(dplyr) library(psych) data(\"mtcars\")# Center y, X will be standardized # in the modelling functiony <- mtcars %>% select(mpg) %>% scale(center = TRUE, scale = FALSE) %>% as.matrix()X <- mtcars %>% select(-mpg) %>% as.matrix() # Perform 10-fold cross-validation to select lambdalambdas_to_try <- 10^seq(-3, 5, length.out = 100) # Setting alpha = 0 implements ridge regressionridge_cv <- cv.glmnet(X, y, alpha = 0, lambda = lambdas_to_try, standardize = TRUE, nfolds = 10) # Plot cross-validation resultsplot(ridge_cv) # Best cross-validated lambdalambda_cv <- ridge_cv$lambda.min # Fit final model, get its sum of squared# residuals and multiple R-squaredmodel_cv <- glmnet(X, y, alpha = 0, lambda = lambda_cv, standardize = TRUE)y_hat_cv <- predict(model_cv, X)ssr_cv <- t(y - y_hat_cv) %*% (y - y_hat_cv)rsq_ridge_cv <- cor(y, y_hat_cv)^2 # selecting lambda based on the informationX_scaled <- scale(X)aic <- c()bic <- c()for (lambda in seq(lambdas_to_try)) { # Run model model <- glmnet(X, y, alpha = 0, lambda = lambdas_to_try[lambda], standardize = TRUE) # Extract coefficients and residuals (remove first # row for the intercept) betas <- as.vector((as.matrix(coef(model))[-1, ])) resid <- y - (X_scaled %*% betas) # Compute hat-matrix and degrees of freedom ld <- lambdas_to_try[lambda] * diag(ncol(X_scaled)) H <- X_scaled %*% solve(t(X_scaled) %*% X_scaled + ld) %*% t(X_scaled) df <- tr(H) # Compute information criteria aic[lambda] <- nrow(X_scaled) * log(t(resid) %*% resid) + 2 * df bic[lambda] <- nrow(X_scaled) * log(t(resid) %*% resid) + 2 * df * log(nrow(X_scaled))} # Plot information criteria against tried values of lambdasplot(log(lambdas_to_try), aic, col = \"orange\", type = \"l\", ylim = c(190, 260), ylab = \"Information Criterion\")lines(log(lambdas_to_try), bic, col = \"skyblue3\")legend(\"bottomright\", lwd = 1, col = c(\"orange\", \"skyblue3\"), legend = c(\"AIC\", \"BIC\")) # Optimal lambdas according to both criterialambda_aic <- lambdas_to_try[which.min(aic)]lambda_bic <- lambdas_to_try[which.min(bic)] # Fit final models, get their sum of # squared residuals and multiple R-squaredmodel_aic <- glmnet(X, y, alpha = 0, lambda = lambda_aic, standardize = TRUE)y_hat_aic <- predict(model_aic, X)ssr_aic <- t(y - y_hat_aic) %*% (y - y_hat_aic)rsq_ridge_aic <- cor(y, y_hat_aic)^2 model_bic <- glmnet(X, y, alpha = 0, lambda = lambda_bic, standardize = TRUE)y_hat_bic <- predict(model_bic, X)ssr_bic <- t(y - y_hat_bic) %*% (y - y_hat_bic)rsq_ridge_bic <- cor(y, y_hat_bic)^2 # The higher the lambda, the more the # coefficients are shrinked towards zero.res <- glmnet(X, y, alpha = 0, lambda = lambdas_to_try, standardize = FALSE)plot(res, xvar = \"lambda\")legend(\"bottomright\", lwd = 1, col = 1:6, legend = colnames(X), cex = .7)", "e": 30091, "s": 26854, "text": null }, { "code": null, "e": 30099, "s": 30091, "text": "Output:" }, { "code": null, "e": 30553, "s": 30099, "text": "Moving forward to Lasso Regression. 
It is also known as L1 Regression, Selection Operator, and Least Absolute Shrinkage. It is also a modified version of Linear Regression where again the loss function is modified in order to minimize the model’s complexity. This is done by limiting the summation of the absolute values of the coefficients of the model. In R, we can implement the lasso regression using the same “glmnet” package like ridge regression." }, { "code": null, "e": 30562, "s": 30553, "text": "Example:" }, { "code": null, "e": 30692, "s": 30562, "text": "Again in this example, we are using the mtcars dataset. Here also we are going to set the lambda value like the previous example." }, { "code": null, "e": 30694, "s": 30692, "text": "R" }, { "code": "# Regularization# Lasso Regression# Load libraries, get data & set # seed for reproducibility set.seed(123) library(glmnet) library(dplyr) library(psych) data(\"mtcars\")# Center y, X will be standardized in the modelling functiony <- mtcars %>% select(mpg) %>% scale(center = TRUE, scale = FALSE) %>% as.matrix()X <- mtcars %>% select(-mpg) %>% as.matrix() # Perform 10-fold cross-validation to select lambda lambdas_to_try <- 10^seq(-3, 5, length.out = 100) # Setting alpha = 1 implements lasso regressionlasso_cv <- cv.glmnet(X, y, alpha = 1, lambda = lambdas_to_try, standardize = TRUE, nfolds = 10) # Plot cross-validation resultsplot(lasso_cv) # Best cross-validated lambdalambda_cv <- lasso_cv$lambda.min # Fit final model, get its sum of squared # residuals and multiple R-squaredmodel_cv <- glmnet(X, y, alpha = 1, lambda = lambda_cv, standardize = TRUE)y_hat_cv <- predict(model_cv, X)ssr_cv <- t(y - y_hat_cv) %*% (y - y_hat_cv)rsq_lasso_cv <- cor(y, y_hat_cv)^2 # The higher the lambda, the more the # coefficients are shrinked towards zero.res <- glmnet(X, y, alpha = 1, lambda = lambdas_to_try, standardize = FALSE)plot(res, xvar = \"lambda\")legend(\"bottomright\", lwd = 1, col = 1:6, legend = colnames(X), cex = .7)", "e": 32096, "s": 30694, "text": null }, { "code": null, "e": 32104, "s": 32096, "text": "Output:" }, { "code": null, "e": 32292, "s": 32104, "text": "If we compare Lasso and Ridge Regression techniques we will notice that both the techniques are more or less the same. But there are few characteristics where they differ from each other." }, { "code": null, "e": 32352, "s": 32292, "text": "Unlike Ridge, Lasso can set some of its parameters to zero." }, { "code": null, "e": 32519, "s": 32352, "text": "In ridge the coefficient of the predictor that is correlated is similar. While in lasso only one of the coefficient of predictor is larger and the rest tends to zero." }, { "code": null, "e": 32731, "s": 32519, "text": "Ridge works well if there exist many huge or large parameters that are of the same value. While lasso works well if there exist only a small number of definite or significant parameters and rest tending to zero." }, { "code": null, "e": 33020, "s": 32731, "text": "We shall now move on to Elastic Net Regression. Elastic Net Regression can be stated as the convex combination of the lasso and ridge regression. We can work with the glmnet package here even. But now we shall see how the package caret can be used to implement the Elastic Net Regression." 
}, { "code": null, "e": 33029, "s": 33020, "text": "Example:" }, { "code": null, "e": 33031, "s": 33029, "text": "R" }, { "code": "# Regularization# Elastic Net Regressionlibrary(caret) # Set training controltrain_control <- trainControl(method = \"repeatedcv\", number = 5, repeats = 5, search = \"random\", verboseIter = TRUE) # Train the modelelastic_net_model <- train(mpg ~ ., data = cbind(y, X), method = \"glmnet\", preProcess = c(\"center\", \"scale\"), tuneLength = 25, trControl = train_control) # Check multiple R-squaredy_hat_enet <- predict(elastic_net_model, X)rsq_enet <- cor(y, y_hat_enet)^2 print(y_hat_enet)print(rsq_enet)", "e": 33781, "s": 33031, "text": null }, { "code": null, "e": 33789, "s": 33781, "text": "Output:" }, { "code": null, "e": 35148, "s": 33789, "text": "> print(y_hat_enet)\n Mazda RX4 Mazda RX4 Wag Datsun 710 Hornet 4 Drive Hornet Sportabout Valiant \n 2.13185747 1.76214273 6.07598463 0.50410531 -3.15668592 0.08734383 \n Duster 360 Merc 240D Merc 230 Merc 280 Merc 280C Merc 450SE \n -5.23690809 2.82725225 2.85570982 -0.19421572 -0.16329225 -4.37306992 \n Merc 450SL Merc 450SLC Cadillac Fleetwood Lincoln Continental Chrysler Imperial Fiat 128 \n -3.83132657 -3.88886320 -8.00151118 -8.29125966 -8.08243188 6.98344302 \n Honda Civic Toyota Corolla Toyota Corona Dodge Challenger AMC Javelin Camaro Z28 \n 8.30013895 7.74742320 3.93737683 -3.13404917 -2.56900144 -5.17326892 \n Pontiac Firebird Fiat X1-9 Porsche 914-2 Lotus Europa Ford Pantera L Ferrari Dino \n -4.02993835 7.36692700 5.87750517 6.69642869 -2.02711333 0.06597788 \n Maserati Bora Volvo 142E \n -5.90030273 4.83362156 \n> print(rsq_enet)\n [,1]\nmpg 0.8485501\n" }, { "code": null, "e": 35158, "s": 35148, "text": "nnr223442" }, { "code": null, "e": 35173, "s": 35158, "text": "R Data-science" }, { "code": null, "e": 35192, "s": 35173, "text": "R Machine-Learning" }, { "code": null, "e": 35205, "s": 35192, "text": "R regression" }, { "code": null, "e": 35216, "s": 35205, "text": "R Language" }, { "code": null, "e": 35314, "s": 35216, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 35323, "s": 35314, "text": "Comments" }, { "code": null, "e": 35336, "s": 35323, "text": "Old Comments" }, { "code": null, "e": 35388, "s": 35336, "text": "Change Color of Bars in Barchart using ggplot2 in R" }, { "code": null, "e": 35426, "s": 35388, "text": "How to Change Axis Scales in R Plots?" }, { "code": null, "e": 35461, "s": 35426, "text": "Group by function in R using Dplyr" }, { "code": null, "e": 35519, "s": 35461, "text": "How to Split Column Into Multiple Columns in R DataFrame?" }, { "code": null, "e": 35562, "s": 35519, "text": "Replace Specific Characters in String in R" }, { "code": null, "e": 35611, "s": 35562, "text": "How to filter R DataFrame by values in a column?" }, { "code": null, "e": 35661, "s": 35611, "text": "How to filter R dataframe by multiple conditions?" }, { "code": null, "e": 35678, "s": 35661, "text": "R - if statement" }, { "code": null, "e": 35715, "s": 35678, "text": "How to import an Excel File into R ?" } ]
ES6 - Array Method splice()
The splice() method changes the content of an array, adding new elements while removing old elements.

array.splice(index, howMany, [element1][, ..., elementN]);

index − Index at which to start changing the array.

howMany − An integer indicating the number of old array elements to remove. If howMany is 0, no elements are removed.

element1, ..., elementN − The elements to add to the array. If you don't specify any elements, splice simply removes the elements from the array.

Returns the extracted array based on the passed parameters.

var arr = ["orange", "mango", "banana", "sugar", "tea"];
var removed = arr.splice(2, 0, "water");
console.log("After adding 1: " + arr);
console.log("removed is: " + removed);

removed = arr.splice(3, 1);
console.log("After removing 1: " + arr);
console.log("removed is: " + removed);

On compiling, it will generate the same code in JavaScript.

After adding 1: orange,mango,water,banana,sugar,tea
removed is:
After removing 1: orange,mango,water,sugar,tea
removed is: banana
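splice() can also remove and insert in a single call, which effectively replaces elements. A small additional example (hypothetical values) −

var fruits = ["orange", "mango", "banana"];
// remove 1 element at index 1 and insert "kiwi" in its place
var out = fruits.splice(1, 1, "kiwi");
console.log(fruits);   // ["orange", "kiwi", "banana"]
console.log(out);      // ["mango"]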
[ { "code": null, "e": 2375, "s": 2277, "text": "splice() method changes the content of an array, adding new elements while removing old elements." }, { "code": null, "e": 2440, "s": 2375, "text": "array.splice(index, howMany, [element1][, ..., elementN]); \n" }, { "code": null, "e": 2492, "s": 2440, "text": "index − Index at which to start changing the array." }, { "code": null, "e": 2544, "s": 2492, "text": "index − Index at which to start changing the array." }, { "code": null, "e": 2662, "s": 2544, "text": "howMany − An integer indicating the number of old array elements to remove. If howMany is 0, no elements are removed." }, { "code": null, "e": 2780, "s": 2662, "text": "howMany − An integer indicating the number of old array elements to remove. If howMany is 0, no elements are removed." }, { "code": null, "e": 2926, "s": 2780, "text": "element1, ..., elementN − The elements to add to the array. If you don't specify any elements, splice simply removes the elements from the array." }, { "code": null, "e": 3072, "s": 2926, "text": "element1, ..., elementN − The elements to add to the array. If you don't specify any elements, splice simply removes the elements from the array." }, { "code": null, "e": 3132, "s": 3072, "text": "Returns the extracted array based on the passed parameters." }, { "code": null, "e": 3431, "s": 3132, "text": "var arr = [\"orange\", \"mango\", \"banana\", \"sugar\", \"tea\"]; \nvar removed = arr.splice(2, 0, \"water\"); \nconsole.log(\"After adding 1: \" + arr ); \nconsole.log(\"removed is: \" + removed); \n\nremoved = arr.splice(3, 1); \nconsole.log(\"After adding 1: \" + arr ); \nconsole.log(\"removed is: \" + removed); " }, { "code": null, "e": 3491, "s": 3431, "text": "On compiling, it will generate the same code in JavaScript." }, { "code": null, "e": 3625, "s": 3491, "text": "After adding 1: orange,mango,water,banana,sugar,tea \nremoved is: \nAfter adding 1: orange,mango,water,sugar,tea \nremoved is: banana \n" }, { "code": null, "e": 3660, "s": 3625, "text": "\n 32 Lectures \n 3.5 hours \n" }, { "code": null, "e": 3674, "s": 3660, "text": " Sharad Kumar" }, { "code": null, "e": 3707, "s": 3674, "text": "\n 40 Lectures \n 5 hours \n" }, { "code": null, "e": 3725, "s": 3707, "text": " Richa Maheshwari" }, { "code": null, "e": 3758, "s": 3725, "text": "\n 16 Lectures \n 1 hours \n" }, { "code": null, "e": 3772, "s": 3758, "text": " Anadi Sharma" }, { "code": null, "e": 3807, "s": 3772, "text": "\n 50 Lectures \n 6.5 hours \n" }, { "code": null, "e": 3824, "s": 3807, "text": " Gowthami Swarna" }, { "code": null, "e": 3857, "s": 3824, "text": "\n 14 Lectures \n 1 hours \n" }, { "code": null, "e": 3873, "s": 3857, "text": " Deepti Trivedi" }, { "code": null, "e": 3908, "s": 3873, "text": "\n 31 Lectures \n 1.5 hours \n" }, { "code": null, "e": 3916, "s": 3908, "text": " Shweta" }, { "code": null, "e": 3923, "s": 3916, "text": " Print" }, { "code": null, "e": 3934, "s": 3923, "text": " Add Notes" } ]
How to get away with few Labels: Label Propagation | by Dr. Robert Kübler | Towards Data Science
A classic task for us data scientists is building a classification model for some problem. In a perfect world, data samples — including their corresponding labels — are handed to us on a silver plate. We then do our machine learning tricks and mathemagic to come to some useful insights that we derived from the data. So far so good.

However, what often happens in our imperfect yet beautiful world is one of the following:

We get an extremely small dataset that is at least completely labeled. In this case, building a model can be extremely tricky. We have to use advanced feature engineering, maybe even Bayesian methods, and other tools to try to tackle this problem. Take the Overfitting Challenge on Kaggle as an example: The training set consists of 250 training examples and 200 features. Have fun.

We get enough data, but with no labels at all. Well, tough luck. Try clustering, but this does not necessarily solve your classification problem.

We get enough data, but it is only partially labeled. This is exactly what we will be talking about in this article! Keep reading.

Let us assume that we are in the third scenario from now on: The size of our dataset is quite okay, we have some thousand samples, maybe even a million. But looking at the labels, frustration sets in — only a small fraction of the data is labeled!

In this article, I will show you how you can deal with these kinds of common situations.

The easiest way to approach this setting is to transform it into something we are more familiar with. Concretely:

Throw away the unlabeled data points and train a classifier on the remaining completely labeled, but smaller dataset.

Let's analyze this approach.

Pros:
easy to understand and implement
fast transformation and faster training as fewer samples mean less computation

Cons:
the model might overfit to the remaining data
bias in the process of which data is labeled might lead to wrong decision boundaries of the model

While the arguments on the pro side should be easy to understand, let's look at a picture to understand the disadvantages better.

As humans, we can clearly see that there are two blobs. The left one should be blue, the right one should be red. There might be some overlap in the middle, but all in all, they can be separated quite well with a straight line, i.e. logistic regression or a linear SVM.

However, if we throw away the unlabeled data and fit a logistic regression, we end up with the following decision regions:

Not so great. The result is not bad because of overfitting, as logistic regression is a simple model. But the positions of the labeled data points are biased, i.e. they have some weird pattern that confuses the classifier. To be fair, if the labeled data points had been more or less in the centers of the two blobs, the logistic regression would have worked much better.
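To make the naive approach concrete, here is a minimal sketch of such a toy experiment (the blob data and the choice of four labeled points are hypothetical stand-ins, not the exact setup behind the figures):

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression

# Two blobs, but we pretend that only four points carry a label.
X, y = make_blobs(n_samples=200, centers=2, random_state=0)
# Pick two labeled points per class so that fitting is possible.
labeled_idx = np.r_[np.where(y == 0)[0][:2], np.where(y == 1)[0][:2]]

# Naive approach: drop everything unlabeled, fit on the four points.
clf = LogisticRegression()
clf.fit(X[labeled_idx], y[labeled_idx])

# The decision boundary depends entirely on where these four
# points happen to lie, so it can be badly biased.
print(clf.score(X, y))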
Of course, the overfitting problem would occur as well if we tried to throw random forests and neural networks on a training set of size four. We can conclude:

It is a bad idea to simply drop the unlabeled data.

Let us now turn to a smarter technique that allows us to incorporate not only the knowledge of the labeled data but also the features of the unlabeled data samples.

This is what people sometimes refer to as semi-supervised learning.

Label propagation is a neat idea originally introduced by Xiaojin Zhu and Zoubin Ghahramani [1] in 2002.

Important note: Here, I present a slight variation of the idea of the original paper as it is easier to explain and understand. The gist of both — and the other existing — variations is still the same.

From a very high perspective, it works in two steps: first, build a weighted graph on all samples, labeled and unlabeled, in which closer samples get higher edge weights; second, start random walks from each unlabeled node and assign it the class of the labeled nodes in which the walks end up most often.

This screams for an example. Let's assume that we have another two-dimensional dataset that consists of only five samples. There are two classes and one sample is not labeled. The samples are the nodes of our graph.

Now, let us build a complete graph, i.e. connect each node with any other node. We also annotate the edges with the distances between the nodes (=samples). You can choose any distance you like (e.g. Euclidean); it is just another hyperparameter of the algorithm.

Note: I leave out the edges between labeled samples because it keeps the visualization clear, and the algorithm does not need those anyway.

Remember that we said that closer samples should have higher weights between them? So far, it's the other way around! There are several ways to fix this, the easiest ones being: Put a minus in front of all the numbers, or (multiplicatively) invert the numbers, e.g. 4 → 1/4=0.25.

What the authors in [1] propose is using some Gaussian function, sometimes also called radial basis function (rbf):

w(x, x') = exp(−|x − x'|² / σ²)

where x and x' are samples. If two samples are really close, i.e. |x-x'| is around 0, the weight of their edge is around 1. The further they are apart, the more the weight approaches zero. σ is a hyperparameter you can play around with. Scikit-learn's default value for the related gamma parameter (gamma = 1/σ²) is 20, for example.

Anyway, let us use the multiplicative inverse for now. The graph becomes the following. This is the end of step 1.

This sounds more difficult than it actually is. Let us start in the lower white, unlabeled node, for example. To proceed, we have to define probabilities for jumping to the other unlabeled node, one of the two blue nodes and the red node. An easy way to do this is by normalizing. There are four outgoing edges with the weights 1 (leading to the blue node), 0.25 (other blue node), 0.5 (other unlabeled node), and 0.5 (red node). So, for example, let us just define the probability to jump to the red node as 0.5/(1+0.25+0.5+0.5)=2/9. Jumping to the closer blue node happens with a probability of 1/(1+0.25+0.5+0.5)=4/9. You can calculate the rest yourself.

Using these probabilities, there is a lot of theory involved in how to compute the probabilities of ending up in a blue or red node first. You can do it via Markov chains, a fascinating field within mathematics. I might even write an article about it someday, but for now, I will just supply you with the results. One can calculate the following probabilities of landing in either color:

With this result, we can say that the upper unlabeled node could belong to the red class, while the bottom one should be blue. We could also keep these probabilities as soft labels if we don't want to commit to a single class.
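For the curious, the landing probabilities can be computed as absorption probabilities of a Markov chain. A numpy sketch with partly invented weights (only the lower unlabeled node's weights are given in the text):

import numpy as np

# Hypothetical, symmetric edge weights (inverse distances) for the
# five-node toy graph: nodes 0 and 1 are unlabeled, 2 and 3 are blue,
# 4 is red. Only node 0's weights come from the text above.
W = np.array([
    [0.00, 0.50, 1.00, 0.25, 0.50],  # lower unlabeled node
    [0.50, 0.00, 0.25, 0.25, 1.00],  # upper unlabeled node (invented)
    [1.00, 0.25, 0.00, 0.00, 0.00],  # blue
    [0.25, 0.25, 0.00, 0.00, 0.00],  # blue
    [0.50, 1.00, 0.00, 0.00, 0.00],  # red
])

# Row-normalize to get jump probabilities from the unlabeled nodes.
P = W[:2] / W[:2].sum(axis=1, keepdims=True)
Q = P[:, :2]   # unlabeled -> unlabeled transitions
R = P[:, 2:]   # unlabeled -> labeled (absorbing) transitions

# Standard absorbing Markov chain result: B[i, j] is the probability
# that a walk from unlabeled node i is absorbed in labeled node j.
B = np.linalg.solve(np.eye(2) - Q, R)

p_blue = B[:, 0] + B[:, 1]
p_red = B[:, 2]
print(p_blue, p_red)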
This is probably also what you have expected intuitively, which speaks for this method. Let us now see this method in action!

Using label propagation is easy, yet again thanks to scikit-learn! In the following snippet I

load all libraries and the MNIST dataset,
mask around 90% of the labels with a -1, the expected input for a missing label, and then
use label propagation to restore the labels that I just masked.

Since we know the real labels, in this case, we can even assess the performance on the masked set. Note, however, that usually, we cannot do this.

import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import classification_report
from sklearn.semi_supervised import LabelPropagation

np.random.seed(0)

X, y_true = load_digits(return_X_y=True)
n = len(y_true)
mask = np.random.choice(range(n), 9*n//10, replace=False)

y_missing = y_true.copy()
y_missing[mask] = -1  # -1 indicates a missing label

lp = LabelPropagation(gamma=.25)  # rbf is the default kernel, gamma = 1/σ²!
lp.fit(X, y_missing)  # run the algorithm we described above

print(classification_report(y_true[mask], lp.transduction_[mask]))

The output is the following:

              precision    recall  f1-score   support

           0       0.98      0.99      0.98       161
           1       0.90      0.99      0.94       163
           2       1.00      0.96      0.98       159
           3       0.90      0.95      0.92       168
           4       0.98      0.97      0.97       159
           5       0.95      0.97      0.96       161
           6       0.99      0.98      0.98       166
           7       0.99      0.98      0.98       159
           8       0.91      0.85      0.88       160
           9       0.95      0.88      0.91       161

    accuracy                           0.95      1617
   macro avg       0.95      0.95      0.95      1617
weighted avg       0.95      0.95      0.95      1617

An accuracy of 95%, what else can I say? It's amazing! The algorithm merely had access to labels for 10% of the data, yet it managed to label the other samples correctly in nearly all cases. When I saw this example in a different form on the scikit-learn page first, that's when I became a believer. And I bet that this might be useful for your everyday work, too! Sure, it's not 100% correct, but it is a valid option if labeling thousands or even millions of samples yourself is not where you want to go.

As a final official act, let me point you to some interesting details.

If you think about it, label propagation feels a bit k-nearest neighbor-ish, doesn't it? Imagine that you trained a KNN classifier. During prediction time, a new point with no label comes in. You scan your whole training dataset and pick the closest points from there. The closer the points are to the new point, the more important they are.

It's the same for label propagation, as we have seen. The closer two samples x, x' are, the larger the weight of the edge between them in the graph, and the higher the probability that you jump from x to x' and vice versa.

The parallels are there, yet label propagation is a bit more complicated than k-nearest neighbors. Label propagation considers numerous unlabeled samples at once, and they help each other propagating the correct labels everywhere in the graph / dataset. In the case of KNN, each sample is on its own. Therefore, label propagation is a smarter algorithm in a sense. Although we should not compare apples and pears, as we say in Germany: both algorithms solve different problems.

Imagine that you have a dataset consisting of 1,000,000 samples. The graph that is created in the course of label propagation then has around 1,000,000 * (1,000,000 - 1) / 2 = 499,999,500,000 edges.
If you store the weights of these edges as 64-bit floats, that would be 4 TB already. Too much for your RAM, too slow to handle when writing it to disk. Be aware that this is how I explained the algorithm and that this is also the default behavior of scikit-learn's LabelPropagation.

What you can do in these cases is to build an incomplete graph. Instead of connecting every sample node with every other node, connect it with its k nearest neighbors. (There it is again.) In this case, there are only k * 1,000,000 edges, and for a small value such as k=7, this is still easy to handle. You can use this approach in scikit-learn via setting kernel='knn' and then playing around with the n_neighbors parameter as well.

In this article, we have examined the problem of having a dataset with only a small portion of labeled data. We have established that throwing away the unlabeled data points can end in a disaster and that smarter approaches are needed, one of them being label propagation.

This algorithm works by building a graph where the samples from the dataset are the nodes and there is an edge between each pair of samples. For an unlabeled sample, start a random walk from there and see in which class of the labeled samples you end up most of the time.

We have then seen that this approach can work extremely well, demonstrated with an MNIST example with only 10% labeled data. The accuracy was a whopping 95%, which is not perfect, but better than the alternative: labeling the remaining 90%, or 1617 samples, by hand.

But wait... labeling the remaining dataset by hand is actually not the only alternative. Another path that we did not speak about today is active learning. Whatever the case, this is a story for another time.

[1] Xiaojin Zhu and Zoubin Ghahramani, Learning from labeled and unlabeled data with label propagation (2002), Technical Report CMU-CALD-02-107, Carnegie Mellon University

[more] The scikit-learn label propagation user guide

I hope that you learned something new, interesting, and useful today. Thanks for reading!

As the last point, if you

want to support me in writing more about machine learning and
plan to get a Medium subscription anyway,

why not do it via this link? This would help me a lot! 😊

To be transparent, the price for you does not change, but about half of the subscription fees go directly to me. Thanks a lot, if you consider supporting me!

If you have any questions, write me on LinkedIn!
In this article, we have examined the problem of having a dataset with only a small portion of labeled data. We have established that throwing away the unlabeled data points can end in a disaster and that smarter approaches are needed, one of them being label propagation.

This algorithm works by building a graph where the samples from the dataset are the nodes and there is an edge between each pair of samples. For an unlabeled sample, start a random walk from there and see in which class of the labeled samples you end up most of the time.

We have then seen that this approach can work extremely well, demonstrated with an MNIST example with only 10% labeled data. The accuracy was a whopping 95%, which is not perfect, but better than the alternative: labeling the remaining 90%, or 1617 samples, by hand.

But wait... labeling the remaining dataset by hand is actually not the only alternative. Another path that we did not speak about today is active learning.

Whatever the case, this is a story for another time.

[1] Xiaojin Zhu and Zoubin Ghahramani, Learning from labeled and unlabeled data with label propagation (2002), Technical Report CMU-CALD-02-107, Carnegie Mellon University

[more] The scikit-learn label propagation user guide

I hope that you learned something new, interesting, and useful today. Thanks for reading!

As the last point, if you

- want to support me in writing more about machine learning and
- plan to get a Medium subscription anyway,

why not do it via this link? This would help me a lot! 😊

To be transparent, the price for you does not change, but about half of the subscription fees go directly to me.

Thanks a lot, if you consider supporting me!

If you have any questions, write me on LinkedIn!
ML basics: Loan prediction. The complete Data Science pipeline on a... | by Tariq Massaoudi | Towards Data Science
Dream Housing Finance company deals in all home loans. They have a presence across all urban, semi-urban and rural areas. Customers first apply for a home loan, after which the company validates the customer's eligibility for the loan.

The company wants to automate the loan eligibility process (real time) based on customer details provided while filling the online application form. These details are Gender, Marital Status, Education, Number of Dependents, Income, Loan Amount, Credit History and others. To automate this process, they have given a problem to identify the customer segments that are eligible for a loan amount so that they can specifically target these customers.

It's a classification problem: given information about the application, we have to predict whether the applicant will be able to pay the loan or not.

We'll start with exploratory data analysis, then preprocessing, and finally we'll test different models such as logistic regression and decision trees.

The data consists of the following columns:

Loan_ID: Unique Loan ID
Gender: Male/Female
Married: Applicant married (Y/N)
Dependents: Number of dependents
Education: Applicant education (Graduate/Under Graduate)
Self_Employed: Self employed (Y/N)
ApplicantIncome: Applicant income
CoapplicantIncome: Coapplicant income
LoanAmount: Loan amount in thousands of dollars
Loan_Amount_Term: Term of loan in months
Credit_History: Credit history meets guidelines (yes or no)
Property_Area: Urban/Semi Urban/Rural
Loan_Status: Loan approved (Y/N), this is the target variable

We'll be using seaborn for visualisation and pandas for data manipulation. You can download the dataset from here: https://datahack.analyticsvidhya.com/contest/practice-problem-loan-prediction-iii/

We'll import the necessary libraries and load the data:

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
%matplotlib inline
import numpy as np

train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")

We can look at a few top rows using the head function:

train.head()

We can see that there's some missing data; we can further explore this using the pandas describe function:

train.describe()

Some variables have missing values that we'll have to deal with, and there also seem to be some outliers for the applicant income, coapplicant income and loan amount. We also see that about 84% of applicants have a credit history, because the mean of the Credit_History field is 0.84 and it is either 1 (for having a credit history) or 0 (for not).

It would be interesting to study the distribution of the numerical variables, mainly the applicant income and the loan amount. To do this we'll use seaborn for visualization:

sns.distplot(train.ApplicantIncome, kde=False)

The distribution is skewed and we can notice quite a few outliers.

Since LoanAmount has missing values, we can't plot it directly. One solution is to drop the missing value rows and then plot it; we can do this using the dropna function:

sns.distplot(train.LoanAmount.dropna(), kde=False)

People with better education should normally have a higher income; we can check that by plotting the education level against the income:

sns.boxplot(x='Education', y='ApplicantIncome', data=train)

The distributions are quite similar, but we can see that the graduates have more outliers, which means that the people with huge incomes are most likely well educated.

Another interesting variable is credit history. To check how it affects the loan status, we can turn it into binary and then calculate its mean for each value of credit history.
A value close to 1 indicates a high loan success rate:

# turn loan status into binary
modified = train.copy()  # work on a copy so the original labels stay untouched
modified['Loan_Status'] = train['Loan_Status'].apply(lambda x: 0 if x == "N" else 1)

# calculate the mean
modified.groupby('Credit_History').mean()['Loan_Status']

OUT:
Credit_History
0.0    0.078652
1.0    0.795789
Name: Loan_Status, dtype: float64

People with a credit history are way more likely to pay their loan: 0.07 vs 0.79. This means that credit history will be an influential variable in our model.

The first thing to do is to deal with the missing values. Let's check first how many there are for each variable:

train.apply(lambda x: sum(x.isnull()), axis=0)

OUT:
Loan_ID               0
Gender               13
Married               3
Dependents           15
Education             0
Self_Employed        32
ApplicantIncome       0
CoapplicantIncome     0
LoanAmount           22
Loan_Amount_Term     14
Credit_History       50
Property_Area         0
Loan_Status           0
dtype: int64

For numerical values a good solution is to fill missing values with the mean; for categorical ones we can fill them with the mode (the value with the highest frequency):

# categorical
train['Gender'].fillna(train['Gender'].mode()[0], inplace=True)
train['Married'].fillna(train['Married'].mode()[0], inplace=True)
train['Dependents'].fillna(train['Dependents'].mode()[0], inplace=True)
train['Loan_Amount_Term'].fillna(train['Loan_Amount_Term'].mode()[0], inplace=True)
train['Credit_History'].fillna(train['Credit_History'].mode()[0], inplace=True)
train['Self_Employed'].fillna(train['Self_Employed'].mode()[0], inplace=True)

# numerical
train['LoanAmount'].fillna(train['LoanAmount'].mean(), inplace=True)

Next we have to handle the outliers. One solution is just to remove them, but we can also log transform them to nullify their effect, which is the approach that we went for here. Some people might have a low income but a strong CoapplicantIncome, so a good idea is to combine them in a TotalIncome column:

train['LoanAmount_log'] = np.log(train['LoanAmount'])
train['TotalIncome'] = train['ApplicantIncome'] + train['CoapplicantIncome']
train['TotalIncome_log'] = np.log(train['TotalIncome'])

Plotting the histogram of the loan amount log, we can see that it's close to a normal distribution!

We're gonna use sklearn for our models. Before doing that, we need to turn all the categorical variables into numbers. We'll do that using the LabelEncoder in sklearn:

from sklearn.preprocessing import LabelEncoder

category = ['Gender', 'Married', 'Dependents', 'Education',
            'Self_Employed', 'Property_Area', 'Loan_Status']
encoder = LabelEncoder()
for i in category:
    train[i] = encoder.fit_transform(train[i])

train.dtypes

OUT:
Loan_ID               object
Gender                 int64
Married                int64
Dependents             int64
Education              int64
Self_Employed          int64
ApplicantIncome        int64
CoapplicantIncome    float64
LoanAmount           float64
Loan_Amount_Term     float64
Credit_History       float64
Property_Area          int64
Loan_Status            int64
LoanAmount_log       float64
TotalIncome          float64
TotalIncome_log      float64
dtype: object

Now all our variables have become numbers that our models can understand.

To try out different models, we'll create a function that takes in a model, fits it and measures the accuracy, which means using the model on the train set and measuring the error on the same set. We'll also use a technique called k-fold cross-validation, which randomly splits the data into train and test sets, trains the model using the train set and validates it with the test set; it repeats this k times, hence the name k-fold, and takes the average error. The latter method gives a better idea of how the model performs in real life.
# Import the models
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold  # for k-fold cross-validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics

def classification_model(model, data, predictors, outcome):
    # Fit the model:
    model.fit(data[predictors], data[outcome])

    # Make predictions on the training set:
    predictions = model.predict(data[predictors])

    # Print the training accuracy
    accuracy = metrics.accuracy_score(data[outcome], predictions)
    print("Accuracy : %s" % "{0:.3%}".format(accuracy))

    # Perform k-fold cross-validation with 5 folds
    kf = KFold(n_splits=5)
    error = []
    for train_index, test_index in kf.split(data):
        # Filter the training data
        train_predictors = data[predictors].iloc[train_index, :]

        # The target we're using to train the algorithm
        train_target = data[outcome].iloc[train_index]

        # Train the algorithm using the predictors and target
        model.fit(train_predictors, train_target)

        # Record the score from each cross-validation run
        error.append(model.score(data[predictors].iloc[test_index, :],
                                 data[outcome].iloc[test_index]))

    print("Cross-Validation Score : %s" % "{0:.3%}".format(np.mean(error)))

Now we can test different models. We'll start with logistic regression:

outcome_var = 'Loan_Status'
model = LogisticRegression()
predictor_var = ['Credit_History', 'Education', 'Married', 'Self_Employed', 'Property_Area']
classification_model(model, train, predictor_var, outcome_var)

OUT:
Accuracy : 80.945%
Cross-Validation Score : 80.946%

We'll now try a decision tree, which should give us a more accurate result:

model = DecisionTreeClassifier()
predictor_var = ['Credit_History', 'Gender', 'Married', 'Education']
classification_model(model, train, predictor_var, outcome_var)

OUT:
Accuracy : 80.945%
Cross-Validation Score : 78.179%

We've got the same score on accuracy but a worse score in cross-validation; a more complex model doesn't always mean a better score.

Finally, we'll try random forests:

model = RandomForestClassifier(n_estimators=100)
predictor_var = ['Gender', 'Married', 'Dependents', 'Education', 'Self_Employed',
                 'Loan_Amount_Term', 'Credit_History', 'Property_Area',
                 'LoanAmount_log', 'TotalIncome_log']
classification_model(model, train, predictor_var, outcome_var)

OUT:
Accuracy : 100.000%
Cross-Validation Score : 78.015%

The model is giving us a perfect score on accuracy but a low score in cross-validation; this is a good example of overfitting. The model is having a hard time generalizing since it's fitting perfectly to the train set.

Solutions to this include: reducing the number of predictors or tuning the model parameters.
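As a quick, hedged illustration of both fixes at once, we can constrain the trees and keep only a few strong predictors; the parameter values below are illustrative starting points, not tuned ones:

# Shallower trees with larger leaves overfit less,
# and a smaller predictor set reduces variance further.
model = RandomForestClassifier(n_estimators=100, max_depth=5,
                               min_samples_leaf=20, random_state=0)
predictor_var = ['Credit_History', 'TotalIncome_log', 'LoanAmount_log']
classification_model(model, train, predictor_var, outcome_var)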
We've gone through a good portion of the data science pipeline in this article, namely EDA, preprocessing and modeling, and we've used essential classification models such as logistic regression, decision trees and random forests. It would be interesting to learn more about the backbone logic behind these algorithms, and also to tackle the data scraping and deployment phases. We'll try to do that in the next articles.
Visualizing protein interaction networks in Python | by Ford Combs | Towards Data Science
Protein interaction data is incredibly important. It describes the interplay between the biomolecules encoded by genes. It allows us to understand the complexities of cellular function and even predict potential therapeutics. There are many databases that contain protein interaction data, but STRING is one of the best. It contains 3,123,056,667 total interactions at the moment of this writing. These interactions involve over 20,000,000 proteins in over 5,000 organisms, combining to form vast interaction networks. In this article, I'm going to show you how to download protein interaction data from the STRING API and how to create graphs and visualizations of that data using NetworkX.

### The required libraries and packages ###
import networkx as nx
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

Our data set will contain pairwise interactions for a handful of proteins that are involved in serotonin pathways. There are a number of ways these interactions can be found in STRING. We could go to the STRING website with a list of proteins and use the multiple proteins search, but here we want to do everything using Python. The requests library allows us to easily use HTTP requests to gather the data. The STRING documentation gives details about using the REST API and has lots of examples, but the basic call for gathering network information for multiple proteins looks like this:

https://string-db.org/api/[output-format]/network?identifiers=[your_identifiers]&[optional_parameters]

The details about output-format, identifiers, and optional parameters can all be found in the documentation. We will use the tsv (tab separated values) output format, a list of protein identifiers separated by %0d's, and one optional parameter: species. Converting the list of proteins into the correct URL therefore boils down to joining the identifiers with %0d and appending the species parameter.

The list of proteins was selected from a larger set of human proteins related to serotonin. One thing to note is the &species=9606 optional parameter: 9606 is the species number for humans. You can select different species by using the organism search. Next, the data can be converted to a pandas data frame which will be used to create the network.

NetworkX includes several classes of graph. In this case, we will build an undirected, weighted graph. The graph is undirected because the interaction between protein A and protein B is the same as the interaction between protein B and protein A. The graph is weighted because the edges will have weights based on the interaction score.

The interactions data frame has one row for each interaction that contains the two interacting proteins and the score of the interaction. That data can then be used to build the graph with NetworkX by adding one weighted edge per row. Each node represents a protein, each edge represents an interaction between two proteins, and each edge is weighted by the score.

Here is some basic information about the graph using nx.info(G):

Name: Protein Interaction Graph
Type: Graph
Number of nodes: 20
Number of edges: 128
Average degree: 12.8000

The graph contains 20 nodes (proteins) with 128 edges (interactions). The degree of a node is the number of edges connected to that node. In this graph, the average degree is 12.8. We can create a simple visualization of this graph with nx.draw, using nx.spring_layout, one of NetworkX's node positioning algorithms, to place the nodes.
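Putting these steps together, a minimal sketch could look like this. The shortened protein list and variable names here are illustrative assumptions (the full analysis used 20 serotonin-related proteins), and the column names follow the STRING tsv output:

import io

import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import requests

# A few serotonin-related human proteins (abbreviated, illustrative list)
proteins = ['HTR1A', 'HTR2A', 'SLC6A4', 'TPH1', 'TPH2', 'MAOA']

# Build the API call: identifiers joined by %0d, species 9606 = human
url = ('https://string-db.org/api/tsv/network?identifiers='
       + '%0d'.join(proteins) + '&species=9606')

# Fetch the tsv response and load it into a data frame
interactions = pd.read_csv(io.StringIO(requests.get(url).text), sep='\t')

# One weighted edge per interaction row
G = nx.Graph(name='Protein Interaction Graph')
for _, row in interactions.iterrows():
    G.add_edge(row['preferredName_A'], row['preferredName_B'],
               weight=float(row['score']))

print(nx.info(G))

# Simple force-directed drawing
pos = nx.spring_layout(G)
nx.draw(G, pos, with_labels=True, node_color='lightblue', edge_color='gray')
plt.show()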
The simple visualization can be enhanced to include more information about the graph. I already mentioned degree, which is the number of edges connected to a node. Another measure we can include is betweenness centrality. Each pair of nodes in the graph is connected by a shortest path. In an unweighted graph, the shortest path consists of the smallest set of edges that connect two nodes. In a weighted graph, like we have here, the shortest path consists of the set of edges with the smallest sum of weights. The betweenness centrality of a given node is the fraction of all those shortest paths that pass through that node.

We can use the degree and betweenness centrality of each node to determine its color and size, and the edge weights to determine the color and width of the edges. These mappings yield a much more informative picture. The color range goes from dark purple to bright yellow. The yellower the node, the higher the degree. The larger the node, the greater the betweenness centrality. The yellower and wider the edge, the greater the interaction score.

One final useful visualization is the minimum spanning tree, which is the subset of edges that connects all the nodes with the lowest possible total weight.
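A hedged sketch of this styling and of the minimum spanning tree step, reusing G and the imports from the sketch above; the colormap and the size scaling factors are illustrative choices:

# Node styling: color by degree, size by betweenness centrality
degrees = dict(G.degree())
betweenness = nx.betweenness_centrality(G, weight='weight')
node_colors = [degrees[n] for n in G.nodes()]
node_sizes = [3000 * betweenness[n] + 100 for n in G.nodes()]

# Edge styling: color and width by interaction score
edge_weights = [G[u][v]['weight'] for u, v in G.edges()]
edge_widths = [3 * w for w in edge_weights]

pos = nx.spring_layout(G)
nx.draw(G, pos, with_labels=True,
        node_color=node_colors, node_size=node_sizes, cmap=cm.viridis,
        edge_color=edge_weights, width=edge_widths, edge_cmap=cm.viridis)
plt.show()

# The minimum spanning tree keeps the lowest-weight set of edges
# that still connects every node.
mst = nx.minimum_spanning_tree(G)
nx.draw(mst, nx.spring_layout(mst), with_labels=True)
plt.show()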
Research in the field of data visualization has repeatedly shown that humans are particularly good at finding patterns in visual data. It takes less time and less brain power to understand a graph than to gather the same information from a table. Visualizations like those described here are great tools for examining and demonstrating any kind of network data. I hope you enjoyed this tutorial and gained some useful techniques that you can employ on your own projects!
Impala - Alter Table
The Alter table statement in Impala is used to perform changes on a given table. Using this statement, we can add, delete, or modify columns in an existing table and we can also rename it.

This chapter explains various types of alter statements with syntax and examples. First of all, assume that we have a table named customers in the my_db database in Impala, with the following data:

ID    NAME       AGE    ADDRESS     SALARY
---   --------   ----   ---------   ------
1     Ramesh     32     Ahmedabad   20000
2     Khilan     25     Delhi       15000
3     Hardik     27     Bhopal      40000
4     Chaitali   25     Mumbai      35000
5     kaushik    23     Kota        30000
6     Komal      22     Mp          32000

And, if you get the list of tables in the database my_db, you can find the customers table in it as shown below.

[quickstart.cloudera:21000] > show tables;

Query: show tables
+-----------+
| name      |
+-----------+
| customers |
| employee  |
| student   |
| student1  |
+-----------+

The basic syntax of ALTER TABLE to rename an existing table is as follows −

ALTER TABLE [old_db_name.]old_table_name RENAME TO [new_db_name.]new_table_name

Following is an example of changing the name of the table using the alter statement. Here we are changing the name of the table customers to users.

[quickstart.cloudera:21000] > ALTER TABLE my_db.customers RENAME TO my_db.users;

After executing the above query, Impala changes the name of the table as required, displaying the following message.

Query: alter TABLE my_db.customers RENAME TO my_db.users

You can verify the list of tables in the current database using the show tables statement. You can find the table named users instead of customers.

Query: show tables
+----------+
| name     |
+----------+
| employee |
| student  |
| student1 |
| users    |
+----------+
Fetched 4 row(s) in 0.10s

The basic syntax of ALTER TABLE to add columns to an existing table is as follows −

ALTER TABLE name ADD COLUMNS (col_spec[, col_spec ...])

The following query is an example demonstrating how to add columns to an existing table. Here we are adding two columns, account_no and phone_no (both of bigint data type), to the users table.

[quickstart.cloudera:21000] > ALTER TABLE users ADD COLUMNS (account_no BIGINT, phone_no BIGINT);

On executing the above query, it will add the specified columns to the table named users, displaying the following message.

Query: alter TABLE users ADD COLUMNS (account_no BIGINT, phone_no BIGINT)

If you verify the schema of the table users, you can find the newly added columns in it as shown below.

[quickstart.cloudera:21000] > describe users;

Query: describe users
+------------+--------+---------+
| name       | type   | comment |
+------------+--------+---------+
| id         | int    |         |
| name       | string |         |
| age        | int    |         |
| address    | string |         |
| salary     | bigint |         |
| account_no | bigint |         |
| phone_no   | bigint |         |
+------------+--------+---------+
Fetched 7 row(s) in 0.20s

The basic syntax of ALTER TABLE to DROP COLUMN in an existing table is as follows −

ALTER TABLE name DROP [COLUMN] column_name

The following query is an example of deleting columns from an existing table. Here we are deleting the column named account_no.

[quickstart.cloudera:21000] > ALTER TABLE users DROP account_no;

On executing the above query, Impala deletes the column named account_no, displaying the following message.

Query: alter TABLE users DROP account_no

If you verify the schema of the table users, you cannot find the column named account_no since it was deleted.
[quickstart.cloudera:21000] > describe users;

Query: describe users
+----------+--------+---------+
| name     | type   | comment |
+----------+--------+---------+
| id       | int    |         |
| name     | string |         |
| age      | int    |         |
| address  | string |         |
| salary   | bigint |         |
| phone_no | bigint |         |
+----------+--------+---------+
Fetched 6 row(s) in 0.11s

The basic syntax of ALTER TABLE to change the name and datatype of a column in an existing table is as follows −

ALTER TABLE name CHANGE column_name new_name new_type

Following is an example of changing the name and datatype of a column using the alter statement. Here we are changing the name of the column phone_no to e_mail and its data type to string.

[quickstart.cloudera:21000] > ALTER TABLE users CHANGE phone_no e_mail string;

On executing the above query, Impala makes the specified changes, displaying the following message.

Query: alter TABLE users CHANGE phone_no e_mail string

You can verify the metadata of the table users using the describe statement. You can observe that Impala has done the required changes to the specified column.

[quickstart.cloudera:21000] > describe users;

Query: describe users
+----------+--------+---------+
| name     | type   | comment |
+----------+--------+---------+
| id       | int    |         |
| name     | string |         |
| age      | int    |         |
| address  | string |         |
| salary   | bigint |         |
| e_mail   | string |         |
+----------+--------+---------+
Fetched 6 row(s) in 0.11s

Open the Impala Query editor, type the alter statement in it, and click on the execute button.

On executing the above query, it will change the name of the table customers to users. In the same way, we can execute all the alter queries.
BigDecimal toString() Method in Java with Examples - GeeksforGeeks
05 Apr, 2019
The java.math.BigDecimal.toString() method is used to represent the BigDecimal on which it is called in String form, using scientific notation if an exponent is needed. This is done by the following steps:
A standard canonical string form of the BigDecimal is created by converting the absolute value of the unscaled value of the BigDecimal to base ten using the characters ‘0’ through ‘9’ with no leading zeros, except when the value is 0, in which case the single character ‘0’ is used.
Next, an adjusted exponent is calculated as the negated scale plus one less than the number of digits in the converted unscaled value. That is, -scale + (ulength - 1), where ulength is the length of the absolute value of the unscaled value in decimal digits (its precision).
An exponent in character form is then suffixed to the converted unscaled value (perhaps with an inserted decimal point). This comprises the letter ‘E’ followed immediately by the adjusted exponent converted to character form.
Finally, the entire string is prefixed by a minus sign character ‘-‘ if the unscaled value is less than zero. No sign character is prefixed if the unscaled value is zero or positive.
Syntax:
public String toString()
Parameter: This method does not accept any parameters.
Return value: This method returns the String representation of this BigDecimal number.
Overrides: This method overrides the java.lang.Object.toString() method of the Object class.
The following programs illustrate the use of the toString() method in Java.
Example 1: Converting a BigDecimal into a String without scientific notation
// Java program to demonstrate
// toString() method of BigDecimal

import java.math.*;

class GFG {
    public static void main(String[] args)
    {

        // Creating a BigDecimal object
        BigDecimal b;

        // Object of String to hold the number
        String input = "012345678901234567"
                       + "8901234567890123"
                       + "4567890123456789"
                       + "0123456789012345"
                       + "6789012345678901"
                       + "2345678901234567"
                       + "8901234567890123"
                       + "4567890123456789"
                       + "0123456789012345"
                       + "6789012345678901"
                       + "2345678901234567"
                       + "8901234567890123"
                       + "4567890123456789"
                       + "0123456789012345"
                       + "6789012345678901"
                       + "2345678901234567"
                       + "8901234567890123"
                       + "4554324324362432"
                       + "7674637264783264"
                       + "7832678463726478"
                       + "3264736274673864"
                       + "7364732463546354"
                       + "6354632564532645"
                       + "6325463546536453"
                       + "6546325463546534"
                       + "6325465345326456"
                       + "4635463263453264"
                       + "654632498739473";

        // Converting to BigDecimal
        b = new BigDecimal(input);

        // Apply toString() method
        String s = b.toString();

        // Print the result
        System.out.println(s);
    }
}
Output:
1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234554324324362432767463726478326478326784637264783264736274673864736473246354635463546325645326456325463546536453654632546354653463254653453264564635463263453264654632498739473
Example 2: Converting a BigDecimal into a String with scientific notation
// Java program to demonstrate
// toString() method of BigDecimal

import java.math.*;

class GFG {
    public static void main(String[] args)
    {

        // Create a BigDecimal object
        BigDecimal a;

        // Create a String object
        String s;

        // Set precision to 5
        MathContext mc = new MathContext(5);

        a = new BigDecimal("4536785E4", mc);

        // apply toString() method
        s = a.toString();

        // print the result
        System.out.println(s);
    }
}
Output:
4.5368E+10
Reference:
https://docs.oracle.com/en/java/javase/12/docs/api/java.base/java/math/BigDecimal.html#toString()
[ { "code": null, "e": 24015, "s": 23987, "text": "\n05 Apr, 2019" }, { "code": null, "e": 24234, "s": 24015, "text": "The java.math.BigDecimal.toString() method is used to represent the current BigDecimal by which this method is called into String form, using scientific notation if an exponent is needed. It is done by following steps:" }, { "code": null, "e": 24499, "s": 24234, "text": "A standard canonical string form of the BigDecimal is created by converting the absolute value of the unscaled value of BigDecimal in base ten using the characters ‘0’ through ‘9’ with no leading zeros except when the value is 0, then single character ‘0’ is used." }, { "code": null, "e": 24790, "s": 24499, "text": "Next, an adjusted exponent is calculated which is one less than adding the number of characters in the converted unscaled value and negated scale value. That is, -scale + (ulength-1), where ulength is the length of the absolute value of the unscaled value in decimal digits (its precision)." }, { "code": null, "e": 25015, "s": 24790, "text": "An exponent in character form is then suffixed to the converted unscaled value (perhaps with inserted decimal point). This comprises the letter ‘E’ followed immediately by the adjusted exponent converted to a character form." }, { "code": null, "e": 25198, "s": 25015, "text": "Finally, the entire string is prefixed by a minus sign character ‘-‘ if the unscaled value is less than zero. No sign character is prefixed if the unscaled value is zero or positive." }, { "code": null, "e": 25206, "s": 25198, "text": "Syntax:" }, { "code": null, "e": 25232, "s": 25206, "text": "public String toString()\n" }, { "code": null, "e": 25285, "s": 25232, "text": "Parameter: This method do not accepts any parameter." }, { "code": null, "e": 25372, "s": 25285, "text": "Return value: This method returns the String representation of this BigDecimal number." }, { "code": null, "e": 25457, "s": 25372, "text": "Overrides: This method overrides java.lang.Object.toString() method of Object class." 
}, { "code": null, "e": 25521, "s": 25457, "text": "Below programs illustrates the use of toString() method in java" }, { "code": null, "e": 25602, "s": 25521, "text": "Example 1: Example to convert BigDecimal into String without Scientific notation" }, { "code": "// Java program to demonstrate// toString() method of BigDecimal import java.math.*; class GFG { public static void main(String[] args) { // Creating a BigDecimal object BigDecimal b; // Object of String to hold the number String input = \"012345678901234567\" + \"8901234567890123\" + \"4567890123456789\" + \"0123456789012345\" + \"6789012345678901\" + \"2345678901234567\" + \"8901234567890123\" + \"4567890123456789\" + \"0123456789012345\" + \"6789012345678901\" + \"2345678901234567\" + \"8901234567890123\" + \"4567890123456789\" + \"0123456789012345\" + \"6789012345678901\" + \"2345678901234567\" + \"8901234567890123\" + \"4554324324362432\" + \"7674637264783264\" + \"7832678463726478\" + \"3264736274673864\" + \"7364732463546354\" + \"6354632564532645\" + \"6325463546536453\" + \"6546325463546534\" + \"6325465345326456\" + \"4635463263453264\" + \"654632498739473\"; // Converting to BigDecimal b = new BigDecimal(input); // Apply toString() method String s = b.toString(); // Print the result System.out.println(s); }}", "e": 27264, "s": 25602, "text": null }, { "code": null, "e": 27713, "s": 27264, "text": "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234554324324362432767463726478326478326784637264783264736274673864736473246354635463546325645326456325463546536453654632546354653463254653453264564635463263453264654632498739473" }, { "code": null, "e": 27791, "s": 27713, "text": "Example 2: Example to convert BigDecimal into String with Scientific notation" }, { "code": "// Java program to demonstrate// toString() method of BigDecimal import java.math.*; class GFG { public static void main(String[] args) { // Create a BigDecimal object BigDecimal a; // Create a String object String s; // Set precision to 5 MathContext mc = new MathContext(5); a = new BigDecimal(\"4536785E4\", mc); // apply toString() method s = a.toString(); // print the result System.out.println(s); }}", "e": 28307, "s": 27791, "text": null }, { "code": null, "e": 28319, "s": 28307, "text": "4.5368E+10\n" }, { "code": null, "e": 28428, "s": 28319, "text": "Reference: https://docs.oracle.com/en/java/javase/12/docs/api/java.base/java/math/BigDecimal.html#toString()" }, { "code": null, "e": 28444, "s": 28428, "text": "Java-BigDecimal" }, { "code": null, "e": 28459, "s": 28444, "text": "Java-Functions" }, { "code": null, "e": 28477, "s": 28459, "text": "Java-math-package" }, { "code": null, "e": 28482, "s": 28477, "text": "Java" }, { "code": null, "e": 28487, "s": 28482, "text": "Java" }, { "code": null, "e": 28585, "s": 28487, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." 
}, { "code": null, "e": 28594, "s": 28585, "text": "Comments" }, { "code": null, "e": 28607, "s": 28594, "text": "Old Comments" }, { "code": null, "e": 28658, "s": 28607, "text": "Object Oriented Programming (OOPs) Concept in Java" }, { "code": null, "e": 28688, "s": 28658, "text": "HashMap in Java with Examples" }, { "code": null, "e": 28719, "s": 28688, "text": "How to iterate any Map in Java" }, { "code": null, "e": 28751, "s": 28719, "text": "Initialize an ArrayList in Java" }, { "code": null, "e": 28770, "s": 28751, "text": "Interfaces in Java" }, { "code": null, "e": 28788, "s": 28770, "text": "ArrayList in Java" }, { "code": null, "e": 28820, "s": 28788, "text": "Multidimensional Arrays in Java" }, { "code": null, "e": 28840, "s": 28820, "text": "Stack Class in Java" }, { "code": null, "e": 28864, "s": 28840, "text": "Singleton Class in Java" } ]
Beginner’s Guide to Building Neural Networks in TensorFlow | by Evan Heitman | Towards Data Science
If you’re reading this, you’ve probably had some exposure to neural networks and TensorFlow, but you might feel somewhat daunted by the various terms associated with deep learning that are often glossed over or left unexplained in many introductions to the technology. This article will shine a light on some of these topics by doing an in-depth walkthrough of TensorFlow 2.0’s beginner tutorial.
This guide is based on the TensorFlow 2.0 beginner notebook, and I recommend taking a look at it and running it in Google Colab (it’s only 16 lines of code!) to maximize your comprehension of the material covered here.
NOTE: Given that this is a beginner’s guide, there should be a low barrier to entry for most of the ideas expressed herein, but some background familiarity with what neural networks are will be helpful. This article provides a good overview in case you want to brush up.
After reading this you’ll have a better understanding of some key conceptual topics and of the TensorFlow/Keras implementation of those topics (Keras is a deep learning library built on top of TensorFlow).
Conceptual:
Neural network layer shapes
Activation functions (such as ReLU and Softmax)
Logits
Dropout
Optimizers
Loss
Epochs
TensorFlow/Keras functions:
tf.keras.layers.Sequential()
tf.keras.layers.Flatten()
tf.keras.layers.Dense()
model.compile()
model.fit()
The data that the TensorFlow 2.0 beginner tutorial uses is the MNIST dataset, which is considered a kind of “Hello, World!” for neural networks and deep learning, and it can be downloaded directly from Keras. It is a dataset full of hand-drawn digits ranging from 0–9 with a corresponding label describing what digit the drawing is supposed to be depicting.
The idea behind working with this dataset is that we want to be able to train a model that learns what kinds of shapes correspond to digits 0–9 and that subsequently is able to correctly label images that it hasn’t trained on. This task becomes more complicated when images such as the one pictured below are passed to the model. Some humans might even mistake this drawing for a zero even though it’s labelled as an eight.
On a high level, the model built in the beginner tutorial takes in training images as inputs and tries to classify those images as a digit from 0–9. If it makes an incorrect prediction, it will make mathematical adjustments to better predict similar images. Once the model has finished training, it will be tested on images that it hasn’t trained on to get a final assessment of how well the model performs.
Now let’s take a deep dive into how TensorFlow implements this basic neural network.
After doing some setup in the first cell, the notebook starts by loading the MNIST dataset from the Keras library using its load_data() function, which returns two tuples as shown in the code. Documentation can be found here.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
It will be helpful to understand what this data actually looks like as we go into exploring what TensorFlow does with it.
>>> x_train.shape
(60000, 28, 28)
>>> y_train.shape
(60000,)
>>> x_test.shape
(10000, 28, 28)
>>> y_test.shape
(10000,)
Looking at these results, we can see that there are 70k total images in the dataset, 60k training and 10k testing. The two 28s indicate that each image is 28 pixels by 28 pixels, and images are represented as 28x28 arrays filled with pixel values, as can be seen in the following visualization.
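The visualization referred to above was an image in the original article. As a rough sketch of how such a view could be reproduced (this code is my own illustration, not from the notebook, and assumes matplotlib is installed alongside TensorFlow):
import matplotlib.pyplot as plt
import tensorflow as tf

# Load MNIST exactly as the notebook does
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Render the first training image as its 28x28 grid of pixel intensities
plt.imshow(x_train[0], cmap="gray")
plt.title(f"Label: {y_train[0]}")
plt.colorbar()  # raw pixel values run from 0 to 255 before normalization
plt.show()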
The last step that the notebook performs to prepare the data is converting every pixel value in each image to a floating-point number between 0.0 and 1.0. This is done to keep the scale of the math involved in producing a prediction for each image manageable.
x_train, x_test = x_train / 255.0, x_test / 255.0
Perhaps the most confusing part of this notebook is the part where the model structure is created.
model = tf.keras.models.Sequential([
  tf.keras.layers.Flatten(input_shape=(28, 28)),
  tf.keras.layers.Dense(128, activation='relu'),
  tf.keras.layers.Dropout(0.2),
  tf.keras.layers.Dense(10, activation='softmax')
])
The purpose of this code is to specify what kind of layers are going to be present in our neural net. The first component of this is the tf.keras.models.Sequential() call. All this function does is begin the creation of a linear (or “sequential”) arrangement of layers. All the other code in the above snippet details which layers will be in the model and how they will be arranged.
The next line of code, tf.keras.layers.Flatten(input_shape=(28,28)), creates the first layer in our network. Intuitively, we want to be able to use all of the information in an image to predict what digit it is, and so the input layer should have a node for each pixel in an image. Each image has 28*28 = 784 values, and so Flatten() creates a layer with 784 nodes that contain each pixel value for a given image. If we had color images that contain 3 values for each pixel (RGB values), then Flatten() would create a layer with 28*28*3 = 2352 nodes.
The other kind of layer we see in the model is created using tf.keras.layers.Dense(), which creates what is called a fully-connected or densely-connected layer. This can be compared to a sparsely-connected layer, and the distinction has to do with how information is passed between nodes in adjacent layers.
They take logits, the results of the aforementioned weighted sum, and convert them to an “activation” based on what function is being used.
A common activation function, and the one used in the first Dense() layer in our network, is called “ReLU”, which is short for rectified linear unit. What ReLU does is make the activation of any negative logits 0 (the node does not fire), while leaving any positive logits unchanged (the node fires with strength linearly proportional to the strength of the input). For more information on the power of ReLU and why it is useful, check out this article.
Another common activation function, and the one that is used in the second instance of Dense(), is called “softmax”. As the image above shows, softmax takes the logits computed by the weighted sum of the activations from the previous layer and converts them to probabilities that sum to 1.0. This makes it an extremely useful activation function to use in our output layer because it provides easy-to-interpret results for the likelihood of an image being a particular digit.
There are many other activation functions, and deciding which one to use is often a matter of experimentation or heuristic judgement (or both). A good overview of some other activation functions can be found here (I recommend reading it if you’re having trouble understanding why activation functions are necessary in the first place).
Dropout
The last unexplained piece of the code snippet we’ve been examining so far is the call of tf.keras.layers.Dropout(). The concept of dropout goes back to the earlier discussion of the connectivity of layers, and has to do specifically with a few drawbacks associated with densely-connected layers. One of the drawbacks of densely-connected layers is that they can lead to very computationally-expensive neural nets. With each node transmitting information to every other node in the next layer, the number of weights involved in the weighted sums grows rapidly with the number of nodes in each layer (the weight count between two layers is the product of their node counts). Another drawback is that with so much information being passed from layer to layer, models can have a tendency to overfit to the training data, ultimately hurting performance.
This is where dropout comes in. Dropout makes it so that some of the nodes in a given layer don’t pass on their information to the next layer. This helps with computation time and with overfitting. So in the beginner notebook, the call of Dropout(0.2) in between the two Dense() layers makes it so that each node in the first Dense() layer has a 0.2 probability of being dropped from the computation of the activations of the next layer. You might have caught on that this effectively makes the output layer in the model a sparsely-connected layer.
Now that we understand all the components of our model, let’s take advantage of the model.summary() function and do a sanity check on what our network structure looks like.
After double checking the output shapes, it all looks good, so now let’s move on to compiling, training, and running the model!
Now that we’ve specified what our neural net looks like, the next step is to tell TensorFlow how to train it.
Compiling the model
The snippet of code we’ll be examining in this section is the following:
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
The model.compile() function is called on a pre-built model, and it specifies the loss function, optimizer, and metrics, each of which will be explained.
These are important features of how a neural network produces its final predictions.
Loss Function
At the beginning of this guide, it was mentioned that on a high level, the model built in the beginner notebook will learn how to classify certain images as digits, and it does this by making a prediction, seeing how far off its prediction was from the correct answer, and then updating itself to better predict those kinds of digits. The loss function is the part of the model that quantifies how far off a prediction is from the correct answer. Different kinds of models are going to need different kinds of loss functions. For example, the loss function for a problem like this one, where the outputs of our model are probabilities, would have to be very different from the loss function of a model that is trying to predict something like price in dollars. The loss function for this particular model is ‘sparse_categorical_crossentropy’, which is good for multiclass-classification problems like this one. In our case, if the model predicts that an image has only a small probability of being its actual label, that will lead to a high loss.
Optimizer
Another way of expressing what training a model actually means is that it seeks to minimize the loss. If loss is a measurement of how far off a prediction is from the correct answer, and a higher loss means a more incorrect prediction, seeking to minimize the loss is a quantifiable way of determining how well a model is performing. As was mentioned earlier, a key part of training neural nets is revising the mathematical parameters of the nodes of a network based on how effective those parameters were in classifying an image. In a process called backpropagation, neural nets use a mathematical tool called gradient descent to update parameters to improve the model. The details of those terms are somewhat outside of the scope of this guide, but for the purposes of understanding what the beginner notebook is doing, the optimizer parameter of the model.compile() function specifies a way of making the backpropagation process faster and more effective. The “adam” optimizer is a commonly used optimizer and works fine for this problem.
Metrics
The last part of the model.compile() function is specifying the metrics it should use in evaluating the model. Accuracy is a useful, but imperfect, metric for gauging model performance, and it should be used by itself with some amount of caution (for more on why that is and some other potential metrics, you can take a look at this article).
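Before moving on to training, here is a small numeric sketch of the loss discussion above (my own illustration, not code from the notebook): sparse categorical cross-entropy for a single example is just the negative log of the probability the model assigned to the true class, so a confident correct prediction yields a small loss and a confident wrong one yields a large loss.
import numpy as np

# Hypothetical softmax output for one image: 10 class probabilities summing to 1.0
probs = np.array([0.02, 0.01, 0.05, 0.02, 0.01, 0.03, 0.01, 0.80, 0.03, 0.02])

true_label = 7
print(-np.log(probs[true_label]))  # ~0.22 -- most mass on the true class, low loss
print(-np.log(probs[3]))           # ~3.91 -- had the true class been 3, high loss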
And finally, using model.evaluate(x_test, y_test), we can predict the classes of our test set and see how our model performs. This plot shows that the even though training accuracy keeps going up the more epochs we train, the validation accuracy is starting to plateau or decrease which indicates that we probably don’t need to train for more than 5 epochs. For a more detailed evaluation of how the model performed, we can build a confusion matrix. From this confusion matrix we can see that our model has the most trouble with nines and tends to confuse them with sevens, fours, or threes. Congratulations! You’ve made it through this guide to TensorFlow 2.0’s beginner notebook and now have a better understanding of the shapes of neural network layers, activation functions, logits, dropout, optimizers, loss functions and loss, and epochs. You also gained familiarity with how to implement these concepts using TensorFlow/Keras! For more practice, I recommend experimenting with the different parameters discussed in this guide to see what impact they have on model performance. Happy modeling!
[ { "code": null, "e": 567, "s": 171, "text": "If you’re reading this you’ve probably had some exposure to neural networks and TensorFlow, but you might feel somewhat daunted by the various terms associated with deep learning that are often glossed over or left unexplained in many introductions to the technology. This article will shine a light on some of these topics by doing an in-depth walkthrough of TensorFlow 2.0’s beginner tutorial." }, { "code": null, "e": 785, "s": 567, "text": "This guide is based on the TensorFlow 2.0 beginner notebook and I recommend taking a look at it and running it in Google Colab (it’s only 16 lines of code!) to maximize your comprehension of the material covered here." }, { "code": null, "e": 1065, "s": 785, "text": "NOTE: Given that this is a beginner’s guide, there should be a low barrier-to-entry to most of the ideas expressed herein, but some some background familiarity with with what neural networks are will be helpful. This article provides a good overview in case you want to brush up." }, { "code": null, "e": 1264, "s": 1065, "text": "After reading this you’ll have a better understanding of some key conceptual topics and TensorFlow/Keras implementation of those topics (Keras is a deep learning library built on top of TensorFlow)." }, { "code": null, "e": 1276, "s": 1264, "text": "Conceptual:" }, { "code": null, "e": 1304, "s": 1276, "text": "Neural network layer shapes" }, { "code": null, "e": 1352, "s": 1304, "text": "Activation functions (such as Relu and Softmax)" }, { "code": null, "e": 1359, "s": 1352, "text": "Logits" }, { "code": null, "e": 1367, "s": 1359, "text": "Dropout" }, { "code": null, "e": 1378, "s": 1367, "text": "Optimizers" }, { "code": null, "e": 1383, "s": 1378, "text": "Loss" }, { "code": null, "e": 1390, "s": 1383, "text": "Epochs" }, { "code": null, "e": 1418, "s": 1390, "text": "TensorFlow/Keras functions:" }, { "code": null, "e": 1447, "s": 1418, "text": "tf.keras.layers.Sequential()" }, { "code": null, "e": 1473, "s": 1447, "text": "tf.keras.layers.Flatten()" }, { "code": null, "e": 1497, "s": 1473, "text": "tf.keras.layers.Dense()" }, { "code": null, "e": 1513, "s": 1497, "text": "model.compile()" }, { "code": null, "e": 1525, "s": 1513, "text": "model.fit()" }, { "code": null, "e": 1882, "s": 1525, "text": "The data that the TensorFlow 2.0 beginner tutorial uses is the MNIST dataset which is considered a kind of “Hello, World!” for neural networks and deep learning, and it can be downloaded directly from Keras. It is a dataset full of hand-drawn digits ranging from 0–9 with a corresponding label describing what digit the drawing is supposed to be depicting." }, { "code": null, "e": 2306, "s": 1882, "text": "The idea behind working with this dataset is that we want to be able to train a model that learns what kinds of shapes correspond to digits 0–9 and that subsequently is able to correctly label images that it hasn’t trained on. This task becomes more complicated when images such as the one pictured below are passed to the model. Some humans might even mistake this drawing for a zero even though it’s labelled as an eight." }, { "code": null, "e": 2714, "s": 2306, "text": "On a high level, the model built in the beginner tutorial takes in training images as inputs and tries to classify those images as a digit from 0–9. If it makes an incorrect prediction, it will make mathematical adjustments to better predict similar images. 
Once the model has finished training, it will be tested on images that it hasn’t trained on to get a final assessment of how well the model performs." }, { "code": null, "e": 2799, "s": 2714, "text": "Now let’s take a deep dive into how TensorFlow implements this basic neural network." }, { "code": null, "e": 3025, "s": 2799, "text": "After doing some set up in the first cell, the notebook starts by loading the MNIST dataset from the Keras library using its load_data() function which returns two tuples as shown in the code. Documentation can be found here." }, { "code": null, "e": 3113, "s": 3025, "text": "mnist = tf.keras.datasets.mnist(x_train, y_train), (x_test, y_test) = mnist.load_data()" }, { "code": null, "e": 3235, "s": 3113, "text": "It will be helpful to understand what this data actually looks like as we go into exploring what TensorFlow does with it." }, { "code": null, "e": 3348, "s": 3235, "text": ">>> x_train.shape(60000, 28, 28)>>> y_train.shape(60000,)>>> x_test.shape(10000, 28, 28)>>> y_test.shape(10000,)" }, { "code": null, "e": 3643, "s": 3348, "text": "Looking that these results we can see that there are 70k total images in the dataset, 60k training and 10k testing. The two 28’s indicate that each image is 28 pixels by 28 pixels and images are represented as 28x28 arrays filled with pixel values as can be seen in the following visualization." }, { "code": null, "e": 3893, "s": 3643, "text": "The last step that the notebook performs to prepare the data is converting every pixel value in each image to a floating-point number between 0.0–1.0. This is done to help with the scale in the math involved in producing a prediction for each image." }, { "code": null, "e": 3943, "s": 3893, "text": "x_train, x_test = x_train / 255.0, x_test / 255.0" }, { "code": null, "e": 4042, "s": 3943, "text": "Perhaps the most confusing part of this notebook is the part where the model structure is created." }, { "code": null, "e": 4257, "s": 4042, "text": "model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10, activation='softmax')])" }, { "code": null, "e": 4638, "s": 4257, "text": "The purpose of this code is to specify what kind of layers are going to be present in our neural net. The first component of this is the tf.keras.models.Sequential()call. All this function does is begin the creation of a linear (or “sequential”) arrangement of layers. All the other code in the above snippet detail which layers will be in the model and how they will be arranged." }, { "code": null, "e": 5184, "s": 4638, "text": "The next line of code tf.keras.layers.Flatten(input_shape=(28,28)) creates the first layer in our network. Intuitively, we want to be able to use all of the information in an image to predict what digit it is, and so the input layer should have a node for each pixel in an image. Each image has 28*28 =784 values and so Flatten() creates a layer with 784 nodes that contain each pixel value for a given image. If we had color images that contain 3 values for each pixel (RGB values) then Flatten() would create a layer with 28*28*3 = 2352 nodes." }, { "code": null, "e": 5490, "s": 5184, "text": "The other kind of layer we see in the model is created using tf.keras.layers.Dense() which creates what is called a fully-connected or densely-connected layer. 
This can be compared to a sparsely-connected layer and the distinction has to do with how information is passed between nodes in adjacent layers." }, { "code": null, "e": 6358, "s": 5490, "text": "You can see that in a densely-connected layers, each node in one layer is connected to each node in the next layer, whereas in sparsely-connected layers this is not the case. And so what Dense() does is create a layer that is fully connected to the layer that precedes it. The first parameter (128 in the first instance) specifies how many nodes should be in the layer. The number of nodes in hidden layers (layers that aren’t the input or output layers) is somewhat arbitrary but an important thing to note is that the output layer has a number of nodes equal to how many classes the model is trying to predict. In this case, the model is trying to predict between 10 different digits and so the final layer in the model has 10 nodes. This is crucial because the output for each node of the final layer will be a probability that a given image is a particular digit." }, { "code": null, "e": 6487, "s": 6358, "text": "In order to understand the rest of code in this snippet we need to develop an understanding of activation functions and dropout." }, { "code": null, "e": 6508, "s": 6487, "text": "Activation Functions" }, { "code": null, "e": 7207, "s": 6508, "text": "As important as the layout and structure of a neural net is, it’s good to remember that, at the end of the day, what a neural net does is a lot of math. Each node takes the values of the nodes in the previous layer and computes a weighted sum of them, producing a scalar value which is called a logit. Much like how a neuron in the human brain “fires” when prompted by certain inputs, we must specify how each node (which are also sometimes referred to as neurons) in our network “fires” when it is given certain inputs. This is what activation functions do. They take logits, the results of the aforementioned weighted sum, and convert them to an “activation” based on what function is being used." }, { "code": null, "e": 7356, "s": 7207, "text": "A common activation function, and the one used in the first Dense() layer in our network, is called “ReLU” which is short for rectified linear unit." }, { "code": null, "e": 7659, "s": 7356, "text": "What ReLU does is make the activation of any negative logits 0 (the node does not fire), while leaving any positive logits unchanged (the node fires with strength linearly proportional to the strength of the input). For more information on the power of ReLU and why it is useful check out this article." }, { "code": null, "e": 7776, "s": 7659, "text": "Another common activation function, and the one that is used in the second instance of Dense(), is called “softmax”." }, { "code": null, "e": 8135, "s": 7776, "text": "As the image above shows, softmax takes the logits computed by the weighted sum of the activations from the previous layer and converts them to probabilities that sum to 1.0. This makes it an extremely useful activation function to use in our output layer because it provides easy-to-interpret results for the likelihood of an image being a particular digit." }, { "code": null, "e": 8470, "s": 8135, "text": "There are many other activation functions and deciding which one to use is often a matter of experimentation or heuristic judgement (or both). 
A good overview of some other activation functions can be found here (I recommend reading it if you’re having trouble understanding why activation functions are necessary in the first place)." }, { "code": null, "e": 8478, "s": 8470, "text": "Dropout" }, { "code": null, "e": 9268, "s": 8478, "text": "The last unexplained piece of the code snippet we’ve been examining so far is the call of tf.keras.layers.Dropout(). The concept of dropout goes back to the earlier discussion of the connectivity of layers, and has to do specifically with a few drawbacks associated with densely-connected layers. One of the drawbacks of densely-connected layers is that it can lead to very computationally-expensive neural nets. With each node transmitting information to every other node in the next layer, the complexity of the weighted sums computed in each node increases exponentially with the number of nodes in each layer. Another drawback is that with so much information being passed from layer to layer, models can have a tendency to overfit to the training data, ultimately hurting performance." }, { "code": null, "e": 9812, "s": 9268, "text": "This is where dropout comes in. Dropout makes it so some of the nodes in a given layer don’t pass on their information to the next layer. This helps with computation time and with overfitting. So in the beginner notebook, the call of Dropout(0.2) in between the two Dense() layers makes it so that each node in the first Dense() layer has a 0.2 probability of being dropped from the computation of the activations of the next layer. You might have caught on that this effectively makes the output layer in the model a sparsely-connected layer." }, { "code": null, "e": 9985, "s": 9812, "text": "Now that we understand all the components of our model, let’s take advantage of the model.summary() function and do a sanity check on what our network structure looks like." }, { "code": null, "e": 10113, "s": 9985, "text": "After double checking the output shapes, it all looks good, so now let’s move on to compiling, training, and running the model!" }, { "code": null, "e": 10223, "s": 10113, "text": "Now that we’ve specified what our neural net looks like, the next step is to tell Tensorflow how to train it." }, { "code": null, "e": 10243, "s": 10223, "text": "Compiling the model" }, { "code": null, "e": 10316, "s": 10243, "text": "The snippet of code we’ll be examining in this section is the following:" }, { "code": null, "e": 10436, "s": 10316, "text": "model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])" }, { "code": null, "e": 10673, "s": 10436, "text": "The model.compile()function is called on a pre-built model and it specifies the loss function, optimizer, and metrics, each of which will be explained. These are important features of how a neural network produces its final predictions." }, { "code": null, "e": 11726, "s": 10673, "text": "Loss FunctionAt the beginning of this guide, it was mentioned that on a high-level, the model built in the beginner notebook will learn how to classify certain images as digits and it does this by making a prediction, seeing how far off its prediction was from the correct answer, and then updating itself to better predict those kinds of digits. The loss function is the part of the model that quantifies how far off a prediction is from the correct answer. Different kinds of models are going to need different kinds of loss functions. 
For example, the loss function for a problem like this one where the outputs to our model are probabilities would have to very different from the loss function of a model that is trying to predict something like price in dollars. The loss function for this particular model is ‘sparse_categorical_crossentropy’ which is good for multiclass-classification problems like this one. In our case, if the model predicts that an image has only a small probability of being its actual label, that will lead to a high loss." }, { "code": null, "e": 11736, "s": 11726, "text": "Optimizer" }, { "code": null, "e": 12786, "s": 11736, "text": "Another way of expressing what training a model actually means is that it seeks to minimize the loss. If loss is a measurement of how far off a prediction is from the correct answer, and a higher loss means a more incorrect prediction, seeking to minimize the loss is a quantifiable way of determining how well a model is performing. As was mentioned earlier, a key part of training neural nets is revising the mathematics parameters of the nodes of a network based off of how effective those parameters were in classifying an image. In a process called backpropagation, neural nets use a mathematical tool called gradient descent to update parameters to improve the model. The details of those terms are somewhat outside of the scope of this guide but for the purposes of understanding what the beginner notebook is doing, the optimizer parameter of the model.compile() function specifies a way for making the backpropagation process more faster and more effective. The “adam” optimizer is a commonly used optimizer and works fine for this problem." }, { "code": null, "e": 12794, "s": 12786, "text": "Metrics" }, { "code": null, "e": 13138, "s": 12794, "text": "The last part of the model.compile() function is specifying the metrics it should use in evaluating the model. Accuracy is a useful, but imperfect metric, for gauging model performance and it should be used by itself with a some amount of caution (for more on why that is and some other potential metrics, you can take a look at this article)." }, { "code": null, "e": 13236, "s": 13138, "text": "At long last comes actually training the model, and with TensorFlow 2.0, this is very easy to do." }, { "code": null, "e": 13274, "s": 13236, "text": "model.fit(x_train, y_train, epochs=5)" }, { "code": null, "e": 13739, "s": 13274, "text": "This line of code is pretty intuitive, passing the training data and the correct labels of that data. The epoch parameter in the model.fit() function is the number of times the model sees all of the training data. The reason why we want the model to see all of the training data multiple times is that one go-through might not be enough for the model to sufficiently update its weights when computing weighted sums to make a marked improvement in predictive power." }, { "code": null, "e": 14024, "s": 13739, "text": "Running this code, you can see that in each epoch, the model goes through all 60k images in the training set. You can also see the the loss decreases and the accuracy increases with each epoch, meaning that the model is getting better and better at classifying digits with each epoch." }, { "code": null, "e": 14150, "s": 14024, "text": "And finally, using model.evaluate(x_test, y_test), we can predict the classes of our test set and see how our model performs." 
}, { "code": null, "e": 14382, "s": 14150, "text": "This plot shows that the even though training accuracy keeps going up the more epochs we train, the validation accuracy is starting to plateau or decrease which indicates that we probably don’t need to train for more than 5 epochs." }, { "code": null, "e": 14616, "s": 14382, "text": "For a more detailed evaluation of how the model performed, we can build a confusion matrix. From this confusion matrix we can see that our model has the most trouble with nines and tends to confuse them with sevens, fours, or threes." } ]
How to convert decimal to hexadecimal in JavaScript?
The Number class has a toString method that accepts a base as an argument. We can pass base 16 (hexadecimal) to get the desired number converted to a hexadecimal string.
console.log(Number(255).toString(16))
console.log(Number(17).toString(16))
ff
11
We can convert these numbers back to decimal using the parseInt function. The parseInt function available in JavaScript has the following signature −
parseInt(string, radix);
where the parameters are the following −
string − The value to parse. If this argument is not a string, then it is converted to one using the ToString method. Leading whitespace in this argument is ignored.
radix − An integer between 2 and 36 that represents the radix (the base in mathematical numeral systems) of the string.
So we can pass the string and the radix and convert any number with a base from 2 to 36 to an integer using this method.
console.log(parseInt("ff", 16))
console.log(parseInt("11", 16))
255
17
[ { "code": null, "e": 1224, "s": 1062, "text": "The number class has a toString method that accepts base as argument. We can pass base 16(hex numbers) to get the desired number converted to hexadecimal string." }, { "code": null, "e": 1299, "s": 1224, "text": "console.log(Number(255).toString(16))\nconsole.log(Number(17).toString(16))" }, { "code": null, "e": 1305, "s": 1299, "text": "ff\n11" }, { "code": null, "e": 1455, "s": 1305, "text": "We can convert these numbers back to decimal using the parseInt function. The parseInt function available in JavaScript has the following signature −" }, { "code": null, "e": 1481, "s": 1455, "text": "parseInt(string, radix);\n" }, { "code": null, "e": 1522, "s": 1481, "text": "Where, the paramters are the following −" }, { "code": null, "e": 1687, "s": 1522, "text": "string −The value to parse. If this argument is not a string, then it is converted to one using the ToString method. Leading whitespace in this argument is ignored." }, { "code": null, "e": 1806, "s": 1687, "text": "radix −An integer between 2 and 36 that represents the radix (the base in mathematical numeral systems) of the string." }, { "code": null, "e": 1923, "s": 1806, "text": "So we can pass the string and the radix and convert any numbner with base from 2 to 36 to integer using this method." }, { "code": null, "e": 1987, "s": 1923, "text": "console.log(parseInt(\"ff\", 16))\nconsole.log(parseInt(\"11\", 16))" }, { "code": null, "e": 1994, "s": 1987, "text": "255\n17" } ]
Python – Create a string made of the first and last two characters from a given string
12 Nov, 2020
Here we are going to see how to form a string made from the first and last 2 characters of a given string.
Input: Geeksforgeeks
Output: Geks

Input: Hi, There
Output: Hire
Method #1: Using list slicing
In this example, we loop through the string to count its length, and then build the new substring by slicing out the first two characters and the last two characters with the help of the count variable.
# Taking input from the user
inputString = "Geeksforgeeks"

# Count the characters (equivalent to len(inputString))
count = 0
for i in inputString:
    count = count + 1

# First two characters plus last two characters
newString = inputString[0:2] + inputString[count - 2:count]

# Printing the new String
print("Input string = " + inputString)
print("New String = " + newString)
Output:
Input string = Geeksforgeeks
New String = Geks
Method #2: Using a loop
In this example, we store the length of the string in a variable, produce an empty result if the string is shorter than 3 characters, and otherwise keep only the characters at positions 0, 1, l-2, and l-1 to build the new string.
# Taking input from user
inputString = "Geeksforgeeks"

l = len(inputString)
newString = ""

# Only strings with at least 3 characters produce a result here
if l >= 3:
    for i in range(0, len(inputString)):
        # Keep the characters at positions 0, 1, l-2 and l-1
        if i in (0, 1, l - 2, l - 1):
            newString = newString + inputString[i]

# Printing New String
print("Input string : " + inputString)
print("New String : " + newString)
Output:
Input string : Geeksforgeeks
New String : Geks
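For comparison, here is a more compact sketch of the same idea (my own variant, not from the article), using len() and slicing directly; it mirrors the second method's rule that strings shorter than 3 characters produce an empty result:
def first_last_two(s):
    # Strings shorter than 3 characters yield "" under this rule
    if len(s) < 3:
        return ""
    # First two characters plus last two characters
    return s[:2] + s[-2:]

print(first_last_two("Geeksforgeeks"))  # Geks
print(first_last_two("Hi, There"))      # Hire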
[ { "code": null, "e": 28, "s": 0, "text": "\n12 Nov, 2020" }, { "code": null, "e": 147, "s": 28, "text": "Here we are going to see the approach of forming a string made from the first and last 2 characters of a given string." }, { "code": null, "e": 213, "s": 147, "text": "Input: Geeksforgeeks\nOutput: Geks\n\nInput: Hi, There\nOutput: Hire\n" }, { "code": null, "e": 243, "s": 213, "text": "Method #1: Using list slicing" }, { "code": null, "e": 487, "s": 243, "text": "In this example, we are going to loop through the string and store the length of the string in the count variable and then make the new substring by taking the first 2 characters and the last two characters with the help of the count variable." }, { "code": null, "e": 494, "s": 487, "text": "Python" }, { "code": "# Taking input from the userinputString = \"Geeksforgeeks\" count = 0 # Loop through the stringfor i in inputString: count = count + 1newString = inputString[ 0:2 ] + inputString [count - 2: count ] # Printing the new Stringprint(\"Input string = \" + inputString)print(\"New String = \"+ newString)", "e": 797, "s": 494, "text": null }, { "code": null, "e": 805, "s": 797, "text": "Output:" }, { "code": null, "e": 853, "s": 805, "text": "Input string = Geeksforgeeks\nNew String = Geks\n" }, { "code": null, "e": 878, "s": 853, "text": "Methods #2: Using a loop" }, { "code": null, "e": 1133, "s": 878, "text": "In this example we are going to store the length of the string in a variable and break the loop if its length is less than 4 characters otherwise we will store the characters if the variable matches the defined conditions and make a new string out of it." }, { "code": null, "e": 1140, "s": 1133, "text": "Python" }, { "code": "# Taking input from userinputString = \"Geeksforgeeks\" l = len(inputString)newString = \"\" # looping through the stringfor i in range(0, len(inputString)): if l < 3: break else: if i in (0, 1, l-2, l-1): newString = newString + inputString[i] else: continue # Printing New Stringprint(\"Input string : \" + inputString)print(\"New String : \" + newString)", "e": 1542, "s": 1140, "text": null }, { "code": null, "e": 1550, "s": 1542, "text": "Output:" }, { "code": null, "e": 1598, "s": 1550, "text": "Input string : Geeksforgeeks\nNew String : Geks\n" }, { "code": null, "e": 1621, "s": 1598, "text": "Python string-programs" }, { "code": null, "e": 1628, "s": 1621, "text": "Python" }, { "code": null, "e": 1644, "s": 1628, "text": "Python Programs" }, { "code": null, "e": 1742, "s": 1644, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 1774, "s": 1742, "text": "How to Install PIP on Windows ?" }, { "code": null, "e": 1801, "s": 1774, "text": "Python Classes and Objects" }, { "code": null, "e": 1832, "s": 1801, "text": "Python | os.path.join() method" }, { "code": null, "e": 1855, "s": 1832, "text": "Introduction To PYTHON" }, { "code": null, "e": 1876, "s": 1855, "text": "Python OOPs Concepts" }, { "code": null, "e": 1898, "s": 1876, "text": "Defaultdict in Python" }, { "code": null, "e": 1937, "s": 1898, "text": "Python | Get dictionary keys as a list" }, { "code": null, "e": 1975, "s": 1937, "text": "Python | Convert a list to dictionary" }, { "code": null, "e": 2024, "s": 1975, "text": "Python | Convert string dictionary to dictionary" } ]
XSSCon – Simple and Powerful XSS Scanner tool
28 Jul, 2021
XSSCon is a Python-based tool that features a powerful XSS (Cross-Site Scripting) scanner. XSS is among the most common vulnerabilities, identified on a great many web-based applications; you only have to find an input field where you can inject your malicious payload. Automation can reduce your manual work when the target domain has a large scope. You can use XSSCon while performing penetration testing of web-based applications, or you can use it in bug bounty programs to find low- to medium-severity bugs and earn bounties.
XSSCon crawls all links in a target domain; its crawler engine stays within the same domain.
XSSCon supports POST and GET types of requests and responses.
XSSCon has many settings which you can easily customize.
XSSCon is a fully automated tool.
Step 1: First, you have to open your Kali Linux terminal and move to the desktop directory using the following command. On the desktop, we have to create a directory in which we will install the tool or clone the tool from GitHub.
cd Desktop/
Step 2: Now we are on the desktop. Create a new directory called XSSCon using the following command.
mkdir XSSCon
Step 3: You have created the directory XSSCon on the Desktop; now move to that directory using the following command.
cd XSSCon/
Step 4: Install the required package (bs4/BeautifulSoup) using the following command.
pip3 install bs4
Step 5: Install the required package (requests) using the following command.
pip3 install requests
Step 6: Now you are in the XSSCon directory. Here you have to clone the XSSCon tool from GitHub. For cloning the tool, use the following command.
git clone https://github.com/menkrep1337/XSSCon
Step 7: The tool has now been cloned successfully into the XSSCon directory. List out all the contents of the directory using the following command.
ls
Step 8: You can see that a new directory, XSSCon, has been created; move to this directory using the following command.
cd XSSCon/
Step 9: List out all the contents of the directory using the following command.
ls
Step 10: You have downloaded the tool, and now you have to give execute permission to the scripts using the following command.
chmod 777 xsscon.py requirements.txt
Step 11: List out the contents of the directory again to check the permissions using the following command.
ls
Step 12: Permissions have been set on the requirements file as well. Now install all the requirements using the following command.
Note: You can skip this step if you have installed the required packages in Steps 4 and 5.
pip3 install -r requirements.txt
Step 13: Check the help page of the tool to get a better understanding of its usage.
python3 xsscon.py --help
The tool has been installed successfully. Using this tool, you can easily check for cross-site scripting vulnerabilities in websites and web apps. Here is an example of using the XSSCon tool.
python3 xsscon.py -u http://testphp.vulnweb.com
The XSSCon tool has started checking for cross-site scripting vulnerabilities and reports each one it detects. The tool keeps crawling and testing the site; when it finds a vulnerable page, it shows it in the terminal.
Now, you can see the payload injected by XSSCon is actually working if we open the link in a web browser. The Popup comes when we hit the malicious link. Kali-Linux Linux-Tools Linux-Unix Writing code in comment? Please use ide.geeksforgeeks.org, generate link and share the link here.
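The sketch below is not XSSCon's actual code; it is an illustrative example of the basic reflected-XSS check such a scanner automates: inject a marker payload into a GET parameter and look for it unescaped in the response. The target URL and parameter name are placeholders for a site you are authorized to test.

# Illustrative sketch (not XSSCon's code): send a marker payload as a GET
# parameter and report whether the server echoes it back unescaped.
import requests

def reflects_payload(base_url, param, payload="<script>alert(1)</script>"):
    # Send the payload as the value of the given query parameter
    response = requests.get(base_url, params={param: payload}, timeout=10)
    # If the payload comes back verbatim (unescaped), the parameter is a
    # likely reflected-XSS candidate worth manual verification
    return payload in response.text

if __name__ == "__main__":
    # Assumed test target and parameter name, for illustration only
    url = "http://testphp.vulnweb.com/search.php"
    if reflects_payload(url, "searchFor"):
        print("Possible reflected XSS: payload echoed unescaped")
    else:
        print("Payload not reflected verbatim")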
[ { "code": null, "e": 28, "s": 0, "text": "\n28 Jul, 2021" }, { "code": null, "e": 601, "s": 28, "text": "XSSCon tool is a Python-based tool that features a powerful XSS (Cross-Site Scripting) Scanner. XSS is the most common vulnerability, which is identified on almost every web-based application; you only have to find an input field where you can inject your malicious payload. Automation can reduce your manual work if there is enormous scope in your target domain. You can use this XSSCon tool while performing penetration testing of web-based applications, or you can also use this tool in Bug Bounty Programs for getting low-medium level bugs and earning lots of Bounty. " }, { "code": null, "e": 846, "s": 601, "text": "XSSCon crawls all links in a target domain; it has a strong crawler engine in the same domain.XSSCon supports POST and GET types of Requests and Responses.XSSCon has many settings which you can easily customize.XSSCon is a fully automated tool." }, { "code": null, "e": 941, "s": 846, "text": "XSSCon crawls all links in a target domain; it has a strong crawler engine in the same domain." }, { "code": null, "e": 1003, "s": 941, "text": "XSSCon supports POST and GET types of Requests and Responses." }, { "code": null, "e": 1060, "s": 1003, "text": "XSSCon has many settings which you can easily customize." }, { "code": null, "e": 1094, "s": 1060, "text": "XSSCon is a fully automated tool." }, { "code": null, "e": 1325, "s": 1094, "text": "Step 1: First, you have to open your Kali Linux terminal and move to the desktop directory using the following command. On the desktop, we have to create a directory in which we will install the tool or clone the tool from GitHub." }, { "code": null, "e": 1337, "s": 1325, "text": "cd Desktop/" }, { "code": null, "e": 1447, "s": 1337, "text": "Step 2: Now, we are on the desktop. We will create a new directory called XSSCon using the following command." }, { "code": null, "e": 1460, "s": 1447, "text": "mkdir XSSCon" }, { "code": null, "e": 1577, "s": 1460, "text": "Step 3: You have created the directory XSSCon on the Desktop and move to that directory using the following command." }, { "code": null, "e": 1588, "s": 1577, "text": "cd XSSCon/" }, { "code": null, "e": 1677, "s": 1588, "text": "Step 4: Install the required package (bs4/beautifulsoap) using the following command. " }, { "code": null, "e": 1694, "s": 1677, "text": "pip3 install bs4" }, { "code": null, "e": 1771, "s": 1694, "text": "Step 5: Install the required package (requests) using the following command." }, { "code": null, "e": 1793, "s": 1771, "text": "pip3 install requests" }, { "code": null, "e": 1952, "s": 1793, "text": "Step 6: Now you are under XSSCon directory. In this directory, you have to clone the XSSCon tool from GitHub. For cloning the tool, use the following command." }, { "code": null, "e": 2000, "s": 1952, "text": "git clone https://github.com/menkrep1337/XSSCon" }, { "code": null, "e": 2148, "s": 2000, "text": "Step 7: Now, the tool has been cloned successfully to the XSSCon directory. Now list out all the contents of the tool using the following commands." }, { "code": null, "e": 2151, "s": 2148, "text": "ls" }, { "code": null, "e": 2264, "s": 2151, "text": "Step 8: You can see a new directory here. XSSCon created,now move to this directory using the following command." }, { "code": null, "e": 2275, "s": 2264, "text": "cd XSSCon/" }, { "code": null, "e": 2354, "s": 2275, "text": "Step 9: List out all the content of the directory using the following command." 
}, { "code": null, "e": 2357, "s": 2354, "text": "ls" }, { "code": null, "e": 2473, "s": 2357, "text": "Step 10: You have downloaded the tool, and now you have to give permission to the tool using the following command." }, { "code": null, "e": 2510, "s": 2473, "text": "chmod 777 xsscon.py requirements.txt" }, { "code": null, "e": 2608, "s": 2510, "text": "Step 11: List out the content of the tool again to check permissions using the following command." }, { "code": null, "e": 2611, "s": 2608, "text": "ls" }, { "code": null, "e": 2730, "s": 2611, "text": "Step 12: Permission has been given to requirements also. Now install all the requirements using the following command." }, { "code": null, "e": 2822, "s": 2730, "text": "Note : You can skip this step if you have installed the required packages from Step 4 and 5" }, { "code": null, "e": 2856, "s": 2822, "text": "pip3 install -r requirements.txt " }, { "code": null, "e": 2942, "s": 2856, "text": "Step 13: Check the help page of the tool to get a better understanding of tool usage." }, { "code": null, "e": 2967, "s": 2942, "text": "python3 xsscon.py --help" }, { "code": null, "e": 3171, "s": 2967, "text": "The tool has been downloaded successfully. Using this tool, you can easily check the cross-site scripting vulnerabilities of the websites and webapps. Now here are some examples of using the XSSCon tool." }, { "code": null, "e": 3219, "s": 3171, "text": "python3 xsscon.py -u http://testphp.vulnweb.com" }, { "code": null, "e": 3474, "s": 3219, "text": "The XSSCon tool has started checking cross-site scripting vulnerabilities. These are the vulnerabilities that the tool has detected. The tool keeps checking the website again and again. When finding a vulnerable website, it will show you at the terminal." }, { "code": null, "e": 3628, "s": 3474, "text": "Now, you can see the payload injected by XSSCon is actually working if we open the link in a web browser. The Popup comes when we hit the malicious link." }, { "code": null, "e": 3639, "s": 3628, "text": "Kali-Linux" }, { "code": null, "e": 3651, "s": 3639, "text": "Linux-Tools" }, { "code": null, "e": 3662, "s": 3651, "text": "Linux-Unix" } ]
MySQL | CAST( ) Function
25 Nov, 2019 The MySQL CAST() function is used for converting a value from one datatype to another specific datatype. The CAST() function accepts two parameters which are the value to be converted and the datatype to which the value needs to be converted. The datatypes in which a given value can be converted are: DATE : It is used to convert a value to the DATE datatype. The Format returned is “YYYY-MM-DD”. DATETIME : It is used to convert a value to the DATETIME datatype. The Format returned is “YYYY-MM-DD HH:MM:SS”. TIME : It is used to convert a value to the TIME datatype. The Format returned is “HH:MM:SS”. CHAR : It is used to convert a value to the CHAR datatype. SIGNED : It is used to convert a value to SIGNED datatype. UNSIGNED : It is used to convert a value to UNSIGNED datatype. BINARY : It is used to convert a value to BINARY datatype. Syntax: CAST(input_value AS datatype) Parameters Used: input_value – It is used to specify the value which needs to be converted. datatype – It is used to specify the datatype in which the value needs to be converted. Return Value:The MySQL CAST() function returns a value in the desired datatype after conversion. Supported Versions of MySQL: MySQL 5.7 MySQL 5.6 MySQL 5.5 MySQL 5.1 MySQL 5.0 MySQL 4.1 MySQL 4.0 MySQL 3.23 Example-1: Implementing CAST() function to convert a value to DATE datatype. SELECT CAST("2019-11-21" AS DATE); Output: 2019-11-21 Example-2: Implementing CAST() function to convert a value to CHAR datatype. SELECT CAST(121 AS CHAR); Output: 121 Example-3: Implementing CAST() function to convert a value to SIGNED datatype. SELECT CAST(2-4 AS SIGNED); Output: -2 Example-4: Implementing CAST() function to convert a value to UNSIGNED datatype. SELECT CAST(2-4 AS UNSIGNED); Output: 18446744073709551614 mysql SQLmysql SQL SQL Writing code in comment? Please use ide.geeksforgeeks.org, generate link and share the link here. How to Update Multiple Columns in Single Update Statement in SQL? Window functions in SQL What is Temporary Table in SQL? SQL | Sub queries in From Clause SQL using Python RANK() Function in SQL Server SQL Query to Find the Name of a Person Whose Name Starts with Specific Letter SQL Query to Convert VARCHAR to INT SQL Query to Compare Two Dates How to Write a SQL Query For a Specific Date Range and Date Time?
[ { "code": null, "e": 28, "s": 0, "text": "\n25 Nov, 2019" }, { "code": null, "e": 271, "s": 28, "text": "The MySQL CAST() function is used for converting a value from one datatype to another specific datatype. The CAST() function accepts two parameters which are the value to be converted and the datatype to which the value needs to be converted." }, { "code": null, "e": 330, "s": 271, "text": "The datatypes in which a given value can be converted are:" }, { "code": null, "e": 426, "s": 330, "text": "DATE : It is used to convert a value to the DATE datatype. The Format returned is “YYYY-MM-DD”." }, { "code": null, "e": 539, "s": 426, "text": "DATETIME : It is used to convert a value to the DATETIME datatype. The Format returned is “YYYY-MM-DD HH:MM:SS”." }, { "code": null, "e": 633, "s": 539, "text": "TIME : It is used to convert a value to the TIME datatype. The Format returned is “HH:MM:SS”." }, { "code": null, "e": 692, "s": 633, "text": "CHAR : It is used to convert a value to the CHAR datatype." }, { "code": null, "e": 751, "s": 692, "text": "SIGNED : It is used to convert a value to SIGNED datatype." }, { "code": null, "e": 814, "s": 751, "text": "UNSIGNED : It is used to convert a value to UNSIGNED datatype." }, { "code": null, "e": 873, "s": 814, "text": "BINARY : It is used to convert a value to BINARY datatype." }, { "code": null, "e": 881, "s": 873, "text": "Syntax:" }, { "code": null, "e": 911, "s": 881, "text": "CAST(input_value AS datatype)" }, { "code": null, "e": 928, "s": 911, "text": "Parameters Used:" }, { "code": null, "e": 1003, "s": 928, "text": "input_value – It is used to specify the value which needs to be converted." }, { "code": null, "e": 1091, "s": 1003, "text": "datatype – It is used to specify the datatype in which the value needs to be converted." }, { "code": null, "e": 1188, "s": 1091, "text": "Return Value:The MySQL CAST() function returns a value in the desired datatype after conversion." }, { "code": null, "e": 1217, "s": 1188, "text": "Supported Versions of MySQL:" }, { "code": null, "e": 1227, "s": 1217, "text": "MySQL 5.7" }, { "code": null, "e": 1237, "s": 1227, "text": "MySQL 5.6" }, { "code": null, "e": 1247, "s": 1237, "text": "MySQL 5.5" }, { "code": null, "e": 1257, "s": 1247, "text": "MySQL 5.1" }, { "code": null, "e": 1267, "s": 1257, "text": "MySQL 5.0" }, { "code": null, "e": 1277, "s": 1267, "text": "MySQL 4.1" }, { "code": null, "e": 1287, "s": 1277, "text": "MySQL 4.0" }, { "code": null, "e": 1298, "s": 1287, "text": "MySQL 3.23" }, { "code": null, "e": 1375, "s": 1298, "text": "Example-1: Implementing CAST() function to convert a value to DATE datatype." }, { "code": null, "e": 1411, "s": 1375, "text": "SELECT CAST(\"2019-11-21\" AS DATE); " }, { "code": null, "e": 1419, "s": 1411, "text": "Output:" }, { "code": null, "e": 1431, "s": 1419, "text": "2019-11-21 " }, { "code": null, "e": 1508, "s": 1431, "text": "Example-2: Implementing CAST() function to convert a value to CHAR datatype." }, { "code": null, "e": 1535, "s": 1508, "text": "SELECT CAST(121 AS CHAR); " }, { "code": null, "e": 1543, "s": 1535, "text": "Output:" }, { "code": null, "e": 1548, "s": 1543, "text": "121 " }, { "code": null, "e": 1627, "s": 1548, "text": "Example-3: Implementing CAST() function to convert a value to SIGNED datatype." 
}, { "code": null, "e": 1656, "s": 1627, "text": "SELECT CAST(2-4 AS SIGNED); " }, { "code": null, "e": 1664, "s": 1656, "text": "Output:" }, { "code": null, "e": 1668, "s": 1664, "text": "-2 " }, { "code": null, "e": 1749, "s": 1668, "text": "Example-4: Implementing CAST() function to convert a value to UNSIGNED datatype." }, { "code": null, "e": 1780, "s": 1749, "text": "SELECT CAST(2-4 AS UNSIGNED); " }, { "code": null, "e": 1788, "s": 1780, "text": "Output:" }, { "code": null, "e": 1810, "s": 1788, "text": "18446744073709551614 " }, { "code": null, "e": 1816, "s": 1810, "text": "mysql" }, { "code": null, "e": 1825, "s": 1816, "text": "SQLmysql" }, { "code": null, "e": 1829, "s": 1825, "text": "SQL" }, { "code": null, "e": 1833, "s": 1829, "text": "SQL" }, { "code": null, "e": 1931, "s": 1833, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 1997, "s": 1931, "text": "How to Update Multiple Columns in Single Update Statement in SQL?" }, { "code": null, "e": 2021, "s": 1997, "text": "Window functions in SQL" }, { "code": null, "e": 2053, "s": 2021, "text": "What is Temporary Table in SQL?" }, { "code": null, "e": 2086, "s": 2053, "text": "SQL | Sub queries in From Clause" }, { "code": null, "e": 2103, "s": 2086, "text": "SQL using Python" }, { "code": null, "e": 2133, "s": 2103, "text": "RANK() Function in SQL Server" }, { "code": null, "e": 2211, "s": 2133, "text": "SQL Query to Find the Name of a Person Whose Name Starts with Specific Letter" }, { "code": null, "e": 2247, "s": 2211, "text": "SQL Query to Convert VARCHAR to INT" }, { "code": null, "e": 2278, "s": 2247, "text": "SQL Query to Compare Two Dates" } ]
Python | Pandas Split strings into two List/Columns using str.split()
07 May, 2019

Pandas provides a method to split a string around a passed separator/delimiter. After that, the string can be stored as a list in a Series, or it can be used to create a data frame with multiple columns from a single separated string.

It works similarly to Python's default split() method, but the default method can only be applied to an individual string. The Pandas str.split() method can be applied to a whole Series. .str has to be prefixed every time before calling this method to differentiate it from Python's default function; otherwise, it will throw an error.

Syntax: Series.str.split(pat=None, n=-1, expand=False)

Parameters:

pat: String value, separator or delimiter to separate the string at.
n: Number of maximum separations to make in a single string; the default is -1, which means all.
expand: Boolean value; returns a data frame with the values in different columns if True. Else it returns a Series with lists of strings.

Return Type: Series of lists or a data frame, depending on the expand parameter.

To download the CSV used in the code, click here.

In the following examples, the data frame used contains data of some NBA players.

Example #1: Splitting string into list

In this example, the split function is used to split the Team column at every "t". The n parameter is set to 1, so the maximum number of separations in a single string will be 1. The expand parameter is False, and that is why a Series with lists of strings is returned instead of a data frame.

# importing pandas module
import pandas as pd

# reading csv file from url
data = pd.read_csv("https://media.geeksforgeeks.org/wp-content/uploads/nba.csv")

# dropping null value columns to avoid errors
data.dropna(inplace = True)

# new series with split value lists
data["Team"] = data["Team"].str.split("t", n = 1, expand = False)

# df display
data

Output: As shown in the output image, the Team column now holds a list. The string was separated at the first occurrence of "t" and not at later occurrences, since the n parameter was set to 1 (max 1 separation per string).

Example #2: Making separate columns from string

In this example, the Name column is separated at the space (" "), and the expand parameter is set to True, which means it will return a data frame with all separated strings in different columns. The data frame is then used to create new columns, and the old Name column is dropped using the .drop() method.

# importing pandas module
import pandas as pd

# reading csv file from url
data = pd.read_csv("https://media.geeksforgeeks.org/wp-content/uploads/nba.csv")

# dropping null value columns to avoid errors
data.dropna(inplace = True)

# new data frame with split value columns
new = data["Name"].str.split(" ", n = 1, expand = True)

# making separate first name column from new data frame
data["First Name"] = new[0]

# making separate last name column from new data frame
data["Last Name"] = new[1]

# Dropping old Name column
data.drop(columns = ["Name"], inplace = True)

# df display
data

Output: As shown in the output image, a new data frame was returned by the split() function, and it was used to create two new columns (First Name and Last Name) in the data frame. A self-contained variant with inline data follows below.
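For readers without the CSV file, here is a self-contained sketch of the same two patterns using a small inline data frame; the names below are invented purely for illustration.

# Self-contained sketch of str.split() with inline data.
import pandas as pd

df = pd.DataFrame({"Name": ["John Smith", "Jane Ann Doe", "Alice Brown"]})

# expand=False (the default): each cell becomes a list of tokens
print(df["Name"].str.split(" ", n=1))

# expand=True: tokens go into separate columns of a new data frame
parts = df["Name"].str.split(" ", n=1, expand=True)
df["First Name"] = parts[0]
df["Last Name"] = parts[1]
df = df.drop(columns=["Name"])
print(df)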
[ { "code": null, "e": 53, "s": 25, "text": "\n07 May, 2019" }, { "code": null, "e": 282, "s": 53, "text": "Pandas provide a method to split string around a passed separator/delimiter. After that, the string can be stored as a list in a series or it can also be used to create multiple column data frames from a single separated string." }, { "code": null, "e": 609, "s": 282, "text": "It works similarly to the Python’s default split() method but it can only be applied to an individual string. Pandas <code>str.split() method can be applied to a whole series. .str has to be prefixed everytime before calling this method to differentiate it from the Python’s default function otherwise, it will throw an error." }, { "code": null, "e": 664, "s": 609, "text": "Syntax: Series.str.split(pat=None, n=-1, expand=False)" }, { "code": null, "e": 676, "s": 664, "text": "Parameters:" }, { "code": null, "e": 970, "s": 676, "text": "pat: String value, separator or delimiter to separate string at.n: Numbers of max separations to make in a single string, default is -1 which means all.expand: Boolean value, returns a data frame with different value in different columns if True. Else it returns a series with list of strings." }, { "code": null, "e": 1042, "s": 970, "text": "Return Type: Series of list or Data frame depending on expand Parameter" }, { "code": null, "e": 1088, "s": 1042, "text": "To download the CSV used in code, click here." }, { "code": null, "e": 1235, "s": 1088, "text": "In the following examples, the data frame used contains data of some NBA players. The image of data frame before any operations is attached below." }, { "code": null, "e": 1276, "s": 1237, "text": "Example #1: Splitting string into list" }, { "code": null, "e": 1570, "s": 1276, "text": "In this data, the split function is used to split the Team column at every “t”. The parameter is set to 1 and hence, the maximum number of separations in a single string will be 1. The expand parameter is False and that is why a series with List of strings is returned instead of a data frame." }, { "code": "# importing pandas module import pandas as pd # reading csv file from url data = pd.read_csv(\"https://media.geeksforgeeks.org/wp-content/uploads/nba.csv\") # dropping null value columns to avoid errorsdata.dropna(inplace = True) # new data frame with split value columnsdata[\"Team\"]= data[\"Team\"].str.split(\"t\", n = 1, expand = True) # df displaydata", "e": 1928, "s": 1570, "text": null }, { "code": null, "e": 2206, "s": 1928, "text": "Output:As shown in the output image, the Team column is now having a list. The string was separated at the first occurrence of “t” and not on the later occurrence since the n parameter was set to 1 (Max 1 separation in a string). Example #2: Making separate columns from string" }, { "code": null, "e": 2505, "s": 2206, "text": "In this example, the Name column is separated at space (” “), and the expand parameter is set to True, which means it will return a data frame with all separated strings in different columns. The Data frame is then used to create new columns and the old Name column is dropped using .drop() method." 
}, { "code": "# importing pandas module import pandas as pd # reading csv file from url data = pd.read_csv(\"https://media.geeksforgeeks.org/wp-content/uploads/nba.csv\") # dropping null value columns to avoid errorsdata.dropna(inplace = True) # new data frame with split value columnsnew = data[\"Name\"].str.split(\" \", n = 1, expand = True) # making separate first name column from new data framedata[\"First Name\"]= new[0] # making separate last name column from new data framedata[\"Last Name\"]= new[1] # Dropping old Name columnsdata.drop(columns =[\"Name\"], inplace = True) # df displaydata", "e": 3089, "s": 2505, "text": null }, { "code": null, "e": 3269, "s": 3089, "text": "Output:As shown in the output image, a new data frame was returned by the split() function and it was used to create two new columns ( First Name and Last Name) in the data frame." }, { "code": null, "e": 3314, "s": 3269, "text": "New Data frame Data frame with Added columns" }, { "code": null, "e": 3327, "s": 3314, "text": "Akanksha_Rai" }, { "code": null, "e": 3348, "s": 3327, "text": "Python pandas-series" }, { "code": null, "e": 3377, "s": 3348, "text": "Python pandas-series-methods" }, { "code": null, "e": 3391, "s": 3377, "text": "Python-pandas" }, { "code": null, "e": 3398, "s": 3391, "text": "Python" }, { "code": null, "e": 3496, "s": 3398, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 3514, "s": 3496, "text": "Python Dictionary" }, { "code": null, "e": 3556, "s": 3514, "text": "Different ways to create Pandas Dataframe" }, { "code": null, "e": 3578, "s": 3556, "text": "Enumerate() in Python" }, { "code": null, "e": 3604, "s": 3578, "text": "Python String | replace()" }, { "code": null, "e": 3636, "s": 3604, "text": "How to Install PIP on Windows ?" }, { "code": null, "e": 3665, "s": 3636, "text": "*args and **kwargs in Python" }, { "code": null, "e": 3692, "s": 3665, "text": "Python Classes and Objects" }, { "code": null, "e": 3728, "s": 3692, "text": "Convert integer to string in Python" }, { "code": null, "e": 3749, "s": 3728, "text": "Python OOPs Concepts" } ]
File createNewFile() method in Java with Examples
12 May, 2020

The createNewFile() function is a part of the File class in Java. This function creates a new empty file. The function returns true if the abstract file path does not exist and a new file is created. It returns false if the file already exists.

Function signature:

public boolean createNewFile()

Syntax:

boolean var = file.createNewFile();

Parameters: This method does not accept any parameter.

Return Type: The function returns a boolean value representing whether the new file was created or not.

Exception: This method throws the following exceptions:

IOException: if an input/output error occurs
SecurityException: if write access to the file is denied

Below programs illustrate the use of the createNewFile() function.

Example 1: The file "F:\\program.txt" does not exist.

// Java program to demonstrate
// createNewFile() method of File Class

import java.io.*;

public class solution {
    public static void main(String args[])
    {
        try {
            // Get the file
            File f = new File("F:\\program.txt");

            // Create new file
            // if it does not exist
            if (f.createNewFile())
                System.out.println("File created");
            else
                System.out.println("File already exists");
        }
        catch (Exception e) {
            System.err.println(e);
        }
    }
}

Output:

File created

Example 2: The file "F:\\program1.txt" is an existing file in the F: directory.

// Java program to demonstrate
// createNewFile() method of File Class

import java.io.*;

public class solution {
    public static void main(String args[])
    {
        try {
            // Get the file
            File f = new File("F:\\program1.txt");

            // Create new file
            // if it does not exist
            if (f.createNewFile())
                System.out.println("File created");
            else
                System.out.println("File already exists");
        }
        catch (Exception e) {
            System.err.println(e);
        }
    }
}

Output:

File already exists

Note: The programs might not run in an online IDE. Please use an offline IDE and set the path of the file.
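For comparison, the same create-if-absent check can be sketched in Python with the standard pathlib module. Path.touch(exist_ok=False) raises FileExistsError instead of returning false, so the sketch below translates that into the same true/false behavior as createNewFile(); the path is a placeholder.

# Sketch of the createNewFile() pattern in Python using pathlib (stdlib).
from pathlib import Path

def create_new_file(path_str):
    try:
        # Create the empty file only if it does not already exist
        Path(path_str).touch(exist_ok=False)
        return True   # file did not exist: created
    except FileExistsError:
        return False  # file already existed

if __name__ == "__main__":
    # placeholder path for illustration
    print("File created" if create_new_file("program.txt")
          else "File already exists")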
[ { "code": null, "e": 28, "s": 0, "text": "\n12 May, 2020" }, { "code": null, "e": 272, "s": 28, "text": "The createNewFile() function is a part of File class in Java . This function creates new empty file. The function returns true if the abstract file path does not exist and a new file is created. It returns false if the filename already exists." }, { "code": null, "e": 292, "s": 272, "text": "Function signature:" }, { "code": null, "e": 323, "s": 292, "text": "public boolean createNewFile()" }, { "code": null, "e": 331, "s": 323, "text": "Syntax:" }, { "code": null, "e": 367, "s": 331, "text": "boolean var = file.createNewFile();" }, { "code": null, "e": 422, "s": 367, "text": "Parameters: This method does not accept any parameter." }, { "code": null, "e": 527, "s": 422, "text": "Return Type: The function returns boolean data type representing whether the new file is created or not." }, { "code": null, "e": 579, "s": 527, "text": "Exception: This method throws following exceptions:" }, { "code": null, "e": 622, "s": 579, "text": "IO Exception: if input output error occurs" }, { "code": null, "e": 684, "s": 622, "text": "Security Exception: if the write access to the file is denied" }, { "code": null, "e": 748, "s": 684, "text": "Below programs illustrates the use of createNewFile() function:" }, { "code": null, "e": 802, "s": 748, "text": "Example 1: The file “F:\\\\program1.txt” does not exist" }, { "code": "// Java program to demonstrate// createNewFile() method of File Class import java.io.*; public class solution { public static void main(String args[]) { try { // Get the file File f = new File(\"F:\\\\program.txt\"); // Create new file // if it does not exist if (f.createNewFile()) System.out.println(\"File created\"); else System.out.println(\"File already exists\"); } catch (Exception e) { System.err.println(e); } }}", "e": 1369, "s": 802, "text": null }, { "code": null, "e": 1377, "s": 1369, "text": "Output:" }, { "code": null, "e": 1390, "s": 1377, "text": "File created" }, { "code": null, "e": 1464, "s": 1390, "text": "Example 2: The file “F:\\\\program.txt” is a existing file in F: Directory." }, { "code": "// Java program to demonstrate// createNewFile() method of File Class import java.io.*; public class solution { public static void main(String args[]) { try { // Get the file File f = new File(\"F:\\\\program1.txt\"); // Create new file // if it does not exist if (f.createNewFile()) System.out.println(\"File created\"); else System.out.println(\"File already exists\"); } catch (Exception e) { System.err.println(e); } }}", "e": 2032, "s": 1464, "text": null }, { "code": null, "e": 2040, "s": 2032, "text": "Output:" }, { "code": null, "e": 2060, "s": 2040, "text": "File already exists" }, { "code": null, "e": 2167, "s": 2060, "text": "Note: The programs might not run in an online IDE. Please use an offline IDE and set the path of the file." }, { "code": null, "e": 2181, "s": 2167, "text": "vikas_vaibhav" }, { "code": null, "e": 2197, "s": 2181, "text": "Java-File Class" }, { "code": null, "e": 2216, "s": 2197, "text": "java-file-handling" }, { "code": null, "e": 2231, "s": 2216, "text": "Java-Functions" }, { "code": null, "e": 2247, "s": 2231, "text": "Java-IO package" }, { "code": null, "e": 2261, "s": 2247, "text": "Java Programs" }, { "code": null, "e": 2359, "s": 2261, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." 
}, { "code": null, "e": 2397, "s": 2359, "text": "Factory method design pattern in Java" }, { "code": null, "e": 2454, "s": 2397, "text": "Java Program to Remove Duplicate Elements From the Array" }, { "code": null, "e": 2483, "s": 2454, "text": "Iterate through List in Java" }, { "code": null, "e": 2564, "s": 2483, "text": "Java program to count the occurrence of each character in a string using Hashmap" }, { "code": null, "e": 2596, "s": 2564, "text": "How to Iterate HashMap in Java?" }, { "code": null, "e": 2648, "s": 2596, "text": "Remove first and last character of a string in Java" }, { "code": null, "e": 2692, "s": 2648, "text": "Program to print ASCII Value of a character" }, { "code": null, "e": 2727, "s": 2692, "text": "Traverse Through a HashMap in Java" }, { "code": null, "e": 2775, "s": 2727, "text": "Iterate Over the Characters of a String in Java" } ]
Upload and Download files from Google Drive storage using Python
26 Nov, 2020

In this article, we are going to see how we can download files from our Google Drive to our PC and upload files from our PC to Google Drive using its API in Python. It is a REST API that allows you to leverage Google Drive storage from within your app or program. So, let's go ahead and write a Python script to do that.

Requirements:

Python (2.6 or higher)
A Google account with Google Drive enabled
Google API client and Google OAuth libraries

Installation:

Install the required libraries by running this command:

pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib

Setup:

Now, to work with the Google Drive API, we have to set up our account and enable the Google Drive API. To set up your account, you can follow the steps given in the article. So, now we are ready to write the Python script. Please make sure the file credentials.json is in the same directory.

First of all, we will import the required libraries. Then we will define a class DriveAPI with a constructor and two functions for uploading and downloading files. Inside the constructor, we will check if the file 'token.pickle' exists. If it exists, that means we have access to the Google Drive storage and we don't need to ask for it again. We may have to refresh the token if it has been a long time since the token was used. If it doesn't exist or is invalid, the script will open a new tab in the browser and ask for access to Google Drive.

Once the access is granted, it will connect to the drive and fetch a list of files in the Google Drive storage for that account and print that list. Each item of the list contains an id and a name for that file in Google Drive.

Now, inside the FileDownload function, we will write the code to download a file. We need two things to do this: the id of that file in Drive and the name you want it to be saved as. We make a request to the Drive service to get the file with the given id, and use a BytesIO object to write the file to memory. The MediaIoBaseDownload class receives the file from the server and writes it to memory through the BytesIO object. Since the file size may vary from a few bytes to very large, we prefer downloading the file in chunks; we can also pass a chunk size if we don't want the default one. We run a while loop, and in each iteration of this loop we download one chunk of the file. Once it's done, we write the file from memory to our hard drive storage. We wrap this whole process in a try-except block so that if something goes wrong, our script doesn't throw an error.

To upload a file, we will use the FileUpload function. We only need the file path to upload a file. From the file path, we can easily extract the file name and find its mime-type using the mimetypes module. We create a dictionary with the key "name" containing the file name. Then we use the MediaFileUpload class to generate the media file, create a new file in the drive with the create function, and save our file data to that newly created file.

Implementation:

# import the required libraries
from __future__ import print_function
import pickle
import os.path
import io
import shutil
from mimetypes import MimeTypes
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload

class DriveAPI:
    global SCOPES

    # Define the scopes
    SCOPES = ['https://www.googleapis.com/auth/drive']

    def __init__(self):

        # Variable self.creds will
        # store the user access token.
        # If no valid token found
        # we will create one.
        self.creds = None

        # The file token.pickle stores the
        # user's access and refresh tokens. It is
        # created automatically when the authorization
        # flow completes for the first time.

        # Check if file token.pickle exists
        if os.path.exists('token.pickle'):

            # Read the token from the file and
            # store it in the variable self.creds
            with open('token.pickle', 'rb') as token:
                self.creds = pickle.load(token)

        # If no valid credentials are available,
        # request the user to log in.
        if not self.creds or not self.creds.valid:

            # If token is expired, it will be refreshed,
            # else, we will request a new one.
            if self.creds and self.creds.expired and self.creds.refresh_token:
                self.creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    'credentials.json', SCOPES)
                self.creds = flow.run_local_server(port=0)

            # Save the access token in token.pickle
            # file for future usage
            with open('token.pickle', 'wb') as token:
                pickle.dump(self.creds, token)

        # Connect to the API service
        self.service = build('drive', 'v3', credentials=self.creds)

        # request a list of first N files or
        # folders with name and id from the API.
        results = self.service.files().list(
            pageSize=100, fields="files(id, name)").execute()
        items = results.get('files', [])

        # print a list of files
        print("Here's a list of files: \n")
        print(*items, sep="\n", end="\n\n")

    def FileDownload(self, file_id, file_name):
        request = self.service.files().get_media(fileId=file_id)
        fh = io.BytesIO()

        # Initialise a downloader object to download the file
        downloader = MediaIoBaseDownload(fh, request, chunksize=204800)
        done = False

        try:
            # Download the data in chunks
            while not done:
                status, done = downloader.next_chunk()

            fh.seek(0)

            # Write the received data to the file
            with open(file_name, 'wb') as f:
                shutil.copyfileobj(fh, f)

            print("File Downloaded")
            # Return True if file Downloaded successfully
            return True
        except:
            # Return False if something went wrong
            print("Something went wrong.")
            return False

    def FileUpload(self, filepath):

        # Extract the file name out of the file path
        name = filepath.split('/')[-1]

        # Find the MimeType of the file
        mimetype = MimeTypes().guess_type(name)[0]

        # create file metadata
        file_metadata = {'name': name}

        try:
            media = MediaFileUpload(filepath, mimetype=mimetype)

            # Create a new file in the Drive storage
            file = self.service.files().create(
                body=file_metadata, media_body=media, fields='id').execute()

            print("File Uploaded.")
        except Exception:
            # Report failure if the file could not be uploaded.
            raise RuntimeError("Can't Upload File.")

if __name__ == "__main__":
    obj = DriveAPI()
    i = int(input(
        "Enter your choice: 1 - Download file, 2 - Upload file, 3 - Exit.\n"))

    if i == 1:
        f_id = input("Enter file id: ")
        f_name = input("Enter file name: ")
        obj.FileDownload(f_id, f_name)

    elif i == 2:
        f_path = input("Enter full file path: ")
        obj.FileUpload(f_path)

    else:
        exit()

Output:

This will attempt to open a new window in your default browser. If this fails, copy the URL from the console and manually open it in your browser. Now, log in to your Google account if you aren't already logged in. If there are multiple accounts, you will be asked to choose one of them. Then, click on the Allow button to proceed. After the authentication has been completed, your browser will display a message saying "The authentication flow has been completed. You may close this window." Now, the program will print a list of files in your Google Drive and ask you whether you want to upload or download a file. A short follow-up sketch for searching Drive by file name appears below.
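A common next step is looking up a file's id by name instead of scrolling through the printed list. Below is a minimal sketch using the Drive v3 files().list "q" query filter; it assumes an authenticated service object built exactly as in the constructor above, and the search fragment is a placeholder.

# Minimal sketch: find Drive file ids by (partial) name with the v3 "q" filter.
# Assumes `service` was built as in DriveAPI.__init__ above.
def search_by_name(service, fragment):
    # "name contains" and "trashed = false" are standard Drive query terms
    query = "name contains '%s' and trashed = false" % fragment
    results = service.files().list(
        q=query, pageSize=10, fields="files(id, name)").execute()
    for item in results.get('files', []):
        print(item['name'], '->', item['id'])

# Example usage (after obj = DriveAPI(); 'report' is a placeholder fragment):
# search_by_name(obj.service, 'report')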
[ { "code": null, "e": 52, "s": 24, "text": "\n26 Nov, 2020" }, { "code": null, "e": 373, "s": 52, "text": "In this article, we are going to see how can we download files from our Google Drive to our PC and upload files from our PC to Google Drive using its API in Python. It is a REST API that allows you to leverage Google Drive storage from within your app or program. So, let’s go ahead and write a Python script to do that." }, { "code": null, "e": 387, "s": 373, "text": "Requirements:" }, { "code": null, "e": 410, "s": 387, "text": "Python (2.6 or higher)" }, { "code": null, "e": 453, "s": 410, "text": "A Google account with Google Drive enabled" }, { "code": null, "e": 498, "s": 453, "text": "Google API client and Google OAuth libraries" }, { "code": null, "e": 512, "s": 498, "text": "Installation:" }, { "code": null, "e": 568, "s": 512, "text": "Install the required libraries by running this command:" }, { "code": null, "e": 656, "s": 568, "text": "pip install –upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib" }, { "code": null, "e": 663, "s": 656, "text": "Setup:" }, { "code": null, "e": 758, "s": 663, "text": "Now, to work with Google Drive API, we have to set up our account and enable Google Drive API." }, { "code": null, "e": 829, "s": 758, "text": "To set up your account, you can follow the steps given in the article." }, { "code": null, "e": 878, "s": 829, "text": "So, now we are ready to write the Python script." }, { "code": null, "e": 947, "s": 878, "text": "Please make sure the file credentials.json is in the same directory." }, { "code": null, "e": 1506, "s": 947, "text": "First of all, we will import the required libraries. Then we will define a class DriveAPI with a constructor and two functions for uploading and downloading files. Inside the constructor, we will check if the file ‘token.pickle’ exists or not. If it exists, that means we have the access to the Google Drive storage and we don’t need to ask for it again. We may have to refresh the token if it’s been a long time since the token was used. if it doesn’t exist or is invalid, the script will open up a new tab in the browser and ask for access to Google Drive." }, { "code": null, "e": 1732, "s": 1506, "text": "Once the access is granted, it will connect to the drive and fetch a list of files in the Google Drive storage for that account and print that list. Each item of the list contains an id and name for that file in Google Drive." }, { "code": null, "e": 2708, "s": 1732, "text": "Now, Inside the FileDownload function, we will write the code to download a file. We need two things to do this. First is the id of that file in Drive and second is the name you want it to be saved as. Now, we will make a request to the Drive service to get us the file with the given id. Then, we will use a BytesIO object which will write the file to the memory. We will use the MediaIoBaseDownload class to receive the file from the server and write it in memory with the BytesIO object. Since the file size may vary from a few bytes to very large, we will prefer downloading the file in Chunks. We can also pass the chunk size if we don’t want to use the default one. Now, we will run a while loop and in each iteration of this loop, we will download a chunk of the file. Once it’s done, we will write the file from memory to our Hard Drive Storage. We will wrap this whole process in a try-except block so that if something goes wrong, our script doesn’t throw an error." 
}, { "code": null, "e": 3196, "s": 2708, "text": "To Upload a File, we will use the FileUpload function. We only need the file path to upload a file. From the file path, we can easily extract the file name and find its mime-type using the mimetypes module. We will create a dictionary with the key “name” which contains the file name. Now, we will use the MediaFileUpload class to generate the media file, and then we will create a new file in the drive with the create function and it will save our file data to that newly created file." }, { "code": null, "e": 3212, "s": 3196, "text": "Implementation:" }, { "code": null, "e": 3220, "s": 3212, "text": "Python3" }, { "code": "# import the required librariesfrom __future__ import print_functionimport pickleimport os.pathimport ioimport shutilimport requestsfrom mimetypes import MimeTypesfrom googleapiclient.discovery import buildfrom google_auth_oauthlib.flow import InstalledAppFlowfrom google.auth.transport.requests import Requestfrom googleapiclient.http import MediaIoBaseDownload, MediaFileUpload class DriveAPI: global SCOPES # Define the scopes SCOPES = ['https://www.googleapis.com/auth/drive'] def __init__(self): # Variable self.creds will # store the user access token. # If no valid token found # we will create one. self.creds = None # The file token.pickle stores the # user's access and refresh tokens. It is # created automatically when the authorization # flow completes for the first time. # Check if file token.pickle exists if os.path.exists('token.pickle'): # Read the token from the file and # store it in the variable self.creds with open('token.pickle', 'rb') as token: self.creds = pickle.load(token) # If no valid credentials are available, # request the user to log in. if not self.creds or not self.creds.valid: # If token is expired, it will be refreshed, # else, we will request a new one. if self.creds and self.creds.expired and self.creds.refresh_token: self.creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) self.creds = flow.run_local_server(port=0) # Save the access token in token.pickle # file for future usage with open('token.pickle', 'wb') as token: pickle.dump(self.creds, token) # Connect to the API service self.service = build('drive', 'v3', credentials=self.creds) # request a list of first N files or # folders with name and id from the API. 
results = self.service.files().list( pageSize=100, fields=\"files(id, name)\").execute() items = results.get('files', []) # print a list of files print(\"Here's a list of files: \\n\") print(*items, sep=\"\\n\", end=\"\\n\\n\") def FileDownload(self, file_id, file_name): request = self.service.files().get_media(fileId=file_id) fh = io.BytesIO() # Initialise a downloader object to download the file downloader = MediaIoBaseDownload(fh, request, chunksize=204800) done = False try: # Download the data in chunks while not done: status, done = downloader.next_chunk() fh.seek(0) # Write the received data to the file with open(file_name, 'wb') as f: shutil.copyfileobj(fh, f) print(\"File Downloaded\") # Return True if file Downloaded successfully return True except: # Return False if something went wrong print(\"Something went wrong.\") return False def FileUpload(self, filepath): # Extract the file name out of the file path name = filepath.split('/')[-1] # Find the MimeType of the file mimetype = MimeTypes().guess_type(name)[0] # create file metadata file_metadata = {'name': name} try: media = MediaFileUpload(filepath, mimetype=mimetype) # Create a new file in the Drive storage file = self.service.files().create( body=file_metadata, media_body=media, fields='id').execute() print(\"File Uploaded.\") except: # Raise UploadError if file is not uploaded. raise UploadError(\"Can't Upload File.\") if __name__ == \"__main__\": obj = DriveAPI() i = int(input(\"Enter your choice: \"1 - Download file, 2- Upload File, 3- Exit.\\n\")) if i == 1: f_id = input(\"Enter file id: \") f_name = input(\"Enter file name: \") obj.FileDownload(f_id, f_name) elif i == 2: f_path = input(\"Enter full file path: \") obj.FileUpload(f_path) else: exit()", "e": 7655, "s": 3220, "text": null }, { "code": null, "e": 7663, "s": 7655, "text": "Output:" }, { "code": null, "e": 8275, "s": 7663, "text": "This will attempt to open a new window in your default browser. If this fails, copy the URL from the console and manually open it in your browser. Now, Log in to your Google account if you aren’t already logged in. If there are multiple accounts, you will be asked to choose one of them. Then, click on the Allow button to proceed. After the authentication has been completed, your browser will display a message saying “The authentication flow has been completed. You may close this window.” Now, the program will print a list of files in your Google drive and ask you if you want to Upload or Download a file." }, { "code": null, "e": 8290, "s": 8275, "text": "python-utility" }, { "code": null, "e": 8314, "s": 8290, "text": "Technical Scripter 2020" }, { "code": null, "e": 8321, "s": 8314, "text": "Python" }, { "code": null, "e": 8340, "s": 8321, "text": "Technical Scripter" }, { "code": null, "e": 8438, "s": 8340, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 8470, "s": 8438, "text": "How to Install PIP on Windows ?" }, { "code": null, "e": 8497, "s": 8470, "text": "Python Classes and Objects" }, { "code": null, "e": 8528, "s": 8497, "text": "Python | os.path.join() method" }, { "code": null, "e": 8551, "s": 8528, "text": "Introduction To PYTHON" }, { "code": null, "e": 8572, "s": 8551, "text": "Python OOPs Concepts" }, { "code": null, "e": 8628, "s": 8572, "text": "How to drop one or multiple columns in Pandas Dataframe" }, { "code": null, "e": 8670, "s": 8628, "text": "How To Convert Python Dictionary To JSON?" 
}, { "code": null, "e": 8712, "s": 8670, "text": "Check if element exists in list in Python" }, { "code": null, "e": 8751, "s": 8712, "text": "Python | Get unique values from a list" } ]
Gerrit - Make & Commit Your Change
When you modify the code in the local file system, you can check for the changes within the directory using the following command.

$ git diff

In the project directory, we will make some changes in the file called Example/Example.hooks.php and run the above command. We will get the result as shown in the following screenshot.

You can check the status of the changes made to the files or the directory using the following command.

$ git status

The above command shows which changes have been staged, which have not, and which files are not tracked by Git.

Next, you can add the changes in the working directory and stage the file for the next commit using the following command.

$ git add Example/Example.hooks.php

After adding the file, run the git status command again to review the changes added to the staging area, as shown in the following screenshot.

You can see the difference between the index and your last commit, that is, what contents have been staged, using the following command.

$ git diff --cached

You can record the staged changes in the local repository using the following command.

$ git commit

When you run the above command, it will ask you to add a commit message for your changes. This message will be seen by other people when you push the commit to another repository. Add the commit message and run the git commit command again, which will display the commit message as shown in the following screenshot. A scripted version of this stage-and-commit flow is sketched below.
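For repetitive changes, the same stage-and-commit flow can be scripted. Below is a minimal sketch using Python's standard subprocess module; the file path and commit message are placeholders.

# Minimal sketch: scripting the stage-and-commit flow with Python's stdlib.
import subprocess

def stage_and_commit(path, message):
    # Show a summary of unstaged changes (parallels `git diff`)
    subprocess.run(["git", "diff", "--stat", path], check=True)

    # Stage the file (`git add`)
    subprocess.run(["git", "add", path], check=True)

    # Commit with an inline message instead of opening an editor
    subprocess.run(["git", "commit", "-m", message], check=True)

# Placeholder path and message for illustration
stage_and_commit("Example/Example.hooks.php", "Update Example hooks")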
[ { "code": null, "e": 2369, "s": 2238, "text": "When you modify the code in the local file system, you can check for the changes within the directory using the following command." }, { "code": null, "e": 2381, "s": 2369, "text": "$ git diff\n" }, { "code": null, "e": 2568, "s": 2381, "text": "In the project directory, we will modify some changes in the file called Example/Example.hooks.php and run the above command. We will get the result as shown in the following screenshot." }, { "code": null, "e": 2658, "s": 2568, "text": "You can check the changes made to the files or the directory using the following command." }, { "code": null, "e": 2672, "s": 2658, "text": "$ git status\n" }, { "code": null, "e": 2792, "s": 2672, "text": "The above command allows to see which changes have been staged, which have not, and which files are not tracked by Git." }, { "code": null, "e": 2911, "s": 2792, "text": "Next, you can add the changes in the working directory and update the file in the next commit using following command." }, { "code": null, "e": 2948, "s": 2911, "text": "$ git add Example/Example.hooks.php\n" }, { "code": null, "e": 3090, "s": 2948, "text": "After adding the file, again run the git status command to review the changes added to the staging area as shown in the following screenshot." }, { "code": null, "e": 3222, "s": 3090, "text": "You can see the difference between the index and your last commit, and what contents have been staged, using the following command." }, { "code": null, "e": 3243, "s": 3222, "text": "$ git diff --cached\n" }, { "code": null, "e": 3347, "s": 3243, "text": "You can push the changes to the remote directory from the local repository using the following command." }, { "code": null, "e": 3361, "s": 3347, "text": "$ git commit\n" }, { "code": null, "e": 3541, "s": 3361, "text": "When you run the above command, it will ask to add the commit message for your changes. This message will be seen by other people when you push the commit to the other repository." }, { "code": null, "e": 3681, "s": 3541, "text": "Add the commit message and run the command again as git commit, which will display the commit message as shown in the following screenshot." }, { "code": null, "e": 3688, "s": 3681, "text": " Print" }, { "code": null, "e": 3699, "s": 3688, "text": " Add Notes" } ]
Apache Derby - Drop Table
The DROP TABLE statement is used to remove an existing table, including all its triggers, constraints, and permissions.

Following is the syntax of the DROP TABLE statement.

ij> DROP TABLE table_name;

Suppose you have a table named Student in the database. The following SQL statement deletes the table named Student.

ij> DROP TABLE Student;
0 rows inserted/updated/deleted

Since we have removed the table, if we try to describe it, we will get an error as follows:

ij> DESCRIBE Student;
IJ ERROR: No table exists with the name STUDENT

This section teaches you how to drop a table in an Apache Derby database using a JDBC application.

If you want to request the Derby network server using the network client, make sure that the server is up and running. The class name for the network client driver is org.apache.derby.jdbc.ClientDriver and the URL is jdbc:derby://localhost:1527/DATABASE_NAME;create=true;user=USER_NAME;password=PASSWORD

Follow the steps given below to drop a table in Apache Derby. (A comparable flow in Python is sketched after the JDBC example.)

Step 1: Register the driver. To communicate with the database, first of all, you need to register the driver. The forName() method of the class Class accepts a String value representing a class name and loads it into memory, which automatically registers it. Register the driver using this method.

Step 2: Get a connection. In general, the first thing we do to communicate with the database is to connect to it. The Connection class represents the physical connection with a database server. You can create a connection object by invoking the getConnection() method of the DriverManager class. Create a connection using this method.

Step 3: Create a statement object. You need to create a Statement, PreparedStatement or CallableStatement object to send SQL statements to the database. You can create these using the methods createStatement(), prepareStatement() and prepareCall() respectively. Create either of these objects using the appropriate method.

Step 4: Execute the statement. After creating a statement, you need to execute it. The Statement class provides various methods to execute a query: the execute() method executes a statement that may return more than one result set, the executeUpdate() method executes queries like INSERT, UPDATE and DELETE, and the executeQuery() method executes queries that return data. Use the appropriate method to execute the statement created previously.

The following JDBC example demonstrates how to drop a table in Apache Derby using a JDBC program. Here, we are connecting to a database named sampleDB (which will be created if it does not exist) using the embedded driver.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropTable {
    public static void main(String args[]) throws Exception {
        //Registering the driver
        Class.forName("org.apache.derby.jdbc.EmbeddedDriver");

        //Getting the Connection object
        String URL = "jdbc:derby:sampleDB;create=true";
        Connection conn = DriverManager.getConnection(URL);

        //Creating the Statement object
        Statement stmt = conn.createStatement();

        //Executing the query
        String query = "DROP TABLE Employees";
        stmt.execute(query);
        System.out.println("Table dropped");
    }
}

On executing the above program, you will get the following output:

Table dropped
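The connect, create-statement, and execute steps above take the same shape in most database APIs. For comparison, here is the equivalent DROP TABLE flow using Python's built-in sqlite3 module; it is shown only to illustrate the pattern and talks to SQLite, not Derby.

# Equivalent connect/execute/drop flow with Python's built-in sqlite3 module,
# illustrating the same pattern as the JDBC steps above (against SQLite,
# not Derby).
import sqlite3

# Connect (creates sample.db if it does not exist) - parallels getConnection()
conn = sqlite3.connect("sample.db")

# The cursor plays the role of the JDBC Statement object
cur = conn.cursor()

# Create a throwaway table so the drop has something to remove
cur.execute("CREATE TABLE IF NOT EXISTS Employees (id INTEGER, name TEXT)")

# Execute the DROP TABLE statement - parallels stmt.execute(query)
cur.execute("DROP TABLE Employees")
print("Table dropped")

conn.close()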
[ { "code": null, "e": 2295, "s": 2180, "text": "The DROP TABLE statement is used to remove an existing table including all its triggers, constraints, permissions." }, { "code": null, "e": 2348, "s": 2295, "text": "Following is the Syntax of the DROP TABLE statement." }, { "code": null, "e": 2376, "s": 2348, "text": "ij> DROP TABLE table_name;\n" }, { "code": null, "e": 2491, "s": 2376, "text": "Suppose you have a table named Student in the database. The following SQL statement deletes a table named Student." }, { "code": null, "e": 2548, "s": 2491, "text": "ij> DROP TABLE Student;\n0 rows inserted/updated/deleted\n" }, { "code": null, "e": 2638, "s": 2548, "text": "Since we have removed the table if we try to describe it, we will get an error as follows" }, { "code": null, "e": 2709, "s": 2638, "text": "ij> DESCRIBE Student;\nIJ ERROR: No table exists with the name STUDENT\n" }, { "code": null, "e": 2803, "s": 2709, "text": "This section teaches you how to drop a table in Apache Derby database using JDBC application." }, { "code": null, "e": 3105, "s": 2803, "text": "If you want to request the Derby network server using network client, make sure that the server is up and running. The class name for the Network client driver is org.apache.derby.jdbc.ClientDriver and the URL is jdbc:derby://localhost:1527/DATABASE_NAME;create=true;user=USER_NAME;passw\nord=PASSWORD\"" }, { "code": null, "e": 3166, "s": 3105, "text": "Follow the steps given below to drop a table in Apache Derby" }, { "code": null, "e": 3436, "s": 3166, "text": "To communicate with the database, first of all, you need to register the driver. The forName() method of the class Class accepts a String value representing a class name loads it in to the memory, which automatically registers it. Register the driver using this method." }, { "code": null, "e": 3744, "s": 3436, "text": "In general, the first step we do to communicate to the database is to connect with it. The Connection class represents the physical connection with a database server. You can create a connection object by invoking the getConnection() method of the DriverManager class. Create a connection using this method." }, { "code": null, "e": 4037, "s": 3744, "text": "You need to create a Statement or PreparedStatement or, CallableStatement objects to send SQL statements to the database. You can create these using the methods createStatement(), prepareStatement() and, prepareCall() respectively. Create either of these objects using the appropriate method." }, { "code": null, "e": 4448, "s": 4037, "text": "After creating a statement, you need to execute it. The Statement class provides various methods to execute a query like the execute() method to execute a statement that returns more than one result set. The executeUpdate() method execute queries like INSERT, UPDATE, DELETE. The executeQuery() method to results that returns data etc. Use either of these methods and execute the statement created previously.\n" }, { "code": null, "e": 4656, "s": 4448, "text": "Following JDBC example demonstrates how to drop a table in Apache Derby using JDBC program. 
Here, we are connecting to a database named sampleDB (will create if it does not exist) using the embedded driver.\n" }, { "code": null, "e": 5308, "s": 4656, "text": "import java.sql.Connection;\nimport java.sql.DriverManager;\nimport java.sql.Statement;\npublic class DropTable {\n public static void main(String args[]) throws Exception {\n //Registering the driver\n Class.forName(\"org.apache.derby.jdbc.EmbeddedDriver\");\n\n //Getting the Connection object\n String URL = \"jdbc:derby:sampleDB;create=true\";\n Connection conn = DriverManager.getConnection(URL);\n\n //Creating the Statement object\n Statement stmt = conn.createStatement();\n\n //Executing the query\n String query = \"DROP TABLE Employees\";\n stmt.execute(query);\n System.out.println(\"Table dropped\");\n }\n}" }, { "code": null, "e": 5376, "s": 5308, "text": "On executing the above program, you will get the following output −" }, { "code": null, "e": 5391, "s": 5376, "text": "Table dropped\n" }, { "code": null, "e": 5426, "s": 5391, "text": "\n 46 Lectures \n 3.5 hours \n" }, { "code": null, "e": 5445, "s": 5426, "text": " Arnab Chakraborty" }, { "code": null, "e": 5480, "s": 5445, "text": "\n 23 Lectures \n 1.5 hours \n" }, { "code": null, "e": 5501, "s": 5480, "text": " Mukund Kumar Mishra" }, { "code": null, "e": 5534, "s": 5501, "text": "\n 16 Lectures \n 1 hours \n" }, { "code": null, "e": 5547, "s": 5534, "text": " Nilay Mehta" }, { "code": null, "e": 5582, "s": 5547, "text": "\n 52 Lectures \n 1.5 hours \n" }, { "code": null, "e": 5600, "s": 5582, "text": " Bigdata Engineer" }, { "code": null, "e": 5633, "s": 5600, "text": "\n 14 Lectures \n 1 hours \n" }, { "code": null, "e": 5651, "s": 5633, "text": " Bigdata Engineer" }, { "code": null, "e": 5684, "s": 5651, "text": "\n 23 Lectures \n 1 hours \n" }, { "code": null, "e": 5702, "s": 5684, "text": " Bigdata Engineer" }, { "code": null, "e": 5709, "s": 5702, "text": " Print" }, { "code": null, "e": 5720, "s": 5709, "text": " Add Notes" } ]
ConcurrentLinkedDeque in Java with Examples - GeeksforGeeks
16 Sep, 2020

The ConcurrentLinkedDeque class in Java is a part of the Java Collection Framework; it extends the AbstractCollection class and implements the Deque interface. It belongs to the java.util.concurrent package. It is used to implement a thread-safe Deque backed by linked nodes.

Features of ConcurrentLinkedDeque

Iterators and spliterators are weakly consistent.

Concurrent insertion, removal, and access operations execute safely across multiple threads.

It does not permit null elements.

The size() method is not implemented in constant time; because of the asynchronous nature of these deques, determining the current number of elements requires a traversal of the elements.

Class Hierarchy:

Declaration:

public class ConcurrentLinkedDeque<E>
    extends AbstractCollection<E>
    implements Deque<E>, Serializable

Here, E is the type of elements maintained by this collection.

It implements the Serializable, Iterable<E>, Collection<E>, Deque<E> and Queue<E> interfaces.

1. ConcurrentLinkedDeque(): This constructor is used to construct an empty deque.

ConcurrentLinkedDeque<E> cld = new ConcurrentLinkedDeque<E>();

2. ConcurrentLinkedDeque(Collection<E> c): This constructor is used to construct a deque with the elements of the Collection passed as the parameter.

ConcurrentLinkedDeque<E> cld = new ConcurrentLinkedDeque<E>(Collection<E> c);

Below is the sample program to illustrate ConcurrentLinkedDeque in Java:

Java

// Java Program to demonstrate ConcurrentLinkedDeque

import java.util.concurrent.*;

class ConcurrentLinkedDequeDemo {
    public static void main(String[] args)
    {
        // Create a ConcurrentLinkedDeque
        // using ConcurrentLinkedDeque()
        // constructor
        ConcurrentLinkedDeque<Integer> cld
            = new ConcurrentLinkedDeque<Integer>();

        // add element to the front
        // using addFirst() method
        cld.addFirst(12);
        cld.addFirst(70);
        cld.addFirst(1009);
        cld.addFirst(475);

        // Displaying the existing ConcurrentLinkedDeque
        System.out.println("ConcurrentLinkedDeque: " + cld);

        // Create a ConcurrentLinkedDeque
        // using ConcurrentLinkedDeque(Collection c)
        // constructor
        ConcurrentLinkedDeque<Integer> cld1
            = new ConcurrentLinkedDeque<Integer>(cld);

        // Displaying the existing ConcurrentLinkedDeque
        System.out.println("ConcurrentLinkedDeque1: " + cld1);
    }
}

Output:

ConcurrentLinkedDeque: [475, 1009, 70, 12]
ConcurrentLinkedDeque1: [475, 1009, 70, 12]

Example:

Java

// Java code to illustrate
// methods of ConcurrentLinkedDeque

import java.util.concurrent.*;

class ConcurrentLinkedDequeDemo {
    public static void main(String[] args)
    {
        // Create a ConcurrentLinkedDeque
        // using ConcurrentLinkedDeque() constructor
        ConcurrentLinkedDeque<Integer> cld
            = new ConcurrentLinkedDeque<Integer>();

        // add element to the front
        // using addFirst() method
        cld.addFirst(12);
        cld.addFirst(70);
        cld.addFirst(1009);
        cld.addFirst(475);

        // Displaying the existing ConcurrentLinkedDeque
        System.out.println("ConcurrentLinkedDeque: " + cld);

        // Displaying the Last element
        // using getLast() method
        System.out.println("The Last element is: " + cld.getLast());

        // Displaying the first element
        // using peekFirst() method
        System.out.println("First Element is: " + cld.peekFirst());

        // Remove the Last element
        // using removeLast() method
        cld.removeLast();

        // Displaying the existing ConcurrentLinkedDeque
        System.out.println("ConcurrentLinkedDeque: " + cld);
    }
}

Output:

ConcurrentLinkedDeque: [475, 1009, 70, 12]
The Last element is: 12
First Element is: 475
ConcurrentLinkedDeque: [475, 1009, 70]
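The two demos above are single-threaded. Since the whole point of this class is safe concurrent access, here is a minimal two-thread sketch; the thread roles and element counts are illustrative and not part of the original examples.

Java

import java.util.concurrent.ConcurrentLinkedDeque;

public class ConcurrentAccessDemo {
    public static void main(String[] args) throws InterruptedException
    {
        ConcurrentLinkedDeque<Integer> deque
            = new ConcurrentLinkedDeque<>();

        // One thread pushes to the head, another to the tail;
        // no external synchronization is required
        Thread head = new Thread(() -> {
            for (int i = 0; i < 1000; i++) deque.addFirst(i);
        });
        Thread tail = new Thread(() -> {
            for (int i = 0; i < 1000; i++) deque.addLast(i);
        });

        head.start();
        tail.start();
        head.join();
        tail.join();

        // Remember: size() traverses the deque, so it is O(n), not O(1)
        System.out.println("Elements added: " + deque.size()); // 2000
    }
}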
1. Adding Elements

To add an element or Collection of elements, ConcurrentLinkedDeque provides methods like add(E e), addAll(Collection<? extends E> c), addFirst(E e), addLast(E e). The example below explains these methods.

Java

// Java Program Demonstrate adding
// elements to the ConcurrentLinkedDeque

import java.util.concurrent.*;

class AddingElements {
    public static void main(String[] args)
    {
        // create instance using ConcurrentLinkedDeque
        ConcurrentLinkedDeque<Integer> cld1
            = new ConcurrentLinkedDeque<Integer>();

        // Add element to the tail using
        // add or addLast methods
        cld1.add(12);
        cld1.add(110);

        // Add element to the head
        // using addFirst method
        cld1.addFirst(55);

        // Displaying the existing ConcurrentLinkedDeque
        System.out.println("Initial Elements in "
                           + "the LinkedDeque cld : " + cld1);

        // create instance using ConcurrentLinkedDeque
        ConcurrentLinkedDeque<Integer> cld2
            = new ConcurrentLinkedDeque<Integer>();

        // Add elements of cld1 to the
        // cld2 using addAll method
        cld2.addAll(cld1);

        // Displaying the modified ConcurrentLinkedDeque
        System.out.println("Initial Elements in "
                           + "the LinkedDeque cld2: " + cld2);
    }
}

Output:

Initial Elements in the LinkedDeque cld : [55, 12, 110]
Initial Elements in the LinkedDeque cld2: [55, 12, 110]

2. Remove Elements

To remove an element, ConcurrentLinkedDeque provides methods like remove(), remove(Object o), removeFirst(), removeLast(), etc. These methods are explained in the below example.

Java

// Java Program to demonstrate removing
// elements of ConcurrentLinkedDeque

import java.util.concurrent.*;

class RemovingElements {
    public static void main(String[] args)
    {
        // Create a ConcurrentLinkedDeque
        // using ConcurrentLinkedDeque() constructor
        ConcurrentLinkedDeque<Integer> cld
            = new ConcurrentLinkedDeque<Integer>();

        // Add elements using add() method
        cld.add(40);
        cld.add(50);
        cld.add(60);
        cld.add(70);
        cld.add(80);

        // Displaying the existing LinkedDeque
        System.out.println(
            "Existing ConcurrentLinkedDeque: " + cld);

        // remove method removes the first
        // element of ConcurrentLinkedDeque
        // using remove() method
        System.out.println("Element removed: "
                           + cld.remove());

        // Remove 60 using remove(Object)
        System.out.println("60 removed: " + cld.remove(60));

        // Displaying the existing ConcurrentLinkedDeque
        System.out.println(
            "Modified ConcurrentLinkedDeque: " + cld);

        // Remove the first element
        cld.removeFirst();

        // Remove the Last element
        cld.removeLast();

        // Displaying the existing ConcurrentLinkedDeque
        System.out.println(
            "Modified ConcurrentLinkedDeque: " + cld);
    }
}

Output:

Existing ConcurrentLinkedDeque: [40, 50, 60, 70, 80]
Element removed: 40
60 removed: true
Modified ConcurrentLinkedDeque: [50, 70, 80]
Modified ConcurrentLinkedDeque: [70]
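For deques that may hold duplicates, the Deque interface additionally offers removeFirstOccurrence(Object) and removeLastOccurrence(Object); a small sketch with illustrative values:

Java

import java.util.concurrent.ConcurrentLinkedDeque;

public class OccurrenceDemo {
    public static void main(String[] args)
    {
        ConcurrentLinkedDeque<Integer> cld
            = new ConcurrentLinkedDeque<>();
        cld.add(10);
        cld.add(20);
        cld.add(10);
        cld.add(30);
        cld.add(10);

        // Removes the 10 closest to the head: [20, 10, 30, 10]
        cld.removeFirstOccurrence(10);

        // Removes the 10 closest to the tail: [20, 10, 30]
        cld.removeLastOccurrence(10);

        System.out.println(cld); // [20, 10, 30]
    }
}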
3. Iterating Elements

We can iterate the ConcurrentLinkedDeque using the iterator() or descendingIterator() methods. The below code explains both these methods.

Java

// Java code to illustrate iterating
// elements of ConcurrentLinkedDeque

import java.util.concurrent.*;
import java.util.*;

public class IteratingConcurrentLinkedDeque {
    public static void main(String args[])
    {
        // Creating an empty ConcurrentLinkedDeque
        ConcurrentLinkedDeque<String> deque
            = new ConcurrentLinkedDeque<String>();

        // Use add() method to add elements
        // into the ConcurrentLinkedDeque
        deque.add("Welcome");
        deque.add("To");
        deque.add("Geeks");
        deque.add("4");
        deque.add("Geeks");

        // Displaying the ConcurrentLinkedDeque
        System.out.println("ConcurrentLinkedDeque: " + deque);

        // Creating an iterator
        Iterator fitr = deque.iterator();

        // Displaying the values
        // after iterating through the ConcurrentLinkedDeque
        System.out.println("The iterator values are: ");
        while (fitr.hasNext()) {
            System.out.println(fitr.next());
        }

        // Creating a desc_iterator
        Iterator ditr = deque.descendingIterator();

        // Displaying the values after iterating
        // through the ConcurrentLinkedDeque
        // in reverse order
        System.out.println("The iterator values are: ");
        while (ditr.hasNext()) {
            System.out.println(ditr.next());
        }
    }
}

Output:

ConcurrentLinkedDeque: [Welcome, To, Geeks, 4, Geeks]
The iterator values are: 
Welcome
To
Geeks
4
Geeks
The iterator values are: 
Geeks
4
Geeks
To
Welcome

4. Accessing Elements

To access the elements of ConcurrentLinkedDeque, it provides methods like getFirst(), getLast() and element(). The below example explains these methods.

Java

// Java Program to Demonstrate accessing
// elements of ConcurrentLinkedDeque

import java.util.concurrent.*;
import java.util.*;

class Accessing {
    public static void main(String[] args)
    {
        // Creating an empty ConcurrentLinkedDeque
        ConcurrentLinkedDeque<String> cld
            = new ConcurrentLinkedDeque<String>();

        // Add elements into the ConcurrentLinkedDeque
        cld.add("Welcome");
        cld.add("To");
        cld.add("Geeks");
        cld.add("4");
        cld.add("Geeks");

        // Displaying the ConcurrentLinkedDeque
        System.out.println("Elements in the ConcurrentLinkedDeque: " + cld);

        // Displaying the first element
        System.out.println("The first element is: " + cld.getFirst());

        // Displaying the Last element
        System.out.println("The Last element is: " + cld.getLast());

        // Displaying the head of ConcurrentLinkedDeque
        System.out.println("The Head of ConcurrentLinkedDeque is: " + cld.element());
    }
}

Output:

Elements in the ConcurrentLinkedDeque: [Welcome, To, Geeks, 4, Geeks]
The first element is: Welcome
The Last element is: Geeks
The Head of ConcurrentLinkedDeque is: Welcome

Reference: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/concurrent/ConcurrentLinkedDeque.html
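As noted in the feature list, the iterators are weakly consistent: they never throw ConcurrentModificationException, and they may or may not reflect modifications made after their creation. A minimal sketch of that behavior (element values are illustrative):

Java

import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedDeque;

public class WeaklyConsistentDemo {
    public static void main(String[] args)
    {
        ConcurrentLinkedDeque<String> cld
            = new ConcurrentLinkedDeque<>();
        cld.add("a");
        cld.add("b");
        cld.add("c");

        Iterator<String> it = cld.iterator();

        // Mutating while an iterator is live is legal here; a
        // java.util.ArrayList iterator would throw
        // ConcurrentModificationException in the same situation
        cld.addLast("d");
        cld.removeFirst();

        // Elements added or removed after the iterator was created
        // may or may not be visible to it
        while (it.hasNext()) {
            System.out.println(it.next());
        }
    }
}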
[ { "code": null, "e": 24332, "s": 24304, "text": "\n16 Sep, 2020" }, { "code": null, "e": 24605, "s": 24332, "text": "The ConcurrentLinkedDeque class in Java is a part of the Java Collection Framework and implements the Collection interface and the AbstractCollection class. It belongs to java.util.concurrent package. It is used to implement Deque with the help of LinkedList concurrently." }, { "code": null, "e": 24639, "s": 24605, "text": "Features of ConcurrentLinkedDeque" }, { "code": null, "e": 24689, "s": 24639, "text": "Iterators and spliterators are weakly consistent." }, { "code": null, "e": 24782, "s": 24689, "text": "Concurrent insertion, removal, and access operations execute safely across multiple threads." }, { "code": null, "e": 24816, "s": 24782, "text": "It does not permit null elements." }, { "code": null, "e": 25000, "s": 24816, "text": "size() method is not implemented in constant time. Because of the asynchronous nature of these deques, determining the current number of elements requires a traversal of the elements." }, { "code": null, "e": 25018, "s": 25000, "text": "Class Hierarchy: " }, { "code": null, "e": 25033, "s": 25018, "text": "Declaration: " }, { "code": null, "e": 25218, "s": 25033, "text": "public abstract class ConcurrentLinkedDeque<E>\n extends AbstractCollection<E>\n implements Deque<E>, Serializable\n\nHere, E is the type of elements maintained by this collection.\n" }, { "code": null, "e": 25305, "s": 25218, "text": "It implements Serializable, Iterable<E>, Collection<E>, Deque<E>, Queue<E> interfaces." }, { "code": null, "e": 25387, "s": 25305, "text": "1. ConcurrentLinkedDeque(): This constructor is used to construct an empty deque." }, { "code": null, "e": 25450, "s": 25387, "text": "ConcurrentLinkedDeque<E> cld = new ConcurrentLinkedDeque<E>();" }, { "code": null, "e": 25600, "s": 25450, "text": "2. ConcurrentLinkedDeque(Collection<E> c): This constructor is used to construct a deque with the elements of the Collection passed as the parameter." 
}, { "code": null, "e": 25678, "s": 25600, "text": "ConcurrentLinkedDeque<E> cld = new ConcurrentLinkedDeque<E>(Collection<E> c);" }, { "code": null, "e": 25753, "s": 25678, "text": "Below is the sample program to illustrate ConcurrentLinkedDeque in Java: " }, { "code": null, "e": 25758, "s": 25753, "text": "Java" }, { "code": "// Java Program to demonstrate ConcurrentLinkedDeque import java.util.concurrent.*; class ConcurrentLinkedDequeDemo { public static void main(String[] args) { // Create a ConcurrentLinkedDeque // using ConcurrentLinkedDeque() // constructor ConcurrentLinkedDeque<Integer> cld = new ConcurrentLinkedDeque<Integer>(); // add element to the front // using addFirst() method cld.addFirst(12); cld.addFirst(70); cld.addFirst(1009); cld.addFirst(475); // Displaying the existing ConcurrentLinkedDeque System.out.println(\"ConcurrentLinkedDeque: \" + cld); // Create a ConcurrentLinkedDeque // using ConcurrentLinkedDeque(Collection c) // constructor ConcurrentLinkedDeque<Integer> cld1 = new ConcurrentLinkedDeque<Integer>(cld); // Displaying the existing ConcurrentLinkedDeque System.out.println(\"ConcurrentLinkedDeque1: \" + cld1); }}", "e": 26821, "s": 25758, "text": null }, { "code": null, "e": 26908, "s": 26821, "text": "ConcurrentLinkedDeque: [475, 1009, 70, 12]\nConcurrentLinkedDeque1: [475, 1009, 70, 12]" }, { "code": null, "e": 26922, "s": 26910, "text": "Example: " }, { "code": null, "e": 26927, "s": 26922, "text": "Java" }, { "code": "// Java code to illustrate// methods of ConcurrentLinkedDeque import java.util.concurrent.*; class ConcurrentLinkedDequeDemo { public static void main(String[] args) { // Create a ConcurrentLinkedDeque // using ConcurrentLinkedDeque() constructor ConcurrentLinkedDeque<Integer> cld = new ConcurrentLinkedDeque<Integer>(); // add element to the front // using addFirst() method cld.addFirst(12); cld.addFirst(70); cld.addFirst(1009); cld.addFirst(475); // Displaying the existing ConcurrentLinkedDeque System.out.println(\"ConcurrentLinkedDeque: \" + cld); // Displaying the Last element // using getLast() method System.out.println(\"The Last element is: \" + cld.getLast()); // Displaying the first element // using peekFirst() method System.out.println(\"First Element is: \" + cld.peekFirst()); // Remove the Last element // using removeLast() method cld.removeLast(); // Displaying the existing ConcurrentLinkedDeque System.out.println(\"ConcurrentLinkedDeque: \" + cld); }}", "e": 28206, "s": 26927, "text": null }, { "code": null, "e": 28334, "s": 28206, "text": "ConcurrentLinkedDeque: [475, 1009, 70, 12]\nThe Last element is: 12\nFirst Element is: 475\nConcurrentLinkedDeque: [475, 1009, 70]" }, { "code": null, "e": 28355, "s": 28336, "text": "1. Adding Elements" }, { "code": null, "e": 28568, "s": 28355, "text": "To add an element or Collection of elements, ConcurrentLinkedDeque provides methods like add(E e), addAll(Collection<? extends E> c), addFirst(E e), addLast(E e) methods. The example below explains these methods." 
}, { "code": null, "e": 28573, "s": 28568, "text": "Java" }, { "code": "// Java Program Demonstrate adding// elements to the ConcurrentLinkedDeque import java.util.concurrent.*; class AddingElements { public static void main(String[] args) { // create instance using ConcurrentLinkedDeque ConcurrentLinkedDeque<Integer> cld1 = new ConcurrentLinkedDeque<Integer>(); // Add element to the tail using // add or addLast methods cld1.add(12); cld1.add(110); // Add element to the head // using addFirst method cld1.addFirst(55); // Displaying the existing ConcurrentLinkedDeque System.out.println(\"Initial Elements in \" + \"the LinkedDeque cld : \" + cld1); // create instance using ConcurrentLinkedDeque ConcurrentLinkedDeque<Integer> cld2 = new ConcurrentLinkedDeque<Integer>(); // Add elements of cld1 to the // cld2 using addAll method cld2.addAll(cld1); // Displaying the modified ConcurrentLinkedDeque System.out.println(\"Initial Elements in \" + \"the LinkedDeque cld2: \" + cld2); }}", "e": 29760, "s": 28573, "text": null }, { "code": null, "e": 29769, "s": 29760, "text": " Output:" }, { "code": null, "e": 29881, "s": 29769, "text": "Initial Elements in the LinkedDeque cld : [55, 12, 110]\nInitial Elements in the LinkedDeque cld2: [55, 12, 110]" }, { "code": null, "e": 29900, "s": 29881, "text": "2. Remove Elements" }, { "code": null, "e": 30077, "s": 29900, "text": "To remove an element, ConcurrentLinkedDeque provides methods like remove(), remove(Object o), removeFirst(), removeLast() etc. These methods are explained in the below example." }, { "code": null, "e": 30082, "s": 30077, "text": "Java" }, { "code": "// Java Program to demonstrate removing// elements of ConcurrentLinkedDeque import java.util.concurrent.*; class RemovingElements { public static void main(String[] args) { // Create a ConcurrentLinkedDeque // using ConcurrentLinkedDeque() constructor ConcurrentLinkedDeque<Integer> cld = new ConcurrentLinkedDeque<Integer>(); // Add elements using add() method cld.add(40); cld.add(50); cld.add(60); cld.add(70); cld.add(80); // Displaying the existing LinkedDeque System.out.println( \"Existing ConcurrentLinkedDeque: \" + cld); // remove method removes the first // element of ConcurrentLinkedDeque // using remove() method System.out.println(\"Element removed: \" + cld.remove()); // Remove 60 using remove(Object) System.out.println(\"60 removed: \" + cld.remove(60)); // Displaying the existing ConcurrentLinkedDeque System.out.println( \"Modified ConcurrentLinkedDeque: \" + cld); // Remove the first element cld.removeFirst(); // Remove the Last element cld.removeLast(); // Displaying the existing ConcurrentLinkedDeque System.out.println( \"Modified ConcurrentLinkedDeque: \" + cld); }}", "e": 31443, "s": 30082, "text": null }, { "code": null, "e": 31615, "s": 31443, "text": "Existing ConcurrentLinkedDeque: [40, 50, 60, 70, 80]\nElement removed: 40\n60 removed: true\nModified ConcurrentLinkedDeque: [50, 70, 80]\nModified ConcurrentLinkedDeque: [70]" }, { "code": null, "e": 31638, "s": 31615, "text": "3. Iterating Elements " }, { "code": null, "e": 31773, "s": 31638, "text": "We can iterate the ConcurrentLinkedDeque using iterator() or descendingIterator() methods. The below code explains both these methods." 
}, { "code": null, "e": 31778, "s": 31773, "text": "Java" }, { "code": "// Java code to illustrate iterating// elements of ConcurrentLinkedDeque import java.util.concurrent.*;import java.util.*; public class IteratingConcurrentLinkedDeque { public static void main(String args[]) { // Creating an empty ConcurrentLinkedDeque ConcurrentLinkedDeque<String> deque = new ConcurrentLinkedDeque<String>(); // Use add() method to add elements // into the ConcurrentLinkedDeque deque.add(\"Welcome\"); deque.add(\"To\"); deque.add(\"Geeks\"); deque.add(\"4\"); deque.add(\"Geeks\"); // Displaying the ConcurrentLinkedDeque System.out.println(\"ConcurrentLinkedDeque: \" + deque); // Creating an iterator Iterator fitr = deque.iterator(); // Displaying the values // after iterating through the ConcurrentLinkedDeque System.out.println(\"The iterator values are: \"); while (fitr.hasNext()) { System.out.println(fitr.next()); } // Creating a desc_iterator Iterator ditr = deque.descendingIterator(); // Displaying the values after iterating // through the ConcurrentLinkedDeque // in reverse order System.out.println(\"The iterator values are: \"); while (ditr.hasNext()) { System.out.println(ditr.next()); } }}", "e": 33159, "s": 31778, "text": null }, { "code": null, "e": 33315, "s": 33159, "text": "ConcurrentLinkedDeque: [Welcome, To, Geeks, 4, Geeks]\nThe iterator values are: \nWelcome\nTo\nGeeks\n4\nGeeks\nThe iterator values are: \nGeeks\n4\nGeeks\nTo\nWelcome" }, { "code": null, "e": 33337, "s": 33315, "text": "4. Accessing Elements" }, { "code": null, "e": 33495, "s": 33337, "text": "To access the elements of ConcurrentLinkedDeque, it provides methods like getFirst(), getLast(), element() methods. The below example explains these methods." }, { "code": null, "e": 33500, "s": 33495, "text": "Java" }, { "code": "// Java Program to Demonstrate accessing// elements of ConcurrentLinkedDeque import java.util.concurrent.*;import java.util.*; class Accessing { public static void main(String[] args) { // Creating an empty ConcurrentLinkedDeque ConcurrentLinkedDeque<String> cld = new ConcurrentLinkedDeque<String>(); // Add elements into the ConcurrentLinkedDeque cld.add(\"Welcome\"); cld.add(\"To\"); cld.add(\"Geeks\"); cld.add(\"4\"); cld.add(\"Geeks\"); // Displaying the ConcurrentLinkedDeque System.out.println(\"Elements in the ConcurrentLinkedDeque: \" + cld); // Displaying the first element System.out.println(\"The first element is: \" + cld.getFirst()); // Displaying the Last element System.out.println(\"The Last element is: \" + cld.getLast()); // Displaying the head of ConcurrentLinkedDeque System.out.println(\"The Head of ConcurrentLinkedDeque is: \" + cld.element()); }}", "e": 34583, "s": 33500, "text": null }, { "code": null, "e": 34592, "s": 34583, "text": "Output: " }, { "code": null, "e": 34765, "s": 34592, "text": "Elements in the ConcurrentLinkedDeque: [Welcome, To, Geeks, 4, Geeks]\nThe first element is: Welcome\nThe Last element is: Geeks\nThe Head of ConcurrentLinkedDeque is: Welcome" }, { "code": null, "e": 34797, "s": 34765, "text": "Here, E is the type of element." 
}, { "code": null, "e": 34804, "s": 34797, "text": "METHOD" }, { "code": null, "e": 34816, "s": 34804, "text": "DESCRIPTION" }, { "code": null, "e": 34823, "s": 34816, "text": "METHOD" }, { "code": null, "e": 34835, "s": 34823, "text": "DESCRIPTION" }, { "code": null, "e": 34842, "s": 34835, "text": "METHOD" }, { "code": null, "e": 34854, "s": 34842, "text": "DESCRIPTION" }, { "code": null, "e": 34861, "s": 34854, "text": "METHOD" }, { "code": null, "e": 34873, "s": 34861, "text": "DESCRIPTION" }, { "code": null, "e": 34993, "s": 34873, "text": "Reference: https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/concurrent/ConcurrentLinkedDeque.html" }, { "code": null, "e": 35017, "s": 34993, "text": "Ganeshchowdharysadanala" }, { "code": null, "e": 35037, "s": 35017, "text": "Java - util package" }, { "code": null, "e": 35054, "s": 35037, "text": "Java-Collections" }, { "code": null, "e": 35081, "s": 35054, "text": "Java-ConcurrentLinkedDeque" }, { "code": null, "e": 35086, "s": 35081, "text": "Java" }, { "code": null, "e": 35091, "s": 35086, "text": "Java" }, { "code": null, "e": 35108, "s": 35091, "text": "Java-Collections" }, { "code": null, "e": 35206, "s": 35108, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 35238, "s": 35206, "text": "Initialize an ArrayList in Java" }, { "code": null, "e": 35289, "s": 35238, "text": "Object Oriented Programming (OOPs) Concept in Java" }, { "code": null, "e": 35319, "s": 35289, "text": "HashMap in Java with Examples" }, { "code": null, "e": 35338, "s": 35319, "text": "Interfaces in Java" }, { "code": null, "e": 35369, "s": 35338, "text": "How to iterate any Map in Java" }, { "code": null, "e": 35387, "s": 35369, "text": "ArrayList in Java" }, { "code": null, "e": 35419, "s": 35387, "text": "Multidimensional Arrays in Java" }, { "code": null, "e": 35434, "s": 35419, "text": "Stream In Java" }, { "code": null, "e": 35458, "s": 35434, "text": "Singleton Class in Java" } ]
A Flask Full of Whiskey (WSGI). Serving up python web applications has... | by Ashok Chilakapati | Towards Data Science
Serving up python web applications has never been easier with the suite of WSGI servers currently at our disposal. Both uWSGI and gunicorn behind Nginx are excellent performers for serving up a Flask app...

Yup, what more could you ask for in life, right? There are a number of varieties too, to suit one's preference. Joking aside, this article is about configuring and stress testing a few WSGI (Web Server Gateway Interface) alternatives for serving up a Python web application. Here is what we cover in this post:

- A simple application is written with the Flask web development framework. The only API exposed is for generating a random quotation by querying a backend resource. In this case, it is Elasticsearch that has indexed a large number of quotations.
- Look at the following standalone WSGI webservers — gunicorn, uWSGI, and the default werkzeug that Flask is bundled with.
- Look at the benefit of using Nginx to front the client requests that are proxied back to the above.
- Use supervisor to manage the WSGI servers and Locust to drive the load test.

We go through some code/config snippets here for illustration, but the full code can be obtained from github.

Unless a web site is entirely static, the webserver needs a way to engage external applications to get some dynamic data. Over time many approaches have been implemented to make this exercise lean, efficient and easy. We had the good old CGI that spawned a new process for each request. Then came mod_python that embedded Python into the webserver, followed by FastCGI that allowed the webserver to tap into a pool of long-running processes to dispatch the request to. They all have their strengths and weaknesses. See the discussion and links on this stackoverflow page for example.

The current favorite is the WSGI protocol that allows for a complete decoupling of webservers and the applications they need to access. Here is a general schematic.

- The WSGI servers are Http enabled on their own, so the client/Nginx can talk to them via Http. In the case of the uWSGI server, there is the option of the uwsgi protocol as well for Nginx, and to test from the command line.
- Nginx proxies the request back to a WSGI server configured for that URI.
- The WSGI server is configured with the Python application to call with the request. The results are relayed all the way back.

The application is simple: all of just one file — quotes.py. It allows for a single GET request.

/quotes/byId?id=INTEGER_NUMBER

The app fetches the quotation document from an Elasticsearch index with that INTEGER_NUMBER as the document ID, and renders it as follows.

- The images and CSS are served by Nginx when available.
- In the absence of Nginx, they are sent from the static folder.

That is the entirety of the application, identical no matter which WSGI server we choose to use. Here is the directory & file layout.

.
├── config.py # Config options for gunicorn
├── quotes.py
├── static
│   ├── css
│   │   └── quote.css
│   ├── favicon.ico
│   └── images
│       ├── eleanor-roosevelt.jpg
│       ├── martha washington.jpg
│       └── maya angelou.jpg
├── templates
│   └── quote.html
└── wsgi.py # Used by uWSGI

When using the built-in werkzeug as the WSGI server, we supply the runtime config in the quotes.py module. Config for uWSGI and gunicorn is supplied at the time of invoking the service. We will cover that next, along with that for Nginx, and for the service manager supervisord.
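The full quotes.py lives on github and is not reproduced here; purely as an illustration of the shape described above, a hypothetical minimal version could look like the following. The index name, field handling and the Elasticsearch client usage are assumptions, not the actual code.

import random
from flask import Flask, render_template, request
from elasticsearch import Elasticsearch

app = Flask(__name__)
es = Elasticsearch("http://localhost:9200")  # assumed local ES instance

@app.route("/quotes/byId")
def quote_by_id():
    doc_id = request.args.get("id", type=int)
    # "quotes" as the index name is a hypothetical placeholder
    doc = es.get(index="quotes", id=doc_id)["_source"]
    return render_template("quote.html", quote=doc)

if __name__ == "__main__":
    # Runtime config for the built-in werkzeug server goes here
    app.run(host="127.0.0.1", port=5000)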
The number of concurrent processes/workers in use by any of the WSGI servers has an impact on performance. The recommended value is about twice the number of cores but can be larger if it does not degrade the performance. We do not mess with threads per worker here as the memory footprint of our application is small. See this post for some discussion on the use of workers vs threads.

We start with 6 workers and vary it to gauge the impact. We use the same number for both gunicorn and uWSGI servers so the comparison is apples to apples. Unfortunately, there does not seem to be a way to do the same with the werkzeug server.

We use supervisord to manage the WSGI server processes. This allows for easier configuration, control, a clean separation of logs by app/wsgi and a UI to boot. The configuration file for each server is placed at /etc/supervisor/conf.d, and the supervisord service is started up.

[/etc/supervisor] ls conf.d/*
conf.d/gunicorn.conf  conf.d/uwsgi.conf  conf.d/uwsgi-http.conf  conf.d/werkzeug.conf
[/etc/supervisor] sudo systemctl start supervisor.service

Here is a screenshot of the UI (by default at localhost:9001) that shows the running WSGI servers, and controls to stop/start, tail the logs and such.

The difference between uwsgi and uwsgi-http is that the latter has a Http endpoint while the former works with the binary uwsgi protocol. We talked about this in the context of Figure 1. Let us look at the configuration files for each. Note that the paths in the config files below are placeholders with '...' to be replaced appropriately as per the exact path on the disk.

The command field in the config invokes gunicorn. The gunicorn server works with the app object in quotes.py and makes the web API available at port 9999. Here is the config file /etc/conf.d/gunicorn.conf. A separate file config.py is used to supply the number of workers, logging details and such.

The uWSGI server can offer either a Http or a uwsgi endpoint as we mentioned earlier. Using the uwsgi endpoint is recommended when the uWSGI server is behind a webserver like Nginx. The configuration below is for the Http endpoint. For the uwsgi endpoint, we replace "-http 127.0.0.1:9997" with "-socket 127.0.0.1:9998".

The config is similar to that for gunicorn, but we do not use a separate config file. The key difference is the argument '-wsgi-file' that points to the module with the application object to be used by the uWSGI server.

The options for the default werkzeug server are given as part of the app.run(...) call in the quotes.py module. We disable logging in order to not impact performance numbers. The only thing left for supervisord to do is to make werkzeug run as a daemon.

When Nginx is used, we need it to correctly route the requests to the above WSGI servers. We use the URI signature to decide which WSGI server should be contacted. Here is the relevant configuration from nginx.conf (see the sketch after this section). We identify the WSGI server by the leading part of the URI and take care to proxy the request back to the port that server is listening on.

With all this under the belt, here is a summary diagram with the flow of calls from the client to backend when Nginx is in place.

In the absence of Nginx, the client sends requests directly to the Http endpoints enabled by the WSGI servers. Clear enough — no need for another diagram.
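The nginx.conf gist itself is only on github; a minimal reconstruction of the routing described above might look like the following. The location prefixes are hypothetical, only the ports 9999, 9997 and 9998 come from the text: proxy_pass for the Http endpoints, uwsgi_pass for the binary uwsgi protocol.

# Hypothetical location blocks; prefixes are illustrative, ports from the text
location /gunicorn/quotes/ {
    proxy_pass http://127.0.0.1:9999/quotes/;   # gunicorn, Http
}
location /uwsgi-http/quotes/ {
    proxy_pass http://127.0.0.1:9997/quotes/;   # uWSGI, Http endpoint
}
location /uwsgi/quotes/ {
    include uwsgi_params;                       # uWSGI, binary uwsgi protocol
    uwsgi_pass 127.0.0.1:9998;
}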
Locust is a load testing framework for Python. The tests are conveniently defined in code and the stats collected as csv files. Here is a simple script that engages Locust while also collecting system metrics at the same time. We use cmonitor_collector for gathering load and memory usage metrics. The script does the following:

- Start a system monitor to collect the load, memory usage, etc... stats
- Run the tests described in load_tests.py on the localhost
- Save the results to files 'results_stats.csv' and 'results_stats_history.csv'
- A total of 500 users are simulated with 10 users/second added as the test starts
- The test runs for 60 minutes
- Locust enables a UI as well (localhost:5557) with plots and such, but it is not used here
- Stop the system monitor
- Post-process the csv data from Locust, and the system metrics data, to generate graphics that can be compared across the different WSGI alternatives

The only test we have to define hits the single API that we have exposed: .../quotes/byId?id=xxxx

The code simulates a user that waits between 1 and 3 seconds before hitting the API again with a random integer as the ID of the quote to fetch.
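The load_tests.py snippet is likewise only on github; a plausible minimal version matching that description could be the sketch below. It is written against the modern Locust API (1.0+); the original from early 2020 would have used the older HttpLocust style, and the class name and ID range here are assumptions.

import random
from locust import HttpUser, task, between

class QuoteUser(HttpUser):
    # Each simulated user waits 1 to 3 seconds between requests
    wait_time = between(1, 3)

    @task
    def get_quote_by_id(self):
        # Hypothetical ID range; the text only says "a random integer"
        quote_id = random.randint(1, 50000)
        self.client.get("/quotes/byId?id={}".format(quote_id))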
Finally, time for some results. Took a while to get here for sure, but we have quite a few moving pieces. Plotting the collected data is straightforward (I used matplotlib here) so we will skip the code for that. You can get plots.py from github.

We have two series of runs — (a) with 6 workers, and (b) with 60 workers for WSGI. Each series has 7 locust runs as shown in the code snippet above. Locust generates data for a variety of metrics — the number of requests, failures, response times, etc... as a function of time. Likewise, cmonitor collects data on the load, memory usage etc... of the hardware. Figure 5 below shows the results with 6 workers.

The main conclusions of interest from Figure 5 (E & F) are the following.

Performance: We consider the average response time (Figure 5F). The performance of the uWSGI and gunicorn servers is comparable with/without Nginx. The default server werkzeug that Flask comes with is the worst, one of the reasons for their recommendation — do NOT use it in production. Also, if you like uWSGI, go for the binary uwsgi protocol and put it behind Nginx, as it is the best. Here is the explicit order.

1. uWSGI server (uwsgi) behind Nginx
2. gunicorn server without Nginx
3. uWSGI server (Http) behind Nginx
4. gunicorn server behind Nginx
5. uWSGI server (Http) without Nginx
6. werkzeug, with/without Nginx

Why is the response time increasing? The reason is NOT because the server performance is degrading with time. Rather, it is the starbucks phenomenon for early morning coffee, at work! What Locust is reporting here is the total elapsed time between when the request is fired and the response is received. The rate at which we are firing requests is bigger than the rate at which the server is clearing them. So the requests get queued, and the line gets longer and longer with time. The requests that get in the line early have a smaller wait time than the ones that join the line later. This, of course, manifests as the longer response time for later requests.

Why the step increase for median and smooth for the average? The median (or any percentile) is just one integer number (milliseconds) whereas the average is, of course, the average (a float) of all the numbers. The percentile is based on the counts on either side of its current value and given the randomness — increases slowly and by quantum jumps. The average, on the other hand, increases continuously.

But there is more we can learn from the figures A-D.

(A) The total number of requests increases with time — of course! Kind of linear, but not quite, and there is some variation between the runs too. All that is simply because the wait_time between successive requests from the simulated users has been randomized in the code snippet above.

(B) There are some failures but very, very few compared to the total number of requests served. Perhaps not enough to draw big conclusions.

(C & D) There is plenty of free memory in all cases and not much load. But it does seem that when Nginx is not used, larger memory is being consumed, with the server experiencing a slightly higher load.

We are clearly not taxing the server with 6 workers. Let us bump up the workers to 60 and see what we get. This is in Figure 6 below.

We have clearly increased the load and the memory usage (C & D), but all of our conclusions from Figure 5 still hold, with the uWSGI server as the leader, followed by gunicorn. Before closing this post, let us look at the response time results with 6 and 60 workers on the same plot, focusing on uWSGI and gunicorn alone.

We have learned in this post that:

- the uWSGI server behind Nginx is the best performer
- we cannot go wrong with gunicorn either, with/without Nginx
- we want to avoid placing the uWSGI Http server behind Nginx, as they recommend on their website
- we do not use the default werkzeug server in production!

Happy learning!

Originally published at http://xplordat.com on February 16, 2020.
[ { "code": null, "e": 379, "s": 172, "text": "Serving up python web applications has never been easier with the suite of WSGI servers currently at our disposal. Both uWSGI and gunicorn behind Nginx are excellent performers for serving up a Flask app..." }, { "code": null, "e": 688, "s": 379, "text": "Yup, what more could you ask for in life right? There are a number of varieties too, to suit one’s preference. Joking aside, this article is about configuring and stress testing a few WSGI (Web Server Gateway Interface) alternatives for serving up a Python web application. Here is what we cover in this post" }, { "code": null, "e": 933, "s": 688, "text": "A simple application is written with the Flask web development framework. The only API exposed is for generating a random quotation by querying a backend resource. In this case, it is Elasticsearch that has indexed a large number of quotations." }, { "code": null, "e": 1054, "s": 933, "text": "Look at the following standalone WSGI webservers — gunicorn, uWSGI, and the default werkzeug that Flask is bundled with." }, { "code": null, "e": 1154, "s": 1054, "text": "Look at the benefit of using Nginx to front the client requests that are proxied back to the above." }, { "code": null, "e": 1231, "s": 1154, "text": "Use supervisor to manage the WSGI servers and Locust to drive the load test." }, { "code": null, "e": 1341, "s": 1231, "text": "We go through some code/config snippets here for illustration, but the full code can be obtained from github." }, { "code": null, "e": 1925, "s": 1341, "text": "Unless a web site is entirely static, the webserver needs a way to engage external applications to get some dynamic data. Over time many approaches have been implemented to make this exercise lean, efficient and easy. We had the good old CGI that spawned a new process for each request. Then came mod_python that embedded Python into the webserver, followed by FastCGI that allowed the webserver to tap into a pool of long-running processes to dispatch the request to. They all have their strengths and weaknesses. See the discussion and links on this stackoverflow page for example." }, { "code": null, "e": 2090, "s": 1925, "text": "The current favorite is the WSGI protocol that allows for a complete decoupling of webservers and the applications they need to access. Here is a general schematic." }, { "code": null, "e": 2306, "s": 2090, "text": "The WSGI servers are Http enabled on their own, so the client/Nginx can talk to them via Http. In the case of the uWSGI server, there is the option of uwsgi protocol as well for Nginx, and to test from command line." }, { "code": null, "e": 2379, "s": 2306, "text": "Nginx proxies the request back to a WSGI server configured for that URI." }, { "code": null, "e": 2505, "s": 2379, "text": "The WSGI server is configured with the Python application to call with the request. The results are relayed all the way back." }, { "code": null, "e": 2621, "s": 2505, "text": "The application is simple. The application is all of just one file — quotes.py. It allows for a single GET request." }, { "code": null, "e": 2652, "s": 2621, "text": "/quotes/byId?id=INTEGER_NUMBER" }, { "code": null, "e": 2793, "s": 2652, "text": "The app fetches the quotation document from an Elasticsearch index with that INTEGER_NUMBER as the document ID, and renders them as follows." }, { "code": null, "e": 2848, "s": 2793, "text": "The images and CSS are served by Nginx when available." 
}, { "code": null, "e": 2911, "s": 2848, "text": "In the absence of Nginx, they are sent from the static folder." }, { "code": null, "e": 3045, "s": 2911, "text": "That is the entirety of the application. Identical no matter which WSGI server we choose to use. Here is the directory & file layout." }, { "code": null, "e": 3340, "s": 3045, "text": ".├── config.py # Config options for gunicorn├── quotes.py├── static│ ├── css│ │ └── quote.css│ ├── favicon.ico│ └── images│ ├── eleanor-roosevelt.jpg│ ├── martha washington.jpg│ └── maya angelou.jpg├── templates│ └── quote.html└── wsgi.py # Used by uWSGI" }, { "code": null, "e": 3614, "s": 3340, "text": "When using the built-in werkzeug as the WSGI server, we supply the runtime config in quotes.py module. Config for uWSGI and gunicorn is supplied at the time of invoking the service. We will cover that next along with that for Nginx, and for the service manager supervisord." }, { "code": null, "e": 4001, "s": 3614, "text": "The number of concurrent processes/workers in use by any of the WSGI servers has an impact on performance. The recommended value is about twice the number of cores but can be larger if it does not degrade the performance. We do not mess with threads per worker here as the memory footprint of our application is small. See this post for some discussion on the use of workers vs threads." }, { "code": null, "e": 4244, "s": 4001, "text": "We start with 6 workers and vary it to gauge the impact. We use the same number for both gunicorn and uWSGI servers so the comparison is apples to apples. Unfortunately, there does not seem to be a way to do the same with the werkzeug server." }, { "code": null, "e": 4523, "s": 4244, "text": "We use supervisord to manage the WSGI server processes. This allows for easier configuration, control, a clean separation of logs by app/wsgi and a UI to boot. The configuration file for each server is placed at /etc/supervisor/conf.d, and the supervisord service is started up." }, { "code": null, "e": 4695, "s": 4523, "text": "[/etc/supervisor] ls conf.d/*conf.d/gunicorn.conf conf.d/uwsgi.conf conf.d/uwsgi-http.conf conf.d/werkzeug.conf[/etc/supervisor] sudo systemctl start supervisor.service" }, { "code": null, "e": 4842, "s": 4695, "text": "Here is a screenshot of UI (by default at localhost:9001) that shows the running WSGI servers, and controls to stop/start, tail the logs and such." }, { "code": null, "e": 5216, "s": 4842, "text": "The difference between uwsgi and uwsgi-http is that the latter has a Http endpoint while the former works with the binary uwsgi protocol. We talked about this in the context of Figure 1. Let us look at the configuration files for each. Note that the paths in the config files below are placeholders with ‘...’ to be replaced appropriately as per the exact path on the disk." }, { "code": null, "e": 5421, "s": 5216, "text": "The command field in the config invokes gunicorn. The gunicorn server works with the app object in quotes.py and makes the web api available at port 9999. Here is the config file /etc/conf.d/gunicorn.conf" }, { "code": null, "e": 5514, "s": 5421, "text": "A separate file config.py is used to supply the number of threads, logging details and such." }, { "code": null, "e": 5837, "s": 5514, "text": "The uWSGI server can offer either a Http or a uwsgi endpoint as we mentioned earlier. Using the uwsgi endpoint is recommended when the uWSGI server is behind a webserver like Nginx. The configuration below is for the Http endpoint. 
For the uwsgi endpoint, we replace “ -http 127.0.0.1:9997” with “ -socket 127.0.0.1:9998 “" }, { "code": null, "e": 6049, "s": 5837, "text": "The config is similar to that for gunicorn, but we do not use a separate config file. The key difference is the argument ‘-wsgi-file’ that points to the module with application object to be used by uWSGI server." }, { "code": null, "e": 6221, "s": 6049, "text": "The options for the default werkzeug server are given as part of the app.run (...) call in quotes.py module. We disable logging in order to not impact performance numbers." }, { "code": null, "e": 6299, "s": 6221, "text": "The only thing left for supervisord to do is to make werkzeugrun as a daemon." }, { "code": null, "e": 6515, "s": 6299, "text": "When Nginx is used, we need it to correctly route the requests to the above WSGI servers. We use the URI signature to decide which WSGI server should be contacted. Here is the relevant configuration from nginx.conf." }, { "code": null, "e": 6668, "s": 6515, "text": "We identify the WSGI server by the leading part of the URI and take care to proxy it back to its correct port we defined that server to be listening on." }, { "code": null, "e": 6797, "s": 6668, "text": "With all this under the belt here is a summary diagram with the flow of calls from the client to backend when Nginx is in place." }, { "code": null, "e": 6952, "s": 6797, "text": "In the absence of Nginx, the client sends requests directly to the Http endpoints enabled by the WSGI servers. Clear enough — no need for another diagram." }, { "code": null, "e": 7251, "s": 6952, "text": "Locust is a load testing framework for Python. The tests are conveniently defined in code and the stats collected as csv files. Here is a simple script that engages Locust while also collecting system metrics at the same time. We use cmonitor_collector for gathering load, and memory usage metrics." }, { "code": null, "e": 7322, "s": 7251, "text": "Start a system monitor to collect the load, memory usage, etc... stats" }, { "code": null, "e": 7380, "s": 7322, "text": "Run the tests described in load_tests.py on the localhost" }, { "code": null, "e": 7459, "s": 7380, "text": "Save the results to files ‘results_stats.csv’ and ‘results_stats_history.csv’." }, { "code": null, "e": 7540, "s": 7459, "text": "A total of 500 users are simulated with 10 users/second added as the test starts" }, { "code": null, "e": 7569, "s": 7540, "text": "The test runs for 60 minutes" }, { "code": null, "e": 7652, "s": 7569, "text": "Locust enables a UI as well (localhost:5557) with plots and such but not used here" }, { "code": null, "e": 7676, "s": 7652, "text": "Stop the system monitor" }, { "code": null, "e": 7824, "s": 7676, "text": "Post-process the csv data from Locust, and the system metrics data to generate graphics that can be compared across the different WSGI alternatives" }, { "code": null, "e": 7930, "s": 7824, "text": "The only test we have to define is hitting the single API that we have exposed is .../quotes/byId?id=xxxx" }, { "code": null, "e": 8075, "s": 7930, "text": "The code simulates a user that waits between 1 and 3 seconds before hitting the API again with a random integer as the ID of the quote to fetch." }, { "code": null, "e": 8321, "s": 8075, "text": "Finally time for some results. Took a while to get here for sure, but we have quite a few moving pieces. Plotting the collected data is straightforward (I used matplotlib here) so we will skip the code for that. You can get plots.py from github." 
}, { "code": null, "e": 8729, "s": 8321, "text": "We have two series of runs — (a) with 6 workers, and (b) with 60 workers for WSGI. Each series has 7 locust runs as shown in the code snippet above. Locust generates data for a variety of metrics — the number of requests, failures, response times, etc... as a function of time. Likewise, cmonitor collects data on the load, memory usage etc... of the hardware. Figure 5 below shows the results with workers." }, { "code": null, "e": 8803, "s": 8729, "text": "The main conclusions of interest from Figure 5 (E & F) are the following." }, { "code": null, "e": 9219, "s": 8803, "text": "Performance: We consider the average response time (Figure 5F). The performance of the uWSGI and gunicorn servers is comparable with/without Nginx. The default server werkzeug that Flask comes with is the worst, one of the reasons for their recommendation — do NOT use it in production. Also if you like uWSGI, go for the binary uwsgi protocol and put it behind Nginx, as it is the best. Here is the explicit order." }, { "code": null, "e": 9403, "s": 9219, "text": "uWSGI server (uwsgi) behind Nginxgunicorn server without NginxuWSGI server (Http) behind Nginxgunicorn server behind NginxuWSGI server (Http) without Nginxwerkzeug, with/without Nginx" }, { "code": null, "e": 9437, "s": 9403, "text": "uWSGI server (uwsgi) behind Nginx" }, { "code": null, "e": 9467, "s": 9437, "text": "gunicorn server without Nginx" }, { "code": null, "e": 9500, "s": 9467, "text": "uWSGI server (Http) behind Nginx" }, { "code": null, "e": 9529, "s": 9500, "text": "gunicorn server behind Nginx" }, { "code": null, "e": 9563, "s": 9529, "text": "uWSGI server (Http) without Nginx" }, { "code": null, "e": 9592, "s": 9563, "text": "werkzeug, with/without Nginx" }, { "code": null, "e": 10252, "s": 9592, "text": "Why is the response time increasing? The reason is NOT because the server performance is degrading with time. Rather it is the starbucks phenomena for early morning coffee, at work! What Locust is reporting here is the total elapsed time between when the request is fired and the response is received. The rate at which we are firing requests is bigger than the rate at which the server is clearing them. So the requests get queued, and the line gets longer and longer with time. The requests that get in the line early have a smaller wait time than the ones that join the line later. This, of course, manifests as the longer response time for later requests." }, { "code": null, "e": 10659, "s": 10252, "text": "Why the step increase for median and smooth for the average? The median (or any percentile) is just one integer number (milliseconds) whereas the average is, of course, the average (a float) of all the numbers. The percentile is based on the counts on either side of its current value and given the randomness — increases slowly and by quantum jumps. The average, on the other hand, increases continuously." }, { "code": null, "e": 10722, "s": 10659, "text": "But there is more we can learn from here from the figures A-D." }, { "code": null, "e": 11009, "s": 10722, "text": "(A) The total number of requests increases with time — of course! Kind of linear but not quite, and there is some variation between the runs too. All that is simply because the wait_time between successive requests from the simulated users has been randomized in the code snippet above." 
}, { "code": null, "e": 11149, "s": 11009, "text": "(B) There are some failures but very, very few compared to the total number of requests served. Perhaps not enough to draw big conclusions." }, { "code": null, "e": 11351, "s": 11149, "text": "(C & D) There is plenty of free memory in all cases and not much load. But it does seem that when Nginx is not used, larger memory is being consumed with the server experiencing a slightly higher load." }, { "code": null, "e": 11485, "s": 11351, "text": "We are clearly not taxing the server with 6 workers. Let us bump up the workers to 60 and see what we get. This is in Figure 6 below." }, { "code": null, "e": 11805, "s": 11485, "text": "We have clearly increased the load and the memory usage (C & D) but all of our conclusions from Figure 5 still hold with the uWSGI server as the leader, followed by gunicorn. Before closing this post, let us look at response time results with 6 and 60 workers on the same plot focusing only on uWSGI and gunicorn alone." }, { "code": null, "e": 11840, "s": 11805, "text": "We have learned in this post that:" }, { "code": null, "e": 11892, "s": 11840, "text": "the uWSGI server behind Nginx is the best performer" }, { "code": null, "e": 11951, "s": 11892, "text": "we cannot go wrong with gunicorn either with/without Nginx" }, { "code": null, "e": 12046, "s": 11951, "text": "we want to avoid placing the uWSGI Http server behind Nginx as they recommend on their website" }, { "code": null, "e": 12103, "s": 12046, "text": "we do not use the default werkzeug server in production!" }, { "code": null, "e": 12119, "s": 12103, "text": "Happy learning!" } ]
Print all LCS sequences | Practice | GeeksforGeeks
You are given two strings s and t. Now your task is to print all longest common sub-sequences in lexicographical order.

Example 1:
Input: s = abaaa, t = baabaca
Output: aaaa abaa baaa

Example 2:
Input: s = aaa, t = a
Output: a

Your Task:
You do not need to read or print anything. Your task is to complete the function all_longest_common_subsequences() which takes strings a and b as its first and second parameters respectively and returns a list of strings which contains all possible longest common subsequences in lexicographical order.

Expected Time Complexity: O(n^4)
Expected Space Complexity: O(K * n) where K is a constant less than n.

Constraints:
1 ≤ Length of both strings ≤ 50

+2 yoyogeshyo635 (1 month ago)

Successfully submitted

int countLcs(string s, string t){
    int n = s.size();
    int m = t.size();
    vector<vector<int>> dp(n + 1, vector<int>(m + 1, -1));
    for(int i = 0; i <= n; i++){
        for(int j = 0; j <= m; j++){
            if(i == 0 || j == 0) dp[i][j] = 0;
            else if(s[i-1] == t[j-1]) dp[i][j] = 1 + dp[i-1][j-1];
            else {
                dp[i][j] = max(dp[i-1][j], dp[i][j-1]);
            }
        }
    }
    return dp[n][m];
}

set<string> st;

void helper(string s, string t, int i, int j, string &temp, vector<string> &ans, int count){
    if(count == 0){
        if(st.find(temp) == st.end()){
            st.insert(temp);
            ans.push_back(temp);
        }
        return;
    }
    if(i >= s.size() || j >= t.size()) return;
    for(int r = i; r < s.size(); r++){
        for(int c = j; c < t.size(); c++){
            if(s[r] == t[c]){
                temp.push_back(s[r]);
                helper(s, t, r + 1, c + 1, temp, ans, count - 1);
                temp.pop_back();
            }
        }
    }
}

vector<string> all_longest_common_subsequences(string s, string t) {
    // Code here
    int a = countLcs(s, t);
    vector<string> ans;
    string temp = "";
    helper(s, t, 0, 0, temp, ans, a);
    sort(ans.begin(), ans.end());
    return ans;
}

-1 sgupta9519 (2 months ago)

tle forever

0 avenvy (3 months ago)

Python Solution (All Test Cases Passed)

def all_longest_common_subsequences(self, s, t):
    MAX = 100
    dp = [[-1 for i in range(MAX)] for i in range(MAX)]

    def lcs(str1, str2, len1, len2, i, j):
        if (i == len1 or j == len2):
            dp[i][j] = 0
            return dp[i][j]
        if (dp[i][j] != -1):
            return dp[i][j]
        ret = 0
        if (str1[i] == str2[j]):
            ret = 1 + lcs(str1, str2, len1, len2, i + 1, j + 1)
        else:
            ret = max(lcs(str1, str2, len1, len2, i + 1, j),
                      lcs(str1, str2, len1, len2, i, j + 1))
        dp[i][j] = ret
        return ret

    def printAll(str1, str2, len1, len2, cur, indx1, indx2, currlcs, lcslen):
        if (currlcs == lcslen):
            ans.append(cur)
            return
        if (indx1 == len1 or indx2 == len2):
            return
        for ch in range(ord('a'), ord('z') + 1):
            done = False
            for i in range(indx1, len1):
                if (chr(ch) == str1[i]):
                    for j in range(indx2, len2):
                        if (chr(ch) == str2[j] and dp[i][j] == lcslen - currlcs):
                            new_cur = cur + chr(ch)
                            printAll(str1, str2, len1, len2, new_cur,
                                     i + 1, j + 1, currlcs + 1, lcslen)
                            done = True
                            break
                if (done):
                    break

    len1, len2 = len(s), len(t)
    lcslen = lcs(s, t, len1, len2, 0, 0)
    global ans
    ans = []
    printAll(s, t, len1, len2, "", 0, 0, 0, lcslen)
    ans.sort()
    w = set()
    pre = []
    for i in ans:
        if i in w:
            continue
        else:
            pre.append(i)
            w.add(i)
    return pre

+2 shashankrustagii (4 months ago)

class Solution
{
    public:
    int dp[51][51];
    unordered_set<string> mp;
    unordered_set<string> st;

    void printLCS(vector<string> &ans, string s, string t, int i, int j, string str)
    {
        if(i == 0 or j == 0)
        {
            reverse(str.begin(), str.end());
            if(st.find(str) != st.end()) return;
            st.insert(str);
            ans.push_back(str);
            return;
        }
        string key;
        key += to_string(i);
        key += to_string(j);
        key += str;
        if(mp.find(key) != mp.end()) return;
        mp.insert(key);
        if(s[i-1] == t[j-1]) printLCS(ans, s, t, i - 1, j - 1, str + s[i-1]);
        else if(dp[i-1][j] > dp[i][j-1]) printLCS(ans, s, t, i - 1, j, str);
        else if(dp[i-1][j] < dp[i][j-1]) printLCS(ans, s, t, i, j - 1, str);
        else
        {
            printLCS(ans, s, t, i - 1, j, str);
            printLCS(ans, s, t, i, j - 1, str);
        }
        return;
    }

    vector<string> all_longest_common_subsequences(string s, string t)
    {
        st.clear();
        int x = s.length();
        int y = t.length();
        for(int i = 0; i < x + 1; i++)
            for(int j = 0; j < y + 1; j++)
                if(!i or !j) dp[i][j] = 0;
        for(int i = 1; i < x + 1; i++)
            for(int j = 1; j < y + 1; j++)
                if(s[i-1] != t[j-1]) dp[i][j] = max(dp[i-1][j], dp[i][j-1]);
                else dp[i][j] = dp[i-1][j-1] + 1;
        vector<string> ans;
        string str;
        int i = x;
        int j = y;
        printLCS(ans, s, t, i, j, str);
        sort(ans.begin(), ans.end());
        return ans;
    }
};

-5 saurabhkure16 (7 months ago)

class Solution
{
public:
    int dp[101][101];
    vector<string> res;

    void storeAllLCS(int n, int m, string &s, string &t){
        vector<vector<unordered_set<string>>> allLcs(101,
            vector<unordered_set<string>>(101));
        // We have taken unordered_set here so as to remove the possibility of redundant
        // sub-LC sequences, as we need only one occurrence of the sub-LCS.
        // Also, unordered_set performs better compared to set/ordered_set.
        for(int i = 0; i <= n; ++i)
            allLcs[i][0].insert("");
        for(int i = 0; i <= m; ++i)
            allLcs[0][i].insert("");
        for(int i = 1; i <= n; ++i){
            for(int j = 1; j <= m; ++j){
                if( s[i-1] == t[j-1] ){
                    for( string prev : allLcs[i-1][j-1] ){
                        allLcs[i][j].insert(prev + s[i-1]);
                    }
                }
                else if( dp[i-1][j] == dp[i][j-1] ){
                    // Store all previous strings from both cells instead of just one
                    for( string prev : allLcs[i-1][j] )
                        allLcs[i][j].insert(prev);
                    for( string prev : allLcs[i][j-1] )
                        allLcs[i][j].insert(prev);
                }
                else{
                    // Get the maximum length set out of the two dp cells
                    unordered_set<string> best = ( dp[i-1][j] > dp[i][j-1] ) ?
                        ( allLcs[i-1][j] ) : ( allLcs[i][j-1] );
                    for( string prev : best )
                        allLcs[i][j].insert(prev);
                }
            }
        }
        // The last element of allLcs will contain all LCS of max length
        res = vector<string>( allLcs[n][m].begin(), allLcs[n][m].end() );
        sort( res.begin(), res.end() );
    }

    void lcs_Tabulation(int n, int m, string &s, string &t){
        memset(dp, 0, sizeof(dp));
        for(int i = 0; i <= n; ++i){
            for(int j = 0; j <= m; ++j){
                if( i == 0 || j == 0 )
                    dp[i][j] = 0;
                else if( s[i-1] == t[j-1] )
                    dp[i][j] = 1 + dp[i-1][j-1];
                else
                    dp[i][j] = max( dp[i-1][j], dp[i][j-1] );
            }
        }
        storeAllLCS(n, m, s, t);
    }

    vector<string> all_longest_common_subsequences(string s, string t)
    {
        if( s.size() == 0 || t.size() == 0 )
            return res;
        lcs_Tabulation(s.size(), t.size(), s, t);
        return res;
    }
};

-2 Amit Kumar (8 months ago)

Amit Kumar
https://practice.geeksforge...

0 Sourav (9 months ago)

Sourav

// This function will return all the LCS in reverse order.
set<string> helper(string &s, string &t, int i, int j, vector<vector<int>> &dp){
    if(i == 0 || j == 0){
        set<string> ans;
        ans.insert("");
        return ans;
    }
    if(s[i-1] == t[j-1]){
        set<string> ans;
        char ch = s[i-1];
        set<string> tempAns = helper(s, t, i - 1, j - 1, dp);
        for(string s : tempAns){
            string temp = ch + s;
            ans.insert(temp);
        }
        return ans;
    }
    else{
        set<string> temp1;
        set<string> temp2;
        if(dp[i][j-1] >= dp[i-1][j]){
            temp1 = helper(s, t, i, j - 1, dp);
        }
        if(dp[i-1][j] >= dp[i][j-1]){
            temp2 = helper(s, t, i - 1, j, dp);
        }
        for(string s : temp1){
            temp2.insert(s);
        }
        return temp2;
    }
}
public:
vector<string> all_longest_common_subsequences(string s, string t){
    // Simple code to find the LCS length....
    int m = s.length();
    int n = t.length();
    vector<vector<int>> dp(m + 1, vector<int>(n + 1));
    for(int i = 0; i <= m; i++){
        for(int j = 0; j <= n; j++){
            if(i == 0 || j == 0){
                dp[i][j] = 0;
            }
            else if(s[i-1] == t[j-1]){
                dp[i][j] = dp[i-1][j-1] + 1;
            }
            else{
                dp[i][j] = max(dp[i-1][j], dp[i][j-1]);
            }
        }
    }
    // This will return all the LCS, but in reverse order.
    set<string> help = helper(s, t, m, n, dp);
    set<string> help1;
    // This will reverse the LCS's
    for(string s : help){
        reverse(s.begin(), s.end());
        help1.insert(s);
    }
    vector<string> ans;
    for(string s : help1){
        ans.push_back(s);
    }
    return ans;
}

0 Mohd Amir Khan (9 months ago)

Mohd Amir Khan

-1 naman jain (9 months ago)

naman jain

I am following this approach; has anybody else done it this way? Please connect. Approach: 1) Create the dp table and find the length of the LCS. Then use two loops to iterate over the complete table, and whenever a cell of the dp matrix equals the LCS length, call another method that generates the LCS string and adds it to a set; do this for every cell of dp equal to the LCS length, then add the elements of the set to an ArrayList and return it. This is my approach. I am getting some strings, and all of them are correct answers, but the complete set of expected answers is not coming. Can anyone help me with this approach?

0 PRATEEK JHABAK (9 months ago)

PRATEEK JHABAK

https://uploads.disquscdn.c...What is wrong in this? Please Help
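The working submissions in this thread all share the same two phases: first build the LCS-length DP table, then backtrack from dp[n][m], branching wherever both directions preserve the length, and de-duplicate the collected strings. Below is a compact sketch of that shape, written for illustration rather than taken from any submission (the names allLcs and walk are ours); the std::set handles both de-duplication and lexicographical order:

#include <bits/stdc++.h>
using namespace std;

// Sketch: enumerate all longest common subsequences of s and t.
vector<string> allLcs(const string& s, const string& t) {
    int n = s.size(), m = t.size();
    // dp[i][j] = LCS length of s[0..i) and t[0..j)
    vector<vector<int>> dp(n + 1, vector<int>(m + 1, 0));
    for (int i = 1; i <= n; ++i)
        for (int j = 1; j <= m; ++j)
            dp[i][j] = (s[i-1] == t[j-1]) ? dp[i-1][j-1] + 1
                                          : max(dp[i-1][j], dp[i][j-1]);

    set<string> out; // de-duplicates and keeps lexicographical order
    // Backtrack from (i, j), building the current LCS in reverse.
    function<void(int, int, string&)> walk = [&](int i, int j, string& cur) {
        if (i == 0 || j == 0) {
            out.insert(string(cur.rbegin(), cur.rend()));
            return;
        }
        if (s[i-1] == t[j-1]) {
            // A matching character always extends some LCS of the prefixes.
            cur.push_back(s[i-1]);
            walk(i - 1, j - 1, cur);
            cur.pop_back();
        } else {
            // Follow every direction that preserves the LCS length.
            if (dp[i-1][j] == dp[i][j]) walk(i - 1, j, cur);
            if (dp[i][j-1] == dp[i][j]) walk(i, j - 1, cur);
        }
    };
    string cur;
    walk(n, m, cur);
    return vector<string>(out.begin(), out.end());
}

int main() {
    for (const string& x : allLcs("abaaa", "baabaca"))
        cout << x << '\n'; // expected: aaaa abaa baaa
}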
[ { "code": null, "e": 358, "s": 238, "text": "You are given two strings s and t. Now your task is to print all longest common sub-sequences in lexicographical order." }, { "code": null, "e": 369, "s": 358, "text": "Example 1:" }, { "code": null, "e": 423, "s": 369, "text": "Input: s = abaaa, t = baabaca\nOutput: aaaa abaa baaa\n" }, { "code": null, "e": 434, "s": 423, "text": "Example 2:" }, { "code": null, "e": 467, "s": 434, "text": "Input: s = aaa, t = a\nOutput: a\n" }, { "code": null, "e": 777, "s": 467, "text": "Your Task:\nYou do not need to read or print anything. Your task is to complete the function all_longest_common_subsequences() which takes string a and b as first and second parameter respectively and returns a list of strings which contains all possible longest common subsequences in lexicographical order.\n " }, { "code": null, "e": 882, "s": 777, "text": "Expected Time Complexity: O(n4)\nExpected Space Complexity: O(K * n) where K is a constant less than n.\n " }, { "code": null, "e": 927, "s": 882, "text": "Constraints:\n1 ≤ Length of both strings ≤ 50" }, { "code": null, "e": 930, "s": 927, "text": "+2" }, { "code": null, "e": 955, "s": 930, "text": "yoyogeshyo6351 month ago" }, { "code": null, "e": 978, "s": 955, "text": "Successfully submitted" }, { "code": null, "e": 1671, "s": 978, "text": " int countLcs(string s,string t){ int n=s.size(); int m=t.size(); vector<vector<int>>dp(n+1,vector<int>(m+1,-1)); for(int i=0;i<=n;i++){ for(int j=0;j<=m;j++){ if(i==0 || j==0)dp[i][j]=0; else if(s[i-1]==t[j-1])dp[i][j]=1+dp[i-1][j-1]; else { dp[i][j]=max(dp[i-1][j],dp[i][j-1]); } } } return dp[n][m];}set<string>st;void helper(string s,string t,int i,int j,string &temp,vector<string>&ans,int count){ if(count==0){ if(st.find(temp)==st.end()) { st.insert(temp); ans.push_back(temp);} return; } if(i>=s.size() || j>=t.size())return;" }, { "code": null, "e": 2165, "s": 1671, "text": " for(int r=i;r<s.size();r++){ for(int c=j;c<t.size();c++){ if(s[r]==t[c]){ temp.push_back(s[r]); helper(s,t,r+1,c+1,temp,ans,count-1); temp.pop_back(); } } }} vector<string> all_longest_common_subsequences(string s, string t) { // Code here int a=countLcs(s,t); vector<string>ans; string temp=\"\"; helper(s,t,0,0,temp,ans,a); sort(ans.begin(),ans.end()); return ans; }" }, { "code": null, "e": 2168, "s": 2165, "text": "-1" }, { "code": null, "e": 2191, "s": 2168, "text": "sgupta95192 months ago" }, { "code": null, "e": 2203, "s": 2191, "text": "tle forever" }, { "code": null, "e": 2205, "s": 2203, "text": "0" }, { "code": null, "e": 2224, "s": 2205, "text": "avenvy3 months ago" }, { "code": null, "e": 2263, "s": 2224, "text": "Python Solution (All Test Case Passed)" }, { "code": null, "e": 4133, "s": 2263, "text": "def all_longest_common_subsequences(self, s, t):\n\t MAX=100\n\t dp=[[-1 for i in range(MAX)] for i in range(MAX)]\n\t def lcs(str1, str2, len1, len2, i, j):\n\t if (i == len1 or j == len2):\n\t dp[i][j] = 0\n\t return dp[i][j]\n\t if (dp[i][j] != -1):\n\t return dp[i][j]\n\t ret = 0\n\t if (str1[i] == str2[j]):\n\t ret = 1 + lcs(str1, str2, len1, len2, i + 1, j + 1)\n else:\n ret = max(lcs(str1, str2, len1, len2, i + 1, j),\n lcs(str1, str2, len1, len2, i, j + 1))\n dp[i][j] = ret\n return ret\n def printAll(str1, str2, len1, len2,cur, indx1, indx2, currlcs,lcslen):\n if (currlcs == lcslen):\n ans.append(cur)\n return\n if (indx1 == len1 or indx2 == len2):\n return\n for ch in range(ord('a'),ord('z') + 1):\n done = False\n for i in range(indx1,len1):\n if (chr(ch)==str1[i]):\n for j in 
range(indx2, len2):\n if (chr(ch) == str2[j] and dp[i][j] == lcslen-currlcs):\n new_cur = cur + chr(ch)\n printAll(str1, str2, len1, len2, new_cur, i + 1, j + 1, currlcs + 1,lcslen)\n done = True\n break\n if (done):\n break\n len1,len2 = len(s),len(t)\n lcslen = lcs(s, t, len1, len2, 0, 0)\n global ans\n ans=[]\n printAll(s, t, len1, len2, \"\", 0, 0, 0,lcslen)\n ans.sort()\n w=set()\n pre=[]\n for i in ans:\n if i in w:\n continue\n else:\n pre.append(i)\n w.add(i)\n return pre" }, { "code": null, "e": 4136, "s": 4133, "text": "+2" }, { "code": null, "e": 4165, "s": 4136, "text": "shashankrustagii4 months ago" }, { "code": null, "e": 5604, "s": 4165, "text": "class Solution\n{\n\tpublic:\n\tint dp[51][51];\n\tunordered_set<string> mp;\n\tunordered_set<string> st;\n\tvoid printLCS(vector<string> &ans,string s,string t,int i,int j,string str)\n\t{\n\t if(i==0 or j==0)\n\t {\n\t reverse(str.begin(),str.end());\n\t if(st.find(str)!=st.end()) return;\n\t st.insert(str);\n ans.push_back(str);\n return;\n\t }\n\t string key;\n\t key+=to_string(i);\n\t key+=to_string(j);\n\t key+=str;\n\t if(mp.find(key)!=mp.end()) return;\n\t mp.insert(key);\n\t if(s[i-1]==t[j-1]) printLCS(ans,s,t,i-1,j-1,str+s[i-1]);\n\t else if(dp[i-1][j]>dp[i][j-1]) printLCS(ans,s,t,i-1,j,str);\n\t else if(dp[i-1][j]<dp[i][j-1])printLCS(ans,s,t,i,j-1,str);\n\t else\n\t {\n\t printLCS(ans,s,t,i-1,j,str);\n\t printLCS(ans,s,t,i,j-1,str);\n\t }\n\t return;\n\t}\n\tvector<string> all_longest_common_subsequences(string s, string t)\n\t{\n\t\tst.clear();\n\t\tint x=s.length();\n\t\tint y=t.length();\n for(int i=0;i<x+1;i++)\n for(int j=0;j<y+1;j++)\n if(!i or !j) dp[i][j]=0;\n for(int i=1;i<x+1;i++)\n for(int j=1;j<y+1;j++)\n if(s[i-1]!=t[j-1]) dp[i][j]=max(dp[i-1][j],dp[i][j-1]);\n else dp[i][j]=dp[i-1][j-1]+1;\n vector<string> ans;\n string str;\n int i=x;\n int j=y;\n printLCS(ans,s,t,i,j,str);\n sort(ans.begin(),ans.end());\n return ans;\n \n \n\t}\n};" }, { "code": null, "e": 5607, "s": 5604, "text": "-5" }, { "code": null, "e": 5633, "s": 5607, "text": "saurabhkure167 months ago" }, { "code": null, "e": 7431, "s": 5633, "text": "class Solution\n{\npublic:\nint dp[101][101];\nvector<string> res;\n\nvoid storeAllLCS(int n, int m, string&s, string& t){\n\nvector<vector<unordered_set<string>>> allLcs ( 101,\nvector<unordered_set<string>>(101));\n// We have taken unordered_set here so as to remove the possibility of redundant\n// sub-LC sequences as we need only one occurrence of the sub-LCS\n// Also unordered_set performs better compared to set/ordered_set\nfor(int i = 0; i <= n; ++i)\nallLcs[i][0].insert(\"\");\n\nfor(int i = 0; i <= m; ++i)\nallLcs[0][i].insert(\"\");\n\nfor(int i = 1; i <= n; ++i){\nfor(int j = 1; j <= m; ++j){\nif( s[i-1] == t[j-1] ){\nfor( string prev : allLcs[i-1][j-1] ){\nallLcs[i][j].insert(prev + s[i-1]);\n}\n}\nelse if( dp[i-1][j] == dp[i][j-1] ){\n// Store all previous strings from both cells instead of just one\nfor( string prev : allLcs[i-1][j] )\nallLcs[i][j].insert(prev);\nfor( string prev : allLcs[i][j-1] )\nallLcs[i][j].insert(prev);\n}\nelse{\n// Get the maximum length set out of the two dp cells\nunordered_set<string> best = ( dp[i-1][j] > dp[i][j-1] ) ?\n( allLcs[i-1][j] ) : ( allLcs[i][j-1] );\n\nfor( string prev : best )\nallLcs[i][j].insert(prev);\n}\n}\n}\n// Last element of allLcs will contain all LCS of max lengths\nres = vector<string>( allLcs[n][m].begin(), allLcs[n][m].end() );\nsort( res.begin(), res.end() );\n}\n\nvoid lcs_Tabulation(int n, int m, string& s, string& t){\nmemset(dp, 
0, sizeof(dp));\nfor(int i = 0; i <= n; ++i){\nfor(int j = 0; j <= m; ++j){\nif( i == 0 || j == 0 )\ndp[i][j] = 0;\nelse if( s[i-1] == t[j-1] )\ndp[i][j] = 1 + dp[i-1][j-1];\nelse\ndp[i][j] = max( dp[i-1][j], dp[i][j-1] );\n}\n}\n\nstoreAllLCS(n, m, s, t);\n}\n\nvector<string> all_longest_common_subsequences(string s, string t)\n{\n\nif( s.size() == 0 || t.size() == 0 )\nreturn res;\n\nlcs_Tabulation(s.size(), t.size(), s, t);\nreturn res;\n}\n};" }, { "code": null, "e": 7434, "s": 7431, "text": "-2" }, { "code": null, "e": 7457, "s": 7434, "text": "Amit Kumar8 months ago" }, { "code": null, "e": 7468, "s": 7457, "text": "Amit Kumar" }, { "code": null, "e": 7499, "s": 7468, "text": "https://practice.geeksforge..." }, { "code": null, "e": 7501, "s": 7499, "text": "0" }, { "code": null, "e": 7520, "s": 7501, "text": "Sourav9 months ago" }, { "code": null, "e": 7527, "s": 7520, "text": "Sourav" }, { "code": null, "e": 9403, "s": 7527, "text": "//This function will return all the LCS in reverse order.set<string> helper(string &s, string &t, int i, int j, vector<vector<int>> &dp){ if(i == 0 || j == 0){ set<string> ans; ans.insert(\"\"); return ans; } if(s[i-1] == t[j-1]){ set<string> ans; char ch = s[i-1]; set<string> tempAns = helper(s, t, i-1, j-1, dp); for(string s: tempAns){ string temp = ch + s; ans.insert(temp); } return ans; } else{ set<string> temp1; set<string>temp2; if(dp[i][j-1] >= dp[i-1][j]){ temp1 = helper(s, t, i, j-1, dp); } if(dp[i-1][j] >= dp[i][j-1]){ temp2 = helper(s, t, i-1, j, dp); } for(string s : temp1){ temp2.insert(s); } return temp2; } }public:vector<string> all_longest_common_subsequences(string s, string t){ // Simple code to find LCS length.... int m = s.length(); int n = t.length(); vector<vector<int>>dp(m+1, vector<int>(n+1)); for(int i = 0; i<=m; i++){ for(int j = 0; j<=n; j++){ if(i == 0 || j == 0){ dp[i][j] = 0; } else if(s[i-1] == t[j-1]){ dp[i][j] = dp[i-1][j-1] + 1; } else{ dp[i][j] = max(dp[i-1][j], dp[i][j-1]); } } } //This will return all the LCS but in reverse order. set<string> help = helper(s, t, m , n, dp); set<string> help1; //This will reverse the LCS's for(string s : help){ reverse(s.begin(), s.end()); help1.insert(s); } vector<string> ans; for(string s : help1){ ans.push_back(s); } return ans;}" }, { "code": null, "e": 9405, "s": 9403, "text": "0" }, { "code": null, "e": 9432, "s": 9405, "text": "Mohd Amir Khan9 months ago" }, { "code": null, "e": 9447, "s": 9432, "text": "Mohd Amir Khan" }, { "code": null, "e": 9450, "s": 9447, "text": "-1" }, { "code": null, "e": 9473, "s": 9450, "text": "naman jain9 months ago" }, { "code": null, "e": 9484, "s": 9473, "text": "naman jain" }, { "code": null, "e": 9938, "s": 9484, "text": "I am following this approach anybody else have done with this approach please connect:Approach 1)create dp table and find the length of LCS then we will use 2 loops to iterate complete table and if we get LCS length in any cell of dp matrix then we will call another method which will generate LCS string and add that to set and like wise do for all cell of dp which are equal to LCS lenght and then add element of set then to arraylist and return." }, { "code": null, "e": 10103, "s": 9938, "text": "This is my approach I am getting some string all are correct ans but complete set of answer which is expected is not comming can anyone help me with this approach." 
}, { "code": null, "e": 10105, "s": 10103, "text": "0" }, { "code": null, "e": 10132, "s": 10105, "text": "PRATEEK JHABAK9 months ago" }, { "code": null, "e": 10147, "s": 10132, "text": "PRATEEK JHABAK" }, { "code": null, "e": 10212, "s": 10147, "text": "https://uploads.disquscdn.c...What is wrong in this? Please Help" }, { "code": null, "e": 10358, "s": 10212, "text": "We strongly recommend solving this problem on your own before viewing its editorial. Do you still\n want to view the editorial?" }, { "code": null, "e": 10394, "s": 10358, "text": " Login to access your submissions. " }, { "code": null, "e": 10404, "s": 10394, "text": "\nProblem\n" }, { "code": null, "e": 10414, "s": 10404, "text": "\nContest\n" }, { "code": null, "e": 10477, "s": 10414, "text": "Reset the IDE using the second button on the top right corner." }, { "code": null, "e": 10625, "s": 10477, "text": "Avoid using static/global variables in your code as your code is tested against multiple test cases and these tend to retain their previous values." }, { "code": null, "e": 10833, "s": 10625, "text": "Passing the Sample/Custom Test cases does not guarantee the correctness of code. On submission, your code is tested against multiple test cases consisting of all possible corner cases and stress constraints." }, { "code": null, "e": 10939, "s": 10833, "text": "You can access the hints to get an idea about what is expected of you as well as the final solution code." } ]
Dart Programming - Calling a Function
A function must be called to execute it. This process is termed as function invocation.

function_name()

The following example illustrates how a function can be invoked −

void main() { 
   test(); 
} 
test() { 
   //function definition 
   print("function called"); 
}

It will produce the following output −

function called
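A function that declares parameters is invoked the same way, with the arguments supplied inside the parentheses; a return value can be used directly. A small sketch (the names greet and square are illustrative, not part of the example above) −

void main() { 
   greet('Dart'); 
   print(square(4)); 
} 
greet(String name) { 
   print('Hello, $name'); 
} 
int square(int n) { 
   return n * n; 
}

It will produce the following output −

Hello, Dart
16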
[ { "code": null, "e": 2613, "s": 2525, "text": "A function must be called to execute it. This process is termed as function invocation." }, { "code": null, "e": 2630, "s": 2613, "text": "function_name()\n" }, { "code": null, "e": 2696, "s": 2630, "text": "The following example illustrates how a function can be invoked −" }, { "code": null, "e": 2796, "s": 2696, "text": "void main() { \n test(); \n} \ntest() { \n //function definition \n print(\"function called\"); \n} " }, { "code": null, "e": 2835, "s": 2796, "text": "It will produce the following output −" }, { "code": null, "e": 2852, "s": 2835, "text": "function called\n" }, { "code": null, "e": 2887, "s": 2852, "text": "\n 44 Lectures \n 4.5 hours \n" }, { "code": null, "e": 2907, "s": 2887, "text": " Sriyank Siddhartha" }, { "code": null, "e": 2940, "s": 2907, "text": "\n 34 Lectures \n 4 hours \n" }, { "code": null, "e": 2960, "s": 2940, "text": " Sriyank Siddhartha" }, { "code": null, "e": 2993, "s": 2960, "text": "\n 69 Lectures \n 4 hours \n" }, { "code": null, "e": 3010, "s": 2993, "text": " Frahaan Hussain" }, { "code": null, "e": 3045, "s": 3010, "text": "\n 117 Lectures \n 10 hours \n" }, { "code": null, "e": 3062, "s": 3045, "text": " Frahaan Hussain" }, { "code": null, "e": 3097, "s": 3062, "text": "\n 22 Lectures \n 1.5 hours \n" }, { "code": null, "e": 3117, "s": 3097, "text": " Pranjal Srivastava" }, { "code": null, "e": 3150, "s": 3117, "text": "\n 34 Lectures \n 3 hours \n" }, { "code": null, "e": 3170, "s": 3150, "text": " Pranjal Srivastava" }, { "code": null, "e": 3177, "s": 3170, "text": " Print" }, { "code": null, "e": 3188, "s": 3177, "text": " Add Notes" } ]
How to Create Expandable RecyclerView items in Android using Kotlin? - GeeksforGeeks
24 Mar, 2021

RecyclerView is a ViewGroup added to Android as a successor of the GridView and ListView. It is an improvement on both of them and can be found in the latest v7 support packages. It was created to make it possible to build any list with XML layouts as items, which can be customized vastly while improving the efficiency of ListViews and GridViews. This improvement is achieved by recycling the views which are out of the visibility of the user. For example, if a user scrolled down to a position where items 4 and 5 are visible, items 1, 2, and 3 would be cleared from the memory to reduce memory consumption. In this article, we will explain how to create Expandable RecyclerView items in Android. Below is the sample video to show what we are going to build. Note that we are going to implement this project using the Kotlin language. 

Step 1: Create a new project

To create a new project in Android Studio please refer to How to Create/Start a New Project in Android Studio. Note that you should select Kotlin as the programming language.

Step 2: Add view binding dependency

Go to build.gradle(app), add the following dependency inside the android tag and click sync now.

buildFeatures {
    viewBinding true
}

Step 3: Working with the activity_main.xml

Go to the activity_main.xml file and refer to the following code. Below is the code for the activity_main.xml file. It has only a single RecyclerView which we will use to show our data.

XML

<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout
    xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:app="http://schemas.android.com/apk/res-auto"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:background="#F5F8FD"
    android:layout_height="match_parent"
    tools:context=".MainActivity">

    <!--Add recycler view to main activity-->
    <androidx.recyclerview.widget.RecyclerView
        android:id="@+id/rv_list"
        android:layout_width="match_parent"
        android:layout_height="match_parent"
        tools:listitem="@layout/single_item"
        app:layoutManager="androidx.recyclerview.widget.LinearLayoutManager" />

</androidx.constraintlayout.widget.ConstraintLayout>

Step 4: Create a new layout file and name it single_item.xml

Go to the single_item.xml file and refer to the following code. Below is the code for the single_item.xml file. It is the single item layout that we will use in RecyclerView.
XML <?xml version="1.0" encoding="utf-8"?> <com.google.android.material.card.MaterialCardView xmlns:android="http://schemas.android.com/apk/res/android" xmlns:app="http://schemas.android.com/apk/res-auto" android:id="@+id/card_layout" android:layout_width="match_parent" android:layout_height="wrap_content" android:layout_marginStart="5dp" android:layout_marginEnd="5dp" android:layout_marginBottom="10dp"> <androidx.constraintlayout.widget.ConstraintLayout android:layout_width="match_parent" android:layout_height="match_parent"> <!--Text view for showing the language name--> <TextView android:id="@+id/tv_lang_name" android:layout_width="wrap_content" android:layout_height="40dp" android:layout_marginStart="20dp" android:layout_marginTop="10dp" android:text="Language" android:textColor="@color/black" android:textSize="20sp" android:textStyle="bold" app:layout_constraintLeft_toLeftOf="parent" app:layout_constraintTop_toTopOf="parent" /> <!--This is the layout "expanded_view" we will hide initially and show as expanded layout when user clicks on any of the item--> <RelativeLayout android:id="@+id/expanded_view" android:layout_width="match_parent" android:layout_height="wrap_content" android:layout_marginTop="10dp" app:layout_constraintLeft_toLeftOf="parent" app:layout_constraintRight_toRightOf="parent" app:layout_constraintTop_toBottomOf="@id/tv_lang_name"> <!--It has a text view which we will use in our case as a description text for the languages--> <TextView android:id="@+id/tv_description" android:layout_width="match_parent" android:layout_height="wrap_content" android:layout_margin="10dp" android:text="Description Text" android:textSize="18sp" /> </RelativeLayout> </androidx.constraintlayout.widget.ConstraintLayout> </com.google.android.material.card.MaterialCardView> Step 5: Create a new model class Create a new class Language.kt we will use data of custom generic “Language” to pass in the list that will be shown in RecyclerView. Kotlin // this is the Language model class class Language( val name : String ="", val description : String= "", var expand : Boolean = false ) Step 6: Working with adapter class Create a new class RvAdapter.kt this will act as an Adapter class for the recycler view. The logic behind the expandable recycler view is that initially, we will make the visibility of layout with id “expanded_view” of “single_item.xml” to GONE and once the user clicks on any item of recycler view we will make its visibility VISIBLE. Comments are added before the code for better understanding. 
Kotlin import android.view.LayoutInflater import android.view.View import android.view.ViewGroup import androidx.recyclerview.widget.RecyclerView import com.geeksforgeeks.rvadapterviewbinding.databinding.SingleItemBinding class RvAdapter( private var languageList: List<Language> ) : RecyclerView.Adapter<RvAdapter.ViewHolder>() { // create an inner class with name ViewHolder // It takes a view argument, in which pass the generated class of single_item.xml // ie SingleItemBinding and in the RecyclerView.ViewHolder(binding.root) pass it like this inner class ViewHolder(val binding: SingleItemBinding) : RecyclerView.ViewHolder(binding.root) // inside the onCreateViewHolder inflate the view of SingleItemBinding // and return new ViewHolder object containing this layout override fun onCreateViewHolder(parent: ViewGroup, viewType: Int): ViewHolder { val binding = SingleItemBinding.inflate(LayoutInflater.from(parent.context), parent, false) return ViewHolder(binding) } // bind the items with each item of the list languageList which than will be // shown in recycler view // to keep it simple we are not setting any image data to view override fun onBindViewHolder(holder: ViewHolder, position: Int) { with(holder){ with(languageList[position]){ // set name of the language from the list binding.tvLangName.text = this.name // set description to the text // since this is inside "expandedView" its visibility will be gone initially // after click on the item we will make the visibility of the "expandedView" visible // which will also make the visibility of desc also visible binding.tvDescription.text = this.description // check if boolean property "extend" is true or false // if it is true make the "extendedView" Visible binding.expandedView.visibility = if (this.expand) View.VISIBLE else View.GONE // on Click of the item take parent card view in our case // revert the boolean "expand" binding.cardLayout.setOnClickListener { this.expand = !this.expand notifyDataSetChanged() } } } } // return the size of languageList override fun getItemCount(): Int { return languageList.size } } Step 7: Working with MainActivity.kt Go to the MainActivity.kt file and refer to the following code. Below is the code for the MainActivity.kt file. Comments are added inside the code to understand the code in more detail. Kotlin import androidx.appcompat.app.AppCompatActivity import android.os.Bundle import androidx.recyclerview.widget.LinearLayoutManager import com.geeksforgeeks.rvadapterviewbinding.databinding.ActivityMainBinding class MainActivity : AppCompatActivity() { // view binding for the activity private var _binding: ActivityMainBinding? = null private val binding get() = _binding!! // get reference to the adapter class private var languageList = ArrayList<Language>() private lateinit var rvAdapter: RvAdapter override fun onCreate(savedInstanceState: Bundle?) { super.onCreate(savedInstanceState) _binding = ActivityMainBinding.inflate(layoutInflater) setContentView(binding.root) // define layout manager for the Recycler view binding.rvList.layoutManager = LinearLayoutManager(this) // attach adapter to the recycler view rvAdapter = RvAdapter(languageList) binding.rvList.adapter = rvAdapter // create new objects // add some row data val language1 = Language( "Java", "Java is an Object Oriented Programming language." + " Java is used in all kind of applications like Mobile Applications (Android is Java based), " + "desktop applications, web applications, client server applications, enterprise applications and many more. 
", false ) val language2 = Language( "Kotlin", "Kotlin is a statically typed, general-purpose programming language" + " developed by JetBrains, that has built world-class IDEs like IntelliJ IDEA, PhpStorm, Appcode, etc.", false ) val language3 = Language( "Python", "Python is a high-level, general-purpose and a very popular programming language." + " Python programming language (latest Python 3) is being used in web development, Machine Learning applications, " + "along with all cutting edge technology in Software Industry.", false ) val language4 = Language( "CPP", "C++ is a general purpose programming language and widely used now a days for " + "competitive programming. It has imperative, object-oriented and generic programming features. ", false ) // add items to list languageList.add(language1) languageList.add(language2) languageList.add(language3) languageList.add(language4) rvAdapter.notifyDataSetChanged() } // on destroy of view make the binding reference to null override fun onDestroy() { super.onDestroy() _binding = null } } Output: Github Repo here. Kotlin Android Android Kotlin Writing code in comment? Please use ide.geeksforgeeks.org, generate link and share the link here. Flutter - Custom Bottom Navigation Bar How to Read Data from SQLite Database in Android? Retrofit with Kotlin Coroutine in Android Android Listview in Java with Example How to Change the Background Color After Clicking the Button in Android? Android UI Layouts Kotlin Array Retrofit with Kotlin Coroutine in Android Kotlin Setters and Getters MVP (Model View Presenter) Architecture Pattern in Android with Example
[ { "code": null, "e": 25188, "s": 25157, "text": " \n24 Mar, 2021\n" }, { "code": null, "e": 26050, "s": 25188, "text": "RecyclerView is a ViewGroup added to the android studio as a successor of the GridView and ListView. It is an improvement on both of them and can be found in the latest v-7 support packages. It has been created to make possible construction of any lists with XML layouts as an item that can be customized vastly while improving the efficiency of ListViews and GridViews. This improvement is achieved by recycling the views which are out of the visibility of the user. For example, if a user scrolled down to a position where items 4 and 5 are visible; items 1, 2, and 3 would be cleared from the memory to reduce memory consumption. In this article, we will explain how to create Expandable Recycler View items in android. Below is the sample video to show what we are going to build. Note that we are going to implement this project using the Kotlin language. " }, { "code": null, "e": 26079, "s": 26050, "text": "Step 1: Create a new project" }, { "code": null, "e": 26243, "s": 26079, "text": "To create a new project in Android Studio please refer to How to Create/Start a New Project in Android Studio. Note that select Kotlin as the programming language." }, { "code": null, "e": 26279, "s": 26243, "text": "Step 2: Add view binding dependency" }, { "code": null, "e": 26375, "s": 26279, "text": "Go to build.gradle(app) and the following dependency inside the android tag and click sync now." }, { "code": null, "e": 26392, "s": 26375, "text": " buildFeatures {" }, { "code": null, "e": 26415, "s": 26392, "text": " viewBinding true" }, { "code": null, "e": 26419, "s": 26415, "text": " }" }, { "code": null, "e": 26462, "s": 26419, "text": "Step 3: Working with the activity_main.xml" }, { "code": null, "e": 26649, "s": 26462, "text": "Go to the activity_main.xml file and refer to the following code. Below is the code for the activity_main.xml file. It has only a single Recycler view which we will use to show our data." }, { "code": null, "e": 26653, "s": 26649, "text": "XML" }, { "code": "\n\n\n\n\n\n\n<?xml version=\"1.0\" encoding=\"utf-8\"?> \n<androidx.constraintlayout.widget.ConstraintLayout \n xmlns:android=\"http://schemas.android.com/apk/res/android\"\n xmlns:app=\"http://schemas.android.com/apk/res-auto\"\n xmlns:tools=\"http://schemas.android.com/tools\"\n android:layout_width=\"match_parent\"\n android:background=\"#F5F8FD\"\n android:layout_height=\"match_parent\"\n tools:context=\".MainActivity\"> \n \n <!--Add recycler view to main activity-->\n <androidx.recyclerview.widget.RecyclerView\n android:id=\"@+id/rv_list\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"match_parent\"\n tools:listitem=\"@layout/single_item\"\n app:layoutManager=\"androidx.recyclerview.widget.LinearLayoutManager\" /> \n \n</androidx.constraintlayout.widget.ConstraintLayout>\n\n\n\n\n\n", "e": 27489, "s": 26663, "text": null }, { "code": null, "e": 27558, "s": 27489, "text": "Step 4: Create a new layout file and name it as single_item.xml file" }, { "code": null, "e": 27733, "s": 27558, "text": "Go to the single_item.xml file and refer to the following code. Below is the code for the single_item.xml file. It is the single item layout that we will use in RecyclerView." 
}, { "code": null, "e": 27737, "s": 27733, "text": "XML" }, { "code": "\n\n\n\n\n\n\n<?xml version=\"1.0\" encoding=\"utf-8\"?> \n<com.google.android.material.card.MaterialCardView \n xmlns:android=\"http://schemas.android.com/apk/res/android\"\n xmlns:app=\"http://schemas.android.com/apk/res-auto\"\n android:id=\"@+id/card_layout\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"wrap_content\"\n android:layout_marginStart=\"5dp\"\n android:layout_marginEnd=\"5dp\"\n android:layout_marginBottom=\"10dp\"> \n \n <androidx.constraintlayout.widget.ConstraintLayout\n android:layout_width=\"match_parent\"\n android:layout_height=\"match_parent\"> \n \n <!--Text view for showing the language name-->\n <TextView\n android:id=\"@+id/tv_lang_name\"\n android:layout_width=\"wrap_content\"\n android:layout_height=\"40dp\"\n android:layout_marginStart=\"20dp\"\n android:layout_marginTop=\"10dp\"\n android:text=\"Language\"\n android:textColor=\"@color/black\"\n android:textSize=\"20sp\"\n android:textStyle=\"bold\"\n app:layout_constraintLeft_toLeftOf=\"parent\"\n app:layout_constraintTop_toTopOf=\"parent\" /> \n \n <!--This is the layout \"expanded_view\" we will \n hide initially and show as expanded \n layout when user clicks on any of the item-->\n <RelativeLayout\n android:id=\"@+id/expanded_view\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"wrap_content\"\n android:layout_marginTop=\"10dp\"\n app:layout_constraintLeft_toLeftOf=\"parent\"\n app:layout_constraintRight_toRightOf=\"parent\"\n app:layout_constraintTop_toBottomOf=\"@id/tv_lang_name\"> \n \n <!--It has a text view which we will use in our case as \n a description text for the languages-->\n <TextView\n android:id=\"@+id/tv_description\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"wrap_content\"\n android:layout_margin=\"10dp\"\n android:text=\"Description Text\"\n android:textSize=\"18sp\" /> \n </RelativeLayout> \n \n </androidx.constraintlayout.widget.ConstraintLayout> \n \n</com.google.android.material.card.MaterialCardView>\n\n\n\n\n\n", "e": 30088, "s": 27747, "text": null }, { "code": null, "e": 30121, "s": 30088, "text": "Step 5: Create a new model class" }, { "code": null, "e": 30254, "s": 30121, "text": "Create a new class Language.kt we will use data of custom generic “Language” to pass in the list that will be shown in RecyclerView." }, { "code": null, "e": 30261, "s": 30254, "text": "Kotlin" }, { "code": "\n\n\n\n\n\n\n// this is the Language model class \nclass Language( \n val name : String =\"\", \n val description : String= \"\", \n var expand : Boolean = false\n)\n\n\n\n\n\n", "e": 30436, "s": 30271, "text": null }, { "code": null, "e": 30471, "s": 30436, "text": "Step 6: Working with adapter class" }, { "code": null, "e": 30868, "s": 30471, "text": "Create a new class RvAdapter.kt this will act as an Adapter class for the recycler view. The logic behind the expandable recycler view is that initially, we will make the visibility of layout with id “expanded_view” of “single_item.xml” to GONE and once the user clicks on any item of recycler view we will make its visibility VISIBLE. Comments are added before the code for better understanding." 
}, { "code": null, "e": 30875, "s": 30868, "text": "Kotlin" }, { "code": "\n\n\n\n\n\n\nimport android.view.LayoutInflater \nimport android.view.View \nimport android.view.ViewGroup \nimport androidx.recyclerview.widget.RecyclerView \nimport com.geeksforgeeks.rvadapterviewbinding.databinding.SingleItemBinding \n \nclass RvAdapter( \n private var languageList: List<Language> \n) : RecyclerView.Adapter<RvAdapter.ViewHolder>() { \n \n // create an inner class with name ViewHolder \n // It takes a view argument, in which pass the generated class of single_item.xml \n // ie SingleItemBinding and in the RecyclerView.ViewHolder(binding.root) pass it like this \n inner class ViewHolder(val binding: SingleItemBinding) : RecyclerView.ViewHolder(binding.root) \n \n // inside the onCreateViewHolder inflate the view of SingleItemBinding \n // and return new ViewHolder object containing this layout \n override fun onCreateViewHolder(parent: ViewGroup, viewType: Int): ViewHolder { \n val binding = SingleItemBinding.inflate(LayoutInflater.from(parent.context), parent, false) \n return ViewHolder(binding) \n } \n \n // bind the items with each item of the list languageList which than will be \n // shown in recycler view \n // to keep it simple we are not setting any image data to view \n override fun onBindViewHolder(holder: ViewHolder, position: Int) { \n with(holder){ \n with(languageList[position]){ \n // set name of the language from the list \n binding.tvLangName.text = this.name \n // set description to the text \n // since this is inside \"expandedView\" its visibility will be gone initially \n // after click on the item we will make the visibility of the \"expandedView\" visible \n // which will also make the visibility of desc also visible \n binding.tvDescription.text = this.description \n // check if boolean property \"extend\" is true or false \n // if it is true make the \"extendedView\" Visible \n binding.expandedView.visibility = if (this.expand) View.VISIBLE else View.GONE \n // on Click of the item take parent card view in our case \n // revert the boolean \"expand\" \n binding.cardLayout.setOnClickListener { \n this.expand = !this.expand \n notifyDataSetChanged() \n } \n } \n } \n } \n // return the size of languageList \n override fun getItemCount(): Int { \n return languageList.size \n } \n}\n\n\n\n\n\n", "e": 33445, "s": 30885, "text": null }, { "code": null, "e": 33482, "s": 33445, "text": "Step 7: Working with MainActivity.kt" }, { "code": null, "e": 33668, "s": 33482, "text": "Go to the MainActivity.kt file and refer to the following code. Below is the code for the MainActivity.kt file. Comments are added inside the code to understand the code in more detail." }, { "code": null, "e": 33675, "s": 33668, "text": "Kotlin" }, { "code": "\n\n\n\n\n\n\nimport androidx.appcompat.app.AppCompatActivity \nimport android.os.Bundle \nimport androidx.recyclerview.widget.LinearLayoutManager \nimport com.geeksforgeeks.rvadapterviewbinding.databinding.ActivityMainBinding \n \nclass MainActivity : AppCompatActivity() { \n \n // view binding for the activity \n private var _binding: ActivityMainBinding? = null\n private val binding get() = _binding!! \n \n // get reference to the adapter class \n private var languageList = ArrayList<Language>() \n private lateinit var rvAdapter: RvAdapter \n \n override fun onCreate(savedInstanceState: Bundle?) 
{ \n super.onCreate(savedInstanceState) \n _binding = ActivityMainBinding.inflate(layoutInflater) \n setContentView(binding.root) \n \n // define layout manager for the Recycler view \n binding.rvList.layoutManager = LinearLayoutManager(this) \n \n // attach adapter to the recycler view \n rvAdapter = RvAdapter(languageList) \n binding.rvList.adapter = rvAdapter \n \n // create new objects \n // add some row data \n val language1 = Language( \n \"Java\", \n \"Java is an Object Oriented Programming language.\" + \n \" Java is used in all kind of applications like Mobile Applications (Android is Java based), \" + \n \"desktop applications, web applications, client server applications, enterprise applications and many more. \", \n false\n ) \n val language2 = Language( \n \"Kotlin\", \n \"Kotlin is a statically typed, general-purpose programming language\" + \n \" developed by JetBrains, that has built world-class IDEs like IntelliJ IDEA, PhpStorm, Appcode, etc.\", \n false\n ) \n val language3 = Language( \n \"Python\", \n \"Python is a high-level, general-purpose and a very popular programming language.\" + \n \" Python programming language (latest Python 3) is being used in web development, Machine Learning applications, \" + \n \"along with all cutting edge technology in Software Industry.\", \n false\n ) \n val language4 = Language( \n \"CPP\", \n \"C++ is a general purpose programming language and widely used now a days for \" + \n \"competitive programming. It has imperative, object-oriented and generic programming features. \", \n false\n ) \n \n // add items to list \n languageList.add(language1) \n languageList.add(language2) \n languageList.add(language3) \n languageList.add(language4) \n \n rvAdapter.notifyDataSetChanged() \n \n } \n \n // on destroy of view make the binding reference to null \n override fun onDestroy() { \n super.onDestroy() \n _binding = null\n } \n}\n\n\n\n\n\n", "e": 36582, "s": 33685, "text": null }, { "code": null, "e": 36590, "s": 36582, "text": "Output:" }, { "code": null, "e": 36608, "s": 36590, "text": "Github Repo here." }, { "code": null, "e": 36625, "s": 36608, "text": "\nKotlin Android\n" }, { "code": null, "e": 36635, "s": 36625, "text": "\nAndroid\n" }, { "code": null, "e": 36644, "s": 36635, "text": "\nKotlin\n" }, { "code": null, "e": 36849, "s": 36644, "text": "Writing code in comment? \n Please use ide.geeksforgeeks.org, \n generate link and share the link here.\n " }, { "code": null, "e": 36888, "s": 36849, "text": "Flutter - Custom Bottom Navigation Bar" }, { "code": null, "e": 36938, "s": 36888, "text": "How to Read Data from SQLite Database in Android?" }, { "code": null, "e": 36980, "s": 36938, "text": "Retrofit with Kotlin Coroutine in Android" }, { "code": null, "e": 37018, "s": 36980, "text": "Android Listview in Java with Example" }, { "code": null, "e": 37091, "s": 37018, "text": "How to Change the Background Color After Clicking the Button in Android?" }, { "code": null, "e": 37110, "s": 37091, "text": "Android UI Layouts" }, { "code": null, "e": 37123, "s": 37110, "text": "Kotlin Array" }, { "code": null, "e": 37165, "s": 37123, "text": "Retrofit with Kotlin Coroutine in Android" }, { "code": null, "e": 37192, "s": 37165, "text": "Kotlin Setters and Getters" } ]
What is the difference between Declaring and Initializing a variable in JavaScript?
The following is stated about the declaration and initialization of a variable in the ECMAScript specification −

A var statement declares variables that are scoped to the running execution context’s VariableEnvironment. Var variables are created when their containing Lexical Environment is instantiated and are initialized to undefined when created. [...] A variable defined by a VariableDeclaration with an Initializer is assigned the value of its Initializer’s AssignmentExpression when the VariableDeclaration is executed, not when the variable is created.

The above defines the difference:

All variables are initialized with the value undefined.
Variable declarations are initialized with undefined upon the initialization of their lexical environment.
This initialization is not an assignment; the initializer's value is assigned only when execution reaches the declaration.
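A short example makes the distinction visible: the declaration is hoisted and the variable is initialized to undefined, while the assignment only happens when execution reaches that line −

console.log(a); // undefined: 'a' already exists, initialized to undefined
var a = 10;     // the value 10 is assigned here, at execution time
console.log(a); // 10

The first log would throw a ReferenceError only if a had never been declared at all.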
[ { "code": null, "e": 1167, "s": 1062, "text": "The following is stated about declaration and initialization of a variable in ECMAScript specification −" }, { "code": null, "e": 1615, "s": 1167, "text": "A var statement declares variables that are scoped to the running execution context’s VariableEnvironment. Var variables are created when their containing Lexical Environment is instantiated and are initialized to undefined when created. [...] A variable defined by a VariableDeclaration with an Initializer is assigned the value of its Initializer’s AssignmentExpression when the VariableDeclaration is executed, not when the variable is created." }, { "code": null, "e": 1649, "s": 1615, "text": "The above defines the difference:" }, { "code": null, "e": 1705, "s": 1649, "text": "All variables are initialized with the value undefined." }, { "code": null, "e": 1813, "s": 1705, "text": "Variables declarations are initialized with undefined upon the initialization of their lexical environment." }, { "code": null, "e": 1865, "s": 1813, "text": "This initialization does not work as an assignment." } ]
How to set a default value for an empty row in MySQL?
To set a default value for empty (NULL) rows, use COALESCE(), which returns its first non-NULL argument. Let us first create a table −

mysql> create table DemoTable1863
   (
   FirstName varchar(20)
   );
Query OK, 0 rows affected (0.00 sec)

Insert some records in the table using insert command −

mysql> insert into DemoTable1863 values('Chris');
Query OK, 1 row affected (0.00 sec)
mysql> insert into DemoTable1863 values(NULL);
Query OK, 1 row affected (0.00 sec)
mysql> insert into DemoTable1863 values('David');
Query OK, 1 row affected (0.00 sec)
mysql> insert into DemoTable1863 values(NULL);
Query OK, 1 row affected (0.00 sec)

Display all records from the table using select statement −

mysql> select * from DemoTable1863;

This will produce the following output −

+-----------+
| FirstName |
+-----------+
| Chris     |
| NULL      |
| David     |
| NULL      |
+-----------+
4 rows in set (0.00 sec)

Following is the query to set a default value for the empty rows −

mysql> select coalesce(FirstName,'UNKNOWN NAME') from DemoTable1863;

This will produce the following output −

+------------------------------------+
| coalesce(FirstName,'UNKNOWN NAME') |
+------------------------------------+
| Chris                              |
| UNKNOWN NAME                       |
| David                              |
| UNKNOWN NAME                       |
+------------------------------------+
4 rows in set (0.00 sec)
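For the two-argument case, MySQL's IFNULL() gives the same result; COALESCE() is the more general form, since it accepts any number of fallback values and returns the first non-NULL one:

mysql> select ifnull(FirstName,'UNKNOWN NAME') from DemoTable1863;

This query produces the same four rows as the COALESCE() version above.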
[ { "code": null, "e": 1159, "s": 1062, "text": "To set default value for empty row, use the concept of COALESCE(). Let us first create a table −" }, { "code": null, "e": 1272, "s": 1159, "text": "mysql> create table DemoTable1863\n (\n FirstName varchar(20)\n );\nQuery OK, 0 rows affected (0.00 sec)" }, { "code": null, "e": 1328, "s": 1272, "text": "Insert some records in the table using insert command −" }, { "code": null, "e": 1666, "s": 1328, "text": "mysql> insert into DemoTable1863 values('Chris');\nQuery OK, 1 row affected (0.00 sec)\nmysql> insert into DemoTable1863 values(NULL);\nQuery OK, 1 row affected (0.00 sec)\nmysql> insert into DemoTable1863 values('David');\nQuery OK, 1 row affected (0.00 sec)\nmysql> insert into DemoTable1863 values(NULL);\nQuery OK, 1 row affected (0.00 sec)" }, { "code": null, "e": 1726, "s": 1666, "text": "Display all records from the table using select statement −" }, { "code": null, "e": 1762, "s": 1726, "text": "mysql> select * from DemoTable1863;" }, { "code": null, "e": 1803, "s": 1762, "text": "This will produce the following output −" }, { "code": null, "e": 1940, "s": 1803, "text": "+-----------+\n| FirstName |\n+-----------+\n| Chris |\n| NULL |\n| David |\n| NULL |\n+-----------+\n4 rows in set (0.00 sec)" }, { "code": null, "e": 2000, "s": 1940, "text": "Following is the query to set default value for empty row −" }, { "code": null, "e": 2069, "s": 2000, "text": "mysql> select coalesce(FirstName,'UNKNOWN NAME') from DemoTable1863;" }, { "code": null, "e": 2110, "s": 2069, "text": "This will produce the following output −" }, { "code": null, "e": 2447, "s": 2110, "text": "+------------------------------------+\n| coalesce(FirstName,'UNKNOWN NAME') |\n+------------------------------------+\n| Chris |\n| UNKNOWN NAME |\n| David |\n| UNKNOWN NAME |\n+------------------------------------+\n4 rows in set (0.00 sec)" } ]
Node.js fs.promises.readdir() Method
08 Oct, 2021

The fs.promises.readdir() method is defined in the File System module of Node.js. The File System module is used to interact with the files on the user's computer. The readdir() method is used to read the names of the files and folders in a directory. The fs.promises.readdir() method returns a resolved or rejected promise, and hence avoids the callback nesting (callback hell) problem that may occur with the fs.readdir() method.

Syntax

fs.promises.readdir(path, options)

Parameters: This method accepts two parameters as mentioned above and described below:

path: It is a string, buffer or URL that specifies the path to the directory whose contents are to be read.
options: It is an optional parameter that specifies options such as the encoding of the returned file names (default: utf8).

Return Value: It returns a resolved or rejected promise. The promise is resolved with the list of the names of files and folders if the directory is read successfully, otherwise it is rejected with an error object (for example, if the specified directory does not exist or the process does not have permission to read it).

Example 1:

// Node.js program to demonstrate the
// fs.promises.readdir() Method

// Importing File System module
const fs = require('fs')

// The process.cwd() gives current
// working directory
fs.promises.readdir(process.cwd())

    // If the promise is resolved and
    // the data is fetched
    .then(filenames => {
        for (let filename of filenames) {
            console.log(filename)
        }
    })

    // If the promise is rejected
    .catch(err => {
        console.log(err)
    })

Output: Read and display the contents of the current working directory ‘gfgExamples’

Example 2:

// Node.js program to demonstrate the
// fs.promises.readdir() Method

// Importing File System module
const fs = require('fs')

// process.cwd() gives current
// working directory
const targetDir = process.argv[2] || process.cwd()

fs.promises.readdir(targetDir)

    // If the promise is resolved and
    // the data is fetched
    .then(filenames => {
        for (let filename of filenames) {
            console.log(filename)
        }
    })

    // If the promise is rejected
    .catch(err => {
        console.log(err)
    })

Output: Read and display the contents of the directory one level above the current working directory ‘gfgExamples’ (passed as a command-line argument).

Reference: https://nodejs.org/docs/latest/api/fs.html#fs_fspromises_readdir_path_options
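On Node 14 and newer, the same promise API can be imported directly from 'fs/promises', which reads naturally with async/await. A small sketch (the function name listDir is illustrative):

// Using fs.promises.readdir() with async/await
const fs = require('fs/promises')

async function listDir(dir) {
    try {
        // Wait for the list of file and folder names
        const filenames = await fs.readdir(dir)
        for (const filename of filenames) {
            console.log(filename)
        }
    } catch (err) {
        // A rejected promise surfaces here as an exception
        console.log(err)
    }
}

listDir(process.cwd())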
[ { "code": null, "e": 28, "s": 0, "text": "\n08 Oct, 2021" }, { "code": null, "e": 435, "s": 28, "text": "The fs.promise.readdir() method defined in the File System module of Node.js. The file System module is basically to interact with the hard disk of the users computer. The readdir() method is used to read the files and folders names. The fs.promise.readdir() method returns a resolved or rejected promise and hence avoid the callback nesting or callback hell problems that may occur in fs.readdir() method." }, { "code": null, "e": 442, "s": 435, "text": "Syntax" }, { "code": null, "e": 476, "s": 442, "text": "fs.promise.readdir(path, options)" }, { "code": null, "e": 562, "s": 476, "text": "Parameter: This method accepts two parameters as mentioned above and described below:" }, { "code": null, "e": 670, "s": 562, "text": "path: It is a string, buffer or url that specifies the path to the directory, whose contents is to be read." }, { "code": null, "e": 767, "s": 670, "text": "options: It is an optional parameter and used to specify encoding techniques (default-utf8) etc." }, { "code": null, "e": 1085, "s": 767, "text": "Return Value: It returns a resolved or rejected promise. The promise is resolved with the list of the names of files and folders if a directory is successfully read otherwise rejected with an error object if any error is occurred (example-specified directory not exist or does not have permissions to read files etc)." }, { "code": null, "e": 1096, "s": 1085, "text": "Example 1:" }, { "code": "// Node.js program to demonstrate the // fs.promise.readdir() Method // Importing File System moduleconst fs = require('fs') // The process.cwd() gives current// working directoryfs.promises.readdir(process.cwd()) // If promise resolved and // datas are fetched .then(filenames => { for (let filename of filenames) { console.log(filename) } }) // If promise is rejected .catch(err => { console.log(err) })", "e": 1564, "s": 1096, "text": null }, { "code": null, "e": 1645, "s": 1564, "text": "Output: Read and display the contents of current working directory ‘gfgExamples’" }, { "code": null, "e": 1656, "s": 1645, "text": "Example 2:" }, { "code": "// Node.js program to demonstrate the // fs.promise.readdir() Method // Importing File System moduleconst fs = require('fs') // process.cwd() gives current// working directoryconst targetDir = process.argv[2] || process.cwd() fs.promises.readdir(targetDir) // If promise resolved and // datas are fetched .then(filenames => { for (let filename of filenames) { console.log(filename) } }) // If promise is rejected .catch(err => { console.log(err) })", "e": 2169, "s": 1656, "text": null }, { "code": null, "e": 2273, "s": 2169, "text": "Output: Read and display the contents of one directory back to current working directory ‘gfgExamples’." }, { "code": null, "e": 2362, "s": 2273, "text": "Reference: https://nodejs.org/docs/latest/api/fs.html#fs_fspromises_readdir_path_options" }, { "code": null, "e": 2380, "s": 2362, "text": "Node.js-fs-module" }, { "code": null, "e": 2388, "s": 2380, "text": "Node.js" }, { "code": null, "e": 2405, "s": 2388, "text": "Web Technologies" } ]
What is the inline function in JavaScript?
16 Jun, 2022

In JavaScript, an inline function is a special type of anonymous function that is assigned to a variable; in other words, it is an anonymous function given a name. JavaScript does not support the traditional concept of an inline function as in C or C++. Thus, an anonymous function and an inline function are practically the same. Unlike normal function declarations, they are created at runtime.

Syntax:

Function:
function func() {
    //Your Code Here
}

Anonymous function:
function() {
    //Your Code Here
}

Inline function:
var func = function() { 
    //Your Code Here 
};

Explanation: Making an inline function is really simple. First, make an anonymous function (a function with no name), and then assign it to a variable.

Example:

html

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta http-equiv="X-UA-Compatible" content="ie=edge">
    <title>JS Functions</title>

    <!-- jQuery CDN -->
    <script src="https://code.jquery.com/jquery-3.4.1.min.js"
            integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo="
            crossorigin="anonymous">
    </script>
    <!-- End of CDN -->

    <style>
        button {
            background-color: #4CAF50;
            border: none;
            color: white;
            padding: 15px 32px;
            text-align: center;
            font-size: 16px;
            margin: 8px 2px;
            cursor: pointer;
            display: block;
            width: 270px;
        }
    </style>
</head>

<body>
    <h1 style="text-align:center;color:green;">GeeksforGeeks</h1>
    <p align="center">
        <button id="function">function</button>
        <button id="anonymous-function">anonymous function</button>
        <button id="inline-function">inline function</button>
    </p>

    <script type="text/javascript">
        //function
        function func() {
            alert("Hello I'm inside function");
        }
        $('#function').click(func);

        //anonymous function
        $('#anonymous-function').click(function() {
            alert("Hello I'm inside anonymous function");
        });

        //inline function
        var inline_func = function() {
            alert("Hello I'm inside inline function");
        };
        $('#inline-function').click(inline_func);
    </script>
</body>

</html>

Output:

Clicking on any of the above buttons will call the respective function and an alert message will show up. Let's say the “inline function” button is clicked; then the following alert message will pop up.
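The example above uses jQuery only to attach the handlers; the same inline-function idea in plain JavaScript looks like this (reusing the 'inline-function' button id from the markup above):

// An inline function assigned to a variable, then used as a click handler
var showMessage = function() {
    alert("Hello I'm inside inline function");
};
document.getElementById('inline-function').addEventListener('click', showMessage);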
[ { "code": null, "e": 52, "s": 24, "text": "\n16 Jun, 2022" }, { "code": null, "e": 421, "s": 52, "text": "In JavaScript, inline function is a special type of anonymous function which is assigned to a variable, or in other words, an anonymous function with a name. JavaScript does not support the traditional concept of inline function like in C or C++. Thus anonymous function and inline function is practically the same. Unlike normal function, they are created at runtime." }, { "code": null, "e": 429, "s": 421, "text": "Syntax:" }, { "code": null, "e": 439, "s": 429, "text": "Function:" }, { "code": null, "e": 480, "s": 439, "text": "function func() {\n //Your Code Here\n}" }, { "code": null, "e": 500, "s": 480, "text": "Anonymous function:" }, { "code": null, "e": 536, "s": 500, "text": "function() {\n //Your Code Here\n}" }, { "code": null, "e": 552, "s": 536, "text": "Inline function" }, { "code": null, "e": 602, "s": 552, "text": "var func = function() { \n //Your Code Here \n};" }, { "code": null, "e": 615, "s": 602, "text": "Explanation:" }, { "code": null, "e": 753, "s": 615, "text": "Making an inline function is really simple. First, make an anonymous function(a function with no name), and then assign it to a variable." }, { "code": null, "e": 763, "s": 753, "text": "Example: " }, { "code": null, "e": 768, "s": 763, "text": "html" }, { "code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> <meta http-equiv=\"X-UA-Compatible\" content=\"ie=edge\"> <title>JS Functions</title> <!-- jQuery CDN --> <script src=\"https://code.jquery.com/jquery-3.4.1.min.js\" integrity=\"sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=\" crossorigin=\"anonymous\"> </script> <!-- End of CDN --> <style> button { background-color: #4CAF50; border: none; color: white; padding: 15px 32px; text-align: center; font-size: 16px; margin: 8px 2px; cursor: pointer; display: block; width: 270px; } </style></head> <body> <h1 style=\"text-align:center;color:green;\">GeeksforGeeks</h1> <p align=\"center\"> <button id=\"function\">function</button> <button id=\"anonymous-function\">anonymous function</button> <button id=\"inline-function\">inline function</button> </p> <script type=\"text/javascript\"> //function function func() { alert(\"Hello I'm inside function\"); } $('#function').click(func); //anonymous function $('#anonymous-function').click(function() { alert(\"Hello I'm inside anonymous function\"); }); //inline function var inline_func = function() { alert(\"Hello I'm inside inline function\"); }; $('#inline-function').click(inline_func); </script></body> </html>", "e": 2409, "s": 768, "text": null }, { "code": null, "e": 2418, "s": 2409, "text": "Output: " }, { "code": null, "e": 2615, "s": 2418, "text": "Clicking on any of the above button will call the respective function and an alert message will show up. Let’s say “inline function” button is clicked then the following alert message will pop up." 
}, { "code": null, "e": 2628, "s": 2615, "text": "simmytarika5" }, { "code": null, "e": 2635, "s": 2628, "text": "Picked" }, { "code": null, "e": 2646, "s": 2635, "text": "JavaScript" }, { "code": null, "e": 2665, "s": 2646, "text": "Technical Scripter" }, { "code": null, "e": 2682, "s": 2665, "text": "Web Technologies" }, { "code": null, "e": 2709, "s": 2682, "text": "Web technologies Questions" }, { "code": null, "e": 2807, "s": 2709, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 2868, "s": 2807, "text": "Difference between var, let and const keywords in JavaScript" }, { "code": null, "e": 2940, "s": 2868, "text": "Differences between Functional Components and Class Components in React" }, { "code": null, "e": 2980, "s": 2940, "text": "Remove elements from a JavaScript Array" }, { "code": null, "e": 3022, "s": 2980, "text": "Roadmap to Learn JavaScript For Beginners" }, { "code": null, "e": 3063, "s": 3022, "text": "Difference Between PUT and PATCH Request" }, { "code": null, "e": 3096, "s": 3063, "text": "Installation of Node.js on Linux" }, { "code": null, "e": 3158, "s": 3096, "text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills" }, { "code": null, "e": 3219, "s": 3158, "text": "Difference between var, let and const keywords in JavaScript" }, { "code": null, "e": 3269, "s": 3219, "text": "How to insert spaces/tabs in text using HTML/CSS?" } ]
UTC_TIMESTAMP() function in MySQL
13 Oct, 2020

The UTC_TIMESTAMP() function in MySQL returns the current Coordinated Universal Time (UTC) date and time. The value is returned in 'YYYY-MM-DD HH:MM:SS' or YYYYMMDDHHMMSS format, depending on whether the function is used in a string or a numeric context.

Syntax :

UTC_TIMESTAMP
  OR
UTC_TIMESTAMP()

Parameter : This method does not accept any parameter.

Returns : It returns the current UTC date and time value.

Example-1 : Getting the current UTC date and time using the UTC_TIMESTAMP function.

SELECT UTC_TIMESTAMP as CurrUtcDateAndTime ;

Output :

Example-2 : Getting the current UTC date and time in numeric format using the UTC_TIMESTAMP function.

SELECT UTC_TIMESTAMP + 0 as CurrUtcDateAndTime ;

Output :

Example-3 : The UTC_TIMESTAMP function can be used to set the value of columns. To demonstrate, create a table named DeliveryDetails.

CREATE TABLE DeliveryDetails (
DeliveryId INT AUTO_INCREMENT,
ProductId INT NOT NULL,
ProductName VARCHAR(20) NOT NULL,
Delivered_At TIMESTAMP NOT NULL,
PRIMARY KEY(DeliveryId)
);

Here, we will use the UTC_TIMESTAMP function when a delivery is completed. The value in the Delivered_At column will be the value given by the UTC_TIMESTAMP function.

INSERT INTO
DeliveryDetails(ProductId, ProductName, Delivered_At)
VALUES
(1010001, 'Dell Inspirion', UTC_TIMESTAMP );

Now, checking the DeliveryDetails table :

SELECT * FROM DeliveryDetails;

Output :
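A related sketch (not one of the examples above; the column aliases are made up for illustration): UTC_TIMESTAMP is independent of the session time zone, while NOW() follows it, so comparing the two reads off the server's current offset from UTC. The values you get depend on the server's time_zone setting at the moment the query runs.

SELECT NOW() as CurrLocalTime,
       UTC_TIMESTAMP as CurrUtcTime,
       TIMESTAMPDIFF(MINUTE, UTC_TIMESTAMP, NOW()) as OffsetFromUtcInMinutes ;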
[ { "code": null, "e": 28, "s": 0, "text": "\n13 Oct, 2020" }, { "code": null, "e": 313, "s": 28, "text": "UTC_TIMESTAMP() function in MySQL is used to check current Coordinated Universal Time (UTC) date and time value. It returns the current UTC date and time value in YYYY-MM-DD HH:MM:SS or YYYYMMDDHHMMSS.uuu format, depending on whether the function is used in string or numeric context." }, { "code": null, "e": 322, "s": 313, "text": "Syntax :" }, { "code": null, "e": 357, "s": 322, "text": "UTC_TIMESTAMP\n OR\nUTC_TIMESTAMP()\n" }, { "code": null, "e": 411, "s": 357, "text": "Parameter :This method does not accept any parameter." }, { "code": null, "e": 468, "s": 411, "text": "Returns :It returns the current UTC date and time value." }, { "code": null, "e": 547, "s": 468, "text": "Example-1 :Getting the current UTC date and time using UTC_TIMESTAMP Function." }, { "code": null, "e": 593, "s": 547, "text": "SELECT UTC_TIMESTAMP as CurrUtcDateAndTime ;\n" }, { "code": null, "e": 602, "s": 593, "text": "Output :" }, { "code": null, "e": 699, "s": 602, "text": "Example-2 :Getting the current UTC date and time using UTC_TIMESTAMP Function in numeric format." }, { "code": null, "e": 749, "s": 699, "text": "SELECT UTC_TIMESTAMP + 0 as CurrUtcDateAndTime ;\n" }, { "code": null, "e": 758, "s": 749, "text": "Output :" }, { "code": null, "e": 886, "s": 758, "text": "Example-3 :The UTC_TIMESTAMP function can be used to set value of columns. To demonstrate create a table named DeliveryDetails." }, { "code": null, "e": 1067, "s": 886, "text": "CREATE TABLE DeliveryDetails (\nDeliveryId INT AUTO_INCREMENT,\nProductId INT NOT NULL,\nProductName VARCHAR(20) NOT NULL,\nDelivered_At TIMESTAMP NOT NULL,\nPRIMARY KEY(DeliveryId)\n);\n" }, { "code": null, "e": 1227, "s": 1067, "text": "Here, we will use UTC_TIMESTAMP function when a delivery will be completed. The value in Delivered_At column will be the value given by UTC_TIMESTAMP Function." }, { "code": null, "e": 1349, "s": 1227, "text": "INSERT INTO \nDeliveryDetails(ProductId, ProductName, Delivered_At)\nVALUES\n(1010001, 'Dell Inspirion', UTC_TIMESTAMP );\n" }, { "code": null, "e": 1391, "s": 1349, "text": "Now, checking the DeliveryDetails table :" }, { "code": null, "e": 1423, "s": 1391, "text": "SELECT * FROM DeliveryDetails;\n" }, { "code": null, "e": 1432, "s": 1423, "text": "Output :" }, { "code": null, "e": 1441, "s": 1432, "text": "DBMS-SQL" }, { "code": null, "e": 1447, "s": 1441, "text": "mysql" }, { "code": null, "e": 1451, "s": 1447, "text": "SQL" }, { "code": null, "e": 1455, "s": 1451, "text": "SQL" } ]
JavaScript | Auto-filling one field same as other
18 Nov, 2020

(This article assumes some prior knowledge of HTML, CSS, and JavaScript.)

You might have noticed that websites such as e-commerce sites or government portals sometimes have two address fields in their forms: one for a primary address and another for a secondary address (or one for a billing address and another for a shipping address, and so on). Most of the time people have the same primary and secondary addresses, and to save them the tedious work of re-entering the same data, such forms offer an option to automatically copy the contents of one field into another. We are going to see how to build this kind of auto-complete form using JavaScript.

In the form discussed below there is a checkbox; whenever it is checked, the code automatically copies the values from the primary address and primary zip code into the secondary address and secondary zip code. If the checkbox is unchecked, these fields go blank.

Here is the code for such a form:

Example:

html

<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8" />
    <title>Form Auto Fill</title>
    <style>
        fieldset {
            margin-bottom: 5%;
        }
    </style>
</head>

<body>
    <h1>AutoFill Form</h1>
    <form>
        <!-- Fields for the primary address -->
        <fieldset>
            <legend><b>Primary Address</b></legend>
            <label for="primaryaddress">Address:</label>
            <input type="text" name="Address"
                   id="primaryaddress" required /><br />
            <label for="primaryzip">Zip code:</label>
            <input type="text" name="Zip code"
                   id="primaryzip" pattern="[0-9]{6}"
                   required /><br />
        </fieldset>

        <input type="checkbox" id="same" name="same"
               onchange="addressFunction()" />
        <label for="same">
            If the secondary address is the same, select this box.
        </label>

        <!-- Fields for the secondary address -->
        <fieldset>
            <legend><b>Secondary Address</b></legend>
            <label for="secondaryaddress">Address:</label>
            <input type="text" name="Address"
                   id="secondaryaddress" required /><br />
            <label for="secondaryzip">Zip code:</label>
            <input type="text" name="Zip code"
                   id="secondaryzip" pattern="[0-9]{6}"
                   required /><br />
        </fieldset>

        <!-- Submit button of the form -->
        <input type="submit" value="Submit" />
    </form>

    <!-- JavaScript code -->
    <script>
        function addressFunction() {
            if (document.getElementById("same").checked) {
                // Copy the primary values into the secondary fields
                document.getElementById("secondaryaddress").value =
                    document.getElementById("primaryaddress").value;
                document.getElementById("secondaryzip").value =
                    document.getElementById("primaryzip").value;
            } else {
                // Clear the secondary fields again
                document.getElementById("secondaryaddress").value = "";
                document.getElementById("secondaryzip").value = "";
            }
        }
    </script>
</body>

</html>

This is how the form looks before the box is checked:

And this is how it looks after the box is checked:

Note: the required attribute on the input fields ensures that the form can only be submitted when these fields are non-empty, and pattern="[0-9]{6}" on the zip-code fields ensures that the zip code has the correct six-digit format.

Explanation: When the checked state of the checkbox changes, its onchange event fires and calls addressFunction(). If the box is checked, the values of the primary address and primary zip code are copied into the secondary address and secondary zip code (getElementById() looks up the element with the given id, and .value reads or sets its value). Otherwise, these fields are cleared so that the user can fill them in (in case the primary and secondary addresses differ).
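As an alternative to the inline onchange attribute used above, the same behavior can be attached from the script with addEventListener. The sketch below is not part of the original article; it assumes the same element ids as in the markup above and should run after the form exists (for example, in a script tag at the end of the body):

javascript

// Sketch: the same autofill behavior, wired up without an
// inline onchange attribute. Ids match the markup above.
document.getElementById("same").addEventListener("change", function () {
    var pairs = [
        ["primaryaddress", "secondaryaddress"],
        ["primaryzip", "secondaryzip"]
    ];
    for (var i = 0; i < pairs.length; i++) {
        // Copy the primary value when checked, clear when unchecked
        document.getElementById(pairs[i][1]).value =
            this.checked ? document.getElementById(pairs[i][0]).value : "";
    }
});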
[ { "code": null, "e": 28, "s": 0, "text": "\n18 Nov, 2020" }, { "code": null, "e": 1000, "s": 28, "text": "(This article takes some prior knowledge of HTML, CSS, and JavaScript.)You might have noticed that sometimes websites like e-commerce or some government website have two address fields in their forms. One for primary address and another for secondary address(or one for billing address and another for shipping address etc).Most of the time people have the same primary and secondary addresses and to save us from the tedious work of re-entering the same data again they have some kind of option to automatically copy the contents of one field into another.We are going to see how to make such kind of Auto-Complete form using JavaScript.In the form we are going to discuss, there is a checkbox and whenever it is checked, the code automatically copies values from the primary address and primary zip-code to the secondary address and secondary zip-code respectively. If the checkbox is unchecked, these fields will go blank.Here is the simple code for such kind of form:" }, { "code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\" /> <title>Form Auto Fill</title> <style> fieldset { margin-bottom: 5%; } </style> </head> <body> <h1>AutoFill Form</h1> <form> //Fields for primary address <fieldset> <legend><b>Primary Address</b> </legend> <label for=\"primaryaddress\"> Address:</label> <input type=\"text\" name=\"Address\" id=\"primaryaddress\" required /><br /> <label for=\"primaryzip\">Zip code:</label> <input type=\"text\" name=\"Zip code\" id=\"primaryzip\" pattern=\"[0-9]{6}\" required /><br /> </fieldset> <input type=\"checkbox\" id=\"same\" name=\"same\" onchange=\"addressFunction()\" /> <label for=\"same\"> If same secondary address select this box. </label> // Fields for secondary address <fieldset> <legend><b>Secondary Address</b></legend> <label for=\"secondaryaddress\"> Address: </label> <input type=\"text\" name=\"Address\" id=\"secondaryaddress\" required /><br /> <label for=\"secondaryzip\"> Zip code:</label> <input type=\"text\" name=\"Zip code\" id=\"secondaryzip\" pattern=\"[0-9]{6}\" required /><br /> </fieldset> // Submit button in the form <input type=\"submit\" value=\"Submit\" /> </form> // JavaScript Code <script> function addressFunction() { if (document.getElementById( \"same\").checked) { document.getElementById( \"secondaryaddress\").value = document.getElementById( \"primaryaddress\").value; document.getElementById( \"secondaryzip\").value = document.getElementById( \"primaryzip\").value; } else { document.getElementById( \"secondaryaddress\").value = \"\"; document.getElementById( \"secondaryzip\").value = \"\"; } } </script> </body></html>", "e": 3789, "s": 1000, "text": null }, { "code": null, "e": 3851, "s": 3789, "text": "#This is how the form will look like before checking the box:" }, { "code": null, "e": 3924, "s": 3851, "text": "#And this is how it will look after checking the box:Note: features like" }, { "code": null, "e": 4028, "s": 3924, "text": "‘required'(line 18, 20, 29, 31)-ensures that form will only be submitted if these fields are non-empty;" }, { "code": null, "e": 4133, "s": 4028, "text": "‘pattern = “[0-9]{6}”‘(line 20, 31)-ensures that format of zip-code is correct i.e., six digit zip-code." 
}, { "code": null, "e": 4692, "s": 4133, "text": "Explanation:When the checked state of the checkbox is changed the ‘onchange'(see line 23) event will occur which will call the ‘addressFunction()’.If the box is checked, values of primary address and primary zip-code will be copied to secondary address and secondary zip-code(by using ‘getElementById()’ function we are referring to an element of particular Id and ‘.value’ to access the value at that particular Id element).Otherwise, these fields will remain blank so that it can be filled by the user(in case of different primary and secondary addresses)." }, { "code": null, "e": 4708, "s": 4692, "text": "javascript-form" }, { "code": null, "e": 4719, "s": 4708, "text": "JavaScript" }, { "code": null, "e": 4736, "s": 4719, "text": "Web Technologies" }, { "code": null, "e": 4834, "s": 4736, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 4895, "s": 4834, "text": "Difference between var, let and const keywords in JavaScript" }, { "code": null, "e": 4967, "s": 4895, "text": "Differences between Functional Components and Class Components in React" }, { "code": null, "e": 5007, "s": 4967, "text": "Remove elements from a JavaScript Array" }, { "code": null, "e": 5060, "s": 5007, "text": "Hide or show elements in HTML using display property" }, { "code": null, "e": 5112, "s": 5060, "text": "How to append HTML code to a div using JavaScript ?" }, { "code": null, "e": 5174, "s": 5112, "text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills" }, { "code": null, "e": 5207, "s": 5174, "text": "Installation of Node.js on Linux" }, { "code": null, "e": 5268, "s": 5207, "text": "Difference between var, let and const keywords in JavaScript" }, { "code": null, "e": 5318, "s": 5268, "text": "How to insert spaces/tabs in text using HTML/CSS?" } ]
Java Program for Merge Sort for Linked Lists
21 Oct, 2021

Merge sort is often preferred for sorting a linked list. The slow random-access performance of a linked list makes some other algorithms (such as quicksort) perform poorly, and others (such as heapsort) completely impossible.

Let head be the first node of the linked list to be sorted and headRef be the pointer to head. Note that we need a reference to head in MergeSort(), as the implementation below changes next links to sort the linked list (not the data at the nodes), so the head node has to be changed if the data at the original head is not the smallest value in the linked list.

MergeSort(headRef)
1) If head is NULL or there is only one element in the Linked List
   then return.
2) Else divide the linked list into two halves.
      FrontBackSplit(head, &a, &b); /* a and b are two halves */
3) Sort the two halves a and b.
      MergeSort(a);
      MergeSort(b);
4) Merge the sorted a and b (using SortedMerge() discussed here)
   and update the head pointer using headRef.
      *headRef = SortedMerge(a, b);

Java

// Java program to illustrate merge sort
// of a linked list

public class linkedList {

    node head = null;

    static class node {
        int val;
        node next;

        public node(int val)
        {
            this.val = val;
        }
    }

    node sortedMerge(node a, node b)
    {
        node result = null;

        /* Base cases */
        if (a == null)
            return b;
        if (b == null)
            return a;

        /* Pick either a or b, and recur */
        if (a.val <= b.val) {
            result = a;
            result.next = sortedMerge(a.next, b);
        }
        else {
            result = b;
            result.next = sortedMerge(a, b.next);
        }
        return result;
    }

    node mergeSort(node h)
    {
        // Base case : if head is null
        if (h == null || h.next == null) {
            return h;
        }

        // Get the middle of the list
        node middle = getMiddle(h);
        node nextofmiddle = middle.next;

        // Set the next of the middle node to null
        middle.next = null;

        // Apply mergeSort on the left list
        node left = mergeSort(h);

        // Apply mergeSort on the right list
        node right = mergeSort(nextofmiddle);

        // Merge the left and right lists
        node sortedlist = sortedMerge(left, right);
        return sortedlist;
    }

    // Utility function to get the middle of the linked list
    node getMiddle(node h)
    {
        // Base case
        if (h == null)
            return h;
        node fastptr = h.next;
        node slowptr = h;

        // Move fastptr by two and slowptr by one;
        // finally slowptr will point to the middle node
        while (fastptr != null) {
            fastptr = fastptr.next;
            if (fastptr != null) {
                slowptr = slowptr.next;
                fastptr = fastptr.next;
            }
        }
        return slowptr;
    }

    void push(int new_data)
    {
        /* allocate node */
        node new_node = new node(new_data);

        /* link the old list off the new node */
        new_node.next = head;

        /* move the head to point to the new node */
        head = new_node;
    }

    // Utility function to print the linked list
    void printList(node headref)
    {
        while (headref != null) {
            System.out.print(headref.val + " ");
            headref = headref.next;
        }
    }

    public static void main(String[] args)
    {
        linkedList li = new linkedList();

        /* Let us create an unsorted linked list to test the
           functions. The created list shall be:
           2->3->20->5->10->15 */
        li.push(15);
        li.push(10);
        li.push(5);
        li.push(20);
        li.push(3);
        li.push(2);

        System.out.println("Linked List without sorting is :");
        li.printList(li.head);

        // Apply merge sort
        li.head = li.mergeSort(li.head);

        System.out.print("\n Sorted Linked List is: \n");
        li.printList(li.head);
    }
}

// This code is contributed by Rishabh Mahrsee

Output:

Linked List without sorting is :
2 3 20 5 10 15 
 Sorted Linked List is: 
2 3 5 10 15 20

Time Complexity: O(n*log n)
Space Complexity: O(n), since sortedMerge() recurses once per merged node; the mergeSort() recursion itself only adds O(log n) stack frames.
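If the linear recursion of sortedMerge() is a concern, the merge step can be rewritten iteratively with a dummy node, which brings the auxiliary space of this approach down to the O(log n) mergeSort() call stack. A minimal sketch (sortedMergeIterative is an illustrative name, written as a drop-in replacement for sortedMerge() in the class above):

Java

// Iterative merge: splice nodes onto a growing tail instead of
// recursing once per merged node.
node sortedMergeIterative(node a, node b)
{
    node dummy = new node(-1); // temporary head, discarded at the end
    node tail = dummy;
    while (a != null && b != null) {
        if (a.val <= b.val) {
            tail.next = a;
            a = a.next;
        }
        else {
            tail.next = b;
            b = b.next;
        }
        tail = tail.next;
    }
    // Append whichever list still has nodes left
    tail.next = (a != null) ? a : b;
    return dummy.next;
}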
Approach 2: This approach is simpler and uses log n space.

mergeSort():

1) If the size of the linked list is 1, then return the head.
2) Find the middle node using The Tortoise and The Hare approach.
3) Store the next of mid in head2, i.e. the right sub-linked list.
4) Now make the next of mid null.
5) Recursively call mergeSort() on both the left and right sub-linked lists and store the new heads of the left and right linked lists.
6) Call merge() with the new heads of the left and right sub-linked lists as arguments, and store the final head returned after merging.
7) Return the final head of the merged linked list.

merge(head1, head2):

1) Take a pointer, say merged, to store the merged list, and store a dummy node in it.
2) Take a pointer temp and assign merged to it.
3) If the data of head1 is less than the data of head2, store head1 in the next of temp and move head1 to the next of head1.
4) Else store head2 in the next of temp and move head2 to the next of head2.
5) Move temp to the next of temp.
6) Repeat steps 3, 4 & 5 while both head1 and head2 are not null.
7) Now add any remaining nodes of the first or the second linked list to the merged linked list.
8) Return the next of merged (this skips the dummy node and yields the head of the final merged linked list).

Java

// Java program for the above approach
import java.io.*;
import java.lang.*;
import java.util.*;

// Node class
class Node {
    int data;
    Node next;

    Node(int key)
    {
        this.data = key;
        next = null;
    }
}

class GFG {

    // Function to merge sort
    static Node mergeSort(Node head)
    {
        if (head == null || head.next == null)
            return head;

        Node mid = findMid(head);
        Node head2 = mid.next;
        mid.next = null;

        Node newHead1 = mergeSort(head);
        Node newHead2 = mergeSort(head2);

        Node finalHead = merge(newHead1, newHead2);
        return finalHead;
    }

    // Function to merge two sorted linked lists
    static Node merge(Node head1, Node head2)
    {
        // Dummy node; the merged list is built after it
        Node merged = new Node(-1);
        Node temp = merged;

        // While head1 is not null and head2 is not null
        while (head1 != null && head2 != null) {
            if (head1.data < head2.data) {
                temp.next = head1;
                head1 = head1.next;
            }
            else {
                temp.next = head2;
                head2 = head2.next;
            }
            temp = temp.next;
        }

        // Append the remaining nodes of the first list
        while (head1 != null) {
            temp.next = head1;
            head1 = head1.next;
            temp = temp.next;
        }

        // Append the remaining nodes of the second list
        while (head2 != null) {
            temp.next = head2;
            head2 = head2.next;
            temp = temp.next;
        }
        return merged.next;
    }

    // Find the middle node using The Tortoise and The Hare approach
    static Node findMid(Node head)
    {
        Node slow = head, fast = head.next;
        while (fast != null && fast.next != null) {
            slow = slow.next;
            fast = fast.next.next;
        }
        return slow;
    }

    // Function to print the list
    static void printList(Node head)
    {
        while (head != null) {
            System.out.print(head.data + " ");
            head = head.next;
        }
    }

    // Driver Code
    public static void main(String[] args)
    {
        Node head = new Node(7);
        Node temp = head;
        temp.next = new Node(10);
        temp = temp.next;
        temp.next = new Node(5);
        temp = temp.next;
        temp.next = new Node(20);
        temp = temp.next;
        temp.next = new Node(3);
        temp = temp.next;
        temp.next = new Node(2);
        temp = temp.next;

        // Apply merge sort
        head = mergeSort(head);
        System.out.print("\nSorted Linked List is: \n");
        printList(head);
    }
}

Output:

Sorted Linked List is: 
2 3 5 7 10 20 

Time Complexity: O(n*log n)
Space Complexity: O(log n)

Please refer complete article on Merge Sort for Linked Lists for more details!
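Going one step further, the recursion can be removed entirely with a bottom-up merge sort, which merges runs of width 1, 2, 4, ... in passes over the list and therefore needs only O(1) auxiliary space. The sketch below is not part of the original article: mergeSortBottomUp and split are illustrative names, and the code reuses the Node class and merge() method from Approach 2.

Java

// Bottom-up merge sort: iterative, O(1) auxiliary space
static Node mergeSortBottomUp(Node head)
{
    if (head == null || head.next == null)
        return head;

    // Count the nodes once
    int n = 0;
    for (Node p = head; p != null; p = p.next)
        n++;

    Node dummy = new Node(-1);
    dummy.next = head;

    // Double the run width each pass until one run covers the list
    for (int width = 1; width < n; width *= 2) {
        Node prev = dummy, curr = dummy.next;
        while (curr != null) {
            Node left = curr;
            Node right = split(left, width); // detach the first run
            curr = split(right, width);      // detach the second run
            prev.next = merge(left, right);  // merge() from Approach 2
            while (prev.next != null)        // advance prev to the tail
                prev = prev.next;
        }
    }
    return dummy.next;
}

// Detach the first `width` nodes; return the head of the remainder
static Node split(Node head, int width)
{
    for (int i = 1; head != null && i < width; i++)
        head = head.next;
    if (head == null)
        return null;
    Node rest = head.next;
    head.next = null;
    return rest;
}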
[ { "code": null, "e": 52, "s": 24, "text": "\n21 Oct, 2021" }, { "code": null, "e": 278, "s": 52, "text": "Merge sort is often preferred for sorting a linked list. The slow random-access performance of a linked list makes some other algorithms (such as quicksort) perform poorly, and others (such as heapsort) completely impossible." }, { "code": null, "e": 625, "s": 278, "text": "Let head be the first node of the linked list to be sorted and headRef be the pointer to head. Note that we need a reference to head in MergeSort() as the below implementation changes next links to sort the linked lists (not data at the nodes), so head node has to be changed if the data at original head is not the smallest value in linked list." }, { "code": null, "e": 1064, "s": 625, "text": "MergeSort(headRef)\n1) If head is NULL or there is only one element in the Linked List \n then return.\n2) Else divide the linked list into two halves. \n FrontBackSplit(head, &a, &b); /* a and b are two halves */\n3) Sort the two halves a and b.\n MergeSort(a);\n MergeSort(b);\n4) Merge the sorted a and b (using SortedMerge() discussed here) \n and update the head pointer using headRef.\n *headRef = SortedMerge(a, b);\n" }, { "code": null, "e": 1069, "s": 1064, "text": "Java" }, { "code": "// Java program to illustrate merge sorted// of linkedList public class linkedList { node head = null; // node a, b; static class node { int val; node next; public node(int val) { this.val = val; } } node sortedMerge(node a, node b) { node result = null; /* Base cases */ if (a == null) return b; if (b == null) return a; /* Pick either a or b, and recur */ if (a.val <= b.val) { result = a; result.next = sortedMerge(a.next, b); } else { result = b; result.next = sortedMerge(a, b.next); } return result; } node mergeSort(node h) { // Base case : if head is null if (h == null || h.next == null) { return h; } // get the middle of the list node middle = getMiddle(h); node nextofmiddle = middle.next; // set the next of middle node to null middle.next = null; // Apply mergeSort on left list node left = mergeSort(h); // Apply mergeSort on right list node right = mergeSort(nextofmiddle); // Merge the left and right lists node sortedlist = sortedMerge(left, right); return sortedlist; } // Utility function to get the middle of the linked list node getMiddle(node h) { // Base case if (h == null) return h; node fastptr = h.next; node slowptr = h; // Move fastptr by two and slow ptr by one // Finally slowptr will point to middle node while (fastptr != null) { fastptr = fastptr.next; if (fastptr != null) { slowptr = slowptr.next; fastptr = fastptr.next; } } return slowptr; } void push(int new_data) { /* allocate node */ node new_node = new node(new_data); /* link the old list off the new node */ new_node.next = head; /* move the head to point to the new node */ head = new_node; } // Utility function to print the linked list void printList(node headref) { while (headref != null) { System.out.print(headref.val + \" \"); headref = headref.next; } } public static void main(String[] args) { linkedList li = new linkedList(); /* * Let us create a unsorted linked lists to test the functions Created * lists shall be a: 2->3->20->5->10->15 */ li.push(15); li.push(10); li.push(5); li.push(20); li.push(3); li.push(2); System.out.println(\"Linked List without sorting is :\"); li.printList(li.head); // Apply merge Sort li.head = li.mergeSort(li.head); System.out.print(\"\\n Sorted Linked List is: \\n\"); li.printList(li.head); }} // This code is contributed by Rishabh 
Mahrsee", "e": 4092, "s": 1069, "text": null }, { "code": null, "e": 4182, "s": 4092, "text": "Linked List without sorting is :\n2 3 20 5 10 15 \n Sorted Linked List is: \n2 3 5 10 15 20\n" }, { "code": null, "e": 4210, "s": 4182, "text": "Time Complexity: O(n*log n)" }, { "code": null, "e": 4239, "s": 4210, "text": "Space Complexity: O(n*log n)" }, { "code": null, "e": 4298, "s": 4239, "text": "Approach 2: This approach is simpler and uses log n space." }, { "code": null, "e": 4311, "s": 4298, "text": "mergeSort():" }, { "code": null, "e": 4809, "s": 4311, "text": "If the size of the linked list is 1 then return the headFind mid using The Tortoise and The Hare ApproachStore the next of mid in head2 i.e. the right sub-linked list.Now Make the next midpoint null.Recursively call mergeSort() on both left and right sub-linked list and store the new head of the left and right linked list.Call merge() given the arguments new heads of left and right sub-linked lists and store the final head returned after merging.Return the final head of the merged linkedlist." }, { "code": null, "e": 4866, "s": 4809, "text": "If the size of the linked list is 1 then return the head" }, { "code": null, "e": 4916, "s": 4866, "text": "Find mid using The Tortoise and The Hare Approach" }, { "code": null, "e": 4979, "s": 4916, "text": "Store the next of mid in head2 i.e. the right sub-linked list." }, { "code": null, "e": 5012, "s": 4979, "text": "Now Make the next midpoint null." }, { "code": null, "e": 5138, "s": 5012, "text": "Recursively call mergeSort() on both left and right sub-linked list and store the new head of the left and right linked list." }, { "code": null, "e": 5265, "s": 5138, "text": "Call merge() given the arguments new heads of left and right sub-linked lists and store the final head returned after merging." }, { "code": null, "e": 5313, "s": 5265, "text": "Return the final head of the merged linkedlist." }, { "code": null, "e": 5334, "s": 5313, "text": "merge(head1, head2):" }, { "code": null, "e": 5966, "s": 5334, "text": "Take a pointer say merged to store the merged list in it and store a dummy node in it.Take a pointer temp and assign merge to it.If the data of head1 is less than the data of head2, then, store head1 in next of temp & move head1 to the next of head1.Else store head2 in next of temp & move head2 to the next of head2.Move temp to the next of temp.Repeat steps 3, 4 & 5 until head1 is not equal to null and head2 is not equal to null.Now add any remaining nodes of the first or the second linked list to the merged linked list.Return the next of merged(that will ignore the dummy and return the head of the final merged linked list)" }, { "code": null, "e": 6053, "s": 5966, "text": "Take a pointer say merged to store the merged list in it and store a dummy node in it." }, { "code": null, "e": 6097, "s": 6053, "text": "Take a pointer temp and assign merge to it." }, { "code": null, "e": 6219, "s": 6097, "text": "If the data of head1 is less than the data of head2, then, store head1 in next of temp & move head1 to the next of head1." }, { "code": null, "e": 6287, "s": 6219, "text": "Else store head2 in next of temp & move head2 to the next of head2." }, { "code": null, "e": 6318, "s": 6287, "text": "Move temp to the next of temp." }, { "code": null, "e": 6405, "s": 6318, "text": "Repeat steps 3, 4 & 5 until head1 is not equal to null and head2 is not equal to null." 
}, { "code": null, "e": 6499, "s": 6405, "text": "Now add any remaining nodes of the first or the second linked list to the merged linked list." }, { "code": null, "e": 6605, "s": 6499, "text": "Return the next of merged(that will ignore the dummy and return the head of the final merged linked list)" }, { "code": null, "e": 6610, "s": 6605, "text": "Java" }, { "code": "// Java program for the above approachimport java.io.*;import java.lang.*;import java.util.*; // Node Classclass Node { int data; Node next; Node(int key) { this.data = key; next = null; }} class GFG { // Function to merge sort static Node mergeSort(Node head) { if (head.next == null) return head; Node mid = findMid(head); Node head2 = mid.next; mid.next = null; Node newHead1 = mergeSort(head); Node newHead2 = mergeSort(head2); Node finalHead = merge(newHead1, newHead2); return finalHead; } // Function to merge two linked lists static Node merge(Node head1, Node head2) { Node merged = new Node(-1); Node temp = merged; // While head1 is not null and head2 // is not null while (head1 != null && head2 != null) { if (head1.data < head2.data) { temp.next = head1; head1 = head1.next; } else { temp.next = head2; head2 = head2.next; } temp = temp.next; } // While head1 is not null while (head1 != null) { temp.next = head1; head1 = head1.next; temp = temp.next; } // While head2 is not null while (head2 != null) { temp.next = head2; head2 = head2.next; temp = temp.next; } return merged.next; } // Find mid using The Tortoise and The Hare approach static Node findMid(Node head) { Node slow = head, fast = head.next; while (fast != null && fast.next != null) { slow = slow.next; fast = fast.next.next; } return slow; } // Function to print list static void printList(Node head) { while (head != null) { System.out.print(head.data + \" \"); head = head.next; } } // Driver Code public static void main(String[] args) { Node head = new Node(7); Node temp = head; temp.next = new Node(10); temp = temp.next; temp.next = new Node(5); temp = temp.next; temp.next = new Node(20); temp = temp.next; temp.next = new Node(3); temp = temp.next; temp.next = new Node(2); temp = temp.next; // Apply merge Sort head = mergeSort(head); System.out.print(\"\\nSorted Linked List is: \\n\"); printList(head); }}", "e": 9175, "s": 6610, "text": null }, { "code": null, "e": 9183, "s": 9175, "text": "Output:" }, { "code": null, "e": 9222, "s": 9183, "text": "Sorted Linked List is: \n2 3 5 7 10 20 " }, { "code": null, "e": 9250, "s": 9222, "text": "Time Complexity: O(n*log n)" }, { "code": null, "e": 9277, "s": 9250, "text": "Space Complexity: O(log n)" }, { "code": null, "e": 9356, "s": 9277, "text": "Please refer complete article on Merge Sort for Linked Lists for more details!" }, { "code": null, "e": 9370, "s": 9356, "text": "Java Programs" }, { "code": null, "e": 9468, "s": 9370, "text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here." }, { "code": null, "e": 9516, "s": 9468, "text": "Iterate Over the Characters of a String in Java" }, { "code": null, "e": 9555, "s": 9516, "text": "How to Convert Char to String in Java?" }, { "code": null, "e": 9606, "s": 9555, "text": "How to Get Elements By Index from HashSet in Java?" }, { "code": null, "e": 9640, "s": 9606, "text": "Java Program to Write into a File" }, { "code": null, "e": 9687, "s": 9640, "text": "How to Write Data into Excel Sheet using Java?" 
}, { "code": null, "e": 9725, "s": 9687, "text": "Java Program to Read a File to String" }, { "code": null, "e": 9757, "s": 9725, "text": "Comparing two ArrayList In Java" }, { "code": null, "e": 9802, "s": 9757, "text": "Java Program to Convert File to a Byte Array" }, { "code": null, "e": 9813, "s": 9802, "text": "SHA-1 Hash" } ]