Tanvir commited on
Commit
91e0ed2
β€’
1 Parent(s): ca39bf6

initialize

Browse files
Files changed (5) hide show
  1. README.md +22 -0
  2. gdorks.txt +0 -0
  3. remove_duplicates.py +40 -0
  4. remove_emptylines.py +25 -0
  5. sort.py +28 -0
README.md CHANGED
@@ -1,3 +1,25 @@
1
  ---
2
  license: cc-by-4.0
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: cc-by-4.0
3
+ pretty_name: Google Dorks
4
+ tags:
5
+ - List
6
+ - Google
7
+ - GHDB
8
+ - Dork
9
+ language:
10
+ - en
11
+ size_categories:
12
+ - 10K<n<100K
13
  ---
14
+
15
+ # g_Dorks [TXT dataset]
16
+
17
+ A dataset comprising a collection of popular Google dorks gathered from a variety of sources.
18
+
19
+ ## Data Source
20
+
21
+ ***Secret!***
22
+
23
+ ## Disclaimer
24
+
25
+ Please note that while I strive to maintain data quality, I cannot guarantee the accuracy or quality of all entries in this dataset. Use it responsibly and exercise caution when relying on the data for any critical applications. Your feedback and contributions are greatly appreciated for improving the dataset's overall quality.
gdorks.txt ADDED
The diff for this file is too large to render. See raw diff
 
remove_duplicates.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ from collections import Counter
3
+
4
def remove_duplicates(file_path):
    """Remove duplicate lines from *file_path* in place, preserving order.

    Reads the whole file, keeps only the first occurrence of each line,
    rewrites the file, and prints a summary of what was dropped.

    Args:
        file_path: Path to the text file to deduplicate.
    """
    with open(file_path, 'r') as file:
        lines = file.readlines()

    line_counts = Counter(lines)

    duplicates = [line for line, count in line_counts.items() if count > 1]

    if not duplicates:
        print(f"No duplicates found in {file_path}.")
        return

    # dict.fromkeys keeps first-seen order (unlike set(), which would
    # rewrite the file in arbitrary order) while dropping repeats.
    unique_lines = dict.fromkeys(lines)

    with open(file_path, 'w') as file:
        file.writelines(unique_lines)

    # Count the copies actually removed, not just the distinct lines
    # that had duplicates.
    removed = len(lines) - len(unique_lines)
    print(f"{removed} duplicates removed from {file_path}.")
    print("Removed duplicates:")
    print("")
    for duplicate in duplicates:
        print(duplicate.strip())
25
+
26
def main():
    """CLI entry point: resolve the target file, then deduplicate it.

    The path comes from the optional positional argument; when absent,
    the user is prompted interactively.
    """
    parser = argparse.ArgumentParser(description='Remove duplicate lines from a text file.')
    parser.add_argument('file', nargs='?', help='Path to the text file to remove duplicates')
    args = parser.parse_args()

    # Fall back to an interactive prompt when no path was supplied.
    target = args.file if args.file else input("Enter the path to the text file: ")

    remove_duplicates(target)

if __name__ == "__main__":
    main()
remove_emptylines.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+
3
def remove_empty_lines(file_path):
    """Strip blank (whitespace-only) lines from *file_path* in place.

    Prints a confirmation on success; reports rather than raises when
    the file is missing or any other error occurs.
    """
    try:
        with open(file_path, 'r') as src:
            rows = src.readlines()

        kept = [row for row in rows if row.strip()]

        with open(file_path, 'w') as dst:
            dst.writelines(kept)

        print(f"Empty lines removed from {file_path}")
    except FileNotFoundError:
        print(f"File not found: {file_path}")
    except Exception as e:
        # Best-effort CLI helper: surface the problem instead of crashing.
        print(f"An error occurred: {e}")
18
+
19
if __name__ == "__main__":
    # Prefer the command-line argument; otherwise prompt interactively.
    target = sys.argv[1] if len(sys.argv) > 1 else input("Enter the path of the text file: ")
    remove_empty_lines(target)
sort.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+
3
def sort_file(file_path):
    """Sort the lines of *file_path* lexicographically, rewriting it in place."""
    with open(file_path, 'r') as handle:
        contents = handle.readlines()

    with open(file_path, 'w') as handle:
        handle.writelines(sorted(contents))

    print(f"The lines in {file_path} have been sorted.")
13
+
14
def main():
    """CLI entry point: resolve the target file, then sort its lines.

    The path comes from the optional positional argument; when absent,
    the user is prompted interactively.
    """
    parser = argparse.ArgumentParser(description='Sort lines in a text file.')
    parser.add_argument('file', nargs='?', help='Path to the text file to be sorted')
    args = parser.parse_args()

    # Fall back to an interactive prompt when no path was supplied.
    target = args.file if args.file else input("Enter the path to the text file: ")

    sort_file(target)

if __name__ == "__main__":
    main()