DGurgurov committed on
Commit 1d26da5
1 Parent(s): e74feb2

Upload 6 files

.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+cn_relations_clean.json filter=lfs diff=lfs merge=lfs -text
cn_relations_clean.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0efc7e1ab6d75dc7392be33765b5cbba9d8f388d321a8e0dbf51d33840800d37
+size 961527622
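Note: the three added lines are a Git LFS pointer, not the dataset itself; the actual ~961 MB JSON lives in LFS storage and is addressed by the sha256 oid above (which is why the .gitattributes rule was added first). A minimal sketch of reading the pointer's fields, assuming the checkout still holds the pointer rather than the resolved file:

# Minimal sketch: parse the 3-line Git LFS pointer above into key/value fields.
# Assumes "cn_relations_clean.json" is still the pointer, not the resolved payload.
with open("cn_relations_clean.json") as f:
    fields = dict(line.split(" ", 1) for line in f.read().splitlines())
print(fields["oid"], fields["size"])  # sha256:0efc... 961527622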
src/cn_analyze.py ADDED
@@ -0,0 +1,23 @@
+import json
+
+# Load the relations dictionary from the JSON file
+input_file_path = "cn_relations_clean.json"
+with open(input_file_path, "r") as json_file:
+    relations_dict = json.load(json_file)
+
+# Prepare the results
+results = []
+num_languages = len(relations_dict)
+results.append(f"Number of languages: {num_languages}\n")
+
+for language_code, start_nodes in relations_dict.items():
+    num_start_edges = len(start_nodes)
+    num_end_edges = sum(len(edges) for edges in start_nodes.values())
+    results.append(f"Language: {language_code} - Number of start edges: {num_start_edges} - Number of end edges: {num_end_edges}\n")
+
+# Write the results to a text file
+output_file_path = "cn_relations_summary.txt"
+with open(output_file_path, "w") as txt_file:
+    txt_file.writelines(results)
+
+print(f"Summary saved to {output_file_path}")
src/cn_clean.py ADDED
@@ -0,0 +1,35 @@
+import json
+import re
+
+def clean_prefixes(data):
+    cleaned_data = {}
+    prefix_pattern = re.compile(r'^/c/[^/]+/')  # Matches the prefix part '/c/<lang_code>/'
+
+    for lang, words in data.items():
+        cleaned_words = {}
+        for key, values in words.items():
+            # Clean the key
+            cleaned_key = prefix_pattern.sub('', key).replace('_', ' ')
+
+            # Clean the values
+            cleaned_values = [prefix_pattern.sub('', value).replace('_', ' ') for value in values]
+
+            cleaned_words[cleaned_key] = cleaned_values
+
+        cleaned_data[lang] = cleaned_words
+
+    return cleaned_data
+
+# Load the relations dictionary from the JSON file
+input_file_path = "cn_relations.json"
+with open(input_file_path, "r") as json_file:
+    data = json.load(json_file)
+
+cleaned_data = clean_prefixes(data)
+
+print(cleaned_data['zdj'])
+
+# Save the cleaned data to a new JSON file
+output_file_path = "cn_relations_clean.json"
+with open(output_file_path, "w") as json_file:
+    json.dump(cleaned_data, json_file, ensure_ascii=False, indent=4)
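To make the cleaning rule concrete, a small sketch of what prefix_pattern plus the underscore replacement does (the sample URI is made up for illustration):

import re

prefix_pattern = re.compile(r'^/c/[^/]+/')  # same pattern as in cn_clean.py
# '/c/en/ice_cream' -> 'ice_cream' -> 'ice cream'
print(prefix_pattern.sub('', '/c/en/ice_cream').replace('_', ' '))

Note that cn_parser.py already stores bare node names (uri.split("/")[3]), so the prefix substitution is mostly a no-op on this data and the real work is the underscore replacement.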
src/cn_inspect.py ADDED
@@ -0,0 +1,19 @@
+import json
+
+# Load the relations dictionary from the JSON file
+input_file_path = "cn_relations.json"
+with open(input_file_path, "r") as json_file:
+    relations_dict = json.load(json_file)
+
+# Define the specific language to look for
+specific_language = "ii"
+
+# Check if the specific language exists in the dictionary
+if specific_language in relations_dict:
+    start_nodes = relations_dict[specific_language]
+    print(start_nodes)
+    num_start_edges = len(start_nodes)
+    num_end_edges = sum(len(edges) for edges in start_nodes.values())
+    print(f"Language: {specific_language} - Number of start edges: {num_start_edges} - Number of end edges: {num_end_edges}\n")
+else:
+    print(f"Language {specific_language} not found in the dataset.")
src/cn_parser.py ADDED
@@ -0,0 +1,54 @@
+import gzip
+import json
+
+# Define the dictionary to store the relations
+print("--------> Sequential processing setup!")
+relations_dict = {}
+
+# Path to the gzipped file
+file_path = "conceptnet-assertions-5.7.0.csv.gz"
+
+# Function to parse each line and extract words and relations
+def parse_line(line):
+    parts = line.strip().split("\t")
+    if len(parts) >= 4:  # parts[3] is accessed below, so require at least 4 columns
+        # Extract the start and end node URIs
+        start_node_uri = parts[2]
+        end_node_uri = parts[3]
+
+        # Filter out end nodes that are http:// links before splitting,
+        # since URLs do not follow the /c/<lang>/<term> shape
+        if end_node_uri.startswith("http://"):
+            return
+
+        # Extract the language code
+        language_code = start_node_uri.split("/")[2]
+
+        # Extract the node names
+        start_node = start_node_uri.split("/")[3]
+        end_node = end_node_uri.split("/")[3]
+
+        # Initialize the language dictionary if it doesn't exist
+        if language_code not in relations_dict:
+            relations_dict[language_code] = {}
+
+        # Initialize the start node list if it doesn't exist
+        if start_node not in relations_dict[language_code]:
+            relations_dict[language_code][start_node] = []
+        # Add the end node to the start node's list
+        relations_dict[language_code][start_node].append(end_node)
+
+# Open the gzipped file and parse each line sequentially
+with gzip.open(file_path, "rt") as f:
+    print("--------> File opened!")
+    for line in f:
+        parse_line(line)
+
+
+# Save the relations dictionary to a JSON file
+print("--------> About to save the file!")
+output_file_path = "cn_relations.json"
+with open(output_file_path, "w") as json_file:
+    json.dump(relations_dict, json_file, ensure_ascii=False)
+
+print("Data saved to", output_file_path)
src/parse_cn.sh ADDED
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# download ConceptNet data from the official website
+# file name - conceptnet-assertions-5.7.0.csv.gz
+# link - https://s3.amazonaws.com/conceptnet/downloads/2019/edges/conceptnet-assertions-5.7.0.csv.gz
+
+
+echo "Running cn_parser.py..."
+python cn_parser.py
+
+echo "Running cn_inspect.py..."
+python cn_inspect.py
+
+echo "Running cn_clean.py..."
+python cn_clean.py
+
+echo "Running cn_analyze.py..."
+python cn_analyze.py
+
+
+# output files:
+# 1. complete ConceptNet data without sources and relation types
+# 2. clean ConceptNet data without prefixes and underscores between words
+# 3. analysis file with the number of languages and edges for each language
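The download step in the comments at the top of the script is left manual. A minimal Python sketch of that fetch, using the URL quoted in those comments (wget or curl on the same URL works just as well):

import urllib.request

# Fetch the ConceptNet dump referenced in parse_cn.sh into the working directory.
URL = "https://s3.amazonaws.com/conceptnet/downloads/2019/edges/conceptnet-assertions-5.7.0.csv.gz"
urllib.request.urlretrieve(URL, "conceptnet-assertions-5.7.0.csv.gz")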