LeoZhangzaolin committed on
Commit
8be8808
1 Parent(s): 72217c4

Update DataClean and ImageTransfer.py

Files changed (1)
  1. DataClean and ImageTransfer.py +61 -89
DataClean and ImageTransfer.py CHANGED
@@ -1,87 +1,69 @@
- #### delete unuseful columns
  import csv

- file_path = '/Users/leozhangzaolin/Downloads/Graptolite specimens.csv'
-
- with open(file_path, newline='', encoding='utf-8') as file:
-     reader = csv.reader(file)
-     data = list(reader)
-
  columns_to_delete = [
      "species ID", "Phylum", "Class", "Order", "revised species name",
      "total number of specimens", "specimens Serial No", "显微镜照片数量", "SLR photo No",
      "相机照片数量", "跑数据照片总数", "备注", "age from", "age to", "collection No", "Microscrope photo No"
  ]

  header = data[0]
- indices_to_delete = [header.index(column) for column in columns_to_delete if column in header]

  indices_to_delete.sort(reverse=True)

- for row in data:
      for index in indices_to_delete:
          del row[index]

- new_file_path = '/Users/leozhangzaolin/desktop/New_GS.csv'
-
- with open(new_file_path, mode='w', newline='', encoding='utf-8') as file:
-     writer = csv.writer(file)
-     writer.writerows(data)
-
-
- #### merge columns
-
- file_path1 = '/Users/leozhangzaolin/desktop/New_GS.csv'
-
- with open(file_path1, newline='', encoding='utf-8') as file1:
-     reader1 = csv.reader(file1)
-     data1 = list(reader1)

- header1 = data1[0]
- family_index1 = header1.index('Family') if 'Family' in header1 else None
- subfamily_index1 = header1.index('Subfamily') if 'Subfamily' in header1 else None
- locality_index1 = header1.index('Locality') if 'Locality' in header1 else None
- longitude_index1 = header1.index('Longitude') if 'Longitude' in header1 else None
- latitude_index1 = header1.index('Latitude') if 'Latitude' in header1 else None
- horizon_index1 = header1.index('Horizon') if 'Horizon' in header1 else None
-
- for row1 in data1[1:]:
-     family1 = row1[family_index1] if family_index1 is not None else ''
-     subfamily1 = row1[subfamily_index1] if subfamily_index1 is not None else 'no subfamily'
-     row1[family_index1] = f"{family1} ({subfamily1})" if subfamily_index1 is not None else family1
-
-     locality1 = row1[locality_index1] if locality_index1 is not None else ''
-     longitude1 = row1[longitude_index1] if longitude_index1 is not None else ''
-     latitude1 = row1[latitude_index1] if latitude_index1 is not None else ''
-     horizon1 = row1[horizon_index1] if horizon_index1 is not None else ''
-     row1[locality_index1] = f"{locality1} ({longitude1}, {latitude1}, {horizon1})"
-
- header1[family_index1] = 'Family (Subfamily)'
- header1[locality_index1] = 'Locality (Longitude, Latitude, Horizon)'
-
- indices_to_delete1 = sorted([subfamily_index1, longitude_index1, latitude_index1, horizon_index1], reverse=True)
- for index1 in indices_to_delete1:
-     if index1 is not None:
-         for row1 in data1:
-             del row1[index1]
-
- new_file_path1 = '/Users/leozhangzaolin/desktop/New_GS_1.csv'
-
- with open(new_file_path1, mode='w', newline='', encoding='utf-8') as file1:
-     writer1 = csv.writer(file1)
-     writer1.writerows(data1)
-
- #### read images
- import os
- import numpy as np
- from PIL import Image
- import pandas as pd
-
- # Paths
- csv_file_path = '/Users/leozhangzaolin/desktop/New_GS_1.csv'
  image_dir_paths = ['/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 1',
                     '/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 2']
- npy_save_dir = '/Users/leozhangzaolin/Desktop/arrays'

  # Normalize file extensions in the image directories
  def normalize_file_extensions(dir_path):
@@ -89,17 +71,13 @@ def normalize_file_extensions(dir_path):
          if filename.lower().endswith('.jpg') and not filename.endswith('.jpg'):
              base, ext = os.path.splitext(filename)
              new_filename = base + '.jpg'
-             os.rename(os.path.join(dir_path, filename),
-                       os.path.join(dir_path, new_filename))

  for path in image_dir_paths:
      normalize_file_extensions(path)

- # Load the CSV file
- df = pd.read_csv(csv_file_path)
-
- # Function to process and resize the image
- def process_image(image_name, save_dir, max_size=(1024, 1024)):
      image_base_name = os.path.splitext(image_name)[0]
      image_paths = [os.path.join(dir_path, image_base_name + suffix)
                     for dir_path in image_dir_paths
@@ -111,26 +89,20 @@ def process_image(image_name, save_dir, max_size=(1024, 1024)):
          return None

      with Image.open(image_path) as img:
-         # Resize the image using LANCZOS filter (replacement for ANTIALIAS)
          img.thumbnail(max_size, Image.Resampling.LANCZOS)
-         image_array = np.array(img)
-         npy_filename = image_base_name + '_S.npy'
-         npy_path = os.path.join(save_dir, npy_filename)
-         np.save(npy_path, image_array)
-         return npy_path

- # Create a directory to save the numpy arrays
- os.makedirs(npy_save_dir, exist_ok=True)
-
- # Process each image and update the 'image file name' column
- df['image file name'] = df['image file name'].apply(lambda x: process_image(x, npy_save_dir))
-
- # Remove rows where the image was not found
  df = df.dropna(subset=['image file name'])

  # Rename the 'image file name' column to 'image'
  df.rename(columns={'image file name': 'image'}, inplace=True)

- # Save the updated DataFrame to a new CSV file
- updated_csv_path = '/Users/leozhangzaolin/Desktop/New_GS_2_updated_images.csv'
- df.to_csv(updated_csv_path, index=False)

+ #### This script shows how I process the data and convert the images to NumPy arrays locally. After processing, I upload the final CSV to GitHub and use its URL.
  import csv
+ import os
+ import numpy as np
+ from PIL import Image
+ import pandas as pd

+ # --- Initial Setup ---
+ initial_csv_file_path = 'https://github.com/LeoZhangzaolin/photos/blob/main/Graptolite%20specimens.csv'
  columns_to_delete = [
      "species ID", "Phylum", "Class", "Order", "revised species name",
      "total number of specimens", "specimens Serial No", "显微镜照片数量", "SLR photo No",
      "相机照片数量", "跑数据照片总数", "备注", "age from", "age to", "collection No", "Microscrope photo No"
  ]

+ # --- Read and Process CSV Data ---
+ with open(initial_csv_file_path, newline='', encoding='utf-8') as file:
+     reader = csv.reader(file)
+     data = list(reader)
+
  header = data[0]

+ # Find indices for columns to merge
+ family_index = header.index('Family') if 'Family' in header else None
+ subfamily_index = header.index('Subfamily') if 'Subfamily' in header else None
+ locality_index = header.index('Locality') if 'Locality' in header else None
+ longitude_index = header.index('Longitude') if 'Longitude' in header else None
+ latitude_index = header.index('Latitude') if 'Latitude' in header else None
+ horizon_index = header.index('Horizon') if 'Horizon' in header else None
+
+ # Process rows: merge and delete columns
+ for row in data[1:]:
+     # Merge columns
+     if family_index is not None and subfamily_index is not None:
+         family = row[family_index]
+         subfamily = row[subfamily_index] if row[subfamily_index] else 'no subfamily'
+         row[family_index] = f"{family} ({subfamily})"
+
+     if locality_index is not None and all([longitude_index, latitude_index, horizon_index]):
+         locality = row[locality_index]
+         longitude = row[longitude_index]
+         latitude = row[latitude_index]
+         horizon = row[horizon_index]
+         row[locality_index] = f"{locality} ({longitude}, {latitude}, {horizon})"
+
+ # Update header and remove unneeded columns
+ header[family_index] = 'Family (Subfamily)'
+ header[locality_index] = 'Locality (Longitude, Latitude, Horizon)'
+ indices_to_delete = [header.index(column) for column in columns_to_delete if column in header]
+ merged_indices = [subfamily_index, longitude_index, latitude_index, horizon_index]
+ indices_to_delete.extend(merged_indices)
+ indices_to_delete = list(set(indices_to_delete))
  indices_to_delete.sort(reverse=True)
+ header = [col for i, col in enumerate(header) if i not in indices_to_delete]

+ for row in data[1:]:
      for index in indices_to_delete:
          del row[index]

+ # Convert processed data into a DataFrame
+ df = pd.DataFrame(data[1:], columns=header)

+ # --- Image Processing ---
+ # Image directories
  image_dir_paths = ['/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 1',
                     '/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 2']

  # Normalize file extensions in the image directories
  def normalize_file_extensions(dir_path):
          if filename.lower().endswith('.jpg') and not filename.endswith('.jpg'):
              base, ext = os.path.splitext(filename)
              new_filename = base + '.jpg'
+             os.rename(os.path.join(dir_path, filename), os.path.join(dir_path, new_filename))

  for path in image_dir_paths:
      normalize_file_extensions(path)

+ # Function to process and return the image array
+ def process_image_array(image_name, max_size=(1024, 1024)):
      image_base_name = os.path.splitext(image_name)[0]
      image_paths = [os.path.join(dir_path, image_base_name + suffix)
                     for dir_path in image_dir_paths
          return None

      with Image.open(image_path) as img:
          img.thumbnail(max_size, Image.Resampling.LANCZOS)
+         return np.array(img)

+ # Apply the function to embed image arrays in the 'image file name' column
+ df['image file name'] = df['image file name'].apply(process_image_array)
  df = df.dropna(subset=['image file name'])

+ # Since arrays can't be directly saved in CSV, convert them to a string representation
+ df['image file name'] = df['image file name'].apply(lambda x: np.array2string(x))
+
  # Rename the 'image file name' column to 'image'
  df.rename(columns={'image file name': 'image'}, inplace=True)

+ # --- Save the Final DataFrame to a CSV File ---
+ final_csv_path = '/Users/leozhangzaolin/Desktop/Final_GS_with_Images.csv'
+ df.to_csv(final_csv_path, index=False)
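
A note on reusing the output of the new script, with a minimal sketch that is not part of the commit: the builtin open() applied to initial_csv_file_path only accepts local paths, not an HTTPS URL, and np.array2string() abbreviates large arrays with "..." by default, so the stringified images in Final_GS_with_Images.csv are not guaranteed to round-trip back into arrays. The sketch below assumes the CSV is also reachable through the corresponding raw.githubusercontent.com address (a hypothetical mirror of the blob URL above) and uses a JSON encoding of each array instead; the URL, paths, and helper names here are illustrative assumptions, not code from the repository.

# Sketch only: read the published CSV over HTTP and store images in a lossless text form.
# The raw.githubusercontent.com URL is an assumed mirror of the blob URL in the commit.
import json

import numpy as np
import pandas as pd
from PIL import Image

RAW_CSV_URL = ('https://raw.githubusercontent.com/LeoZhangzaolin/photos/'
               'main/Graptolite%20specimens.csv')

# pandas accepts URLs directly, unlike the builtin open().
df = pd.read_csv(RAW_CSV_URL)

def encode_image(path, max_size=(1024, 1024)):
    """Resize an image and return a JSON string that round-trips exactly."""
    with Image.open(path) as img:
        img.thumbnail(max_size, Image.Resampling.LANCZOS)
        arr = np.array(img)
    return json.dumps({'shape': arr.shape, 'dtype': str(arr.dtype),
                       'data': arr.ravel().tolist()})

def decode_image(text):
    """Rebuild the NumPy array from the JSON string produced by encode_image."""
    payload = json.loads(text)
    return np.array(payload['data'], dtype=payload['dtype']).reshape(payload['shape'])

# Example round-trip on a single (hypothetical) image path:
# text = encode_image('/path/to/specimen.jpg')
# arr = decode_image(text)

Writing the arrays to separate .npy files, as the previous version of the script did, avoids the very large CSV this produces; the JSON route simply keeps everything in one file at the cost of size.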