John Doe committed on
Commit: b4b48c2
1 Parent(s): d00ac21
app.py CHANGED
@@ -16,6 +16,10 @@ from processing.classification_model import classify
 from processing.barsplots_rootcauses import check_classification
 from processing.cloud_access import auto_download
 
+from processing.processing_data import slice_data
+from processing.processing_data import create_recurrence_plot
+from processing.processing_data import classification
+
 sprint_data_folder = "Messungen"
 seafile_token = os.environ['SEAFILE_TOKEN']
 
@@ -99,60 +103,51 @@ def main():
 
         st.image("assets/slice.png")
 
-        auswahl = st.radio("Merkmal", ["Vorschub", "Schnitttiefe", "Werkzeugverschleiß"], horizontal=True, index=0)
-        #width = 500
+        # auswahl = st.radio("Merkmal", ["Vorschub", "Schnitttiefe", "Werkzeugverschleiß"], horizontal=True, index=0)
+        # #width = 500
 
 
-        if auswahl == "Vorschub":
-            st.image("assets/max_Vorschub.PNG")
-        elif auswahl == "Schnitttiefe":
-            st.image("assets/max_Schnitttiefe.PNG")
-        else:
-            st.image("assets/max_Verschleiß.PNG")
+        # if auswahl == "Vorschub":
+        #     st.image("assets/max_Vorschub.PNG")
+        # elif auswahl == "Schnitttiefe":
+        #     st.image("assets/max_Schnitttiefe.PNG")
+        # else:
+        #     st.image("assets/max_Verschleiß.PNG")
 
     with tab2:
 
         selected_file = st.selectbox("Oberflächenscan auswählen", sprint_csv_files, 0)
-        col1, col_blanc, col2 = st.columns([0.6, 0.05, 0.35], gap="large")
+        col1, col_blanc, col2 = st.columns([0.45, 0.1, 0.40], gap="large")
 
         with col1:
             try:
                 fig = surface_plot(selected_file)
-                fig.update_layout(height=600, width=800)
+                fig.update_layout(height=800, width=600)
                 st.plotly_chart(fig, theme="streamlit", use_container_width=False)
             except:
                 pass
+
 
         with col_blanc:
            st.write("")
 
        with col2:
-            st.markdown("<h2 style='text-align: center; color: black; font-family:Arial;font-size:2rem;'>Klassifizierung</h2>", unsafe_allow_html=True)
-            st.markdown("#")
-
-            dummy, tolerance, z_data, fivetothirty = plot_timeseries(selected_file, 9)
-            data = fivetothirty["z"].values
-            mean = np.mean(data)
-            data = data - mean
+            st.markdown("<h2 style='text-align: center; color: black; font-family:Arial;font-size:2rem;'>Auswertung</h2>", unsafe_allow_html=True)
 
-            try:
-                data = torch.tensor(data, dtype=torch.float).to("cpu")
-                data = data.view(1, 1, 500)
-                prediction = classify(data)
-
-                fig_feed, fig_depth, fig_condition = check_classification(prediction[0])
-                st.pyplot(fig_feed)
-                st.pyplot(fig_depth)
-                st.pyplot(fig_condition)
-            except Exception as e:
-                st.write(e)
+            sliced_data = slice_data(selected_file)
+            recurrence_plot = create_recurrence_plot(sliced_data)
+            classification_result = classification(recurrence_plot)
+            fig_feed, fig_depth, fig_wear = check_classification(classification_result)
+            st.pyplot(fig_feed)
+            st.pyplot(fig_depth)
+            st.pyplot(fig_wear)
 
 
 if __name__ == "__main__":
     main()
 
-    t1 = threading.Thread(target=auto_download, args=(seafile_token,))
-    t1.start()
+    #t1 = threading.Thread(target=auto_download, args=(seafile_token,))
+    #t1.start()
 
 
 
processing/{model_d2_v0.6.pt → _best_model_fold_3.pt} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f35b7eedb4f7e7a87010dd5e796ff88c589045ce6e1582bbe1de427b99e2336d
-size 139143916
+oid sha256:da9776ab0218a1da6f8b89383cafaa961732337624ffb08a18ea915bfe7c5d0b
+size 44808074
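The renamed checkpoint _best_model_fold_3.pt is what the new classification() helper loads. For context, the reworked tab2 flow in app.py (slice the scan, build a recurrence plot, classify it, plot the results) can be exercised outside Streamlit roughly as sketched below; this is not part of the commit, the file name "Messungen/example_scan.csv" is hypothetical, and the savefig calls assume check_classification returns matplotlib figures, as the st.pyplot calls suggest.

# Sketch of the new tab2 pipeline run outside Streamlit (illustration only).
# "Messungen/example_scan.csv" is a hypothetical file name; the real app takes
# the file from the st.selectbox over sprint_csv_files.
from processing.processing_data import slice_data, create_recurrence_plot, classification
from processing.barsplots_rootcauses import check_classification

sliced = slice_data("Messungen/example_scan.csv")      # one y-profile with mean-centred z values
rp_image = create_recurrence_plot(sliced)              # 512x512 grayscale PIL image
result = classification(rp_image)                      # (feed, depth, tool wear) class indices
fig_feed, fig_depth, fig_wear = check_classification(result)
for name, fig in [("feed", fig_feed), ("depth", fig_depth), ("wear", fig_wear)]:
    fig.savefig(f"{name}.png")                         # the app shows these via st.pyplot(...)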
processing/model_d2_v0.7.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:431422f57a7b87f61e13e11304302a0f4aa3ab185990080acc8691c921645767
-size 139137029
processing/processing_data.py ADDED
@@ -0,0 +1,139 @@
+import pandas as pd
+import numpy as np
+import torch
+from torchvision import transforms, models
+from PIL import Image
+from torch import nn
+from pyts.image import RecurrencePlot
+
+
+def slice_data(csv):
+
+    df = pd.read_csv(csv, sep=",", encoding="utf-16", header=None)
+    df.columns = ['x', 'y', 'z']
+
+    # slicing
+    df = df.iloc[:16000]
+    df = df[(df['y'] > -30) & (df['y'] < -5)]
+    df = df[(df['x'] > -5.5) & (df['x'] < 5.5)]
+
+    # transform x, y, z
+    df["x"] = df["x"] + 7.5  # in the raw data, x = 0 is in the middle of the workpiece width; shift x by 7.5 mm so that x = 0 lies at the left edge of the workpiece
+    df["y"] = df["y"] * (-1)  # flip the y-axis
+    df['z'] = df['z'] - (1.5)  # the tip of the measuring probe has a radius of 1.5 mm, which needs to be subtracted from z
+
+    # These are the constant x values of the slices, along which the measuring probe actually moves in the y-direction:
+    x_values = [2.4, 3, 3.6, 4.2, 4.8, 5.4, 6, 6.6, 7.2, 7.8, 8.4, 9, 9.6, 10.2, 10.8, 11.4, 12, 12.6]
+    tol = 0.1
+    x_set = 5
+
+    x = x_values[x_set]
+
+    data = df[(df['x'] > x - tol) & (df['x'] < x + tol)]
+
+    # Delete column "x" if it exists
+    if "x" in data.columns:
+        data.drop(columns=["x"], inplace=True)
+
+    # Sort by y
+    data = data.sort_values(by=['y'])
+
+
+    # Normalize z values by subtracting the mean. Maybe try global normalization instead
+    z = data['z'].values
+    mean = np.mean(z)
+    data['z'] = data['z'] - mean
+    data['z'] = data['z'].apply(lambda x: round(x, 6))
+
+    data.reset_index(drop=True, inplace=True)
+
+    return data
+
+
+
+def create_recurrence_plot(sliced_data):
+
+    global_vmin = 0
+    global_vmax = 1
+
+    z = sliced_data['z'].values
+    # Normalize z values to the range [global_vmin, global_vmax] for the RP transformation
+    z_normalized = (z - np.min(z)) / (np.max(z) - np.min(z))
+    z_normalized = z_normalized * (global_vmax - global_vmin) + global_vmin
+
+    # Create the RP image
+    rp = RecurrencePlot(dimension=1, time_delay=1, threshold='point', percentage=20)
+    rp_image = rp.fit_transform(z_normalized.reshape(1, -1))[0]
+
+    # Convert the RP image to a PIL Image object
+    rp_image_pil = Image.fromarray((rp_image * 255).astype(np.uint8))
+
+    # Resize the image to 512x512 pixels
+    rp_image_resized = rp_image_pil.resize((512, 512), Image.LANCZOS)
+
+    # Convert to grayscale
+    rp_image_resized = rp_image_resized.convert('L')
+
+    return rp_image_resized
+
+
+
+def classification(image):
+
+    # Define transformations for the input image
+    transform = transforms.Compose([
+        transforms.Resize((224, 224)),  # Resize to match the ResNet input size
+        transforms.ToTensor(),  # Convert the PIL image to a PyTorch tensor
+        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+    ])
+
+    # Define the MultiOutputModel class as it was originally defined
+    class MultiOutputModel(nn.Module):
+        def __init__(self, base_model, num_classes_list):
+            super(MultiOutputModel, self).__init__()
+            num_features = base_model.fc.in_features
+            base_model.fc = nn.Identity()
+            self.base_model = base_model
+            self.fc_feed_rate = nn.Linear(num_features, num_classes_list[0])
+            self.fc_cutting_depth = nn.Linear(num_features, num_classes_list[1])
+            self.fc_tool_wear = nn.Linear(num_features, num_classes_list[2])
+
+        def forward(self, x):
+            x = self.base_model(x)
+            feed_rate_out = self.fc_feed_rate(x)
+            cutting_depth_out = self.fc_cutting_depth(x)
+            tool_wear_out = self.fc_tool_wear(x)
+            return tool_wear_out, feed_rate_out, cutting_depth_out
+
+    # Recreate the base model and the MultiOutputModel
+    base_model = models.resnet18(weights='DEFAULT')
+    model = MultiOutputModel(base_model, [3, 3, 3])
+
+    # Load the model state dict
+    model_path = r'processing/_best_model_fold_3.pt'
+    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
+
+    # Set the model to evaluation mode
+    model.eval()
+
+    # Check if a GPU is available and move the model to that device
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    model.to(device)
+
+    img = image.convert('RGB')
+    img = transform(img)
+    img = img.unsqueeze(0)  # Add batch dimension
+
+
+    # Move the tensor to the same device as the model
+    img_tensor = img.to(device)
+
+    with torch.no_grad():
+        tool_wear_out, feed_rate_out, cutting_depth_out = model(img_tensor)
+
+    # Interpret the results
+    predicted_feed = torch.argmax(feed_rate_out, dim=1).item()
+    predicted_depth = torch.argmax(cutting_depth_out, dim=1).item()
+    predicted_toolwear = torch.argmax(tool_wear_out, dim=1).item()
+
+    return predicted_feed, predicted_depth, predicted_toolwear
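The RecurrencePlot settings used in create_recurrence_plot (dimension=1, time_delay=1, threshold='point', percentage=20) can be sanity-checked in isolation; the sine series below is a made-up toy signal, not part of the commit, and is only meant to show the expected shape and binary values before the 512x512 resize.

# Standalone check of the RecurrencePlot settings used in create_recurrence_plot
# (toy sine signal for illustration only; not from the commit).
import numpy as np
from pyts.image import RecurrencePlot

z = np.sin(np.linspace(0, 8 * np.pi, 400))          # synthetic stand-in for the sliced z-profile
z = (z - z.min()) / (z.max() - z.min())             # same [0, 1] min-max normalization as above
rp = RecurrencePlot(dimension=1, time_delay=1, threshold='point', percentage=20)
rp_image = rp.fit_transform(z.reshape(1, -1))[0]    # binary matrix of shape (400, 400)
print(rp_image.shape, np.unique(rp_image))          # roughly 20% of the entries are recurrences (ones)

classification() then resizes the grayscale image to 224x224, replicates it to three RGB channels, and returns the argmax class indices in the order (feed rate, cutting depth, tool wear), which is the tuple check_classification consumes in app.py.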