import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl


""" These functions are executed in the streamlit app """

class CustomModel(nn.Module):
    """Maps three process parameters (feedrate, depth of cut, tool wear)
    to a single-channel 744 x 554 image."""

    def __init__(self):
        super().__init__()

        self.fc_layers = nn.Sequential(
            nn.Linear(3, 64),
            nn.ReLU(),
            nn.Linear(64, 744 * 554)
        )

    def forward(self, x_features):
        # Expand the parameter vector to a flat image and reshape it to (N, 1, 744, 554)
        x_features = self.fc_layers(x_features)
        output = x_features.view(-1, 1, 744, 554)
        return output
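
# Shape sketch (illustrative): for a batch of N parameter vectors,
# e.g. x = torch.rand(N, 3), CustomModel()(x) returns a tensor of shape
# (N, 1, 744, 554), i.e. one single-channel 744 x 554 image per sample.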




def predict_image(parameters):
    """Predicts an image based on parameters (feedrate, depth of cut, tool wear)."""

    # Load the trained model on the CPU so the app also runs without a GPU
    model = CustomModel()
    model.load_state_dict(torch.load("processing/prediction_model.pth", map_location="cpu"))
    model.eval()

    with torch.no_grad():
        # as_tensor avoids the copy warning when `parameters` is already a tensor
        input_features = torch.as_tensor(parameters, dtype=torch.float32)
        predicted_image = model(input_features.unsqueeze(0))
        return predicted_image.numpy()
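
# Usage sketch (illustrative): predict_image([0.2, 0.5, 0.0]), with placeholder
# values for feedrate, depth of cut and tool wear, returns a NumPy array of
# shape (1, 1, 744, 554), provided "processing/prediction_model.pth" is present.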


def image_to_ts(image):
    """Transforms an image to a time series and returns the plot."""

    # Extract one profile line (the first row of pixel values) from the image
    z_values = image[0, :]
    # Shift the normalized values so the profile is centered around zero
    reversed_values = z_values - 0.5
    
    """
    # Define vmin and vmax used during transformation
    vmin = -0.1
    vmax = 0.1

    # Inverse transformation
    reversed_values = vmin + (z_values / 255.0) * (vmax - vmin)

    # drop all entries with value 0.01:
    reversed_values = reversed_values[reversed_values != 0.01]

    #reversed_values = reversed_values + 3.25
    """
    # Map the pixel index to the position along the part: 5 mm to 30 mm
    x = np.arange(len(reversed_values)) / len(reversed_values) * 25 + 5

    # Plot the time series
    fig, ax = plt.subplots(figsize=(8, 5))
    
    ax.set_ylim(-0.25, 0.25)
    ax.set_xlim(5, 30)
    
    mpl.rcParams['font.family'] = 'Arial'
    mpl.rcParams['font.size'] = 30

    # Axis labels (German): "Bauteillänge" = part length, "Normalisierte Oberfläche" = normalized surface
    ax.set_xlabel("Bauteillänge", fontname="Arial", fontsize=16, labelpad=7)
    ax.set_ylabel("Normalisierte Oberfläche", fontname="Arial", fontsize=16, labelpad=7)
    plt.yticks(fontname="Arial", fontsize=14, color="black")
    plt.xticks(range(5, 31, 5), fontname="Arial", fontsize=14, color="black")
    #plt.title("Oberfläche", fontname="Arial", fontsize=18, color="black", weight="bold", pad=10)

    # Show plain numbers on the x axis and replace the second-to-last tick label with the unit "mm"
    xticks = ax.get_xticks()
    xticklabels = [str(int(x)) if x != xticks[-2] else "mm" for x in xticks]
    ax.set_xticklabels(xticklabels)


    # Hide the y tick labels; only the grid lines are kept for the normalized surface
    yticks = ax.get_yticks()
    yticklabels = ["" for _ in yticks]
    ax.set_yticklabels(yticklabels)


    gridwidth = 1.5
    plt.grid(axis="y", linewidth=0.75, color="black")
    plt.grid(axis="x", linewidth=0.75, color="black")

    # Thicken and recolor the plot border on all four sides
    rand = ["top", "right", "bottom", "left"]
    for i in rand:
        ax.spines[i].set_linewidth(gridwidth)
        ax.spines[i].set_color('black')
    plt.plot(x, reversed_values, color="#00509b", linewidth=2)

    """
    # Define the tolerance range
    tolerance_lower = -0.085
    tolerance_upper = 0.085

    ax.fill_between(x, tolerance_lower, tolerance_upper, color='gray', alpha=0.2)

    # Check if the plot is within tolerance
    within_tolerance = all(tolerance_lower <= val <= tolerance_upper for val in reversed_values)

    tolerance = None 
    if within_tolerance:
        tolerance = True
    else:
        tolerance = False
    """
    return fig


def NN_prediction(feed, plaindepth, wear):
    """Runs the full pipeline: process parameters -> predicted image -> surface plot."""
    new_features = torch.tensor([[feed, plaindepth, wear]], dtype=torch.float32)
    predicted_image = predict_image(new_features)
    fig = image_to_ts(predicted_image[0, 0])
    return fig
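
# Minimal local smoke test (a sketch, not part of the Streamlit app): the
# parameter values below are placeholders, and the weights file
# "processing/prediction_model.pth" is assumed to exist. In the app itself the
# returned figure would typically be displayed with st.pyplot(fig).
if __name__ == "__main__":
    example_feed = 0.2        # placeholder feedrate
    example_plaindepth = 0.5  # placeholder depth of cut
    example_wear = 0.0        # placeholder tool wear
    fig = NN_prediction(example_feed, example_plaindepth, example_wear)
    fig.savefig("predicted_surface.png")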