import pandas as pd
import numpy as np

# for data visualization:
import matplotlib.pyplot as plt
import seaborn as sns

# for regression:
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error

# Read your data file
datafile_path = "data/chat_transcripts_with_embeddings_and_scores.csv"

df = pd.read_csv(datafile_path)

# Parse the stringified embeddings into numpy arrays
df['embedding'] = df['embedding'].apply(lambda x: np.array([float(num) for num in x.strip('[]').split(',')]))


# Split the data into features (X) and labels (y)
X = np.stack(df['embedding'].values)
y = df[['avoide', 'avoida', 'avoidb', 'avoidc', 'avoidd', 'anxietye', 'anxietya', 'anxietyb', 'anxietyc', 'anxietyd']].values


# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Train your regression model
rfr = RandomForestRegressor(n_estimators=100)
rfr.fit(X_train, y_train)

# Make predictions on the test data
preds = rfr.predict(X_test)

# Evaluate your model
mse = mean_squared_error(y_test, preds)
mae = mean_absolute_error(y_test, preds)

print(f"Chat transcript embeddings performance: mse={mse:.2f}, mae={mae:.2f}")


# Mean Squared Error (MSE) is the average squared difference between predicted and actual values.
# In the context of this task, a lower MSE means the model's predicted attachment scores are closer to the true scores.
# An MSE of 1.32 means the average squared difference between the predicted and actual scores is 1.32.
# Since our scores are normalized between 0 and 1, this error is relatively high,
# meaning the model's predictions are substantially off from the true values.

# Mean Absolute Error (MAE) is another measure of error in our predictions. 
# It's the average absolute difference between the predicted and actual scores.
# An MAE of 0.96 suggests that, on average, our predicted attachment scores are off by 0.96 from the true scores.
# Considering that our scores are normalized between 0 and 1, this error is also quite high, indicating that 
# the model's predictions are not very accurate. 

# Both MSE and MAE are error metrics that we want to minimize:
# lower values indicate more accurate predictions and better model performance.
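
# Quick sanity check of the two metrics on toy numbers (not from this dataset),
# using the sklearn functions already imported above:
toy_true = [0.2, 0.8]
toy_pred = [0.3, 0.5]
assert abs(mean_squared_error(toy_true, toy_pred) - 0.05) < 1e-9  # ((0.1)**2 + (0.3)**2) / 2
assert abs(mean_absolute_error(toy_true, toy_pred) - 0.2) < 1e-9  # (0.1 + 0.3) / 2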


# Guide for adding features to improve performance:
# Additional feature extraction
# To add new features to the data, create new columns in the DataFrame.
# Each new feature is a new column, created by applying a function to the text data.

# For example, if you were adding a feature for the length of the chat, you would do something like this:
# df['text_length'] = df['ChatTranscript'].apply(len)

# If you are using an external library to compute a feature (like NLTK for tokenization or sentiment analysis), you would need to import that library and use its functions.
# For example, to compute a sentiment score with TextBlob, you might do something like this:
# from textblob import TextBlob
# df['sentiment'] = df['ChatTranscript'].apply(lambda text: TextBlob(text).sentiment.polarity)

# Make sure to handle potential errors or exceptions in your feature functions;
# for example, a missing or empty chat could make length or sentiment computation fail.
# One defensive pattern is sketched below.
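
# A minimal defensive sketch, assuming the 'ChatTranscript' column and TextBlob from the
# examples above; non-string or empty chats fall back to a neutral polarity instead of raising:
# from textblob import TextBlob
#
# def safe_sentiment(text):
#     if not isinstance(text, str) or not text.strip():
#         return 0.0  # neutral polarity for empty/missing chats
#     return TextBlob(text).sentiment.polarity
#
# df['sentiment'] = df['ChatTranscript'].apply(safe_sentiment)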

# After you've added your new features, include them in your model by concatenating them
# with the embedding vectors before splitting the data into training and testing sets.
# Note that df[['embedding', 'text_length', 'sentiment']].values would produce an object array
# (the embedding column holds whole vectors), so the columns must be stacked numerically,
# as in the sketch after this guide.

# Always be sure to check your data after adding new features to make sure everything looks correct.
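
# A sketch of the numeric concatenation, assuming the hypothetical 'text_length' and
# 'sentiment' columns created as above:
# X = np.hstack([
#     np.stack(df['embedding'].values),             # shape: (n_samples, embedding_dim)
#     df[['text_length', 'sentiment']].to_numpy(),  # shape: (n_samples, 2)
# ])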


column_names = ['avoide', 'avoida', 'avoidb', 'avoidc', 'avoidd', 'anxietye', 'anxietya', 'anxietyb', 'anxietyc', 'anxietyd']

# Create a DataFrame for the predictions
preds_df = pd.DataFrame(preds, columns=column_names)

# Create a DataFrame for the actual values
y_test_df = pd.DataFrame(y_test, columns=column_names)

# Create a 2x5 subplot grid
fig, axes = plt.subplots(2, 5, figsize=(20, 10))

# Loop over each score column
for idx, col in enumerate(column_names):
    ax = axes[idx // 5, idx % 5]

    # Plot the actual values
    sns.histplot(y_test_df[col], bins=10, ax=ax, color='blue', kde=True, label='actual')

    # Overlay the predicted values on the same axes
    sns.histplot(preds_df[col], bins=10, ax=ax, color='red', kde=True, label='predicted')

    # Set the title and legend of the subplot
    ax.set_title(f"{col} - actual vs predicted")
    ax.legend()

# Show the plot
plt.show()