fix typo
logmetric.py CHANGED (+6 -6)
@@ -98,25 +98,25 @@ class LogMetric(evaluate.Metric):
         pass

     # Jaccard Similarity to measure closeness of two log-messages
-    def get_jaccard_similarity(set1, set2):
+    def get_jaccard_similarity(self, set1, set2):
         intersection = set1.intersection(set2)
         union = set1.union(set2)
         return len(intersection) / len(union)

     # A score depending on the difference in length of two sentences
-    def get_length_score(sentence1, sentence2):
+    def get_length_score(self, sentence1, sentence2):
         s1len = len(sentence1)
         s2len = len(sentence2)

         return 1 - (abs(s1len - s2len) / max(s1len, s2len))

     # Combine a weighted average of different scores
-    def get_overall_similarity(sentence1, sentence2):
+    def get_overall_similarity(self, sentence1, sentence2):
         s1split = sentence1.split()
         s2split = sentence2.split()

-        jaccard_score = get_jaccard_similarity(set(s1split), set(s2split))
-        length_score = get_length_score(s1split, s2split)
+        jaccard_score = self.get_jaccard_similarity(set(s1split), set(s2split))
+        length_score = self.get_length_score(s1split, s2split)

         return (jaccard_score * 0.7 + length_score * 0.3) * 100.0

@@ -196,7 +196,7 @@ class LogMetric(evaluate.Metric):
         monotonicallyIncreasingScore = 0.0

         # apply jaccard-similarity to every pred-ref pair and then take mean score * 100
-        local_score = np.mean([get_overall_similarity(p, r) for p,r in
+        local_score = np.mean([self.get_overall_similarity(p, r) for p,r in
             zip(
                 list(map(lambda t: t[1], pred_logentries))[:min_logentries],
                 list(map(lambda t: t[1], ref_logentries))[:min_logentries]
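For context on what the patched helpers compute: the overall similarity is a weighted blend of token-set Jaccard similarity (70%) and a token-count closeness score (30%), scaled to 0-100. Assuming the helpers live in the class body (as the hunk header suggests), the bare-name calls in the original would raise a NameError when invoked from an instance method, which is what adding self and the self. prefix resolves. Below is a minimal standalone sketch of the scoring logic, written as plain functions rather than LogMetric methods; the example log messages are invented here purely for illustration.

# Standalone sketch of the scoring logic touched by this patch (not the metric itself);
# the sample messages are hypothetical.

def get_jaccard_similarity(set1, set2):
    # |intersection| / |union| of the two token sets
    intersection = set1.intersection(set2)
    union = set1.union(set2)
    return len(intersection) / len(union)

def get_length_score(sentence1, sentence2):
    # 1 minus the relative difference in token counts
    s1len = len(sentence1)
    s2len = len(sentence2)
    return 1 - (abs(s1len - s2len) / max(s1len, s2len))

def get_overall_similarity(sentence1, sentence2):
    s1split = sentence1.split()
    s2split = sentence2.split()
    jaccard_score = get_jaccard_similarity(set(s1split), set(s2split))
    length_score = get_length_score(s1split, s2split)
    return (jaccard_score * 0.7 + length_score * 0.3) * 100.0

print(get_overall_similarity("error connecting to db", "error connecting to database"))
# Jaccard = 3/5 = 0.6, length score = 1.0 -> approx. 72.0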