File size: 3,305 Bytes
07baa2c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import concurrent.futures
import json
import os
import threading

from sentence_transformers import SentenceTransformer, util
from tqdm import tqdm

# File to classify data in.
filePath = "ShareGPT_2023.05.02v0_unfiltered_cleaned_split.json"

# Threshold Settings: minimum cosine similarity required before a turn is
# labelled as refusal / non-refusal; below both it is "unrelated".
refusal_threshold = 0.5
non_refusal_threshold = 0.5

# Example refusal and non-refusal sentences used as similarity anchors.
refusals = ["I'm sorry, I cannot complete the request.", "It is not possible to provide that information.", "Sorry, but we don't allow questions like that.", " We won't be able to assist you with that."]
non_refusals = ["Sure, I can do that.", "No problem, I can do that for you.", "Here is the information you asked for."]

# Set the max workers to half the available cores.
# os.cpu_count() may return None, and `// 2` could yield 0 on a
# single-core machine (ThreadPoolExecutor rejects max_workers=0);
# clamp to at least one worker.
max_workers = max(1, (os.cpu_count() or 2) // 2)

# Guards updates to the running-maximum stats below: process_conversation
# runs on multiple ThreadPoolExecutor workers, and the original code
# read-modify-wrote these globals without synchronization (data race).
_stats_lock = threading.Lock()

# Define a function that takes a conversation and returns a label and an example sentence
def process_conversation(conversation):
	"""Classify one conversation turn by cosine similarity to example sentences.

	Parameters:
		conversation: dict with at least a "value" key holding the turn text.

	Returns:
		(label, example, value) where label is "refusal", "non-refusal" or
		"unrelated"; example is the best-matching anchor sentence (None when
		unrelated); value is the original turn text.

	Side effects:
		Updates the module-level max_refusal / max_non_refusal running maxima.
	"""
	global max_refusal, max_non_refusal

	value = conversation["value"]
	value_vec = model.encode(value, convert_to_tensor=True)

	# Compute the cosine similarity with every example sentence per category.
	refusals_sim = util.pytorch_cos_sim(value_vec, refusals_vec)
	non_refusals_sim = util.pytorch_cos_sim(value_vec, non_refusals_vec)

	# Best score and its index within each category; convert the scores to
	# plain floats once so all later comparisons are tensor-free.
	refusals_max_sim, refusals_max_idx = refusals_sim.max(dim=1)
	non_refusals_max_sim, non_refusals_max_idx = non_refusals_sim.max(dim=1)
	refusal_score = refusals_max_sim.item()
	non_refusal_score = non_refusals_max_sim.item()

	# Track the highest scores seen so far (displayed in the progress bar).
	with _stats_lock:
		if refusal_score > max_refusal:
			max_refusal = refusal_score
		if non_refusal_score > max_non_refusal:
			max_non_refusal = non_refusal_score

	# A category wins only if it clears its threshold AND beats the other.
	if refusal_score > refusal_threshold and refusal_score > non_refusal_score:
		label = "refusal"
		example = refusals[refusals_max_idx.item()]
	elif non_refusal_score > non_refusal_threshold and non_refusal_score > refusal_score:
		label = "non-refusal"
		example = non_refusals[non_refusals_max_idx.item()]
	else:
		label = "unrelated"
		example = None

	return label, example, value


# Load the dataset to scan.
with open(filePath, "r", encoding="utf-8") as f:
	data = json.load(f)

# Entries flagged as likely refusals; dumped to disk at the end.
bad_ids = []

# Running maxima of similarity scores, updated by the worker threads.
max_refusal = 0.0
max_non_refusal = 0.0

# Load a pre-trained sentence-transformer model.
model = SentenceTransformer("paraphrase-MiniLM-L6-v2")

# Encode the anchor sentences once up front; reused for every turn.
refusals_vec = model.encode(refusals, convert_to_tensor=True)
non_refusals_vec = model.encode(non_refusals, convert_to_tensor=True)

refusal_count = 0
non_refusal_count = 0
unrelated_count = 0

pbar1 = tqdm(data)

# Create the thread pool once for the whole run; the original rebuilt
# (and tore down) a fresh executor for every single item.
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
	for item in pbar1:
		id_ = item["id"]

		# Only classify model-authored ("gpt") turns.
		futures = [
			executor.submit(process_conversation, conversation)
			for conversation in item["conversations"]
			if conversation["from"] == "gpt"
		]

		for future in concurrent.futures.as_completed(futures):
			label, example, value = future.result()

			if label == "refusal":
				# Build a fresh record — the original rebound the loop
				# variable `item` here, clobbering the current dataset entry.
				bad_ids.append({"id": id_, "value": value})
				print(f"\nID: {id_} | Value: {value}")
				refusal_count += 1
			elif label == "non-refusal":
				non_refusal_count += 1
			else:
				unrelated_count += 1

			pbar1.set_description("Max Refusal: {:.3f}".format(max_refusal))
			pbar1.set_postfix(r=refusal_count, u=unrelated_count)

# Persist the flagged entries for manual review.
with open("possible_bad_entries.json", "w", encoding="utf-8") as f:
	json.dump(bad_ids, f)