"""Scan a ShareGPT dump for assistant responses that look like refusals,
scoring each turn by embedding similarity against reference sentences."""

import concurrent.futures
import json
import os

from sentence_transformers import SentenceTransformer, util
from tqdm import tqdm

# Path to the ShareGPT export that will be scanned.
file_path = "ShareGPT_2023.05.02v0_unfiltered_cleaned_split.json"

# Minimum cosine similarity for a response to count as a refusal or
# non-refusal rather than "unrelated".
refusal_threshold = 0.5
non_refusal_threshold = 0.5

# Reference sentences that assistant responses are compared against.
refusals = [
    "I'm sorry, I cannot complete the request.",
    "It is not possible to provide that information.",
    "Sorry, but we don't allow questions like that.",
    "We won't be able to assist you with that.",
]
non_refusals = [
    "Sure, I can do that.",
    "No problem, I can do that for you.",
    "Here is the information you asked for.",
]

# os.cpu_count() can return None; always keep at least one worker thread.
max_workers = max(1, (os.cpu_count() or 1) // 2)


def process_conversation(conversation):
    """Label a single assistant turn as refusal, non-refusal, or unrelated."""
    value = conversation["value"]
    value_vec = model.encode(value, convert_to_tensor=True)

    # Cosine similarity of this response against every reference sentence.
    refusals_sim = util.pytorch_cos_sim(value_vec, refusals_vec)
    non_refusals_sim = util.pytorch_cos_sim(value_vec, non_refusals_vec)

    # Best-matching reference sentence in each group.
    refusals_max_sim, refusals_max_idx = refusals_sim.max(dim=1)
    non_refusals_max_sim, non_refusals_max_idx = non_refusals_sim.max(dim=1)

    refusal_score = refusals_max_sim.item()
    non_refusal_score = non_refusals_max_sim.item()

    if refusal_score > refusal_threshold and refusal_score > non_refusal_score:
        label = "refusal"
        example = refusals[refusals_max_idx.item()]
    elif non_refusal_score > non_refusal_threshold and non_refusal_score > refusal_score:
        label = "non-refusal"
        example = non_refusals[non_refusals_max_idx.item()]
    else:
        label = "unrelated"
        example = None

    # The scores are returned so the main loop can track running maxima
    # without mutating shared state from worker threads.
    return label, example, value, refusal_score, non_refusal_score


with open(file_path, "r", encoding="utf-8") as f:
    data = json.load(f)

# Flagged entries and running statistics.
bad_ids = []
max_refusal = 0.0
max_non_refusal = 0.0

model = SentenceTransformer("paraphrase-MiniLM-L6-v2")

# Encode the reference sentences once; they are reused for every response.
refusals_vec = model.encode(refusals, convert_to_tensor=True)
non_refusals_vec = model.encode(non_refusals, convert_to_tensor=True)

refusal_count = 0
non_refusal_count = 0
unrelated_count = 0

pbar1 = tqdm(data)
for item in pbar1:
    id_ = item["id"]

    # Score all assistant ("gpt") turns of this conversation in parallel.
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(process_conversation, conversation)
            for conversation in item["conversations"]
            if conversation["from"] == "gpt"
        ]

        for future in concurrent.futures.as_completed(futures):
            label, example, value, refusal_score, non_refusal_score = future.result()

            # Track the highest similarities seen so far (shown on the progress bar).
            max_refusal = max(max_refusal, refusal_score)
            max_non_refusal = max(max_non_refusal, non_refusal_score)

            if label == "refusal":
                bad_ids.append({"id": id_, "value": value})
                print(f"\nID: {id_} | Value: {value}")
                refusal_count += 1
            elif label == "non-refusal":
                non_refusal_count += 1
            else:
                unrelated_count += 1

    pbar1.set_description(f"Max Refusal: {max_refusal:.3f}")
    pbar1.set_postfix(r=refusal_count, u=unrelated_count)

with open("possible_bad_entries.json", "w") as f: |
|
json.dump(bad_ids, f) |