import json
from typing import List, Tuple
from concurrent.futures import ProcessPoolExecutor, as_completed
import logging
from hashlib import md5

from tqdm import tqdm


# Percentage of similarity between two conversations for them to be considered
# duplicates. Note: this threshold is not used by the deduplication logic in this
# file, which matches conversations by exact hash.
similarity_threshold = 80


def remove_duplicates(conversations: List[dict]) -> List[dict]:
    """Drop conversations whose set of message values has already been seen."""
    unique_ids = {}
    unique_hashes = set()

    with ProcessPoolExecutor() as executor:
        futures = {executor.submit(check_unique, conversation, unique_hashes): conversation for conversation in conversations}
        total_tasks = len(futures)

        for future in tqdm(as_completed(futures), total=total_tasks, desc="Deduplicating", unit="conversations"):
            is_unique, conversation = future.result()
            # Re-check against the live set: the copy of unique_hashes a worker
            # receives is pickled at submission time, before any hashes have been
            # added, so the worker's verdict alone cannot catch duplicates within
            # this batch.
            hash_ = conversation_hash(conversation)
            if is_unique and hash_ not in unique_hashes:
                id_ = conversation.pop('id')
                unique_ids[hash_] = (id_, conversation)
                unique_hashes.add(hash_)
            else:
                logging.debug(f"Duplicate found: {conversation}")

    return [{'id': id_, **conversation} for id_, conversation in unique_ids.values()]


def check_unique(conversation: dict, unique_hashes: set) -> Tuple[bool, dict]:
    # Runs in a worker process. The unique_hashes it sees is the snapshot that
    # was pickled when the task was submitted, so this is only a best-effort
    # pre-filter; the caller performs the authoritative check.
    hash_ = conversation_hash(conversation)

    if hash_ in unique_hashes:
        return False, conversation

    return True, conversation


def conversation_hash(conversation: dict) -> str:
    # Hash the sorted set of message texts, so two conversations containing the
    # same messages (regardless of order or role) produce the same digest.
    set_ = frozenset(msg['value'] for msg in conversation['conversations'])
    return md5(json.dumps(sorted(set_)).encode()).hexdigest()
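

# --- Example usage (illustrative sketch, not part of the original script) ---
# A minimal, hypothetical demonstration of remove_duplicates() on ShareGPT-style
# records: two entries share the same message values and one is distinct, so one
# of the pair is dropped. The ids and texts below are made up for illustration.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)

    sample = [
        {"id": "conv-1", "conversations": [
            {"from": "human", "value": "Hello"},
            {"from": "gpt", "value": "Hi there!"},
        ]},
        # Same message values as conv-1, so it hashes identically and is dropped.
        {"id": "conv-2", "conversations": [
            {"from": "human", "value": "Hello"},
            {"from": "gpt", "value": "Hi there!"},
        ]},
        {"id": "conv-3", "conversations": [
            {"from": "human", "value": "Goodbye"},
            {"from": "gpt", "value": "See you!"},
        ]},
    ]

    deduped = remove_duplicates(sample)
    print(f"Kept {len(deduped)} of {len(sample)} conversations")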