"""
Functions operating on the verbalisation of an ontology, providing support for
generating documentation, extracting competency questions, and preliminarily
testing an ontology via competency questions.
"""

import config
from openai import OpenAI

cqe_prompt_a = "You are asked to provide a comprehensive list of competency "\
               "questions describing all the possible requirements that can be "\
               "addressed by the ontology described before."

cqt_prompt_a = "You are asked to infer if the ontology described before can "\
               "address the following competency question: \"{}\" "\
               "Valid answers are: Yes, No."


class ChatInterface:

    def __init__(self,
                 api_key: str,
                 model_name: str = config.DEFAULT_MODEL,
                 sampling_seed: int = config.DEFAULT_SEED,
                 temperature: float = config.DEFAULT_TEMPERATURE):
        # Save client configuration for all calls
        self.client = OpenAI(api_key=api_key)
        self.model_name = model_name
        self.sampling_seed = sampling_seed
        self.temperature = temperature

    def chat_completion(self, messages, **kwargs):
        """
        Send a chat completion request and return the generated message text.

        Per-call overrides for ``model`` and ``temperature`` can be passed as
        keyword arguments; they do not alter the class defaults.
        """
        model = kwargs.get("model", self.model_name)
        temperature = kwargs.get("temperature", self.temperature)

        completion = self.client.chat.completions.create(
            model=model,
            messages=messages,
            seed=self.sampling_seed,
            temperature=temperature,
        )
        return completion.choices[0].message.content
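
# Illustrative sketch (not part of the module API): per-call keyword overrides
# affect only that request and leave the instance defaults untouched. The key
# and model name below are placeholders, not values defined by this module.
#
#   chat = ChatInterface(api_key="sk-...")  # hypothetical key
#   answer = chat.chat_completion(
#       [{"role": "user", "content": "Ping?"}],
#       model="gpt-4o-mini", temperature=0.0)  # overrides for this call only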


def extract_competency_questions(onto_verbalisation: str,
                                 chat_interface: ChatInterface,
                                 prompt: str = cqe_prompt_a):
    """
    Extract competency questions from the verbalisation of an ontology.

    Parameters
    ----------
    onto_verbalisation : str
        A string expressing the ontology verbalisation as output from a
        supported method in the `verbaliser` module.
    chat_interface : ChatInterface
        An instance of a chat interface holding the API session.
    prompt : str, optional
        CQ extraction prompt, by default cqe_prompt_a

    Returns
    -------
    competency_questions : str
        A string listing the competency questions induced from the
        verbalisation.

    """
    full_prompt = onto_verbalisation + "\n" + prompt
    conversation_history = [
        {"role": "system", "content": "You are an ontology expert."},
        {"role": "user", "content": full_prompt}
    ]

    competency_questions = chat_interface.chat_completion(
        conversation_history, model="gpt-3.5-turbo-16k")

    return competency_questions


def test_competency_questions(onto_verbalisation: str,
                              competency_questions: list[str],
                              chat_interface: ChatInterface,
                              cq_prompt: str = cqt_prompt_a):
    """
    Perform a preliminary test of the ontology to assess whether its
    verbalisation allows each of the given competency questions to be
    addressed.

    Parameters
    ----------
    onto_verbalisation : str
        A string expressing the ontology verbalisation as output from a
        supported method in the `verbaliser` module.
    competency_questions : list[str]
        A list of competency questions to use for preliminary testing.
    chat_interface : ChatInterface
        An instance of a chat interface holding the API session.
    cq_prompt : str, optional
        CQ test prompt, by default cqt_prompt_a

    Returns
    -------
    cq_test_dict : dict
        A dictionary mapping each competency question to its preliminary test
        outcome (Yes/No).

    """
    cq_test_dict = {}
    for cq in competency_questions:
        full_prompt = onto_verbalisation + "\n" + cq_prompt.format(cq)
        conversation_history = [
            {"role": "system", "content": "You are an ontology engineer."},
            {"role": "user", "content": full_prompt}
        ]
        outcome = chat_interface.chat_completion(
            conversation_history, model="gpt-3.5-turbo-16k")
        cq_test_dict[cq] = outcome

    return cq_test_dict
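

if __name__ == "__main__":
    # Minimal end-to-end sketch, not part of the module API. It assumes the API
    # key is exported as OPENAI_API_KEY and uses a toy verbalisation string in
    # place of the output of the `verbaliser` module mentioned above.
    import os

    verbalisation = (
        "The ontology defines the class Person. Every Person has exactly one "
        "birth date and may have friends, which are also instances of Person."
    )
    chat = ChatInterface(api_key=os.environ["OPENAI_API_KEY"])

    cq_text = extract_competency_questions(verbalisation, chat)
    # Splitting on newlines is an assumption about the model's output format;
    # extract_competency_questions itself returns the raw string unchanged.
    cqs = [line.strip() for line in cq_text.splitlines() if line.strip()]

    results = test_competency_questions(verbalisation, cqs, chat)
    for cq, outcome in results.items():
        print(f"{cq} -> {outcome}")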