"""This script runs the trained model on data and saves the predictions to a file."""

import argparse
import json
import logging
import random

import torch
import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer

# Set the logging level to info
logging.basicConfig(level=logging.INFO)

# Set the device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info(f"Device: {device}")

# Prompts for the different tasks
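# Rough English glosses of the Icelandic prompts below:
#   Task 1: "Here is a text I want you to examine carefully; look at every
#            word, phrase, and sentence and judge whether anything seems off
#            (grammar, spelling, odd meaning, etc.). List every such case,
#            one per line, stating what the error is, where it is, and what
#            should replace it."
#   Task 2: same inspection instructions, but for two versions of the text,
#            A and B, one of which may be better; "Which text do you prefer?"
#   Task 3: same inspection instructions, followed by "Now try to fix the
#            text so that it reads better, as you see fit."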
START_PROMPT_TASK1 = "Hér er texti sem ég vil að þú skoðir vel og vandlega. Þú skalt skoða hvert einasta orð, orðasamband, og setningu og meta hvort þér finnist eitthvað athugavert, til dæmis hvað varðar málfræði, stafsetningu, skringilega merkingu og svo framvegis.\nHér er textinn:\n\n"
END_PROMPT_TASK1 = "Sérðu eitthvað sem mætti betur fara í textanum? Búðu til lista af öllum slíkum tilvikum þar sem hver lína tilgreinir hver villan er, hvar hún er, og hvað væri gert í staðinn fyrir villuna.\n\n"

START_PROMPT_TASK2 = "Hér er texti sem ég vil að þú skoðir vel og vandlega. Þú skalt skoða hvert einasta orð, orðasamband, og setningu og meta hvort þér finnist eitthvað athugavert, til dæmis hvað varðar málfræði, stafsetningu, skringilega merkingu og svo framvegis. Ég er með tvær útgáfur af textanum, A og B, og önnur þeirra gæti verið betri en hin á einhvern hátt, t.d. hvað varðar stafsetningu, málfræði o.s.frv.\nHér er texti A:\n\n"
MIDDLE_PROMPT_TASK2 = "Hér er texti B:\n\n"
END_PROMPT_TASK2 = "Hvorn textann líst þér betur á?\n\n"

START_PROMPT_TASK3 = "Hér er texti sem ég vil að þú skoðir vel og vandlega. Þú skalt skoða hvert einasta orð, orðasamband, og setningu og meta hvort þér finnist eitthvað athugavert, til dæmis hvað varðar málfræði, stafsetningu, skringilega merkingu og svo framvegis.\nHér er textinn:\n\n"
END_PROMPT_TASK3 = "Reyndu nú að laga textann þannig að hann líti betur út, eins og þér finnst best við hæfi.\n\n"

START_PROMPT_TASK = {
    1: START_PROMPT_TASK1,
    2: START_PROMPT_TASK2,
    3: START_PROMPT_TASK3,
}
END_PROMPT_TASK = {1: END_PROMPT_TASK1, 2: END_PROMPT_TASK2, 3: END_PROMPT_TASK3}

SEP = "\n\n"
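
# Each model input is assembled as:
#   tasks 1 and 3: <bos> + start prompt + text + "\n\n" + end prompt
#   task 2:        <bos> + start prompt + text A + "\n\n" + middle prompt
#                  + text B + "\n\n" + end prompt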


def set_seed(seed):
    """Set the random seed for reproducibility."""
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    random.seed(seed)


def tokenize_data(tokenizer, data, task, max_length):
    """Tokenize the data and return the input_ids and attention_mask."""
    tokenized_start = tokenizer(START_PROMPT_TASK[task])["input_ids"]
    tokenized_end = tokenizer(END_PROMPT_TASK[task])["input_ids"]
    if task == 2:
        tokenized_middle = tokenizer(MIDDLE_PROMPT_TASK2)["input_ids"]

    # Tokenize the data
    tokenized_data = []
    if task == 1 or task == 3:
        for sentence in data:
            tokenized_sentence = tokenizer(sentence + SEP)["input_ids"]

            # Concatenate the tokenized data
            concatted_data = (
                [tokenizer.bos_token_id]
                + tokenized_start
                + tokenized_sentence
                + tokenized_end
            )

            # Truncate to the model's maximum input length (note that this
            # can clip the end prompt for very long inputs)
            concatted_data = concatted_data[:max_length]

            tokenized_data.append(concatted_data)
    elif task == 2:
        for line in data:
            data_a = line["a"]
            data_b = line["b"]
            tokenized_sentence_a = tokenizer(data_a + SEP)["input_ids"]
            tokenized_sentence_b = tokenizer(data_b + SEP)["input_ids"]

            # Concatenate the tokenized data
            concatted_data = (
                [tokenizer.bos_token_id]
                + tokenized_start
                + tokenized_sentence_a
                + tokenized_middle
                + tokenized_sentence_b
                + tokenized_end
            )

            # Truncate to the model's maximum input length (note that this
            # can clip the end prompt for very long inputs)
            concatted_data = concatted_data[:max_length]

            tokenized_data.append(concatted_data)

    return tokenized_data


def run_model_on_data(model_path, tokenizer_name, arguments):
    """Run the model on the data and save the predictions to a file."""
    # Load the model and tokenizer
    model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16)
    model.to(device)
    model.eval()
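    # Note: bfloat16 inference assumes supporting hardware (e.g. recent GPUs);
    # on CPU or older GPUs, loading with torch_dtype=torch.float32 may be safer.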
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)

    # Load the data: plain text lines for tasks 1 and 3, JSONL for task 2
    with open(arguments.input_file, "r", encoding="utf-8") as file:
        data = file.read().splitlines()
    if arguments.task == 2:
        data = [json.loads(line) for line in data]

    # Tokenize the data
    data_tokenized = tokenize_data(
        tokenizer, data, arguments.task, tokenizer.model_max_length
    )
    logging.info(f"Number of examples: {len(data_tokenized)}")

    # Run the model on the data
    predictions = []
    for input_ids in tqdm.tqdm(data_tokenized, desc="Running model on data"):

        # Generate the predictions
        with torch.inference_mode(), torch.autocast(
            device_type=device.type, dtype=torch.bfloat16
        ):
            input_ids_tensor = torch.tensor(input_ids).unsqueeze(0).to(device)
            output = model.generate(
                input_ids=input_ids_tensor, max_new_tokens=500, num_return_sequences=1
            )

            # Only get the part of the prediction that was generated
            prediction = tokenizer.decode(
                output[0][len(input_ids) :], skip_special_tokens=True
            )
            predictions.append(prediction)

    # Save the predictions to a file
    with open(arguments.output_file, "w", encoding="utf-8") as file:
        if arguments.task == 1:
            # Include the original text alongside each prediction
            for original, prediction in zip(data, predictions):
                file.write(original + "\n")
                file.write(prediction.split("\n\n")[0] + "\n\n")
        else:
            for prediction in predictions:
                file.write(prediction.split("\n\n")[0] + "\n")

    logging.info(f"Predictions written to file: {arguments.output_file}")


if __name__ == "__main__":
    # Parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=int, help="The task type (1, 2, or 3)")
    parser.add_argument(
        "--input-file",
        type=str,
        help="The path to the input file with data to be corrected",
    )
    parser.add_argument(
        "--output-file",
        type=str,
        help="The path to the output file where the corrected data will be saved",
    )
    args = parser.parse_args()

    model_path = "./gpt-sw3-model"
    tokenizer_name = "AI-Sweden-Models/gpt-sw3-6.7b"

    set_seed(42)
    run_model_on_data(model_path, tokenizer_name, args)
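
# Example invocation (hypothetical file names; adjust to your data):
#   python run_model.py --task 1 --input-file input.txt --output-file predictions.txt
# For task 2, the input file is expected to be JSONL, one object per line, e.g.:
#   {"a": "Fyrri útgáfa textans ...", "b": "Seinni útgáfa textans ..."}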