#!/usr/bin/env python3

import math
from collections import namedtuple

import torch
from torch.nn.utils.rnn import pad_sequence

from fairseq import checkpoint_utils, options, tasks, utils
from eet.fairseq.transformer import EETTransformerDecoder

Batch = namedtuple('Batch', 'ids src_tokens src_lengths')

def make_batches(lines, task, max_positions, encode_fn):
    # Encode each line in reverse order, right-pad, then flip the batch:
    # the net effect is left-padding, so every prompt ends at the same
    # position and generation can continue from the last column.
    # padding_value=1 is the fairseq default pad index.
    tokens = [task.source_dictionary.encode_line(encode_fn(line),
                                                 add_if_not_exist=False,
                                                 append_eos=False,
                                                 reverse_order=True).long()
              for line in lines]
    lengths = [t.numel() for t in tokens]
    tokens = pad_sequence(tokens, batch_first=True,
                          padding_value=1).flip(dims=(1,))

    return Batch(ids=torch.arange(len(tokens)),
                 src_tokens=tokens,
                 src_lengths=torch.tensor(lengths))
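
# For illustration (hypothetical token ids): two prompts encoding to [5, 6]
# and [7, 8, 9, 10] come back from make_batches left-padded with pad index 1:
#     [[1, 1, 5, 6],
#      [7, 8, 9, 10]]
# so the last real token of every prompt sits in the final column.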

def encode_fn(x_str):
    # Prepend the EOS token so the model treats the prompt as the start of a
    # new paragraph (see also append_right_eos in Inference.__call__).
    x_str = "</s> " + x_str
    return x_str


def decode_fn(x):
    # Strip the spaces inserted by character-level tokenization.
    x = x.replace(" ", "")
    return x


def eos_token_filter(sent):
    # Hook for rejecting unwanted hypotheses; accept everything by default.
    return True


def post_process(line):
    # Truncate at the first special token (e.g. the <0>/<1> speaker tags).
    if "<" in line:
        line = line.split("<")[0]
    return line


class Inference(object):

    def __init__(self, model_path, data_path, eet_batch_size):
        
        parser = options.get_generation_parser(default_task="language_modeling")
        args = options.parse_args_and_arch(parser)
        args.data = data_path
        args.path = model_path
        self.args = args

        # generation parameters
        args.beam = 1 # don't change
        args.min_len = 5
        args.max_len_b = 30
        args.lenpen = 1.0
        args.sampling = True 
        # args.sampling_topp = 0.7
        args.sampling_topk = 10
        args.temperature = 0.8
        args.no_repeat_ngram_size = 1
        args.fp16 = True

        # Set up the language modeling task
        task = tasks.setup_task(args)
        self.task = task
        # Set dictionaries
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary

        use_cuda = torch.cuda.is_available() and not args.cpu
        self.use_cuda = use_cuda

        # The checkpoint is shipped in three shards; merge them into a single
        # state dict and write the result back to `best.pt` so fairseq/EET can
        # load it in one piece.
        model_path = args.path
        checkpoint = torch.load(model_path.replace("best.pt", "best_part_1.pt"),
                                map_location="cpu")
        checkpoint["model"].update(torch.load(model_path.replace("best.pt", "best_part_2.pt"),
                                              map_location="cpu"))
        checkpoint["model"].update(torch.load(model_path.replace("best.pt", "best_part_3.pt"),
                                              map_location="cpu"))
        torch.save(checkpoint, model_path)
        
        # Recover the model hyper-parameters from the checkpoint config;
        # eval(str(...)) turns the stored config object into a plain dict,
        # which is then exposed as an argparse-style namespace for EET.
        state = torch.load(args.path, map_location=torch.device("cpu"))
        cfg_args = eval(str(state["cfg"]))["model"]
        del cfg_args["_name"]
        Model_args = namedtuple("Model_args", cfg_args.keys())
        model_args = Model_args(**cfg_args)
        del state

        eet_seq_len = 512  # maximum sequence length supported by the EET decoder
        data_type = torch.float16
        eet_config = {"data_type": data_type,
                      "max_batch": eet_batch_size,
                      "full_seq_len": eet_seq_len}
        print(model_args)
        eet_model = EETTransformerDecoder.from_fairseq_pretrained(
            model_id_or_path=args.path,
            dictionary=self.src_dict,
            args=model_args,
            config=eet_config,
            no_encoder_attn=True)
        self.models = [eet_model]
        # Initialize generator
        self.generator = task.build_generator(self.models, args)

        # Load alignment dictionary for unknown word replacement
        # (None if no unknown word replacement, empty if no path to align dictionary)
        self.align_dict = utils.load_align_dict(args.replace_unk)

        self.max_positions = 1024
        self.eos_index = self.tgt_dict.eos()
        self.pad_index = self.tgt_dict.pad()

    def __call__(self, inputs, append_right_eos=True):
        results = []
        start_id = 0

        batch = make_batches(inputs, self.task, self.max_positions, encode_fn)

        src_tokens = batch.src_tokens
        src_lengths = batch.src_lengths
        # ensure the prompt ends with EOS so generation always starts a new paragraph
        if src_tokens[0][-1].item() != self.eos_index and append_right_eos:
            src_tokens = torch.cat([src_tokens, src_tokens.new_ones(src_tokens.size(0), 1) * self.eos_index], dim=1)
            src_lengths += 1
        if self.use_cuda:
            src_tokens = src_tokens.cuda()
            src_lengths = src_lengths.cuda()
        sample = {
            'net_input': {
                'src_tokens': src_tokens,
                'src_lengths': src_lengths,
            },
        }
        
        translations = self.task.inference_step(self.generator, self.models, sample)

        for i, (sample_id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
            results.append((start_id + sample_id, src_tokens[i], hypos))

        # sort output to match input order
        final_results = []
        for _, prompt_tokens, hypos in sorted(results, key=lambda x: x[0]):
            # process top predictions
            tmp_res = []
            for hypo in hypos[:self.args.nbest]:
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    # keep only the tokens generated after the prompt
                    hypo_tokens=hypo['tokens'].int().cpu()[len(prompt_tokens) - 1:],
                    src_str=None,
                    alignment=hypo['alignment'],
                    align_dict=self.align_dict,
                    tgt_dict=self.tgt_dict)

                detok_hypo_str = decode_fn(hypo_str)
                if eos_token_filter(detok_hypo_str):
                    detok_hypo_str = post_process(detok_hypo_str)
                    score = hypo['score'] / math.log(2)  # convert to base 2
                    tmp_res.append([detok_hypo_str, score])
            final_results.append(tmp_res)
        return final_results


class Dialogue(object):
    def __init__(self, inference_model=None, max_dialogue_history=6):

        self.inference_model = inference_model
        self.max_dialogue_history = max_dialogue_history
        self.dialogue_history = []

    def get_response(self, input_text):
        self.dialogue_history.append(input_text.strip())
        # Build the model input: alternate <0>/<1> speaker tags over the most
        # recent turns, split each turn into space-separated characters, and
        # close with the tag of the speaker whose reply is to be generated.
        model_inp = ""
        for idx, x in enumerate(self.dialogue_history[-self.max_dialogue_history:]):
            if idx % 2 == 0:
                model_inp += " <0> " + " ".join(list(x))
            else:
                model_inp += " <1> " + " ".join(list(x))
        if idx % 2 == 0:
            model_inp += " <1>"
        else:
            model_inp += " <0>"
        # generate 5 candidates for the same context
        text = self.inference_model([model_inp] * 5, append_right_eos=False)
        response = [x[0][0] for x in text]
        # rank candidates by the number of distinct characters
        response = sorted(response, key=lambda x: len(set(x)))
        # heuristic rerank: score the three middle candidates by character
        # overlap with the context, weighted by length, and pick the runner-up
        overlap = [[len(set(x) & set(model_inp)) * len(x), x] for x in response[-4:-1]]
        overlap = sorted(overlap, key=lambda x: x[0])
        final_response = overlap[-2][1]
        self.dialogue_history.append(final_response)
        return final_response

    def clear_dialogue_history(self):
        self.dialogue_history = []
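

# Minimal usage sketch, under assumptions: the checkpoint path
# (checkpoints/best.pt, with best_part_{1,2,3}.pt shards beside it) and the
# data-bin path are hypothetical. Inference parses fairseq's generation CLI
# arguments, so the script still expects the usual positional data argument,
# e.g. `python inference.py data-bin`. eet_batch_size=5 matches the five
# candidates Dialogue requests per turn.
if __name__ == "__main__":
    inference_model = Inference(model_path="checkpoints/best.pt",
                                data_path="data-bin",
                                eet_batch_size=5)
    dialogue = Dialogue(inference_model=inference_model, max_dialogue_history=6)
    while True:
        user_input = input("user > ").strip()
        if not user_input:
            break
        print("bot  >", dialogue.get_response(user_input))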