# -*- coding: utf-8 -*-
import codecs


def make_submission(y_pred, output_file):
    """Write predictions to a tab-separated submission file.

    Each output row is "<1-based sample index>\t<integer label>\n".

    :param y_pred: iterable of predicted values (cast to int on write).
    :param output_file: destination path; written as UTF-8.
    """
    rows = ['%d\t%d\n' % (position + 1, int(value))
            for position, value in enumerate(y_pred)]
    with codecs.open(output_file, 'w', encoding='utf-8') as f_output:
        f_output.writelines(rows)


def post_processing(y_pred, data_loader, test=False):
    """Apply hand-written rules on top of the model's predictions.

    Rule 1: if the question particle '吗' occurs in sequence A, force the
    label to 0; otherwise keep the model's prediction (cast to int).

    :param y_pred: iterable of predicted labels, one per sample.
    :param data_loader: object exposing get_vocabulary() and the token-id
        sequences x_valid_a/x_valid_b (or x_test_a/x_test_b).
    :param test: when True, post-process the test split instead of the
        validation split.
    :return: list of int post-processed labels.
    """
    vocab = data_loader.get_vocabulary()
    # Token id of the Chinese question particle; raises KeyError if the
    # vocabulary does not contain it (same behavior as before).
    idx_ma = vocab[u'吗']
    if test:
        x_a = data_loader.x_test_a
        x_b = data_loader.x_test_b
    else:
        x_a = data_loader.x_valid_a
        x_b = data_loader.x_valid_b

    y_pred_post = []
    for y_hat, seq_a, seq_b in zip(y_pred, x_a, x_b):
        # Rule 1. The original condition
        #   (ma in A and ma not in B) or (ma in A and ma in B)
        # reduces to simply "ma in A" -- the B-side test was dead logic.
        if idx_ma in seq_a:
            y_pred_post.append(0)
        else:
            y_pred_post.append(int(y_hat))

    return y_pred_post


def output_error_lines(y_pred, y_true, y_result, data_loader,
                       output_file='datasets/valid_error.csv'):
    """Dump misclassified validation samples to a TSV file for error analysis.

    Each output line is: "text_a\ttext_b\ttrue_label\tscore\n", where the
    texts are rebuilt by mapping token ids back through the vocabulary.

    :param y_pred: predicted labels.
    :param y_true: gold labels (written with %d, so expected to be int-like).
    :param y_result: per-sample score sequences; y_result[i][0] is written.
    :param data_loader: object exposing get_vocabulary() and the token-id
        sequences x_valid_a/x_valid_b.
    :param output_file: destination path; defaults to the previously
        hard-coded 'datasets/valid_error.csv' for backward compatibility.
    """
    # Invert token -> id into id -> token for decoding the sequences.
    # (The original shadowed one `vocab` name with its own inversion.)
    id_to_token = dict((v, k) for k, v in data_loader.get_vocabulary().items())
    x_a = data_loader.x_valid_a
    x_b = data_loader.x_valid_b
    t_line = '%s\t%s\t%d\t%f\n'
    target_lines = []
    for idx, (pred, true) in enumerate(zip(y_pred, y_true)):
        if pred != true:
            text_a = ''.join(id_to_token[token_id] for token_id in x_a[idx])
            text_b = ''.join(id_to_token[token_id] for token_id in x_b[idx])
            target_lines.append(t_line % (text_a, text_b, true, y_result[idx][0]))

    with codecs.open(output_file, 'w', encoding='utf-8') as f_output:
        f_output.writelines(target_lines)
