import codecs
import csv
import json
import math

import jieba
import numpy as np

from config import Config
from dataset.collator import ClassificationType
from dataset.data_preprocessor import clean_punctuations
from predict import Predictor
from treebuilder import get_deepest_data


def preprocess_jieba(csv_file, json_file):
    """Segment the text column of ``csv_file`` with jieba and write one
    JSON sample per line to ``json_file``.

    Each output line is a dict with the keys the classifier's dataset
    loader expects: ``doc_label`` (placeholder), ``doc_token`` (the
    space-split segmented text), ``doc_keyword`` and ``doc_topic``
    (both empty).
    """
    with open(csv_file, 'r', encoding="gbk") as fin, \
            open(json_file, "w", encoding='utf-8') as fout:
        for row in csv.reader(fin):
            # Segment the third column, then strip punctuation.
            segmented = ' '.join(jieba.cut(row[2].strip()))
            segmented = clean_punctuations(segmented)
            # Collapse any run of whitespace down to a single space.
            segmented = ' '.join(segmented.split())
            sample = {
                'doc_label': ['Test Label'],
                'doc_token': segmented.split(' '),
                'doc_keyword': [],
                'doc_topic': [],
            }
            fout.write(json.dumps(sample, ensure_ascii=False))
            fout.write('\n')


if __name__ == '__main__':
    csv_file = 'data/predict.csv'
    json_file = 'data/predict.json'
    config_file = 'conf/train.json'
    result_file = 'data/predict_result.csv'

    # Segment the raw CSV into the line-delimited JSON the predictor reads.
    preprocess_jieba(csv_file, json_file)

    config = Config(config_file=config_file)
    predictor = Predictor(config)
    batch_size = config.eval.batch_size
    is_multi = config.task_info.label_type == ClassificationType.MULTI_LABEL

    # Load the preprocessed samples (one JSON document per line).
    # Use a context manager so the handle is closed (the original
    # codecs.open call leaked it).
    with codecs.open(json_file, "r", predictor.dataset.CHARSET) as f:
        input_texts = [line.strip("\n") for line in f]

    num_batches = math.ceil(len(input_texts) / batch_size)
    with open(result_file, 'w', encoding="gbk", newline='') as fout:
        with open(csv_file, 'r', encoding="gbk") as fin:
            rows = list(csv.reader(fin))
            csv_write = csv.writer(fout, dialect='excel')
            for i in range(num_batches):
                batch_texts = input_texts[i * batch_size:(i + 1) * batch_size]
                predict_probs = predictor.predict(batch_texts)
                for offset, predict_prob in enumerate(predict_probs):
                    if not is_multi:
                        # Single-label: take the highest-scoring class.
                        predict_label_ids = [predict_prob.argmax()]
                    else:
                        # Multi-label: keep up to top_k classes whose
                        # probability exceeds the configured threshold.
                        predict_label_ids = []
                        ranked = np.argsort(-predict_prob)
                        for j in range(config.eval.top_k):
                            if predict_prob[ranked[j]] > config.eval.threshold:
                                predict_label_ids.append(ranked[j])
                    predict_label_list = [predictor.dataset.id_to_label_map[label_id]
                                          for label_id in predict_label_ids]
                    predict_label_list = get_deepest_data(predict_label_list)
                    predict_label = "/".join(predict_label_list)
                    # BUG FIX: index the source CSV row by its global
                    # position, not the position within the current batch.
                    # The original `lines[index]` re-used rows
                    # 0..batch_size-1 for every batch after the first.
                    row = rows[i * batch_size + offset]
                    csv_write.writerow([row[0], predict_label, row[2].strip()])
