import argparse
import csv
import os

import mindspore
import mindspore.dataset as ms_dataset
import mindspore.nn as nn
import mindspore.numpy as mnp
import numpy as np
import pandas as pd
import process
from lstm import SentimentNet
from mindspore import (PYNATIVE_MODE, Tensor, load_checkpoint,
                       load_param_into_net, save_checkpoint, set_context)
from mindspore.common.initializer import HeUniform, Uniform
from tqdm import tqdm


def parse_args(argv=None):
    """Parse command-line arguments for the sentiment-model inference script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case ``sys.argv[1:]`` is used — backward compatible with
            the original zero-argument call, and makes the parser testable.

    Returns:
        argparse.Namespace with ``model_path``, ``output_path`` and
        ``test_data_path`` attributes.
    """
    # NOTE(review): description says "train textcnn" but this script runs
    # LSTM inference — confirm intended before changing user-visible help.
    parser = argparse.ArgumentParser(description="train textcnn",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model_path', type=str, default='./', help='model path')
    parser.add_argument('--output_path', type=str, default='./result.txt', help='the predicted txt saved')
    parser.add_argument('--test_data_path', type=str, default='./test_clean.csv', help='testdata path')
    return parser.parse_args(argv)


class AMAZONData():
    """Amazon review dataset loader.

    Reads a CSV file with ``review`` and ``label`` columns, cleans each
    review's text, and exposes the result as a random-access Python object
    suitable for ``mindspore.dataset.GeneratorDataset``.
    """

    def __init__(self, path):
        # path: CSV file expected to contain 'review' and 'label' columns.
        self.path = path
        self.reviews = []  # tokenized, cleaned reviews (list of word lists)
        self.labels = []   # integer class labels, aligned with self.reviews
        self._load()

    def _load(self):
        """Load the whole CSV into memory, cleaning each review.

        Fix: the original opened the file without ever closing it (handle
        leak); the ``with`` block guarantees the file is closed.
        """
        with open(self.path, "r") as csv_file:
            for row in csv.DictReader(csv_file):
                review = row['review']
                # Text-normalization pipeline: lower-case, expand
                # abbreviations, strip punctuation, HTML, URLs and emoji.
                review = process.lower(review)
                review = process.remove_abb(review)
                review = process.remove_punctuations(review)
                review = process.remove_html(review)
                review = process.remove_url(review)
                review = process.remove_emoji(review)
                self.reviews.append(review.split())
                # Labels may be stored as floats (e.g. "1.0"); truncate to int.
                label = int(np.float32(row['label']))
                self.labels.append(label)

    def __getitem__(self, idx):
        return self.reviews[idx], self.labels[idx]

    def __len__(self):
        return len(self.reviews)


def load_amazon(amazon_path):
    """Wrap the Amazon test CSV in a non-shuffled GeneratorDataset."""
    source = AMAZONData(amazon_path)
    amazon_test = ms_dataset.GeneratorDataset(
        source, column_names=["review", "label"], shuffle=False)
    return amazon_test


def load_glove(glove_path):
    """Load GloVe 6B 100-d word embeddings and build a matching vocab.

    Args:
        glove_path: Directory containing ``glove.6B.100d.txt``.

    Returns:
        Tuple ``(vocab, embeddings)``: a mindspore Vocab whose last two ids
        are the special tokens ``<unk>`` and ``<pad>``, and a float32 array
        of shape (num_tokens + 2, 100) whose rows align with the vocab ids.
    """
    glove_100d_path = os.path.join(glove_path, 'glove.6B.100d.txt')
    embeddings = []
    tokens = []
    with open(glove_100d_path, encoding='utf-8') as gf:
        for line in gf:
            word, vector = line.split(maxsplit=1)
            tokens.append(word)
            # Fix: np.fromstring is deprecated for text parsing; split the
            # whitespace-separated floats explicitly instead.
            embeddings.append(np.array(vector.split(), dtype=np.float32))
    # Rows for the two special tokens: a random vector for <unk> and an
    # all-zeros vector for <pad>. special_first=False places them at the
    # end of the vocab, matching the order appended here.
    embeddings.append(np.random.rand(100))
    embeddings.append(np.zeros((100,), np.float32))
    vocab = ms_dataset.text.Vocab.from_list(
        tokens, special_tokens=["<unk>", "<pad>"], special_first=False)
    embeddings = np.array(embeddings).astype(np.float32)
    return vocab, embeddings


def data_preprocessing(vocab, amazon_train):
    """Map tokens to ids, pad reviews to length 400, cast labels to
    float32, and batch the dataset by 64 (keeping the final partial batch)."""
    pad_id = vocab.tokens_to_ids('<pad>')
    lookup = ms_dataset.text.Lookup(vocab, unknown_token='<unk>')
    pad_to_400 = ms_dataset.transforms.c_transforms.PadEnd([400], pad_value=pad_id)
    to_float32 = ms_dataset.transforms.c_transforms.TypeCast(mindspore.float32)
    dataset = amazon_train.map(operations=[lookup, pad_to_400], input_columns=['review'])
    dataset = dataset.map(operations=[to_float32], input_columns=['label'])
    return dataset.batch(64, drop_remainder=False)


if __name__ == '__main__':
    # Run ensemble inference on CPU in PyNative (eager) mode.
    set_context(mode=mindspore.PYNATIVE_MODE, device_target='CPU')
    args_opt = parse_args()

    # Build the vocab/embedding table once; both ensemble members share it.
    vocab, embeddings = load_glove(args_opt.model_path)
    pad_idx = vocab.tokens_to_ids('<pad>')

    # Two-model ensemble: independently trained checkpoints of the same net.
    net1 = SentimentNet(embeddings, pad_idx)
    net2 = SentimentNet(embeddings, pad_idx)
    param_dict = load_checkpoint(os.path.join(args_opt.model_path, 'lstm_1.ckpt'))
    load_param_into_net(net1, param_dict)

    param_dict = load_checkpoint(os.path.join(args_opt.model_path, 'lstm_2.ckpt'))
    load_param_into_net(net2, param_dict)

    amazon_test = load_amazon(args_opt.test_data_path)
    amazon_test = data_preprocessing(vocab, amazon_test)
    net1.set_train(False)
    net2.set_train(False)

    # Fix: hoist op construction out of the loop — the original rebuilt
    # Softmax and Argmax objects for every batch, which is pure overhead.
    softmax = nn.Softmax(axis=-1)
    argmax = mindspore.ops.Argmax(axis=-1)

    with open(args_opt.output_path, 'w+') as result_file:
        for batch in amazon_test.create_tuple_iterator():
            # Sum the two models' outputs, then pick the best class.
            # NOTE(review): softmax is monotonic, so it cannot change the
            # argmax; it is kept only to preserve the original computation.
            pre = softmax(net1(batch[0]) + net2(batch[0]))
            pre = argmax(pre) + 1  # predicted labels are 1-based
            for j in range(pre.shape[0]):
                # write() (not writelines) — one predicted label per line.
                result_file.write(str(pre[j]) + '\n')



#%%
