import argparse
import csv
import math
import os
import re
import string

import mindspore
import mindspore.dataset as ms_dataset
import mindspore.nn as nn
import mindspore.numpy as mnp
import numpy as np
import pandas as pd
from mindspore import (PYNATIVE_MODE, Tensor, load_checkpoint,
                       load_param_into_net, save_checkpoint, set_context)
from mindspore.common.initializer import HeUniform, Uniform
from tqdm import tqdm

from lstm import SentimentNet


def parse_args():
    """Parse command-line options for the evaluation run.

    Returns:
        argparse.Namespace with paths, training hyper-parameters and the
        target device. All options have defaults, so no argument is required.
    """
    parser = argparse.ArgumentParser(description="train textcnn",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--amazon_path', type=str, default='train.csv', help='amazon path')
    parser.add_argument('--glove_path', type=str, default='./', help='glove path')
    parser.add_argument('--out_path', default='train/save_model/', type=str, help='the path model saved')
    parser.add_argument('--epoch_size', default=10, type=int, help='training epochs')
    parser.add_argument('--base_lr', default=1e-3, type=float, help='learning rate')
    parser.add_argument('--device_target', type=str, default='GPU')
    # '--weight_decay' is the canonical spelling; the original single-dash
    # '-weight_decay' is kept as an alias so existing invocations still work.
    parser.add_argument('--weight_decay', '-weight_decay', default=3e-5, type=float)
    parser.add_argument('--class_num', default=5, type=int)
    args_opt = parser.parse_args()
    return args_opt


class AMAZONData():
    """Amazon reviews dataset loader.

    Reads a CSV with 'review' and 'label' columns into memory and exposes
    it as an indexable Python object suitable for a GeneratorDataset.
    """

    def __init__(self, path):
        # path: CSV file with a header row containing 'review' and 'label'.
        self.path = path
        self.reviews = []   # list[list[str]]: whitespace-tokenized reviews
        self.labels = []    # list[int]: integer class labels
        self._load()

    def _load(self):
        """Load the whole CSV into memory.

        Uses a context manager so the file handle is always closed
        (the original code leaked it).
        """
        with open(self.path, "r") as csv_file:
            dict_reader = csv.DictReader(csv_file)
            for row in dict_reader:
                # Simple whitespace tokenization of the review text.
                self.reviews.append(row['review'].split())
                # Labels may be stored as floats (e.g. "4.0"); truncate to int.
                self.labels.append(int(np.float32(row['label'])))
        print('end')

    def __getitem__(self, idx):
        return self.reviews[idx], self.labels[idx]

    def __len__(self):
        return len(self.reviews)


def load_amazon(amazon_path):
    """Wrap the CSV at *amazon_path* in a non-shuffled GeneratorDataset.

    Shuffling is disabled because this loader is used for evaluation,
    where prediction order must match the input order.
    """
    source = AMAZONData(amazon_path)
    return ms_dataset.GeneratorDataset(source, column_names=["review", "label"], shuffle=False)


def load_glove(glove_path):
    """Load GloVe word vectors and build a MindSpore vocabulary.

    Each line of the GloVe file is `word v1 v2 ... vD`. Two special tokens
    are appended *after* the real vocabulary (special_first=False), and the
    embedding rows are appended in the same order so ids line up:
      <unk> -> random vector, <pad> -> zero vector.

    Args:
        glove_path: path to a GloVe text file (e.g. glove.6B.100d.txt).

    Returns:
        (vocab, embeddings): a ms_dataset.text.Vocab and a float32 ndarray
        of shape (len(tokens) + 2, D).
    """
    tokens = []
    embeddings = []
    with open(glove_path, encoding='utf-8') as gf:
        for line in gf:
            word, vector = line.split(maxsplit=1)
            tokens.append(word)
            # np.fromstring(..., sep=' ') is deprecated; parse explicitly.
            embeddings.append(np.array(vector.split(), dtype=np.float32))
    # Infer the embedding dimension from the data instead of hard-coding 100
    # (falls back to 100 for an empty file).
    dim = embeddings[0].shape[0] if embeddings else 100
    # Embeddings for the special tokens, in <unk>, <pad> order.
    embeddings.append(np.random.rand(dim))
    embeddings.append(np.zeros((dim,), np.float32))
    vocab = ms_dataset.text.Vocab.from_list(tokens, special_tokens=["<unk>", "<pad>"], special_first=False)
    embeddings = np.array(embeddings).astype(np.float32)
    return vocab, embeddings


def data_preprocessing(vocab, amazon_train):
    """Turn tokenized reviews into fixed-length id sequences and batch them.

    Pipeline: token -> id lookup (OOV mapped to <unk>), pad/truncate the
    'review' column to length 400 with the <pad> id, cast 'label' to
    float32, then batch by 64 keeping the final partial batch.
    """
    pad_id = vocab.tokens_to_ids('<pad>')
    review_ops = [
        ms_dataset.text.Lookup(vocab, unknown_token='<unk>'),
        ms_dataset.transforms.c_transforms.PadEnd([400], pad_value=pad_id),
    ]
    dataset = amazon_train.map(operations=review_ops, input_columns=['review'])
    dataset = dataset.map(
        operations=[ms_dataset.transforms.c_transforms.TypeCast(mindspore.float32)],
        input_columns=['label'])
    return dataset.batch(64, drop_remainder=False)


if __name__ == '__main__':
    print('=' * 100)
    print(os.listdir())
    print('=' * 100)
    # NOTE(review): evaluation is pinned to CPU / PyNative here; the
    # --device_target option is parsed but not used — confirm intended.
    set_context(mode=mindspore.PYNATIVE_MODE, device_target='CPU')
    args_opt = parse_args()

    vocab, embeddings = load_glove(os.path.join(args_opt.glove_path, 'glove.6B.100d.txt'))
    pad_idx = vocab.tokens_to_ids('<pad>')
    net = SentimentNet(embeddings, pad_idx)
    # Restore trained weights into the network before inference.
    param_dict = load_checkpoint('fast_text_valid_0.7629206730769231.ckpt')
    load_param_into_net(net, param_dict)
    amazon_test = load_amazon('test_clean.csv')
    amazon_test = data_preprocessing(vocab, amazon_test)
    net.set_train(False)
    softmax = mindspore.nn.Softmax(axis=-1)
    argmax = mindspore.ops.Argmax(axis=-1)
    with open('final_result_1205.txt', 'w+') as result_file:
        for batch in amazon_test.create_tuple_iterator():
            logits = net(batch[0])
            # Class ids are 0-based; shift by 1 to get labels in 1..class_num.
            predictions = argmax(softmax(logits)) + 1
            for k in range(predictions.shape[0]):
                result_file.write(str(predictions[k]) + '\n')



#%%
