# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################train textcnn example on movie review########################
python train.py
"""
import argparse
import csv
import hashlib
import math
import os
import re
import string
import sys

import mindspore
import mindspore as ms
import mindspore.dataset as ms_dataset
import mindspore.nn as nn
import mindspore.numpy as mnp
import numpy as np
from lstm import SentimentNet
from mindspore import Tensor, ops, save_checkpoint
from mindspore.common import set_seed
from mindspore.common.initializer import HeUniform, Uniform
from mindspore.nn.metrics import Accuracy
from mindspore.train.callback import (Callback, CheckpointConfig, LossMonitor,
                                      ModelCheckpoint, TimeMonitor)
from mindspore.train.model import Model
from sklearn.model_selection import KFold, train_test_split
from tqdm import tqdm

# Fix MindSpore's global RNG seed so training runs are reproducible.
set_seed(1)


def evaluate_train_loop(model, dataset):
    """Report classification accuracy of ``model`` over the batched training set.

    Args:
        model: network producing class logits for a batch of padded reviews.
        dataset: batched dataset yielding ``(data, one-hot label)`` tuples.

    Side effects:
        Puts the model in eval mode and prints the accuracy percentage.
    """
    model.set_train(False)
    # Hoisted out of the loop: the original constructed a fresh Softmax op
    # on every batch for no benefit.
    softmax = mindspore.nn.Softmax(axis=-1)
    total, correct = 0, 0
    for data, label in dataset.create_tuple_iterator():
        pred = model(data)
        pre = softmax(pred)
        total += len(data)
        # Labels are one-hot (see AMAZONData.__getitem__), so argmax over the
        # last axis recovers the class index for both prediction and label.
        correct += (pre.argmax(1) == label.argmax(1)).asnumpy().sum()
    correct /= total
    print(f"Training: \n Accuracy: {(100*correct):>0.1f}%\n")


def evaluate_valid_loop(model, dataset, loss_fn, best_acc, output_path=None):
    """Evaluate ``model`` on the validation set; checkpoint when accuracy improves.

    Args:
        model: network producing class logits.
        dataset: batched validation dataset yielding ``(data, one-hot label)``.
        loss_fn: loss function, used only to report the average validation loss.
        best_acc: best accuracy seen so far; a checkpoint is saved when beaten.
        output_path: checkpoint directory. Defaults to the module-level
            ``args_opt.output_path`` for backward compatibility — the original
            read that global directly.

    Returns:
        The (possibly updated) best accuracy.
    """
    num_batches = dataset.get_dataset_size()
    model.set_train(False)
    # Hoisted: the original rebuilt the Softmax op on every batch.
    softmax = mindspore.nn.Softmax(axis=-1)
    total, test_loss, correct = 0, 0, 0
    for data, label in dataset.create_tuple_iterator():
        pred = model(data)
        pre = softmax(pred)
        total += len(data)
        test_loss += loss_fn(pred, label).asnumpy()
        # One-hot labels: argmax recovers the class index on both sides.
        correct += (pre.argmax(1) == label.argmax(1)).asnumpy().sum()
    test_loss /= num_batches
    correct /= total
    if correct > best_acc:
        best_acc = correct
        if output_path is None:
            # NOTE(review): falls back to the module-level global the original
            # depended on; pass output_path explicitly where possible.
            output_path = args_opt.output_path
        save_checkpoint(model, os.path.join(output_path, 'lstm_{}.ckpt'.format(best_acc)))
    print(f"Valid: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    return best_acc


def parse_args():
    """Parse the command-line arguments for training.

    Returns:
        argparse.Namespace with ``train_data_path``, ``output_path``,
        ``epoch_size`` and ``base_lr``.
    """
    # Description fixed: this script trains an LSTM (SentimentNet), not a
    # TextCNN, and the data is Amazon reviews rather than "imbd".
    parser = argparse.ArgumentParser(description="train lstm",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--train_data_path', type=str,
                        default=r'F:\Yuan\MyTrainOnProgramming\昇思线上\wtgywa',
                        help='directory containing train.csv, eda_train.csv and glove.6B.100d.txt')
    parser.add_argument('--output_path', default='inference/', type=str, help='the path model saved')
    parser.add_argument('--epoch_size', default=4, type=int, help='training epochs')
    parser.add_argument('--base_lr', default=0.001, type=float, help='learning rate')
    args_opt = parser.parse_args()
    return args_opt


def train_loop(model, dataset, loss_fn, optimizer):
    """Run one epoch of functional-style training over ``dataset``.

    Builds a value-and-grad function around the forward pass, applies the
    optimizer each step, and prints the loss every 100 batches.
    """

    def forward_fn(data, label):
        # Forward pass: loss is the differentiated output, logits are aux.
        logits = model(data)
        return loss_fn(logits, label), logits

    # Differentiate w.r.t. the optimizer's parameter list only.
    grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True)

    def train_step(data, label):
        # One optimizer update; depend() forces the update to run before
        # the loss value is returned.
        (loss, _), grads = grad_fn(data, label)
        return ops.depend(loss, optimizer(grads))

    size = dataset.get_dataset_size()
    model.set_train()
    batch = 0
    for data, label in dataset.create_tuple_iterator():
        loss = train_step(data, label)
        if batch % 100 == 0:
            loss, current = loss.asnumpy(), batch
            print(f"loss: {loss:>7f}  [{current:>3d}/{size:>3d}]")
        batch += 1


def load_glove(glove_path, embedding_dim=100):
    """Load GloVe word vectors and build the matching MindSpore vocabulary.

    Args:
        glove_path: path to a GloVe text file (``word v1 v2 ...`` per line).
        embedding_dim: vector dimensionality; defaults to 100 to match the
            previously hard-coded ``glove.6B.100d`` format.

    Returns:
        (vocab, embeddings): a Vocab with ``<unk>``/``<pad>`` appended after
        the GloVe tokens, and a float32 matrix whose last two rows are a
        random ``<unk>`` vector and an all-zero ``<pad>`` vector.
    """
    embeddings = []
    tokens = []
    with open(glove_path, encoding='utf-8') as gf:
        for glove in gf:
            word, embedding = glove.split(maxsplit=1)
            tokens.append(word)
            # np.fromstring(..., sep=' ') is deprecated; parse via split().
            embeddings.append(np.array(embedding.split(), dtype=np.float32))
    # Embeddings for the two special tokens: random for <unk>, zeros for <pad>.
    embeddings.append(np.random.rand(embedding_dim))
    embeddings.append(np.zeros((embedding_dim,), np.float32))
    # special_first=False keeps <unk>/<pad> ids aligned with the two rows
    # appended above.
    vocab = ms_dataset.text.Vocab.from_list(tokens, special_tokens=["<unk>", "<pad>"], special_first=False)
    embeddings = np.array(embeddings).astype(np.float32)
    return vocab, embeddings


def data_preprocessing(vocab, amazon_train, seq_len=400, batch_size=256):
    """Map a (review, label) dataset to padded id tensors and batch it.

    Args:
        vocab: MindSpore Vocab for token-to-id lookup (OOV maps to ``<unk>``).
        amazon_train: dataset with ``review`` (token list) and ``label`` columns.
        seq_len: fixed length reviews are padded to with the ``<pad>`` id
            (default 400, the previously hard-coded value).
        batch_size: batch size (default 256, the previously hard-coded value).

    Returns:
        The transformed dataset; incomplete final batches are dropped so all
        batches have a static shape.
    """
    lookup_op = ms_dataset.text.Lookup(vocab, unknown_token='<unk>')
    pad_op = ms_dataset.transforms.c_transforms.PadEnd([seq_len], pad_value=vocab.tokens_to_ids('<pad>'))
    # Labels are cast to float32 to match the loss function's expected dtype.
    type_cast_op = ms_dataset.transforms.c_transforms.TypeCast(mindspore.float32)
    amazon_train = amazon_train.map(operations=[lookup_op, pad_op], input_columns=['review'])
    amazon_train = amazon_train.map(operations=[type_cast_op], input_columns=['label'])
    amazon_train = amazon_train.batch(batch_size, drop_remainder=True)
    return amazon_train


class AMAZONData():
    """Amazon review dataset adapter.

    Wraps parallel sequences of tokenized reviews and integer labels as a
    random-access Python object suitable for ``GeneratorDataset``.

    Args:
        texts: sequence of reviews (each a list of tokens).
        labels: sequence of 1-based integer class labels.
        num_classes: number of classes; labels must lie in ``1..num_classes``.
            Defaults to 5, the previously hard-coded value.
    """

    def __init__(self, texts, labels, num_classes=5):
        self.texts = texts
        self.labels = labels
        self.num_classes = num_classes

    def __getitem__(self, idx):
        # One-hot encode the 1-based label.
        one_hot = [0] * self.num_classes
        one_hot[self.labels[idx] - 1] = 1
        return self.texts[idx], one_hot

    def __len__(self):
        return len(self.texts)


def load_amazon(amazon_path, amazonboost_path, valid_size=10000, total_size=80000):
    """Build train/validation GeneratorDatasets from the Amazon review CSVs.

    Validation rows are drawn from the original file by a random index split
    over ``range(total_size)``. Training rows come from the augmented file,
    whose ``idx`` column maps each row back to an original-file row index, so
    augmented variants of validation rows are kept out of training.

    Args:
        amazon_path: original CSV with ``review`` and ``label`` columns.
        amazonboost_path: augmented CSV with ``review``, ``label``, ``idx``.
        valid_size: number of original rows reserved for validation.
        total_size: number of indexed rows in the original file (default
            80000, the previously hard-coded value).

    Returns:
        (training_dataset, valid_dataset): GeneratorDatasets with columns
        ``review`` and ``label``; only training is shuffled.
    """
    train_reviews, train_labels = [], []
    valid_reviews, valid_labels = [], []

    numbers_train, numbers_valid = train_test_split(list(range(total_size)), test_size=valid_size)
    numbers_train = set(numbers_train)
    numbers_valid = set(numbers_valid)

    # BUG FIX: the original opened both CSV files without closing them;
    # use context managers so the handles are released.
    with open(amazon_path, "r") as csv_file:
        # enumerate preserves the original row counter, which advanced for
        # every row (including empty reviews).
        for t, row in enumerate(csv.DictReader(csv_file)):
            review_list = row['review'].split()
            label = int(np.float32(row['label']))
            if review_list and t in numbers_valid:
                valid_reviews.append(review_list)
                valid_labels.append(label)

    with open(amazonboost_path, "r") as csv_file:
        for row in csv.DictReader(csv_file):
            review_list = row['review'].split()
            label = int(np.float32(row['label']))
            number = int(np.float32(row['idx']))
            if review_list and number in numbers_train:
                train_reviews.append(review_list)
                train_labels.append(label)

    training_amazon_data = AMAZONData(train_reviews, train_labels)
    training_dataset = ms_dataset.GeneratorDataset(training_amazon_data, column_names=["review", "label"], shuffle=True)
    valid_amazon_data = AMAZONData(valid_reviews, valid_labels)
    valid_dataset = ms_dataset.GeneratorDataset(valid_amazon_data, column_names=["review", "label"], shuffle=False)
    return training_dataset, valid_dataset


def _report_file(path, desc):
    """Print the size and MD5 digest of *path* as a provenance check."""
    stats = os.stat(path)
    print('The %s is %s Bytes' % (desc, stats.st_size))
    with open(path, 'rb') as fp:
        data = fp.read()
    file_md5 = hashlib.md5(data).hexdigest()
    print('The hash code of %s is %s' % (desc, file_md5))


def train_net(args_opt):
    """Train the LSTM sentiment net on the Amazon review data.

    Loads the data and GloVe embeddings, then runs ``epoch_size`` epochs,
    checkpointing whenever validation accuracy improves.
    """
    # Banner + provenance report for the two input files.
    for _ in range(50):
        print('='*30)
    _report_file(os.path.join(args_opt.train_data_path, 'train.csv'), 'training file')
    _report_file(os.path.join(args_opt.train_data_path, 'eda_train.csv'), 'data augmentation file')
    for _ in range(50):
        print('='*30)

    amazon_train, amazon_valid = load_amazon(os.path.join(args_opt.train_data_path, 'train.csv'),
                                             os.path.join(args_opt.train_data_path, 'eda_train.csv'))
    vocab, embeddings = load_glove(os.path.join(args_opt.train_data_path, 'glove.6B.100d.txt'))
    pad_idx = vocab.tokens_to_ids('<pad>')
    amazon_train = data_preprocessing(vocab, amazon_train)
    amazon_valid = data_preprocessing(vocab, amazon_valid)
    base_lr = float(args_opt.base_lr)
    net = SentimentNet(embeddings, pad_idx)
    optimizer = nn.Adam(net.trainable_params(), learning_rate=base_lr)
    loss_fn = nn.CrossEntropyLoss()
    epochs = args_opt.epoch_size
    best_acc = 0
    for t in range(epochs):
        print(f"Epoch {t+1}\n-------------------------------")
        train_loop(net, amazon_train, loss_fn, optimizer)
        # BUG FIX: the original called evaluate_valid_loop on the *training*
        # set here, which printed "Valid:" for training data and saved
        # spurious checkpoints based on training accuracy, while the
        # dedicated evaluate_train_loop was defined but never used.
        evaluate_train_loop(net, amazon_train)
        best_acc = evaluate_valid_loop(net, amazon_valid, loss_fn, best_acc)
    print("Done!")
    print("train success")


if __name__ == '__main__':
    args_opt = parse_args()
    # NOTE(review): on Windows the script falls back to PyNative mode on the
    # default device target, presumably because the GPU graph-mode setup used
    # below is unavailable there — confirm against the deployment environment.
    if sys.platform == 'win32':
        ms.set_context(mode=ms.PYNATIVE_MODE)
    else:
        ms.set_context(mode=ms.GRAPH_MODE, device_target="GPU")
    train_net(args_opt)
