#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: Gaoxiang

import torch
import os
import logging
import numpy as np
from text_cls_config import Config
from text_cls_dataloader import DataIterator
from text_cls_dataloader import dataloader_init
from text_cls_model import fastText
from text_cls_evaluate import Evaluator

# Script-wide logging: timestamped, leveled messages on the root handler;
# module logger is used by both the training loop and the Evaluator.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def main(config):
    """Train a fastText text classifier and checkpoint the model each epoch.

    Args:
        config: dict-like configuration holding paths (``model_path``,
            ``train_data_path``, vocab paths), hyperparameters
            (``batch_size``, ``learning_rate``, ``num_epoches``) and flags
            (``use_cuda``, ``to_build_new_vocab``).

    Side effects:
        Creates ``config["model_path"]`` if missing, may set
        ``config["to_build_new_vocab"]``, writes one ``epoch_N.pth``
        state-dict file per epoch, and logs progress via ``logger``.
    """
    # makedirs handles nested paths and exist_ok avoids the race between
    # the old isdir() check and mkdir().
    os.makedirs(config["model_path"], exist_ok=True)

    # Rebuild vocabularies when either cached vocab file is missing.
    if not (os.path.exists(config["label_vocab_path"]) and os.path.exists(config["text_vocab_path"])):
        config["to_build_new_vocab"] = True

    datafield = dataloader_init(config=config)  # initialize dataloader settings; returns the field
    train_data = DataIterator(raw_set=config["train_data_path"],
                              is_test_set=False,
                              use_cuda=config["use_cuda"],
                              batchsize=config["batch_size"],
                              datafield=datafield,
                              config=config)

    model = fastText(config)
    evaluator = Evaluator(config, model, logger, datafield)
    if config["use_cuda"]:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=config["learning_rate"])
    criterion = torch.nn.CrossEntropyLoss()

    # Epochs are 1-based for logging and checkpoint filenames.
    for epoch in range(1, config["num_epoches"] + 1):
        model.train()  # re-enable train mode; eval_sklearn may have switched it off
        logger.info("epoch %d begin", epoch)

        # BUG FIX: reset losses every epoch. Previously the list was created
        # once before the loop, so "epoch average loss" mixed in every batch
        # from all earlier epochs as well.
        epoch_losses = []
        for data in train_data.get_tier_data():
            if config["use_cuda"]:
                content, label = data.text.cuda(), data.label.cuda()
            else:
                content, label = data.text, data.label
            outputs = model(content)
            loss = criterion(outputs, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_loss = loss.item()
            epoch_losses.append(batch_loss)
            # Lazy %-args (no eager string build) and a plain float instead of
            # formatting the tensor itself.
            logger.info("batch loss %f", batch_loss)
        logger.info("epoch average loss: %f", np.mean(epoch_losses))
        # evaluator.eval(epoch, config)  # older eval path kept for reference
        evaluator.eval_sklearn(epoch, config)
        model_path = os.path.join(config["model_path"], "epoch_%d.pth" % epoch)
        torch.save(model.state_dict(), model_path)  # save model weights only


if __name__ == "__main__":
    # Script entry point: run training with the project-wide Config mapping.
    main(Config)
