import argparse
from datetime import datetime
import random
import sys
import time
from importlib import import_module

import jieba
import numpy as np
import torch
from tensorboardX import SummaryWriter
import logging
from utils import *
import os

from train_eval import *

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def _str2bool(value) -> bool:
    """Parse a boolean command-line value.

    Needed because ``type=bool`` treats ANY non-empty string as True
    (``bool("False") is True``), so ``--word False`` silently enabled
    word mode. Accepts the usual spellings; raises for anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ("true", "t", "yes", "y", "1"):
        return True
    if lowered in ("false", "f", "no", "n", "0"):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean, got {value!r}")


parser = argparse.ArgumentParser(description="中文文本分类")
parser.add_argument('--model', type=str, default="TextGRU", help="选择一个模型")
parser.add_argument("--embedding", default="embedding_Tencent", type=str, help="random或者选择一个npz")
# BUG FIX: was type=bool, which parsed "--word False" as True.
parser.add_argument("--word", default=False, type=_str2bool, help="True:词语 False:字")
parser.add_argument("--random_seed", default=9, type=int, help="随机种子")
args = parser.parse_args()


def set_fixed_seed(seed: int) -> None:
    """Seed every RNG the training pipeline touches, for reproducible runs.

    Covers Python's builtin ``random``, NumPy, and PyTorch on the CPU and
    on every visible GPU, and pins cuDNN to deterministic algorithms.

    Args:
        seed: the seed applied to all random number generators.
    """
    random.seed(seed)                 # Python builtin RNG
    np.random.seed(seed)              # NumPy RNG
    torch.manual_seed(seed)           # PyTorch CPU RNG
    torch.cuda.manual_seed(seed)      # current GPU (no-op without CUDA)
    torch.cuda.manual_seed_all(seed)  # all GPUs
    # Both flags are required for cuDNN determinism: deterministic=True picks
    # deterministic kernels, and benchmark=False stops cuDNN from auto-tuning
    # to a potentially non-deterministic algorithm per input shape.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Improvements over the versions found online:
# 1. Support GRU in addition to LSTM
# 2. Support word-level embedding in addition to char-level embedding
def _report_parameters(model) -> None:
    """Print the name and shape of every learnable tensor in the model."""
    for param_name, param in model.named_parameters():
        print(f"Name: {param_name}, Shape: {param.shape}")


def _load_and_train(module, config, use_word) -> None:
    """Shared pipeline: build datasets and iterators, build the model, train.

    Args:
        module: the dynamically imported model module (exposes Model).
        config: that module's instantiated Config.
        use_word: tokenization flag (True = word-level, False = char-level).
    """
    start_loading_data_time = time.time()
    print("Loading Data...")
    vocab, train_data, dev_data, test_data = build_dataset(config, use_word)

    train_loader = get_iterator(train_data, config, shuffle=True)
    dev_loader = get_iterator(dev_data, config)
    test_loader = get_iterator(test_data, config)

    # BUG FIX (word branch): this message was formatted with the return value
    # of get_time_dif(), which is not a float and cannot take the {:.2f}
    # format spec; use the plain seconds difference as the char branch did.
    print("Loading Data结束，用时{:.2f}秒".format(time.time() - start_loading_data_time))

    print("==" * 20)

    model = module.Model(config).to(config.device)
    writer = SummaryWriter(logdir=config.log_path)
    _report_parameters(model)

    print("==" * 20)
    train(config, model, train_loader, dev_loader, test_loader, writer)


if __name__ == '__main__':
    dataset = 'data'  # name of the data directory
    model_name = args.model
    set_fixed_seed(args.random_seed)

    if not args.word and args.embedding == "random":
        # char-level with randomly initialized embedding: not implemented yet
        logging.info("我还没写呢！")
        sys.exit(-11)

# ===== char tokenization + LSTM/GRU + pretrained embedding =====
    elif not args.word:
        embedding = ''.join([args.embedding, ".npz"])
        x = import_module("models." + model_name)
        config = x.Config(os.getcwd(), embedding)

        _load_and_train(x, config, args.word)

# ===== word (jieba) tokenization + LSTM/GRU + self-built embedding =====
    elif args.word:

        build_embedding = False
        embedding = ''.join([args.embedding, ".npz"])
        if embedding in ["embedding_SougouNews.npz", "embedding_Tencent.npz"]:
            # these npz files are char-level embeddings: warn and ask the user
            print("不是哥们，这是分字的embedding，你是分词哇！")
            while True:
                user_input = input("你看看是不是选错了，还要运行吗？[y/n]:").strip().lower()
                if user_input in ["y", "yes"]:
                    build_embedding = True
                    break
                elif user_input in ["n", "no"]:
                    sys.exit(-12)
                elif user_input == "help":
                    print("输入y接下来我自己构建嵌入，不想运行就输入n")
                else:
                    print("输错了？你输入的是啥呀！")

        if build_embedding:
            # BUG FIX: the jieba embedding filename must be used whether the
            # vocab is freshly built or loaded from cache; it was previously
            # only assigned on the build path, so a cached vocab silently
            # kept the char-level embedding filename.
            embedding = "embedding_jieba.npz"
            if os.path.exists("./data/jieba_vocab.pkl"):
                with open("./data/jieba_vocab.pkl", "rb") as f:
                    vocab = pkl.load(f)
            else:
                print("==" * 20)
                start_vocab_time = time.time()
                vocab = build_vocab('./data', tokenizer=jieba.cut, max_size=4760, min_freq=3)
                with open("./data/jieba_vocab.pkl", "wb") as f:
                    pkl.dump(vocab, f)
                print("build_vocab用时{}s".format(get_time_dif(start_vocab_time)))
                print("已保存")
                print("==" * 20)

        # Training the word vectors themselves is out of scope here
        # (cf. fastText for that).

        x = import_module("models." + model_name)
        config = x.Config(os.getcwd(), embedding)

        # point the config at the word-level vocab and a timestamped checkpoint
        config.vocab_path = os.getcwd() + "/data/jieba_vocab.pkl"
        config.save_path = os.getcwd() + "/saved_dict/" + datetime.now().strftime("%Y%m%d_%H_%M_%S_word") + model_name + ".ckpt"

        _load_and_train(x, config, args.word)





