import os
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
import numpy as np
import logging
import time
from collections import Counter
from pathlib import Path

# Load the IMDB dataset (sequences of word indices + binary labels).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data()
print(x_train.shape, y_train.shape)

# Build the vocabulary. Keras reserves the low indices, so every raw index
# is shifted by 3 and the special tokens take slots 0-2.
_word2idx = tf.keras.datasets.imdb.get_word_index()
word2idx = {word: index + 3 for word, index in _word2idx.items()}
word2idx.update({'<pad>': 0, '<start>': 1, '<unk>': 2})
idx2word = {index: word for word, index in word2idx.items()}

# Helper: sort paired sequences and labels by sequence length.
def sort_by_len(x, y):
    """Sort paired sequences/labels by sequence length, longest first.

    Args:
        x: iterable of variable-length sequences (e.g. lists of token ids).
        y: iterable of labels aligned with x.

    Returns:
        (x_sorted, y_sorted) as NumPy arrays reordered so the longest
        sequence comes first; ties keep their original relative order.
    """
    # dtype=object keeps ragged sequences intact; a plain np.asarray on
    # ragged input raises ValueError under NumPy >= 1.24.
    x = np.asarray(x, dtype=object)
    y = np.asarray(y)
    idx = sorted(range(len(x)), key=lambda i: len(x[i]), reverse=True)
    return x[idx], y[idx]

# Sort both splits so similar-length reviews end up adjacent on disk.
(x_train, y_train), (x_test, y_test) = (
    sort_by_len(x_train, y_train),
    sort_by_len(x_test, y_test),
)

# Helper: write (label, text) pairs to a tab-separated text file.
def write_file(f_path, xs, ys, vocab=None):
    """Write (label, text) pairs as "<label>\t<space-joined words>" lines.

    Args:
        f_path: destination path; missing parent directories are created.
        xs: iterable of sequences of word ids.
        ys: iterable of labels aligned with xs.
        vocab: id -> word mapping; defaults to the module-level idx2word.

    Ids absent from the mapping are silently skipped.
    """
    if vocab is None:
        vocab = idx2word  # module-level mapping built above
    # The original crashed with FileNotFoundError when the target directory
    # (./data) did not exist; create it up front.
    Path(f_path).parent.mkdir(parents=True, exist_ok=True)
    with open(f_path, 'w', encoding='utf-8') as f:
        for x, y in zip(xs, ys):
            text = ' '.join(vocab[i] for i in x if i in vocab)
            f.write(str(y) + '\t' + text + '\n')

# Persist the sorted splits; create ./data first — write_file would
# otherwise fail with FileNotFoundError on a fresh checkout.
Path('./data').mkdir(parents=True, exist_ok=True)
write_file('./data/train.txt', x_train, y_train)
write_file('./data/test.txt', x_test, y_test)

# Count word frequencies over the training split just written to disk.
counter = Counter()
with open('./data/train.txt', encoding='utf-8') as f:
    for line in f:
        _, text = line.rstrip().split('\t')
        counter.update(text.split(' '))

# Vocabulary: <pad> plus every word seen at least 10 times, most
# frequent first (so line number == word id later on).
words = ['<pad>'] + [w for w, freq in counter.most_common() if freq >= 10]
print('Vocab Size:', len(words))
Path('./vocab').mkdir(exist_ok=True)

# Persist the vocabulary, one word per line.
with open('./vocab/word.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(words) + '\n')

# Reload the vocabulary: line number == word id.
with open('./vocab/word.txt', encoding='utf-8') as f:
    word2idx = {line.rstrip(): i for i, line in enumerate(f)}

# Embedding matrix. Row len(word2idx) is reserved for unknown words;
# vocabulary words without a pre-trained vector stay all-zero.
embedding = np.zeros((len(word2idx) + 1, 50))

# Fill rows from the pre-trained 50-d GloVe vectors.
count = 0
with open('./data/glove.6B.50d.txt', encoding='utf-8') as f:
    for i, line in enumerate(f):
        if i % 100000 == 0:
            print('- At line {}'.format(i))
        parts = line.rstrip().split(' ')
        word = parts[0]
        if word in word2idx:
            count += 1
            embedding[word2idx[word]] = np.asarray(parts[1:], dtype='float32')

print("[%d / %d] words have found pre-trained values" % (count, len(word2idx)))
np.save('./vocab/word.npy', embedding)
print('Saved ./vocab/word.npy')

# Hyper-parameters and paths shared by the data pipeline and the model.
params = {
    'vocab_path': './vocab/word.txt',
    'train_path': './data/train.txt',
    'test_path': './data/test.txt',
    'num_samples': 25000,
    'num_labels': 2,
    'batch_size': 8,
    'max_len': 200,
    'rnn_units': 100,
    'dropout_rate': 0.2,
    'clip_norm': 10.,
    'lr': 3e-4,
    'word2idx': word2idx,  # used by data_generator for token lookup
}

# Data generator: stream (token_ids, label) examples from disk.
def data_generator(f_path, params):
    """Yield (token_ids, label) pairs from a tab-separated label/text file.

    Each line is "<label>\t<space-joined words>". Words are mapped through
    params['word2idx']; out-of-vocabulary words map to len(word2idx) (the
    extra embedding row). Sequences are truncated / zero-padded to
    params['max_len'].
    """
    vocab = params['word2idx']
    unk_id = len(vocab)
    max_len = params['max_len']
    with open(f_path, encoding='utf-8') as f:
        print('Reading', f_path)
        for line in f:
            label, text = line.rstrip().split('\t')
            ids = [vocab.get(w, unk_id) for w in text.split(' ')]
            # Truncate, then pad with 0 (<pad>) up to max_len.
            ids = ids[:max_len] + [0] * (max_len - len(ids))
            yield ids, int(label)

# Build the tf.data input pipeline around the generator above.
def dataset(is_training, params):
    """Return a batched tf.data.Dataset over the train or test file.

    Training mode additionally shuffles with a buffer covering the whole
    split (params['num_samples']).
    """
    _shapes = ([params['max_len']], ())
    _types = (tf.int32, tf.int32)
    path = params['train_path'] if is_training else params['test_path']

    ds = tf.data.Dataset.from_generator(
        lambda: data_generator(path, params),
        output_shapes=_shapes,
        output_types=_types,
    )
    if is_training:
        ds = ds.shuffle(params['num_samples'])
    ds = ds.batch(params['batch_size'])
    ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
    return ds

# Model definition.
class Model(tf.keras.Model):
    """Bi-LSTM sentiment classifier over frozen pre-trained embeddings."""

    def __init__(self, params):
        super(Model, self).__init__()
        # Frozen GloVe matrix produced by the preprocessing step above.
        self.embedding = tf.Variable(np.load('./vocab/word.npy'),
                                     dtype=tf.float32,
                                     name='pretrained_embedding',
                                     trainable=False)
        self.drop1 = tf.keras.layers.Dropout(params['dropout_rate'])
        self.rnn1 = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(params['rnn_units'], return_sequences=False))
        self.drop_fc = tf.keras.layers.Dropout(params['dropout_rate'])
        self.fc = tf.keras.layers.Dense(2 * params['rnn_units'], tf.nn.elu)
        self.out_linear = tf.keras.layers.Dense(2)

    def call(self, inputs, training=False):
        """Return unnormalized 2-class logits for batched token-id inputs."""
        ids = inputs if inputs.dtype == tf.int32 else tf.cast(inputs, tf.int32)
        hidden = tf.nn.embedding_lookup(self.embedding, ids)
        hidden = self.drop1(hidden, training=training)
        hidden = self.rnn1(hidden)
        hidden = self.drop_fc(hidden, training=training)
        hidden = self.fc(hidden)
        return self.out_linear(hidden)

# Build the model and create its variables for (batch, time) int inputs.
model = Model(params)
model.build(input_shape=(None, None))

# Exponentially decayed learning rate, assigned to Adam before each step.
decay_lr = tf.optimizers.schedules.ExponentialDecay(params['lr'], 1000, 0.95)
optim = tf.optimizers.Adam(params['lr'])
global_step = 0

# Early-stopping state.
# NOTE(review): history_acc is never appended to — candidate for removal.
history_acc = []
best_acc = 0.0

t0 = time.time()
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)

# Make sure the checkpoint directory exists.
Path('./models').mkdir(exist_ok=True)

# Train until test accuracy stops improving (simple early stopping).
while True:
    # One full pass over the training set.
    # (Removed leftover debug prints of every batch of texts/labels.)
    for texts, labels in dataset(is_training=True, params=params):
        with tf.GradientTape() as tape:
            logits = model(texts, training=True)
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
            loss = tf.reduce_mean(loss)

        # Apply the decayed LR, then a globally clipped gradient update.
        optim.learning_rate.assign(decay_lr(global_step))
        grads = tape.gradient(loss, model.trainable_variables)
        grads, _ = tf.clip_by_global_norm(grads, params['clip_norm'])
        optim.apply_gradients(zip(grads, model.trainable_variables))

        if global_step % 50 == 0:
            # "Spent" measures the time of the last 50-step window.
            logger.info("Step {} | Loss: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format(
                global_step, loss.numpy().item(), time.time() - t0, optim.learning_rate.numpy().item()))
            t0 = time.time()
        global_step += 1

    # Evaluate accuracy on the test split.
    m = tf.keras.metrics.Accuracy()

    for texts, labels in dataset(is_training=False, params=params):
        logits = model(texts, training=False)
        y_pred = tf.argmax(logits, axis=-1)
        m.update_state(y_true=labels, y_pred=y_pred)

    acc = m.result().numpy()
    logger.info("Evaluation: Testing Accuracy: {:.3f}".format(acc))

    if acc > best_acc:
        best_acc = acc
        # Keep only the best model, in TensorFlow SavedModel format.
        model.save('./models/best_model', save_format="tf")
        logger.info("Model saved at step {} with accuracy {:.3f}".format(global_step, best_acc))
    else:
        # Accuracy dropped relative to the best epoch: stop training.
        logger.info("Testing Accuracy decreased, stopping training.")
        break

logger.info("Training completed. Best Accuracy: {:.3f}".format(best_acc))