import torch

import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
import pandas as pd

from sklearn.model_selection import train_test_split

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

# ------------
# Make CUDA report errors at the failing kernel launch instead of
# asynchronously later. BUG FIX: the original assigned a plain Python
# variable named CUDA_LAUNCH_BLOCKING, which has no effect — it must be
# an environment variable, set before any CUDA context is created.
import os

os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
# Silence the HuggingFace tokenizers fork-parallelism warning when
# DataLoader worker processes are spawned later.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Load the competition train and test splits.
df = pd.read_csv('./feedback-prize-english-language-learning/train.csv')
test_df = pd.read_csv('./feedback-prize-english-language-learning/test.csv')

# This script models only the 'cohesion' target: drop the other five
# scored dimensions in a single call instead of five separate drops.
df.drop(columns=['syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions'],
        inplace=True)

# Build a whitespace-pre-tokenized BPE tokenizer.
tokenizer = Tokenizer(BPE(unk_token='[UNK]'))
tokenizer.pre_tokenizer = Whitespace()
tokenizer.enable_truncation(max_length=512)             # clip long essays to 512 tokens
tokenizer.enable_padding(direction='left', length=512)  # left-pad so content ends the sequence

# Train the vocabulary on the training essays.
# (Removed the original's dead `df['full_text']` bare-expression line — a notebook leftover.)
trainer = BpeTrainer(vocab_size=20000, min_frequency=2, special_tokens=['[PAD]', '[UNK]'])
tokenizer.train_from_iterator(df['full_text'], trainer)


# Dataset of tokenized essays (and, for train/val, their 'cohesion' labels).
class EssayDataset(torch.utils.data.Dataset):
    """Tokenize essays on the fly for a DataLoader.

    Args:
        df: DataFrame with a 'full_text' column and, unless ``test=True``,
            the target column(s) listed in ``self.classes``.
        tokenizer: object exposing ``encode(text).ids`` (a trained
            ``tokenizers.Tokenizer``).
        test: when True, ``__getitem__`` returns only the token-id tensor.
    """

    def __init__(self, df, tokenizer, test=False):
        self.test = test
        self.classes = ['cohesion']
        self.texts = list(df['full_text'].values)
        if not self.test:
            # (n_samples, n_targets) array of scores.
            self.labels = df.loc[:, self.classes].values
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        ids = self.tokenizer.encode(self.texts[idx]).ids
        text = torch.tensor(ids, dtype=torch.long)
        if self.test:
            return text
        # The original's `/ 1.` only coerced the row to float; the
        # float32 tensor constructor does that explicitly here.
        label = torch.tensor(self.labels[idx, :], dtype=torch.float32)
        return text, label


# Sanity-check the tokenization pipeline on the full training frame.
sample_ds = EssayDataset(df, tokenizer)

# Compare the raw whitespace word count of the first essay against its
# BPE-encoded length, and eyeball the ids / round-trip decode.
first_essay = df['full_text'][0]
print(len(first_essay.split()))
encoded = tokenizer.encode(first_essay).ids
print(len(encoded))
print(encoded)
tokenizer.decode(encoded)  # round-trip check (result intentionally unused)


# LSTM-based regressor implemented as a LightningModule.
class RNNModel(pl.LightningModule):
    """Embed token ids, run a stacked LSTM, and regress the score(s)
    from the final hidden state of the top LSTM layer.

    Expected ``config`` keys: ``vocab``, ``embed_dim``, ``hidden_dim``,
    ``seq_len``, ``n_layers``, ``output_dim``, ``lr``.
    """

    def __init__(self, config):
        super().__init__()  # zero-arg super (Python 3 idiom)
        self.save_hyperparameters()
        self.config = config
        self.vocab_size = self.config['vocab']
        self.embed_dim = self.config['embed_dim']
        self.hidden_dim = self.config['hidden_dim']
        self.seq_len = self.config['seq_len']
        self.n_layers = self.config['n_layers']
        self.output_dim = self.config['output_dim']

        # NOTE(review): configure_optimizers reads self.lr (not hparams);
        # anyone tuning the LR after construction must update this attribute.
        self.lr = config['lr']

        self.embedding = nn.Embedding(self.vocab_size, self.embed_dim)

        self.lstm = nn.LSTM(input_size=self.embed_dim,
                            hidden_size=self.hidden_dim,
                            num_layers=self.n_layers,
                            batch_first=True
                            )

        self.drop = nn.Dropout(0.3)
        self.linear = nn.Linear(self.hidden_dim, self.output_dim)

        # Accumulates detached CPU predictions across test_step calls.
        self.test_preds = []

    def forward(self, x):
        # x: (batch, seq_len) long token ids.
        x = self.embedding(x)        # (batch, seq_len, embed_dim)
        _, (h, _) = self.lstm(x)     # h: (n_layers, batch, hidden_dim)
        out = self.drop(h[-1])       # final hidden state of the top layer
        return self.linear(out)      # (batch, output_dim)

    def loss_fn(self, outputs, targets):
        """Mean column-wise RMSE (MCRMSE), the competition metric."""
        colwise_mse = torch.mean(torch.square(targets - outputs), dim=0)
        return torch.mean(torch.sqrt(colwise_mse), dim=0)

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20, eta_min=1e-6)
        return [optimizer], [scheduler]

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = self.loss_fn(self(x), y)
        self.log('train_loss', loss.item(), on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        loss = self.loss_fn(self(x), y)
        self.log('val_loss', loss.item(), on_epoch=True)

    def test_step(self, batch, batch_idx):
        # The test loader yields only token ids (EssayDataset(test=True)).
        preds = self(batch)  # dropped the original's no-op `* 1.`
        self.test_preds.append(preds.detach().cpu())

    def get_predictions(self):
        """Return all accumulated test predictions as one numpy array."""
        return torch.cat(self.test_preds).numpy()


# 配置模型参数
score_cols = ['cohesion']
config = {
    'vocab': tokenizer.get_vocab_size(),
    'embed_dim': 15,
    'hidden_dim': 32,
    'seq_len': 512,
    'n_layers': 4,
    'output_dim': len(score_cols),
    'lr': 3e-4,
    'epochs': 25,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'batch_size': 8,
    'seed': 1357,
    'model_name': 'lstm-embeddings'
}
print(config)


# 准备训练集和验证集数据加载器
def prepare_datasets(df, test_size=0.2):
    train_df, val_df = train_test_split(df,
                                        test_size=test_size,
                                        shuffle=True,
                                        random_state=config['seed']
                                        )
    train_df = train_df.reset_index(drop=True)
    val_df = val_df.reset_index(drop=True)

    train_ds = EssayDataset(train_df, tokenizer)
    val_ds = EssayDataset(val_df, tokenizer)

    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=config['batch_size'], shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_ds, batch_size=config['batch_size'])

    return train_loader, val_loader


# 创建训练集和验证集数据加载器
train_loader, val_loader = prepare_datasets(df)
len(train_loader), len(val_loader)

# 准备测试集数据加载器
test_ds = EssayDataset(test_df, tokenizer, test=True)
test_loader = torch.utils.data.DataLoader(test_ds, batch_size=1, shuffle=False)

# 打印训练集和验证集的样本形状
_x, _y = next(iter(train_loader))
_x.shape, _y.shape

# Initialize the model.
model = RNNModel(config)

# Trainer with early stopping on validation loss.
trainer = pl.Trainer(
    # BUG FIX: the original hard-coded accelerator='gpu', which crashes on
    # CPU-only machines even though config already detects availability.
    accelerator='gpu' if torch.cuda.is_available() else 'cpu',
    callbacks=[
        EarlyStopping(monitor="val_loss",
                      mode="min",
                      patience=1,
                      )
    ],
    max_epochs=config['epochs']
)

# Learning-rate range test over the train/val loaders.
lr_finder = trainer.tuner.lr_find(model, train_loader, val_loader)

# Raw scan results (useful for inspection in a notebook).
lr_finder.results

# Plot the loss-vs-LR curve with the suggested point marked.
fig = lr_finder.plot(suggest=True)
fig.show()

# Adopt the suggested learning rate.
new_lr = lr_finder.suggestion()
print(new_lr)
# BUG FIX: configure_optimizers reads self.lr, so updating only
# model.hparams.lr silently discarded the tuned value; set both.
model.lr = new_lr
model.hparams.lr = new_lr
model.hparams

# Train the model.
trainer.fit(model, train_loader, val_loader)

# Collect the last logged metrics.
metrics = trainer.logged_metrics
logs = {
    'train_loss': metrics['train_loss_epoch'].item(),
    'val_loss': metrics['val_loss'].item()
}
# Bare `logs` expression in the notebook was a no-op in a script; print it.
print(logs)

# Run inference on the test set (accumulates into model.test_preds).
trainer.test(model, test_loader)

# (n_test, n_targets) numpy array of predictions.
p = model.get_predictions()
print(p.shape)

# Build and save the submission file.
submission = pd.DataFrame({'text_id': test_df['text_id']})
submission[score_cols] = p
print(submission)
submission.to_csv('submission.csv', index=False)
