#region Data preprocessing (one-off; kept commented out below for reference)
import pandas as pd
import numpy as np

# data_=pd.read_csv(r"C:\Users\Aiomi\Desktop\drug_text_data\drug_text_data\train_F3WbcTw.csv")
# data_.head(10)
#
# data_1=data_.loc[:,['text']]
# data_.loc[:,['text']]
#
# data_1.to_csv(r"C:\Users\Aiomi\Desktop\drug_text_data\drug_text_data\train.csv")
#endregion


import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

# Training hyper-parameters.
batch_size = 96
learning_rate = 0.1  # NOTE(review): 0.1 is unusually high for RAdam (typical ~1e-3) — confirm intended
num_inputs = 28 * 28  # NOTE(review): 28*28 suggests MNIST-style image input, but this script loads text data — verify
num_outputs = 10  # number of target classes
num_hiddens_1 = 256  # width of first hidden layer
num_hiddens_2 = 64  # width of second hidden layer

# Train a byte-pair-encoding tokenizer on the raw training text, splitting
# on whitespace before the BPE merges are applied.
# Fix: BPE() constructed without unk_token silently drops any character
# that never appeared in the training corpus; registering "[UNK]" (which
# the trainer below already reserves as a special token) maps unseen
# characters to the unknown token instead.
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.train(files=[r"C:\Users\Aiomi\Desktop\drug_text_data\drug_text_data\train.csv"], trainer=trainer)

# Quick sanity check of the learned subword vocabulary.
print(tokenizer.encode("whetherererer the intellectual").tokens)

class MLP(nn.Module):
    """Three-layer perceptron that maps a flat feature vector to class logits.

    Architecture: Linear -> BatchNorm1d -> ReLU (twice), then a final Linear.

    Fix: the original applied a ReLU after the output layer as well, clamping
    every logit to >= 0. nn.CrossEntropyLoss (used downstream) expects raw,
    unbounded logits; a non-negative output distorts the softmax and zeroes
    gradients for negative pre-activations, so the final ReLU is removed.

    Args:
        num_inputs: size of each input vector.
        num_hiddens_1: width of the first hidden layer.
        num_hiddens_2: width of the second hidden layer.
        num_outputs: number of classes (logit dimension).
    """

    def __init__(self, num_inputs, num_hiddens_1, num_hiddens_2, num_outputs):
        super(MLP, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(num_inputs, num_hiddens_1),
            nn.BatchNorm1d(num_hiddens_1),
            nn.ReLU(),
            nn.Linear(num_hiddens_1, num_hiddens_2),
            nn.BatchNorm1d(num_hiddens_2),
            nn.ReLU(),
            nn.Linear(num_hiddens_2, num_outputs),  # raw logits — no activation
        )

    def forward(self, x):
        """Return (batch, num_outputs) logits for input x of shape (batch, num_inputs)."""
        return self.net(x)

# Build the model, loss, and optimizer.
# Fix: the original unconditionally called model.cuda(), which raises
# RuntimeError on machines without a CUDA GPU; select the device
# dynamically and fall back to CPU instead.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = MLP(num_inputs, num_hiddens_1, num_hiddens_2, num_outputs)
model.to(device)

# CrossEntropyLoss consumes the raw logits produced by MLP.
criterion = nn.CrossEntropyLoss()
# NOTE(review): lr=0.1 is unusually large for RAdam — confirm intended.
optimizer = optim.RAdam(model.parameters(), lr=learning_rate)