import pandas as pd
import torch
from torch import nn, optim

# Default location of the training CSV; override by passing `path` to load_data.
DATA_PATH = r"C:\Users\Aiomi\Desktop\drug_text_data\drug_text_data\train_F3WbcTw.csv"


def load_data(path=DATA_PATH):
    """Load the drug-text training data and keep columns 1-3 (0-indexed).

    The first column and anything past the fourth are dropped, matching
    the original ``iloc[:, 1:4]`` slice.

    Parameters
    ----------
    path : str or file-like
        Any CSV source accepted by ``pandas.read_csv``.

    Returns
    -------
    pandas.DataFrame
        The three selected columns.
    """
    df = pd.read_csv(path)
    # Column 0 is presumably a row ID that is being discarded — TODO confirm.
    return df.iloc[:, 1:4]


if __name__ == "__main__":
    # Guarded so importing this module does not trigger file I/O / printing.
    data = load_data()
    print(data)

# print(data.head())

# from sklearn.feature_extraction.text import TfidfVectorizer
# from  sklearn.naive_bayes import MultinomialNB
# from  sklearn.pipeline import Pipeline
#
# model_0 = Pipeline([
#     ("tf-idf",TfidfVectorizer()),
#     ("clf",MultinomialNB())
# ])
# model_0.fit(x_train,y_train)

# from sklearn.model_selection import train_test_split
# train_data, test_data = train_test_split(data, test_size=0.2)
# print(train_data)
#
# train_features = train_data.drop(['label_column'], axis=1).to_numpy()
# train_labels = train_data['label_column'].to_numpy()
#
# test_features = test_data.drop(['label_column'], axis=1).to_numpy()
# test_labels = test_data['label_column'].to_numpy()
#
# batch_size = 96
# learning_rate = 0.1
# num_inputs = 28 * 28
# num_outputs = 10
# num_hiddens_1 = 256
# num_hiddens_2 = 64
#
# class MLP(nn.Module):
#     def __init__(self, num_inputs, num_hiddens_1, num_hiddens_2, num_outputs):
#         super(MLP, self).__init__()
#         self.net = nn.Sequential(
#             nn.Linear(num_inputs, num_hiddens_1),
#             nn.BatchNorm1d(num_hiddens_1),
#             nn.ReLU(),
#             nn.Linear(num_hiddens_1, num_hiddens_2),
#             nn.BatchNorm1d(num_hiddens_2),
#             nn.ReLU(),
#             nn.Linear(num_hiddens_2, num_outputs),
#             nn.ReLU()
#         )
#
#     def forward(self, x):
#         x = self.net(x)
#         return x
#
# model = MLP(num_inputs, num_hiddens_1, num_hiddens_2, num_outputs)
# model.cuda()
#
# criterion = nn.CrossEntropyLoss()
# optimizer = optim.RAdam(model.parameters(), lr=learning_rate)
#
#
# num_epochs=1000
# for epoch in range(num_epochs):
#     # Convert the data to tensors
#     inputs = torch.Tensor(train_features)
#     labels = torch.LongTensor(train_labels)
#
#     # Forward pass
#     outputs = model(inputs)
#     loss = criterion(outputs, labels)
#
#     # Backpropagation and optimization
#     optimizer.zero_grad()
#     loss.backward()
#     optimizer.step()
#
#     # Print the loss
#     if (epoch + 1) % 100 == 0:
#         print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
