from torch.optim import Adam
from data_process import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt

class LinearModel(nn.Module):
    """Bag-of-embeddings text classifier.

    Token ids are embedded, mean-pooled over the sequence dimension to form a
    fixed-size sentence vector, then passed through a two-layer MLP producing
    class logits (raw scores; pair with ``F.cross_entropy`` for training).

    Args:
        num_embeddings: vocabulary size. Defaults to ``encoder_number_embedding``
            from ``data_process`` (resolved at call time, so importing this
            module alone stays safe).
        embedding_dim: embedding width. Defaults to ``encoder_embedding_dim``
            from ``data_process``.
        hidden_dim: width of the hidden linear layer (default 256, as before).
        num_classes: number of output logits (default 2, as before).
    """

    def __init__(self, num_embeddings=None, embedding_dim=None,
                 hidden_dim=256, num_classes=2):
        super().__init__()
        # Resolve project defaults lazily so explicit sizes never require
        # the data_process globals to exist.
        if num_embeddings is None:
            num_embeddings = encoder_number_embedding
        if embedding_dim is None:
            embedding_dim = encoder_embedding_dim
        # padding_idx=0: index 0 embeds to a frozen zero vector (pad token).
        self.embedding = nn.Embedding(num_embeddings, embedding_dim, padding_idx=0)
        self.fc1 = nn.Linear(embedding_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        """Map a (batch, seq_len) LongTensor of token ids to (batch, num_classes) logits."""
        embedded = self.embedding(x)
        # Mean over the sequence dimension = order-insensitive sentence vector.
        pooled = embedded.mean(dim=1)
        y = F.relu(self.fc1(pooled))
        return self.fc2(y)


# ---- Training --------------------------------------------------------------
linear_model = LinearModel()
optimizer = Adam(linear_model.parameters(), lr=lr)

all_loss = []        # summed batch loss per epoch (plotted later)
all_precision = []   # training accuracy per epoch (plotted later)

linear_model.train()  # explicit train mode (no-op now; matters if dropout/BN are added)
for epoch in range(epochs):
    epoch_loss = 0.0
    train_num = 0  # number of correctly classified training samples this epoch
    for x, y, _ in trainloader:
        optimizer.zero_grad()
        y_pred = linear_model(x)
        # Squeeze the label tensor once and reuse it for both loss and accuracy.
        targets = torch.squeeze(y)
        loss = F.cross_entropy(y_pred, targets)
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
        # Accuracy bookkeeping detached from the autograd graph.
        predicted = torch.argmax(y_pred.detach(), dim=-1)
        train_num += (predicted == targets).sum().item()

    print(f"({epoch+1}/{epochs})训练准确率：{train_num/len(trainloader.dataset)}  ,训练损失值：{epoch_loss}")
    all_loss.append(epoch_loss)
    all_precision.append(train_num / len(trainloader.dataset))

# NOTE(review): this pickles the entire Module object; saving
# linear_model.state_dict() is the more portable convention, but the current
# form is kept so existing torch.load(...) callers keep working.
torch.save(linear_model, "linear_model.pt")

print("-------------------测试------------------------")
# Evaluation: switch to eval mode and disable autograd during inference.
linear_model.eval()
test_nums = 0
with torch.no_grad():
    for x, y, _ in testloader:
        labels = torch.squeeze(y)
        guesses = linear_model(x).argmax(dim=-1)
        test_nums += int((guesses == labels).sum())
precision = test_nums / len(testloader.dataset)
print("test_precision:", precision)

# Plot the per-epoch loss and accuracy curves, each in its own figure.
epoch_axis = range(1, epochs + 1)
for curve in (all_loss, all_precision):
    plt.figure(figsize=(10, 6))
    plt.plot(epoch_axis, curve)
plt.show()