# -*- coding: utf-8 -*-
# @Time : 2021-11-17 18:52
# @Author : lwb
# @Site : 
# @File : CNNmain.py
import sys
sys.path.append('../')
from Models.CNN import CNN
import torch.nn as nn
import torch
from tqdm.auto import  tqdm
from torch.optim import Adam
from torch.utils.data import Dataset,DataLoader
from utils.CNNprocess import collate_fn
from Data.dataset import BowDataset
from utils.vocab import Vocab
# Load the dataset
def load_sentence_polarity():
    """Load the NLTK sentence_polarity corpus as a binary classification task.

    Sentences are converted to id sequences via a Vocab built over the whole
    corpus. The first 4000 sentences of each polarity form the training set
    (pos -> label 0, neg -> label 1); the remainder form the test set.

    Returns:
        (train_data, test_data, vocab) where each data item is
        (list_of_token_ids, label).
    """
    from nltk.corpus import sentence_polarity

    vocab = Vocab.build(sentence_polarity.sents())
    print('test---vocab:', len(vocab))

    def encode(sentences, label):
        # Map each tokenized sentence to (id sequence, label).
        return [(vocab.convert_tokens_to_ids(s), label) for s in sentences]

    pos = sentence_polarity.sents(categories='pos')
    neg = sentence_polarity.sents(categories='neg')

    train_data = encode(pos[:4000], 0) + encode(neg[:4000], 1)
    # Remaining sentences serve as the test set.
    test_data = encode(pos[4000:], 0) + encode(neg[4000:], 1)
    return train_data, test_data, vocab
# Hyper-parameter settings
embedding_dim = 128  # word-embedding size
hidden_dim = 256     # NOTE(review): not passed to the CNN below — appears unused
num_class = 2        # binary sentiment: pos / neg
batch_size = 32
num_epoch = 5
filter_size = 3      # convolution kernel width
num_filter = 100     # number of convolution kernels

# Load data and build batched loaders
train_data, test_data, vocab = load_sentence_polarity()
train_dataset = BowDataset(train_data)
test_dataset = BowDataset(test_data)
train_data_loader = DataLoader(train_dataset, batch_size=batch_size,
                               collate_fn=collate_fn,
                               shuffle=True)
# FIX: evaluation data should not be shuffled — order is irrelevant to the
# accuracy metric, and a deterministic pass aids reproducibility/debugging.
test_dataset_loader = DataLoader(test_dataset, batch_size=batch_size,
                                 collate_fn=collate_fn,
                                 shuffle=False)

# Model setup: place the model on GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = CNN(len(vocab), embedding_dim, filter_size, num_filter, num_class).to(device)

# Loss and optimizer for training. NLLLoss assumes the model outputs
# log-probabilities (i.e. a log_softmax layer at the end).
nll_loss = nn.NLLLoss()
optimizer = Adam(model.parameters(), lr=0.001)

# Training loop
model.train()
for epoch in range(num_epoch):
    epoch_loss = 0.0
    for batch in tqdm(train_data_loader, desc=f"Training Epoch {epoch}"):
        inputs, targets = [t.to(device) for t in batch]
        # Standard step: clear gradients, forward, loss, backward, update.
        optimizer.zero_grad()
        log_probs = model(inputs)
        loss = nll_loss(log_probs, targets)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    print(f'loss:{epoch_loss:.2f}')

# Evaluation
model.eval()  # switch to inference mode (disables dropout/batch-norm updates, if any)
acc = 0
for batch in tqdm(test_dataset_loader, desc='Testing'):
    inputs, targets = [x.to(device) for x in batch]
    with torch.no_grad():
        output = model(inputs)
        acc += (output.argmax(dim=1) == targets).sum().item()
# BUG FIX: accuracy must be correct predictions / number of EXAMPLES.
# The original divided by len(test_dataset_loader), which is the number of
# BATCHES, inflating the reported accuracy by roughly batch_size.
print(f'Acc:{acc / len(test_dataset):.2f}')
