import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.utils.data import dataloader

# Number of output rows produced per forward pass; forward() average-pools a
# variable-length token sequence down to exactly this many rows.
BATCH_SIZE = 16

# Select GPU when available, otherwise fall back to CPU.
# BUG FIX: the original string was "cude" — torch.device() raises a
# RuntimeError for any unrecognized device string, so the script crashed
# unconditionally at import time.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# (was torch.torch.cuda.is_available() — worked only via the accidental
# self-referential torch.torch attribute)
print(torch.cuda.is_available())


class TextSentiment(nn.Module):
    """Bag-of-embeddings text classifier.

    Embeds a flat 1-D sequence of token ids, average-pools the embeddings
    down to a fixed BATCH_SIZE rows, and classifies each row with a single
    linear layer (as in the torchtext text-classification tutorial).
    """

    def __init__(self, vocab_size: int, embed_dim: int, num_class: int):
        """
        Args:
            vocab_size: size of the embedding vocabulary.
            embed_dim: dimensionality of each token embedding.
            num_class: number of output classes.
        """
        super().__init__()
        # NOTE: attribute keeps the original (misspelled) name "embeding" so
        # that previously saved state_dicts still load.
        self.embeding = nn.Embedding(vocab_size, embed_dim, sparse=True)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weight()

    def init_weight(self):
        """Initialize embedding and classifier weights uniformly in [-0.5, 0.5]."""
        initrange = 0.5
        self.embeding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)

    def forward(self, text: torch.Tensor) -> torch.Tensor:
        # text: 1-D LongTensor of token ids for the whole concatenated batch.
        # NOTE(review): assumes text has at least BATCH_SIZE elements so that
        # c >= 1 below; with fewer tokens avg_pool1d would get kernel_size=0.
        embedded = self.embeding(text)        # (m, embed_dim)
        c = embedded.size(0) // BATCH_SIZE    # tokens averaged per output row
        # BUG FIX: keep BATCH_SIZE * c rows. The original kept only `c` rows,
        # which collapses the whole batch into a single pooled row and
        # contradicts the intended (BATCH_SIZE, num_class) output.
        embedded = embedded[:BATCH_SIZE * c]  # (BATCH_SIZE * c, embed_dim)
        embedded = embedded.transpose(1, 0).unsqueeze(0)  # (1, embed_dim, BATCH_SIZE * c)
        # BUG FIX: avg_pool1d requires kernel_size as its second argument; the
        # original passed only stride=c, which raises a TypeError at runtime.
        embedded = F.avg_pool1d(embedded, kernel_size=c)  # (1, embed_dim, BATCH_SIZE)
        pooled = embedded[0].transpose(1, 0)  # (BATCH_SIZE, embed_dim)
        return self.fc(pooled)                # (BATCH_SIZE, num_class)



def genrate_batch(batch):
    """Collate a list of (label, text_tensor) samples into batch tensors.

    Args:
        batch: iterable of (label, text) pairs, where text is a 1-D tensor
            of token ids and label is an integer class id.

    Returns:
        A tuple (text, label): text is the concatenation of every sample's
        token tensor; label is a 1-D tensor of the class ids.
    """
    labels, texts = zip(*batch)
    return torch.cat(texts), torch.tensor(labels)







if __name__ == '__main__':
    # Smoke test of the collate function.
    # BUG FIX: samples must be (label, text) pairs — genrate_batch reads the
    # label from index 0 and the text tensor from index 1. The original had
    # the elements swapped, which made torch.cat() receive a list of plain
    # ints and raise a TypeError.
    v = [(0, torch.tensor([1, 1, 1])),
         (1, torch.tensor([2, 2, 2])),
         (2, torch.tensor([2, 2, 2]))]
    res = genrate_batch(v)
    print(res)