ando55 committed on
Commit 1581552
1 Parent(s): d38a1be

Update model.py

Files changed (1): model.py +13 -13
model.py CHANGED
@@ -59,8 +59,8 @@ class PointerNetworks(nn.Module):
         self.nnEm = nn.Embedding(self.voca_size,self.word_dim,padding_idx=2000001)
         #self.nnEm = nn.Embedding.from_pretrained(self.voc_embeddings,freeze=self.finedtuning,padding_idx=-1)
         self.initEmbeddings(self.voc_embeddings)
-        if self.use_cuda:
-            self.nnEm = self.nnEm.cuda()
+        #if self.use_cuda:
+        #    self.nnEm = self.nnEm.cuda()
 
 
 
@@ -102,17 +102,17 @@ class PointerNetworks(nn.Module):
             h_0 = Variable(torch.zeros(self.num_encoder_bi*self.num_rnn_layers, batchsize, hsize))
             c_0 = Variable(torch.zeros(self.num_encoder_bi*self.num_rnn_layers, batchsize, hsize))
 
-            if self.use_cuda:
-                h_0 = h_0.cuda()
-                c_0 = c_0.cuda()
+            #if self.use_cuda:
+            #    h_0 = h_0.cuda()
+            #    c_0 = c_0.cuda()
 
             return (h_0, c_0)
         else:
 
             h_0 = Variable(torch.zeros(self.num_encoder_bi*self.num_rnn_layers, batchsize, hsize))
 
-            if self.use_cuda:
-                h_0 = h_0.cuda()
+            #if self.use_cuda:
+            #    h_0 = h_0.cuda()
 
 
             return h_0
@@ -229,8 +229,8 @@ class PointerNetworks(nn.Module):
             #print(curX)
 
             x_index_var = Variable(torch.from_numpy(curX_index.astype(np.int64)))
-            if self.use_cuda:
-                x_index_var = x_index_var.cuda()
+            #if self.use_cuda:
+            #    x_index_var = x_index_var.cuda()
             cur_lookup = curX[x_index_var]
             #print(cur_lookup)
 
@@ -277,8 +277,8 @@ class PointerNetworks(nn.Module):
             # TODO: make it point backward, only consider predict_range in current time step
             # align groundtruth
             cur_groundy_var = Variable(torch.LongTensor([int(cur_groundy) - int(cur_start_index)]))
-            if self.use_cuda:
-                cur_groundy_var = cur_groundy_var.cuda()
+            #if self.use_cuda:
+            #    cur_groundy_var = cur_groundy_var.cuda()
 
             curencoder_hn_back = curencoder_hn[predict_range,:]
 
@@ -395,8 +395,8 @@ class PointerNetworks(nn.Module):
 
 
             cur_groundy_var = Variable(torch.LongTensor([max(0,int(cur_groundy) - loopstart)]))
-            if self.use_cuda:
-                cur_groundy_var = cur_groundy_var.cuda()
+            #if self.use_cuda:
+            #    cur_groundy_var = cur_groundy_var.cuda()
 
             batch_loss = batch_loss + loss_function(cur_logists, cur_groundy_var)
 
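Note: the commit above comments out every hard-coded "if self.use_cuda: ... .cuda()" transfer, so the embedding, hidden states, and index/target tensors stay on whatever device the caller leaves them. For reference, a common device-agnostic pattern in current PyTorch is to resolve a torch.device once, move the module with .to(device), and create new tensors directly on the device of the module's weights. The sketch below is illustrative only and is not part of model.py; the TinyEncoder class and its sizes are made up for the example.

# Minimal sketch (not from this commit): device-agnostic placement instead of
# conditional .cuda() calls. Class and parameter names here are illustrative.
import torch
import torch.nn as nn


class TinyEncoder(nn.Module):
    def __init__(self, voca_size=100, word_dim=16, hidden_size=32):
        super().__init__()
        # No .cuda() here: device placement is decided once by the caller.
        self.embed = nn.Embedding(voca_size, word_dim)
        self.rnn = nn.GRU(word_dim, hidden_size, batch_first=True)

    def init_hidden(self, batch_size):
        # Create the hidden state on the same device (and dtype) as the
        # module's weights, rather than calling .cuda() conditionally after.
        weight = next(self.parameters())
        return weight.new_zeros(1, batch_size, self.rnn.hidden_size)

    def forward(self, token_ids):
        h_0 = self.init_hidden(token_ids.size(0))
        out, h_n = self.rnn(self.embed(token_ids), h_0)
        return out, h_n


if __name__ == "__main__":
    # Resolve the device once; everything below works unchanged on CPU-only machines.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = TinyEncoder().to(device)
    tokens = torch.randint(0, 100, (2, 5), device=device)  # inputs created on the same device
    out, h_n = model(tokens)
    print(out.shape, h_n.shape)  # torch.Size([2, 5, 32]) torch.Size([1, 2, 32])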