Upload 7 files
- models/DPCNN.py +89 -0
- models/FastText.py +69 -0
- models/TextCNN.py +67 -0
- models/TextRCNN.py +64 -0
- models/TextRNN.py +75 -0
- models/TextRNN_Att.py +73 -0
- models/Transformer.py +178 -0
models/DPCNN.py
ADDED
@@ -0,0 +1,89 @@
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'DPCNN'
        self.train_path = dataset + '/data/train.txt'  # training set
        self.dev_path = dataset + '/data/dev.txt'  # validation set
        self.test_path = dataset + '/data/test.txt'  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None  # pre-trained word embeddings
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 1000  # stop training early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 20  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # sequence length (pad short sentences, truncate long ones)
        self.learning_rate = 1e-3  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # character embedding dimension
        self.num_filters = 250  # number of convolution kernels (channels)


'''Deep Pyramid Convolutional Neural Networks for Text Categorization'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.conv_region = nn.Conv2d(1, config.num_filters, (3, config.embed), stride=1)
        self.conv = nn.Conv2d(config.num_filters, config.num_filters, (3, 1), stride=1)
        self.max_pool = nn.MaxPool2d(kernel_size=(3, 1), stride=2)
        self.padding1 = nn.ZeroPad2d((0, 0, 1, 1))  # pad top and bottom
        self.padding2 = nn.ZeroPad2d((0, 0, 0, 1))  # pad bottom
        self.relu = nn.ReLU()
        self.fc = nn.Linear(config.num_filters, config.num_classes)

    def forward(self, x):
        x = x[0]
        x = self.embedding(x)  # [batch_size, seq_len, embed]
        x = x.unsqueeze(1)  # [batch_size, 1, seq_len, embed]
        x = self.conv_region(x)  # [batch_size, 250, seq_len-3+1, 1]

        x = self.padding1(x)  # [batch_size, 250, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)  # [batch_size, 250, seq_len-3+1, 1]
        x = self.padding1(x)  # [batch_size, 250, seq_len, 1]
        x = self.relu(x)
        x = self.conv(x)  # [batch_size, 250, seq_len-3+1, 1]
        while x.size()[2] > 2:
            x = self._block(x)
        x = x.squeeze()  # [batch_size, num_filters(250)]
        x = self.fc(x)
        return x

    def _block(self, x):
        x = self.padding2(x)
        px = self.max_pool(x)

        x = self.padding1(px)
        x = F.relu(x)
        x = self.conv(x)

        x = self.padding1(x)
        x = F.relu(x)
        x = self.conv(x)

        # shortcut (residual) connection
        x = x + px
        return x
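The forward pass reads only the first element of its input tuple, a [batch_size, pad_size] tensor of token ids. A minimal, hypothetical smoke test (not part of this upload; a SimpleNamespace stands in for Config so no dataset files are needed, and all values are illustrative):

import torch
from types import SimpleNamespace

# Hypothetical stand-in for Config: only the attributes Model actually touches.
cfg = SimpleNamespace(embedding_pretrained=None,  # falls back to a randomly initialised nn.Embedding
                      n_vocab=5000, embed=300, num_filters=250, num_classes=10)
model = Model(cfg)
ids = torch.randint(0, cfg.n_vocab - 1, (4, 32))  # [batch_size, pad_size]; avoids the padding index
logits = model((ids, None))  # forward unpacks x[0]; the pyramid roughly halves seq_len per block
print(logits.shape)  # torch.Size([4, 10])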
models/FastText.py
ADDED
@@ -0,0 +1,69 @@
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'FastText'
        self.train_path = dataset + '/data/train.txt'  # training set
        self.dev_path = dataset + '/data/dev.txt'  # validation set
        self.test_path = dataset + '/data/test.txt'  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None  # pre-trained word embeddings
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 1000  # stop training early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 20  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # sequence length (pad short sentences, truncate long ones)
        self.learning_rate = 1e-3  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # character embedding dimension
        self.hidden_size = 256  # hidden layer size
        self.n_gram_vocab = 250499  # n-gram vocabulary size


'''Bag of Tricks for Efficient Text Classification'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.embedding_ngram2 = nn.Embedding(config.n_gram_vocab, config.embed)
        self.embedding_ngram3 = nn.Embedding(config.n_gram_vocab, config.embed)
        self.dropout = nn.Dropout(config.dropout)
        self.fc1 = nn.Linear(config.embed * 3, config.hidden_size)
        # self.dropout2 = nn.Dropout(config.dropout)
        self.fc2 = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):

        out_word = self.embedding(x[0])
        out_bigram = self.embedding_ngram2(x[2])
        out_trigram = self.embedding_ngram3(x[3])
        out = torch.cat((out_word, out_bigram, out_trigram), -1)

        out = out.mean(dim=1)
        out = self.dropout(out)
        out = self.fc1(out)
        out = F.relu(out)
        out = self.fc2(out)
        return out
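Here the input tuple carries four elements: word ids (x[0]), sequence lengths (x[1], unused by this model), and hashed bigram/trigram ids (x[2], x[3]). A hypothetical smoke test with random ids, again substituting a SimpleNamespace for Config:

import torch
from types import SimpleNamespace

cfg = SimpleNamespace(embedding_pretrained=None, n_vocab=5000, embed=300, dropout=0.5,
                      hidden_size=256, n_gram_vocab=250499, num_classes=10)
model = Model(cfg)
ids = torch.randint(0, cfg.n_vocab - 1, (4, 32))
bigram = torch.randint(0, cfg.n_gram_vocab, (4, 32))   # hashed 2-gram ids
trigram = torch.randint(0, cfg.n_gram_vocab, (4, 32))  # hashed 3-gram ids
logits = model((ids, None, bigram, trigram))  # cat -> [4, 32, 900], mean over time -> [4, 900]
print(logits.shape)  # torch.Size([4, 10])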
models/TextCNN.py
ADDED
@@ -0,0 +1,67 @@
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextCNN'
        self.train_path = dataset + '/data/train.txt'  # training set
        self.dev_path = dataset + '/data/dev.txt'  # validation set
        self.test_path = dataset + '/data/test.txt'  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None  # pre-trained word embeddings
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 1000  # stop training early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 20  # number of epochs
        self.batch_size = 1  # mini-batch size
        self.pad_size = 32  # sequence length (pad short sentences, truncate long ones)
        self.learning_rate = 1e-3  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # character embedding dimension
        self.filter_sizes = (2, 3, 4)  # convolution kernel sizes
        self.num_filters = 256  # number of convolution kernels (channels)


'''Convolutional Neural Networks for Sentence Classification'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config.num_filters, (k, config.embed)) for k in config.filter_sizes])
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x)).squeeze(3)
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x

    def forward(self, x):
        out = self.embedding(x[0])
        out = out.unsqueeze(1)
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)
        return out
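Each kernel size k convolves over the full embedding width and is max-pooled over time, so the classifier sees num_filters * len(filter_sizes) = 256 * 3 = 768 features. A hypothetical shape check with a stand-in config:

import torch
from types import SimpleNamespace

cfg = SimpleNamespace(embedding_pretrained=None, n_vocab=5000, embed=300,
                      filter_sizes=(2, 3, 4), num_filters=256, dropout=0.5, num_classes=10)
model = Model(cfg)
ids = torch.randint(0, cfg.n_vocab - 1, (4, 32))
logits = model((ids, None))  # pooled features [4, 768] -> fc -> [4, 10]
print(logits.shape)  # torch.Size([4, 10])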
models/TextRCNN.py
ADDED
@@ -0,0 +1,64 @@
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRCNN'
        self.train_path = dataset + '/data/train.txt'  # training set
        self.dev_path = dataset + '/data/dev.txt'  # validation set
        self.test_path = dataset + '/data/test.txt'  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None  # pre-trained word embeddings
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device

        self.dropout = 1.0  # dropout rate
        self.require_improvement = 1000  # stop training early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 10  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # sequence length (pad short sentences, truncate long ones)
        self.learning_rate = 1e-3  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension; matches the pre-trained vectors when used
        self.hidden_size = 256  # LSTM hidden size
        self.num_layers = 1  # number of LSTM layers


'''Recurrent Convolutional Neural Networks for Text Classification'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.maxpool = nn.MaxPool1d(config.pad_size)
        self.fc = nn.Linear(config.hidden_size * 2 + config.embed, config.num_classes)

    def forward(self, x):
        x, _ = x
        embed = self.embedding(x)  # [batch_size, seq_len, embed] = [128, 32, 300]
        out, _ = self.lstm(embed)
        out = torch.cat((embed, out), 2)
        out = F.relu(out)
        out = out.permute(0, 2, 1)
        out = self.maxpool(out).squeeze()
        out = self.fc(out)
        return out
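Because self.maxpool is nn.MaxPool1d(config.pad_size), the pooling window spans exactly pad_size steps, so every batch must be padded or truncated to pad_size tokens. Note also that config.dropout = 1.0 appears to be inert here: PyTorch applies LSTM dropout only between layers and num_layers is 1, so it only triggers a warning. A hypothetical smoke test (dropout set to 0.0 to keep the run silent; SimpleNamespace stands in for Config):

import torch
from types import SimpleNamespace

cfg = SimpleNamespace(embedding_pretrained=None, n_vocab=5000, embed=300, hidden_size=256,
                      num_layers=1, dropout=0.0, pad_size=32, num_classes=10)
model = Model(cfg)
ids = torch.randint(0, cfg.n_vocab - 1, (4, cfg.pad_size))  # must be exactly pad_size long
logits = model((ids, None))  # cat(embed, lstm): [4, 32, 812] -> max over time -> [4, 812]
print(logits.shape)  # torch.Size([4, 10])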
models/TextRNN.py
ADDED
@@ -0,0 +1,75 @@
# coding: UTF-8
import torch
import torch.nn as nn
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN'
        self.train_path = dataset + '/data/train.txt'  # training set
        self.dev_path = dataset + '/data/dev.txt'  # validation set
        self.test_path = dataset + '/data/test.txt'  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None  # pre-trained word embeddings
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 1000  # stop training early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 10  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # sequence length (pad short sentences, truncate long ones)
        self.learning_rate = 1e-3  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension; matches the pre-trained vectors when used
        self.hidden_size = 128  # LSTM hidden size
        self.num_layers = 2  # number of LSTM layers


'''Recurrent Neural Network for Text Classification with Multi-Task Learning'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)

    def forward(self, x):
        x, _ = x
        out = self.embedding(x)  # [batch_size, seq_len, embedding] = [128, 32, 300]
        out, _ = self.lstm(out)
        out = self.fc(out[:, -1, :])  # hidden state at the last time step
        return out

    '''Variable-length RNN: accuracy is about the same, even a bit lower...'''
    # def forward(self, x):
    #     x, seq_len = x
    #     out = self.embedding(x)
    #     _, idx_sort = torch.sort(seq_len, dim=0, descending=True)  # indices sorted by length, descending
    #     _, idx_unsort = torch.sort(idx_sort)  # indices that restore the original order
    #     out = torch.index_select(out, 0, idx_sort)
    #     seq_len = list(seq_len[idx_sort])
    #     out = nn.utils.rnn.pack_padded_sequence(out, seq_len, batch_first=True)
    #     # [batch_size, seq_len, num_directions * hidden_size]
    #     out, (hn, _) = self.lstm(out)
    #     out = torch.cat((hn[2], hn[3]), -1)  # last layer's forward and backward hidden states
    #     # out, _ = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
    #     out = out.index_select(0, idx_unsort)
    #     out = self.fc(out)
    #     return out
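The commented-out variant handles true sequence lengths with pack_padded_sequence plus manual sort/unsort bookkeeping. On PyTorch >= 1.1 the same idea can be written without the bookkeeping by passing enforce_sorted=False; a hypothetical sketch of such a method (assuming the second element of x carries real lengths):

import torch
import torch.nn.utils.rnn as rnn_utils

def forward_packed(self, x):
    # Hypothetical alternative forward, not part of this upload.
    ids, seq_len = x
    out = self.embedding(ids)
    packed = rnn_utils.pack_padded_sequence(out, seq_len.cpu(), batch_first=True,
                                            enforce_sorted=False)  # sorts/unsorts internally
    _, (hn, _) = self.lstm(packed)
    # hn: [num_layers * 2, batch, hidden]; hn[-2]/hn[-1] are the last layer's two directions
    return self.fc(torch.cat((hn[-2], hn[-1]), -1))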
models/TextRNN_Att.py
ADDED
@@ -0,0 +1,73 @@
# coding: UTF-8
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN_Att'
        self.train_path = dataset + '/data/train.txt'  # training set
        self.dev_path = dataset + '/data/dev.txt'  # validation set
        self.test_path = dataset + '/data/test.txt'  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None  # pre-trained word embeddings
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 1000  # stop training early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 10  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # sequence length (pad short sentences, truncate long ones)
        self.learning_rate = 1e-3  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension; matches the pre-trained vectors when used
        self.hidden_size = 128  # LSTM hidden size
        self.num_layers = 2  # number of LSTM layers
        self.hidden_size2 = 64


'''Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.tanh1 = nn.Tanh()
        # self.u = nn.Parameter(torch.Tensor(config.hidden_size * 2, config.hidden_size * 2))
        self.w = nn.Parameter(torch.zeros(config.hidden_size * 2))
        self.tanh2 = nn.Tanh()
        self.fc1 = nn.Linear(config.hidden_size * 2, config.hidden_size2)
        self.fc = nn.Linear(config.hidden_size2, config.num_classes)

    def forward(self, x):
        x, _ = x
        emb = self.embedding(x)  # [batch_size, seq_len, embedding] = [128, 32, 300]
        H, _ = self.lstm(emb)  # [batch_size, seq_len, hidden_size * num_direction] = [128, 32, 256]

        M = self.tanh1(H)  # [128, 32, 256]
        # M = torch.tanh(torch.matmul(H, self.u))
        alpha = F.softmax(torch.matmul(M, self.w), dim=1).unsqueeze(-1)  # [128, 32, 1]
        out = H * alpha  # [128, 32, 256]
        out = torch.sum(out, 1)  # [128, 256]
        out = F.relu(out)
        out = self.fc1(out)  # [128, 64]
        out = self.fc(out)  # [128, num_classes]
        return out
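The learned vector self.w scores every time step, and alpha is the softmax of those scores over the sequence. To inspect which positions a trained model attends to, the weights can be recomputed outside forward; a hypothetical helper mirroring the M/alpha lines above:

import torch
import torch.nn.functional as F

def attention_weights(model, ids):
    """Hypothetical helper: per-position attention weights, shape [batch, seq_len]."""
    with torch.no_grad():
        H, _ = model.lstm(model.embedding(ids))  # [batch, seq_len, 2 * hidden_size]
        M = torch.tanh(H)
        alpha = F.softmax(torch.matmul(M, model.w), dim=1)
    return alpha

# usage sketch: alpha = attention_weights(model, ids); alpha.sum(dim=1) == 1 per example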
models/Transformer.py
ADDED
@@ -0,0 +1,178 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy


class Config(object):

    """Configuration parameters"""
    def __init__(self, dataset, embedding):
        self.model_name = 'Transformer'
        self.train_path = dataset + '/data/train.txt'  # training set
        self.dev_path = dataset + '/data/dev.txt'  # validation set
        self.test_path = dataset + '/data/test.txt'  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]  # list of class names
        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # trained model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None  # pre-trained word embeddings
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 2000  # stop training early if no improvement after 2000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 20  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # sequence length (pad short sentences, truncate long ones)
        self.learning_rate = 5e-4  # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # character embedding dimension
        self.dim_model = 300
        self.hidden = 1024
        self.last_hidden = 512
        self.num_head = 5
        self.num_encoder = 2


'''Attention Is All You Need'''


class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)

        self.postion_embedding = Positional_Encoding(config.embed, config.pad_size, config.dropout, config.device)
        self.encoder = Encoder(config.dim_model, config.num_head, config.hidden, config.dropout)
        self.encoders = nn.ModuleList([
            copy.deepcopy(self.encoder)
            # Encoder(config.dim_model, config.num_head, config.hidden, config.dropout)
            for _ in range(config.num_encoder)])

        self.fc1 = nn.Linear(config.pad_size * config.dim_model, config.num_classes)
        # self.fc2 = nn.Linear(config.last_hidden, config.num_classes)
        # self.fc1 = nn.Linear(config.dim_model, config.num_classes)

    def forward(self, x):
        out = self.embedding(x[0])
        out = self.postion_embedding(out)
        for encoder in self.encoders:
            out = encoder(out)
        out = out.view(out.size(0), -1)
        # out = torch.mean(out, 1)
        out = self.fc1(out)
        return out


class Encoder(nn.Module):
    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden, dropout)

    def forward(self, x):
        out = self.attention(x)
        out = self.feed_forward(out)
        return out


class Positional_Encoding(nn.Module):
    def __init__(self, embed, pad_size, dropout, device):
        super(Positional_Encoding, self).__init__()
        self.device = device
        self.pe = torch.tensor([[pos / (10000.0 ** (i // 2 * 2.0 / embed)) for i in range(embed)] for pos in range(pad_size)])
        self.pe[:, 0::2] = np.sin(self.pe[:, 0::2])
        self.pe[:, 1::2] = np.cos(self.pe[:, 1::2])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = x + nn.Parameter(self.pe, requires_grad=False).to(self.device)
        out = self.dropout(out)
        return out


class Scaled_Dot_Product_Attention(nn.Module):
    '''Scaled Dot-Product Attention'''
    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        '''
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor, 1 / sqrt(dim_K) as in the paper
        Return:
            the context tensor after self-attention
        '''
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        # if mask:  # TODO change this
        #     attention = attention.masked_fill_(mask == 0, -1e9)
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):
    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        # if mask:  # TODO
        #     mask = mask.repeat(self.num_head, 1, 1)  # TODO change this
        scale = K.size(-1) ** -0.5  # scaling factor
        context = self.attention(Q, K, V, scale)

        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x  # residual connection
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):
    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x  # residual connection
        out = self.layer_norm(out)
        return out
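A hypothetical end-to-end smoke test for the encoder stack (a SimpleNamespace stands in for Config; only the attributes Model touches are supplied, and seq_len must equal pad_size for the positional encoding to broadcast):

import torch
from types import SimpleNamespace

cfg = SimpleNamespace(embedding_pretrained=None, n_vocab=5000, embed=300,
                      pad_size=32, dropout=0.1, device=torch.device('cpu'),
                      dim_model=300, hidden=1024, num_head=5, num_encoder=2,
                      num_classes=10)
model = Model(cfg)
ids = torch.randint(0, cfg.n_vocab - 1, (4, cfg.pad_size))
logits = model((ids, None))  # [4, 32, 300] -> flatten [4, 9600] -> fc1 -> [4, 10]
print(logits.shape)  # torch.Size([4, 10])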