Reproducing TextCNN – a PyTorch implementation
For an explanation of TextCNN, you can refer to this article:
Convolutional Neural Networks for Sentence Classification - 知乎 (zhihu.com)
What follows is mainly a detailed walkthrough of the code; the complete code is linked at the end of the article.
The dataset is a movie-review dataset with roughly 5,000 positive and 5,000 negative reviews.
The basic PyTorch training process is:
load the training data – build the model – train the model – evaluate the model.
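As a quick orientation before the real code, here is a minimal, self-contained sketch of that cycle using random tensors and a plain linear layer. None of this is the project's actual code; it only illustrates the four stages.

```python
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

x = torch.randn(100, 16)                      # 100 fake samples with 16 features each
y = torch.randint(0, 2, (100,))               # fake binary labels
loader = DataLoader(TensorDataset(x, y), batch_size=10, shuffle=True)  # load the data

model = nn.Linear(16, 2)                      # build the model
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
loss_fn = nn.CrossEntropyLoss()

model.train()                                 # train the model
for data, label in loader:
    optimizer.zero_grad()
    loss = loss_fn(model(data), label)
    loss.backward()
    optimizer.step()

model.eval()                                  # evaluate the model
with torch.no_grad():
    accuracy = (model(x).argmax(dim=1) == y).float().mean()
    print(accuracy)
```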
First the dataset has to be loaded. To load it we subclass the Dataset class; the code is as follows:
```python
# Imports needed by this class (reconstructed from how the code uses them).
import re
import gensim
import numpy as np
import torch
from collections import Counter, OrderedDict
from tkinter import _flatten          # used to flatten the token list
from torch.utils.data import Dataset
from torchtext.vocab import vocab


class Data_loader(Dataset):
    def __init__(self, file_pos, file_neg, model_path, word2_vec=False):
        self.file_pos = file_pos
        self.file_neg = file_neg
        if word2_vec:
            self.x_train, self.y_train = self.get_word2vec(model_path)
        else:
            self.x_train, self.y_train, self.dictionary = self.pre_process()

    def __getitem__(self, idx):
        data = self.x_train[idx]
        label = self.y_train[idx]
        data = torch.tensor(data)
        label = torch.tensor(label)
        return data, label

    def __len__(self):
        return len(self.x_train)

    def clean_sentences(self, string):
        string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
        string = re.sub(r"\'s", " \'s", string)
        string = re.sub(r"\'ve", " \'ve", string)
        string = re.sub(r"n\'t", " n\'t", string)
        string = re.sub(r"\'re", " \'re", string)
        string = re.sub(r"\'d", " \'d", string)
        string = re.sub(r"\'ll", " \'ll", string)
        string = re.sub(r",", " , ", string)
        string = re.sub(r"!", " ! ", string)
        string = re.sub(r"\(", " \( ", string)
        string = re.sub(r"\)", " \) ", string)
        string = re.sub(r"\?", " \? ", string)
        string = re.sub(r"\s{2,}", " ", string)
        return string.strip().lower()

    def load_data_and_labels(self):
        positive_examples = list(open(self.file_pos, "r", encoding="utf-8").readlines())
        positive_examples = [s.strip() for s in positive_examples]  # strip \t\n from every line
        negative_examples = list(open(self.file_neg, "r", encoding="utf-8").readlines())
        negative_examples = [s.strip() for s in negative_examples]  # strip \t\n from every line
        x_text = positive_examples + negative_examples
        x_text = [self.clean_sentences(_) for _ in x_text]
        positive_labels = [[1, 0] for _ in positive_examples]  # positive samples
        negative_labels = [[0, 1] for _ in negative_examples]  # negative samples
        y = np.concatenate([positive_labels, negative_labels], 0)
        return x_text, y  # x_text holds the review texts, y the labels

    def pre_process(self):
        """Load and shuffle the data; return the shuffled data, labels and the generated
        vocabulary. (The 80%/20% train/test split itself is done later with random_split.)"""
        x_data, y_label = self.load_data_and_labels()
        max_document_length = max(len(x.split(" ")) for x in x_data)
        voc = []
        word_split = []
        [voc.extend(x.split()) for x in x_data]        # collect all tokens for the vocabulary
        [word_split.append(x.split()) for x in x_data]
        if len(voc) != 0:
            ordere_dict = OrderedDict(sorted(Counter(_flatten(voc)).items(),
                                             key=lambda x: x[1], reverse=True))
            # map every document to a sequence of vocabulary indices
            dictionary = vocab(ordere_dict)
        x_data = []
        for words in word_split:
            x = list(dictionary.lookup_indices(words))
            temp_pos = max_document_length - len(x)
            if temp_pos != 0:                          # pad every sentence to the same length
                for i in range(1, temp_pos + 1):
                    x.extend([0])
            x_data.append(x)
        x_data = np.array(x_data)
        np.random.seed(10)
        # shuffle the samples via a permutation of the indices
        shuffle_indices = np.random.permutation(np.arange(len(y_label)))
        x_shuffled = x_data[shuffle_indices]
        y_shuffled = y_label[shuffle_indices]
        return x_shuffled, y_shuffled, dictionary

    def get_word2vec(self, model_path):
        model = gensim.models.Word2Vec.load(model_path)
        x_data, y_label = self.load_data_and_labels()
        word_split = []
        [word_split.append(x.split()) for x in x_data]
        sentence_vectors = []
        for sentence in word_split:
            sentence_vector = []
            for word in sentence:
                try:
                    v = model.wv.get_vector(word)
                except Exception:
                    # out-of-vocabulary word -> zero vector
                    v = np.zeros(shape=(model.vector_size,), dtype=np.float32)
                sentence_vector.append(v)
            sentence_vectors.append(sentence_vector)
        max_document_length = max(len(x) for x in sentence_vectors)
        for vector in sentence_vectors:                # pad every sentence with zero vectors
            for i in range(1, max_document_length - len(vector) + 1):
                v = np.zeros(shape=(model.vector_size,), dtype=np.float32)
                vector.append(v)
        vector_data = np.asarray(sentence_vectors, dtype=np.float32)
        np.random.seed(10)
        # shuffle the samples via a permutation of the indices
        shuffle_indices = np.random.permutation(np.arange(len(y_label)))
        x_shuffled = vector_data[shuffle_indices]
        y_shuffled = y_label[shuffle_indices]
        return x_shuffled, y_shuffled
```

In the code above, `__init__`, `__getitem__` and `__len__` are the methods that must be implemented when subclassing `Dataset`. `clean_sentences` cleans the text that is read in, and `load_data_and_labels` loads the data and returns the cleaned text together with its labels. `pre_process` encodes the data: since the raw data is English text, it is tokenized and index-encoded, so the returned data consists of numbers, with one row per review.
For example:
I like this movie
After encoding, it is returned as 0 1 2 3, where 0 corresponds to "I", 1 corresponds to "like", and so on.
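To make that concrete, here is a tiny, self-contained illustration of the same torchtext `vocab` / `lookup_indices` mechanism used in `pre_process` (the sentence is just a toy example, not taken from the dataset):

```python
from collections import Counter, OrderedDict
from torchtext.vocab import vocab

tokens = "i like this movie".split()
ordered_dict = OrderedDict(sorted(Counter(tokens).items(), key=lambda x: x[1], reverse=True))
dictionary = vocab(ordered_dict)              # word -> index lookup table
print(dictionary.lookup_indices(tokens))      # e.g. [0, 1, 2, 3]
```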
`get_word2vec` instead maps each word using a pretrained word2vec model. Each word then corresponds to a vector (100-dimensional by default); the dimensionality can be adjusted when you train the word2vec model yourself.
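The per-word lookup with a zero-vector fallback for unknown words boils down to something like the snippet below (a sketch only; `word2vec1.model` is the file name used later in this post):

```python
import numpy as np
import gensim

model = gensim.models.Word2Vec.load("word2vec1.model")
try:
    v = model.wv.get_vector("movie")          # known word -> its trained vector
except KeyError:
    v = np.zeros(shape=(model.vector_size,), dtype=np.float32)  # unknown word -> zero vector
```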
Next comes training and saving the word2vec model. For simplicity, the same dataset is used to train the word2vec model.
The code is as follows:
```python
from gensim.models import Word2Vec


def get_model(p_file, n_file):
    # load_data_and_labels here is a module-level variant of the method shown above
    x_data, y_label = load_data_and_labels(p_file, n_file)
    x_data = [x.split() for x in x_data]
    max_document_length = max(len(x) for x in x_data)
    model = Word2Vec(x_data, vector_size=256)   # train word2vec on the tokenized reviews
    return model
```

Here the dimensionality of each word vector is set to 256.
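The snippet above only trains the model; saving and reloading it (so that the `word2vec_path` used later points at an existing file) presumably looks something like this:

```python
model = get_model(file_pos, file_neg)     # train word2vec on the same review files
model.save("word2vec1.model")             # persist the model to disk
# later it can be reloaded with: gensim.models.Word2Vec.load("word2vec1.model")
```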
Next comes building the TextCNN model:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class GlobalMaxPool1d(nn.Module):
    def __init__(self):
        super(GlobalMaxPool1d, self).__init__()

    def forward(self, x):
        # max-pool over the whole time dimension
        return F.max_pool1d(x, kernel_size=x.shape[2])


class TextCNN(nn.Module):
    def __init__(self, num_classes, num_embeddings=-1, embedding_dim=512, kernel_size=[3, 4, 5, 6],
                 num_channels=[32, 32, 32, 32], embeddings_pretrained=None):
        super(TextCNN, self).__init__()
        self.num_classes = num_classes
        self.num_embeddings = num_embeddings
        if self.num_embeddings > 0:
            self.embedding = nn.Embedding(num_embeddings, embedding_dim)
            if embeddings_pretrained is not None:
                self.embedding = self.embedding.from_pretrained(embeddings_pretrained, freeze=False)
        self.cnn_layers = nn.ModuleList()   # one 1-D convolution branch per kernel size
        for c, k in zip(num_channels, kernel_size):
            cnn = nn.Sequential(
                nn.Conv1d(in_channels=embedding_dim, out_channels=c, kernel_size=k),
                nn.BatchNorm1d(c),
                nn.ReLU(inplace=True),
            )
            self.cnn_layers.append(cnn)
        self.pool = GlobalMaxPool1d()
        self.classify = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(sum(num_channels), self.num_classes),
        )

    def forward(self, x):
        if self.num_embeddings > 0:
            x = self.embedding(x)            # (batch, seq_len) -> (batch, seq_len, embedding_dim)
        input = x.permute(0, 2, 1)           # Conv1d expects (batch, embedding_dim, seq_len)
        y = []
        for layer in self.cnn_layers:
            x = layer(input)
            x = self.pool(x).squeeze(-1)     # global max pooling over time
            y.append(x)
        y = torch.cat(y, dim=1)              # concatenate the features of all branches
        out = self.classify(y)
        return out
```

When building a model you need to subclass `nn.Module` and implement the `__init__` and `forward` methods: `__init__` can be seen as defining the individual layers, while `forward` connects those layers together.
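A quick shape check makes the two usage modes concrete: with `num_embeddings > 0` the model owns an embedding layer and expects token indices, otherwise it expects pre-computed word vectors. A small sketch with dummy tensors (not the real data):

```python
import torch

# Mode 1: the model owns an Embedding layer and takes token indices.
m1 = TextCNN(num_classes=2, num_embeddings=1000, embedding_dim=256)
ids = torch.randint(0, 1000, (8, 50))      # batch of 8 sentences, 50 tokens each
print(m1(ids).shape)                       # torch.Size([8, 2])

# Mode 2: no Embedding layer, the input is already word2vec vectors.
m2 = TextCNN(num_classes=2, embedding_dim=256)
vecs = torch.randn(8, 50, 256)             # batch of 8 sentences, 50 vectors of 256 dims
print(m2(vecs).shape)                      # torch.Size([8, 2])
```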
Next the model is trained; the code is as follows:
```python
from torch.utils.data import DataLoader

batch_size = 832
num_classes = 2
file_pos = "E:\\PostGraduate\\Paper_review\\pytorch_TextCnn/data/rt-polarity.pos"
file_neg = "E:\\PostGraduate\\Paper_review\\pytorch_TextCnn/data/rt-polarity.neg"
word2vec_path = "E:\\PostGraduate\\Paper_review\\pytorch_TextCnn/word2vec1.model"
train_data = Data_loader(file_pos, file_neg, word2vec_path)
train_size = int(len(train_data) * 0.8)
test_size = len(train_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(train_data, [train_size, test_size])
train_iter = DataLoader(train_dataset, batch_size=830, shuffle=True)
test_iter = DataLoader(test_dataset, batch_size=2133, shuffle=True)
model = TextCNN(num_classes, embeddings_pretrained=True)
# model = TextCNN(num_classes, num_embeddings=18764)
# start training
epoch = 100  # number of training epochs
optmizer = torch.optim.Adam(model.parameters(), lr=0.01)
# optmizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.4)
train_losses = []
train_counter = []
test_losses = []
log_interval = 5
test_counter = [i * len(train_iter.dataset) for i in range(epoch + 1)]
device = "cpu"


def train_loop(n_epochs, optimizer, model, train_loader, device, test_iter):
    for epoch in range(1, n_epochs + 1):
        print("Starting training epoch {}".format(epoch))
        model.train()
        correct = 0
        for i, data in enumerate(train_loader):
            optimizer.zero_grad()
            (text_data, label) = data
            text_data = text_data.to(device)
            label = label.to(device)
            label = label.long()
            output = model(text_data)
            loss_func = nn.BCEWithLogitsLoss()
            loss = loss_func(output, label.float())
            loss.backward()
            optimizer.step()
            pred = output.data.max(1, keepdim=True)[1]     # predicted class index
            label = label.data.max(1, keepdim=True)[1]     # one-hot label -> class index
            correct += pred.eq(label.data.view_as(pred)).sum()
            if i % log_interval == 0:
                print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                    epoch, i * len(text_data), len(train_loader.dataset),
                    100. * i / len(train_loader), loss.item()))
                train_losses.append(loss.item())
                train_counter.append((i * 64) + ((epoch - 1) * len(train_loader.dataset)))
                torch.save(model.state_dict(), "./model.pth")          # save model weights
                torch.save(optimizer.state_dict(), "./optimizer.pth")  # save optimizer state
        print("Accuracy: {}/{} ({:.0f}%)\n".format(
            correct, len(train_loader.dataset),
            100. * correct / len(train_loader.dataset)))

        print("Starting evaluation for epoch {}".format(epoch))
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in test_iter:
                data = data.to(device)
                target = target.to(device)
                output = model(data)
                loss_func = nn.BCEWithLogitsLoss()
                loss = loss_func(output, target.float())
                test_loss += loss
                pred = output.data.max(1, keepdim=True)[1]
                label = target.data.max(1, keepdim=True)[1]
                correct += pred.eq(label.data.view_as(pred)).sum()
        test_loss /= len(test_iter.dataset)
        test_losses.append(test_loss)
        print("\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
            test_loss, correct, len(test_iter.dataset),
            100. * correct / len(test_iter.dataset)))


train_loop(epoch, optmizer, model, train_iter, device, test_iter)
```

In the code above, the dataset is first loaded and then split into an 80% training set and a 20% test set, and Adam is defined as the optimizer. During training, the model weights, the optimizer state and the loss values are also saved.
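Once training is done, the saved weights can be reloaded for inference along roughly these lines (a sketch; it reuses the `./model.pth` checkpoint and the `test_iter` defined above):

```python
model = TextCNN(num_classes, embeddings_pretrained=True)
model.load_state_dict(torch.load("./model.pth"))
model.eval()
with torch.no_grad():
    data, target = next(iter(test_iter))
    pred = model(data.to(device)).argmax(dim=1)    # predicted class per review
    truth = target.argmax(dim=1)                   # one-hot label -> class index
    print((pred == truth).float().mean())          # accuracy on this batch
```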
The training run ends up with an accuracy of around 75%.

Link to the complete code: [木南/TextCNN (gitee.com)](https://gitee.com/nanwang-crea/text-cnn)