
Recurrent Neural Network (RNN): Tang Poem Generation

Problem description: use a recurrent neural network to implement Tang poem generation.

Dataset: Tang poems.

Requirements: complete the program. The blanks are mainly the three near the top and the snippet that generates the poems; for the PyTorch version you also need to fill in two places in the accompanying rnn.py. The generated poems should begin with the characters "日、红、山、夜、湖、海、月".

The main script:

import numpy as np
import collections
import torch
from torch.autograd import Variable
import torch.optim as optim

import rnn as rnn_lstm

start_token = 'G'
end_token = 'E'
batch_size = 64


def process_poems1(file_name):
    """
    :param file_name:
    :return: poems_vector has two dimensions: the first is the poem, the second is the word index,
    e.g. [[1,2,3,4,5,6,7,8,9,10],[9,6,3,8,5,2,7,4,1]]
    """
    poems = []
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            try:
                title, content = line.strip().split(':')
                # content = content.replace(' ', '').replace('，', '').replace('。', '')
                content = content.replace(' ', '')
                if '_' in content or '(' in content or '（' in content or '《' in content or '[' in content or \
                        start_token in content or end_token in content:
                    continue
                if len(content) < 5 or len(content) > 80:
                    continue
                content = start_token + content + end_token
                poems.append(content)
            except ValueError as e:
                print('error')
                pass
    # sort the poems by length
    poems = sorted(poems, key=lambda line: len(line))
    # print(poems)
    # count how often each character occurs
    all_words = []
    for poem in poems:
        all_words += [word for word in poem]
    counter = collections.Counter(all_words)  # characters and their frequencies
    count_pairs = sorted(counter.items(), key=lambda x: -x[1])  # sort by frequency
    words, _ = zip(*count_pairs)
    words = words[:len(words)] + (' ',)
    word_int_map = dict(zip(words, range(len(words))))
    poems_vector = [list(map(word_int_map.get, poem)) for poem in poems]
    return poems_vector, word_int_map, words


def process_poems2(file_name):
    """
    :param file_name:
    :return: poems_vector has two dimensions: the first is the poem, the second is the word index,
    e.g. [[1,2,3,4,5,6,7,8,9,10],[9,6,3,8,5,2,7,4,1]]
    """
    poems = []
    with open(file_name, 'r', encoding='utf-8') as f:
        # content = ''
        for line in f.readlines():
            try:
                line = line.strip()
                if line:
                    content = line.replace(' ', '').replace('，', '').replace('。', '')
                    if '_' in content or '(' in content or '（' in content or '《' in content or '[' in content or \
                            start_token in content or end_token in content:
                        continue
                    if len(content) < 5 or len(content) > 80:
                        continue
                    # print(content)
                    content = start_token + content + end_token
                    poems.append(content)
                    # content = ''
            except ValueError as e:
                # print('error')
                pass
    # sort the poems by length
    poems = sorted(poems, key=lambda line: len(line))
    # print(poems)
    # count how often each character occurs
    all_words = []
    for poem in poems:
        all_words += [word for word in poem]
    counter = collections.Counter(all_words)  # characters and their frequencies
    count_pairs = sorted(counter.items(), key=lambda x: -x[1])  # sort by frequency
    words, _ = zip(*count_pairs)
    words = words[:len(words)] + (' ',)
    word_int_map = dict(zip(words, range(len(words))))
    poems_vector = [list(map(word_int_map.get, poem)) for poem in poems]
    return poems_vector, word_int_map, words


def generate_batch(batch_size, poems_vec, word_to_int):
    n_chunk = len(poems_vec) // batch_size
    x_batches = []
    y_batches = []
    for i in range(n_chunk):
        start_index = i * batch_size
        end_index = start_index + batch_size
        x_data = poems_vec[start_index:end_index]
        y_data = []
        for row in x_data:
            y = row[1:]
            y.append(row[-1])
            y_data.append(y)
        # x_data           y_data
        # [6,2,4,6,9]      [2,4,6,9,9]
        # [1,4,2,8,5]      [4,2,8,5,5]
        # print(x_data[0])
        # print(y_data[0])
        # exit(0)
        x_batches.append(x_data)
        y_batches.append(y_data)
    return x_batches, y_batches
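The commented example in generate_batch shows the alignment it produces: each target row is the input row shifted left by one character, with the last index repeated as padding. A minimal sketch with toy indices (hypothetical values, no dataset required):

row = [6, 2, 4, 6, 9]  # one encoded poem, one index per character
y = row[1:]            # next-character targets: [2, 4, 6, 9]
y.append(row[-1])      # repeat the last index as padding: [2, 4, 6, 9, 9]
print(row, y)          # the model learns to predict y[t] from the prefix row[:t+1]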
def run_training():
    # load and preprocess the dataset
    # poems_vector, word_to_int, vocabularies = process_poems2('./tangshi.txt')
    poems_vector, word_to_int, vocabularies = process_poems1('./poems.txt')
    # generate batches
    print('finish loading data')
    BATCH_SIZE = 100

    torch.manual_seed(5)
    word_embedding = rnn_lstm.word_embedding(vocab_length=len(word_to_int) + 1, embedding_dim=100)
    rnn_model = rnn_lstm.RNN_model(batch_sz=BATCH_SIZE, vocab_len=len(word_to_int) + 1,
                                   word_embedding=word_embedding,
                                   embedding_dim=100, lstm_hidden_dim=128)
    rnn_model = rnn_model.cuda()
    # optimizer = optim.Adam(rnn_model.parameters(), lr=0.001)
    optimizer = optim.RMSprop(rnn_model.parameters(), lr=0.01)

    loss_fun = torch.nn.NLLLoss()
    loss_fun = loss_fun.cuda()
    rnn_model.load_state_dict(torch.load('./poem_generator_rnn'))  # if you have already trained your model you can load it by this line.

    for epoch in range(20000):
        batches_inputs, batches_outputs = generate_batch(BATCH_SIZE, poems_vector, word_to_int)
        n_chunk = len(batches_inputs)
        for batch in range(n_chunk):
            batch_x = batches_inputs[batch]
            batch_y = batches_outputs[batch]  # (batch, time_step)
            loss = 0
            for index in range(BATCH_SIZE):
                x = np.array(batch_x[index], dtype=np.int64)
                y = np.array(batch_y[index], dtype=np.int64)
                x = Variable(torch.from_numpy(np.expand_dims(x, axis=1)))
                y = Variable(torch.from_numpy(y))
                x = x.cuda()
                y = y.cuda()
                pre = rnn_model(x)
                loss += loss_fun(pre, y)
                if index == 0:
                    _, pre = torch.max(pre, dim=1)
                    print('prediction', pre.data.tolist())  # the following three lines print the target and the prediction;
                    print('b_y       ', y.data.tolist())    # take a screenshot and paste it into your homework report.
                    print('*' * 30)
            loss = loss / BATCH_SIZE
            print('epoch ', epoch, 'batch number', batch, 'loss is: ', loss.data.tolist())
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(rnn_model.parameters(), 1)
            optimizer.step()

            if batch % 20 == 0:
                torch.save(rnn_model.state_dict(), './poem_generator_rnn')
                print('finish save model')


def to_word(predict, vocabs):  # convert the prediction to a Chinese character
    sample = np.argmax(predict)
    if sample >= len(vocabs):
        sample = len(vocabs) - 1
    return vocabs[sample]


def pretty_print_poem(poem):  # print the poem more neatly
    shige = []
    for w in poem:
        if w == start_token or w == end_token:
            break
        shige.append(w)
    poem_sentences = poem.split('。')
    for s in poem_sentences:
        if s != '' and len(s) > 10:
            print(s + '。')


def gen_poem(begin_word):
    # poems_vector, word_int_map, vocabularies = process_poems2('./tangshi.txt')  # use the other dataset to train the network
    poems_vector, word_int_map, vocabularies = process_poems1('./poems.txt')
    word_embedding = rnn_lstm.word_embedding(vocab_length=len(word_int_map) + 1, embedding_dim=100)
    rnn_model = rnn_lstm.RNN_model(batch_sz=64, vocab_len=len(word_int_map) + 1,
                                   word_embedding=word_embedding,
                                   embedding_dim=100, lstm_hidden_dim=128)
    rnn_model.load_state_dict(torch.load('./poem_generator_rnn'))

    rnn_model = rnn_model.cuda()
    rnn_model.eval()

    # specify the starting character
    poem = begin_word
    word = begin_word
    while word != end_token:
        input = np.array([word_int_map[w] for w in poem], dtype=np.int64)
        input = Variable(torch.from_numpy(input)).cuda()
        output = rnn_model(input, is_test=True)
        word = to_word(output.data.tolist()[-1], vocabularies)
        poem += word
        # print(word)
        # print(poem)
        if len(poem) > 30:
            break
    return poem


# run_training()  # comment this line out when you are not training; training the network takes a long time.

pretty_print_poem(gen_poem('日'))
pretty_print_poem(gen_poem('红'))
pretty_print_poem(gen_poem('山'))
pretty_print_poem(gen_poem('夜'))
pretty_print_poem(gen_poem('湖'))
pretty_print_poem(gen_poem('湖'))
pretty_print_poem(gen_poem('湖'))
pretty_print_poem(gen_poem('君'))
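Note that to_word always takes the argmax, so generation is fully deterministic: the same begin_word produces the same poem every run. A common variant samples from the output distribution with a temperature; the sketch below is such a replacement (to_word_sampled is a hypothetical name, not part of the assignment), assuming predict holds the LogSoftmax outputs of the model defined in rnn.py below:

import numpy as np

def to_word_sampled(predict, vocabs, temperature=0.8):
    # predict holds log-probabilities (the model ends in LogSoftmax);
    # dividing by the temperature before re-normalizing sharpens (<1)
    # or flattens (>1) the sampling distribution.
    logits = np.array(predict) / temperature
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    sample = np.random.choice(len(probs), p=probs)
    return vocabs[min(sample, len(vocabs) - 1)]  # same bounds guard as to_word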
rnn.py:

import torch.nn as nn
import torch
from torch.autograd import Variable
import torch.nn.functional as F

import numpy as np


def weights_init(m):
    classname = m.__class__.__name__  # obtain the class name
    if classname.find('Linear') != -1:
        weight_shape = list(m.weight.data.size())
        fan_in = weight_shape[1]
        fan_out = weight_shape[0]
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        m.weight.data.uniform_(-w_bound, w_bound)
        m.bias.data.fill_(0)
        print('initial linear weight')


class word_embedding(nn.Module):
    def __init__(self, vocab_length, embedding_dim):
        super(word_embedding, self).__init__()
        w_embeding_random_intial = np.random.uniform(-1, 1, size=(vocab_length, embedding_dim))
        self.word_embedding = nn.Embedding(vocab_length, embedding_dim)
        self.word_embedding.weight.data.copy_(torch.from_numpy(w_embeding_random_intial))

    def forward(self, input_sentence):
        """
        :param input_sentence: a tensor containing several word indices.
        :return: a tensor containing the corresponding word embeddings
        """
        sen_embed = self.word_embedding(input_sentence)
        return sen_embed


class RNN_model(nn.Module):
    def __init__(self, batch_sz, vocab_len, word_embedding, embedding_dim, lstm_hidden_dim):
        super(RNN_model, self).__init__()

        self.word_embedding_lookup = word_embedding
        self.batch_size = batch_sz
        self.vocab_length = vocab_len
        self.word_embedding_dim = embedding_dim
        self.lstm_dim = lstm_hidden_dim
        #########################################
        # here you need to define the self.rnn_lstm; the input size is embedding_dim and the output size is lstm_hidden_dim;
        # the lstm should have two layers, and the input and output tensors are provided as (batch, seq, feature)
        self.rnn_lstm = nn.LSTM(input_size=self.word_embedding_dim, hidden_size=self.lstm_dim,
                                num_layers=2, batch_first=True)
        ##########################################
        self.fc = nn.Linear(lstm_hidden_dim, vocab_len)
        self.apply(weights_init)  # call the weight initialization function.

        self.softmax = nn.LogSoftmax(dim=1)  # the activation function.
        # self.tanh = nn.Tanh()

    def forward(self, sentence, is_test=False):
        batch_input = self.word_embedding_lookup(sentence).view(1, -1, self.word_embedding_dim)
        # print(batch_input.size())  # print the size of the input

        ################################################
        # here you need to feed batch_input into the self.rnn_lstm defined above;
        # the hidden output should be named output; the initial hidden state and cell state are set to zero.
        h0 = torch.zeros(2, 1, self.lstm_dim)
        c0 = torch.zeros(2, 1, self.lstm_dim)
        h0 = h0.cuda()
        c0 = c0.cuda()
        output, _ = self.rnn_lstm(batch_input, (h0, c0))
        ################################################

        out = output.contiguous().view(-1, self.lstm_dim)

        out = F.relu(self.fc(out))

        out = self.softmax(out)

        if is_test:
            prediction = out[-1, :].view(1, -1)
            output = prediction
        else:
            output = out
        # print(out)
        return output

This trains, but the loss never really comes down and gradient explosion still shows up; I am leaving it here for now and will debug it later.
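On that gradient explosion: two things worth checking first (educated guesses, not verified fixes) are the fairly aggressive RMSprop learning rate of 0.01 and the F.relu applied to the fc output before LogSoftmax, which zeroes every negative logit. To confirm whether gradients really explode, clip_grad_norm_ already returns the total norm measured before clipping; a self-contained sketch:

import torch
import torch.nn as nn

# Minimal sketch: torch.nn.utils.clip_grad_norm_ returns the total gradient
# norm measured *before* clipping, so logging its return value inside the
# training loop is an easy way to detect explosions. nn.Linear stands in
# for rnn_model here.
model = nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).sum() * 1e6  # exaggerated loss forces a huge gradient
loss.backward()
pre_clip = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
post_clip = torch.norm(torch.stack([p.grad.norm() for p in model.parameters()]))
print('pre-clip norm:', float(pre_clip))    # very large
print('post-clip norm:', float(post_clip))  # at most 1.0 after clipping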