1. Implementing handwritten digit recognition with a residual network

Dataset: https://download.csdn.net/download/fanzonghao/10551018

First, resnets_utils.py, which provides the loader for the hand-gesture digit dataset and a helper that generates random mini-batches:

```python
import os
import math
import h5py
import numpy as np
import tensorflow as tf


def load_dataset():
    train_dataset = h5py.File('datasets/train_signs.h5', 'r')
    train_set_x_orig = np.array(train_dataset['train_set_x'][:])  # your train set features
    train_set_y_orig = np.array(train_dataset['train_set_y'][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', 'r')
    test_set_x_orig = np.array(test_dataset['test_set_x'][:])  # your test set features
    test_set_y_orig = np.array(test_dataset['test_set_y'][:])  # your test set labels

    classes = np.array(test_dataset['list_classes'][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """
    Creates a list of random mini-batches from (X, Y).

    Arguments:
    X -- input data, of shape (m, Hi, Wi, Ci)
    Y -- true "label" vector, of shape (m, n_y)
    mini_batch_size -- size of the mini-batches, integer
    seed -- fixed so that the random mini-batches are reproducible

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    m = X.shape[0]  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]

    # Step 2: partition (shuffled_X, shuffled_Y), minus the end case
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size : (k + 1) * mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size : (k + 1) * mini_batch_size, :]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    # Handle the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
        mini_batches.append((mini_batch_X, mini_batch_Y))
    return mini_batches


def convert_to_one_hot(Y, C):
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y


def forward_propagation_for_predict(X, parameters):
    """
    Implements forward propagation for the model:
    LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing the parameters W1, b1, W2, b2, W3, b3
                  (the shapes are given in initialize_parameters)

    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    Z1 = tf.add(tf.matmul(W1, X), b1)   # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                 # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                 # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = np.dot(W3, A2) + b3
    return Z3


def predict(X, parameters):
    W1 = tf.convert_to_tensor(parameters['W1'])
    b1 = tf.convert_to_tensor(parameters['b1'])
    W2 = tf.convert_to_tensor(parameters['W2'])
    b2 = tf.convert_to_tensor(parameters['b2'])
    W3 = tf.convert_to_tensor(parameters['W3'])
    b3 = tf.convert_to_tensor(parameters['b3'])
    params = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2, 'W3': W3, 'b3': b3}

    x = tf.placeholder('float', [12288, 1])
    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)

    sess = tf.Session()
    prediction = sess.run(p, feed_dict={x: X})
    return prediction
```
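As a quick illustration of the two helpers above, here is a minimal sketch (my own, not part of the original files) that runs them on fake data shaped like the SIGNS set:

```python
import numpy as np
from resnets_utils import convert_to_one_hot, random_mini_batches

# Fake data shaped like the SIGNS set: 10 RGB images of 64x64, labels in 0-5.
X = np.random.randn(10, 64, 64, 3)
labels = np.random.randint(0, 6, size=(1, 10))
Y = convert_to_one_hot(labels, 6).T            # (10, 6): one one-hot row per example

for bx, by in random_mini_batches(X, Y, mini_batch_size=4, seed=0):
    print(bx.shape, by.shape)                  # two (4, ...) batches, then the (2, ...) leftover
```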
Code to inspect the dataset:

```python
import cv2
import resnets_utils

train_x, train_y, test_x, test_y, classes = resnets_utils.load_dataset()
print('train set: {}'.format(train_x.shape))
print('train labels: {}'.format(train_y.shape))
print('test set: {}'.format(test_x.shape))
print('test labels: {}'.format(test_y.shape))
print('label of sample 5: {}'.format(train_y[0, 5]))
cv2.imshow('1.jpg', train_x[5, :, :, :] / 255)
cv2.waitKey()
```

The printout shows 1080 training samples of size 64×64×3 and 120 test samples; the "four" hand gesture is labeled with the digit 4.

First, test a single residual learning unit, the identity block:

```python
from keras.layers import Dense, Flatten, Input, Activation, ZeroPadding2D, AveragePooling2D, BatchNormalization, Conv2D, Add, MaxPooling2D
from keras.models import Model
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
import resnets_utils
import keras.backend as K
import numpy as np
from keras.initializers import glorot_uniform
import tensorflow as tf


def identity_block(X, f, filters, stage, block):
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters
    X_shortcut = X
    print('input shape: {}'.format(X.shape))

    # first conv: 1x1, stride 1
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    print('output shape: {}'.format(X.shape))
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # second conv: fxf with 'same' padding
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same',
               name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    print('output shape: {}'.format(X.shape))
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # third conv: 1x1, restores the input depth F3
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
    print('output shape: {}'.format(X.shape))

    # ResNet skip connection: fuse with the untouched input
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    print('final output shape: {}'.format(X.shape))
    return X


def test_identity_block():
    with tf.Session() as sess:
        np.random.seed(1)
        A_prev = tf.placeholder('float', [3, 4, 4, 6])
        X = np.random.randn(3, 4, 4, 6)
        A = identity_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a')
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})


if __name__ == '__main__':
    test_identity_block()
```
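Before running it, here is why the shape should survive: with stride 1, a 1×1 'valid' convolution and an f×f 'same' convolution both leave the spatial size alone. A quick check with the usual output-size formula (this helper is mine, not part of the tutorial code):

```python
import math

def conv_out_valid(n, f, s):
    # spatial output size of a padding='valid' convolution: floor((n - f) / s) + 1
    return math.floor((n - f) / s) + 1

n = 4                            # the (3, 4, 4, 6) test input above
n = conv_out_valid(n, f=1, s=1)  # first 1x1 conv: 4 -> 4
# the middle fxf conv uses padding='same' with stride 1, so n stays 4
n = conv_out_valid(n, f=1, s=1)  # last 1x1 conv: 4 -> 4
print(n)                         # 4; F3 = 6 restores the depth, so Add() lines up
```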
Indeed, the printout shows that after the three convolutions this residual unit's output has exactly the same size and depth as its input. Since the original input is not transformed here, that is the only way the two branches can be fused.

Next is the variant whose output dimensions do change: the original input X is itself passed through a convolution before being fused into the final output:

```python
from keras.layers import Dense, Flatten, Input, Activation, ZeroPadding2D, AveragePooling2D, BatchNormalization, Conv2D, Add, MaxPooling2D
from keras.models import Model
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
import resnets_utils
import keras.backend as K
import numpy as np
from keras.initializers import glorot_uniform
import tensorflow as tf


def convolutional_block(X, f, filters, stage, block, s=2):
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters
    X_shortcut = X
    print('input shape: {}'.format(X.shape))

    # first conv: 1x1 with stride s, shrinks the spatial size
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid',
               name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    print('output shape: {}'.format(X.shape))

    # second conv: fxf with 'same' padding
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same',
               name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)
    print('output shape: {}'.format(X.shape))

    # third conv (note: this demo hard-codes 8 output channels instead of using F3)
    X = Conv2D(filters=8, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
    X = Activation('relu')(X)
    print('output shape: {}'.format(X.shape))

    # ResNet shortcut: project the input so both branches match before fusing
    X_shortcut = Conv2D(filters=8, kernel_size=(1, 1), strides=(s, s), padding='valid',
                        name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
    print('shape of the transformed input: {}'.format(X_shortcut.shape))

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    print('final output shape: {}'.format(X.shape))
    return X


def test_convolutional_block():
    # tf.reset_default_graph()
    with tf.Session() as sess:
        np.random.seed(1)
        A_prev = tf.placeholder('float', [3, 4, 4, 6])
        X = np.random.randn(3, 4, 4, 6)
        A = convolutional_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a', s=2)
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(A, feed_dict={A_prev: X})


if __name__ == '__main__':
    # test_identity_block()
    test_convolutional_block()
```

The printout shows that the projected input has shape (3, 2, 2, 8) and the fused output is also (3, 2, 2, 8), so this kind of residual unit can handle changes in both output size and depth.

In the overall model, BLOCK2 refers to units whose output size and depth change, while BLOCK1 refers to units whose output matches the input.

Now we train on the dataset: convolutional_block changes the output size and depth, while identity_block keeps the output identical to its input. A shape trace of the full network is sketched below, followed by the code.
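Working the same output-size formula through the whole network (my own bookkeeping rather than anything printed by the original post, so treat it as a sanity check), a 64×64×3 input should flow as:

- zero padding (3, 3): 64×64×3 -> 70×70×3
- Stage 1: 7×7 conv, stride 2 -> 32×32×64; 3×3 max pooling, stride 2 -> 15×15×64
- Stage 2 (s=1): 15×15×256
- Stage 3 (s=2): 8×8×512
- Stage 4 (s=2): 4×4×1024
- Stage 5 (s=2): 2×2×2048
- 2×2 average pooling -> 1×1×2048, flattened to 2048 features for the Dense(6) softmax

These match the 1×1×2048 final convolutional output reported further down.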
```python
from keras.layers import Dense, Flatten, Input, Activation, ZeroPadding2D, AveragePooling2D, BatchNormalization, Conv2D, Add, MaxPooling2D
from keras.models import Model
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
import resnets_utils
import keras.backend as K
import numpy as np
from keras.initializers import glorot_uniform
import tensorflow as tf
import time


# Load the data and convert the labels to one-hot.
def convert_data():
    train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes = resnets_utils.load_dataset()
    train_x = train_set_x_orig / 255
    test_x = test_set_x_orig / 255
    train_y = resnets_utils.convert_to_one_hot(train_set_y_orig, 6).T
    test_y = resnets_utils.convert_to_one_hot(test_set_y_orig, 6).T
    return train_x, train_y, test_x, test_y


# Three-conv residual unit; output size and depth are unchanged.
def identity_block(X, f, filters, stage, block):
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters
    X_shortcut = X

    # first conv
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # second conv
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same',
               name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # third conv
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # ResNet skip connection
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X


# Three-conv residual unit; output size and depth change.
def convolutional_block(X, f, filters, stage, block, s=2):
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters
    X_shortcut = X

    # first conv
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(s, s), padding='valid',
               name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # second conv
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same',
               name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # third conv (F3 output channels this time)
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
    X = Activation('relu')(X)

    # ResNet shortcut: project the input to the matching shape
    X_shortcut = Conv2D(filters=F3, kernel_size=(1, 1), strides=(s, s), padding='valid',
                        name=conv_name_base + '1', kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X


# The 50-layer residual network.
def ResNet50(input_shape=(64, 64, 3), classes=6):
    X_input = Input(input_shape)
    print('input shape: {}'.format(X_input.shape))
    X = ZeroPadding2D((3, 3))(X_input)
    print('shape after zero padding: {}'.format(X.shape))

    # Stage 1
    X = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    print('shape after first conv: {}'.format(X.shape))
    X = BatchNormalization(axis=3, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(X)
    print('shape after first pooling: {}'.format(X.shape))

    # Stage 2
    X = convolutional_block(X, f=3, filters=[64, 64, 256], stage=2, block='a', s=1)
    print('shape after first convolutional_block: {}'.format(X.shape))
    X = identity_block(X, f=3, filters=[64, 64, 256], stage=2, block='b')
    X = identity_block(X, f=3, filters=[64, 64, 256], stage=2, block='c')
    print('shape after two identity_blocks: {}'.format(X.shape))

    # Stage 3
    X = convolutional_block(X, f=3, filters=[128, 128, 512], stage=3, block='a', s=2)
    print('shape after second convolutional_block: {}'.format(X.shape))
    X = identity_block(X, f=3, filters=[128, 128, 512], stage=3, block='b')
    X = identity_block(X, f=3, filters=[128, 128, 512], stage=3, block='c')
    X = identity_block(X, f=3, filters=[128, 128, 512], stage=3, block='d')
    print('shape after three identity_blocks: {}'.format(X.shape))

    # Stage 4
    X = convolutional_block(X, f=3, filters=[256, 256, 1024], stage=4, block='a', s=2)
    print('shape after third convolutional_block: {}'.format(X.shape))
    X = identity_block(X, f=3, filters=[256, 256, 1024], stage=4, block='b')
    X = identity_block(X, f=3, filters=[256, 256, 1024], stage=4, block='c')
    X = identity_block(X, f=3, filters=[256, 256, 1024], stage=4, block='d')
    X = identity_block(X, f=3, filters=[256, 256, 1024], stage=4, block='e')
    X = identity_block(X, f=3, filters=[256, 256, 1024], stage=4, block='f')
    print('shape after five identity_blocks: {}'.format(X.shape))

    # Stage 5
    X = convolutional_block(X, f=3, filters=[512, 512, 2048], stage=5, block='a', s=2)
    print('shape after fourth convolutional_block: {}'.format(X.shape))
    X = identity_block(X, f=3, filters=[512, 512, 2048], stage=5, block='b')
    X = identity_block(X, f=3, filters=[512, 512, 2048], stage=5, block='c')
    print('shape after two identity_blocks: {}'.format(X.shape))

    # Pool
    X = AveragePooling2D(pool_size=(2, 2))(X)
    print('shape after final average pooling: {}'.format(X.shape))

    # Output: FLATTEN + FULLYCONNECTED
    X = Flatten()(X)
    X = Dense(units=classes, activation='softmax', name='fc' + str(classes),
              kernel_initializer=glorot_uniform(seed=0))(X)

    # create the model
    model = Model(inputs=X_input, outputs=X, name='ResNet50')
    return model


def test_identity_block():
    with tf.Session() as sess:
        np.random.seed(1)
        A_prev = tf.placeholder('float', [3, 4, 4, 6])
        X = np.random.randn(3, 4, 4, 6)
        A = identity_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a')
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
        # print('out=', out[0][1][1][0])


def test_convolutional_block():
    # tf.reset_default_graph()
    with tf.Session() as sess:
        np.random.seed(1)
        A_prev = tf.placeholder('float', [3, 4, 4, 6])
        X = np.random.randn(3, 4, 4, 6)
        A = convolutional_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a', s=2)
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(A, feed_dict={A_prev: X})
        print('out=', out[0][0][0])


def test_ResNet50():
    # Define the model structure.
    Resnet50_model = ResNet50(input_shape=(64, 64, 3), classes=6)
    # Pick the training settings.
    Resnet50_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    # Fetch the train and test sets.
    train_x, train_y, test_x, test_y = convert_data()
    # Train on the training set.
    start_time = time.time()
    print('start training')
    Resnet50_model.fit(x=train_x, y=train_y, batch_size=32, epochs=2)
    end_time = time.time()
    print('train_time={}'.format(end_time - start_time))
    # Evaluate on the test set.
    preds = Resnet50_model.evaluate(x=test_x, y=test_y, batch_size=32)
    print('loss={}'.format(preds[0]))
    print('Test Accuracy={}'.format(preds[1]))


if __name__ == '__main__':
    # test_identity_block()
    # test_convolutional_block()
    # convert_data()
    test_ResNet50()
```

In the printed shapes the question mark stands for the number of samples; the final convolutional output is 1×1×2048, as expected.

There are 1080 training samples. The first epoch runs at about 175 ms per sample, roughly 189 s in total, and ends with training accuracy 0.27. The second epoch runs at about 165 ms per sample, roughly 178 s, and training accuracy rises to 0.40. The reported total for the two epochs is 376 s, which is more than the sum of the two epoch times, so there must be some additional overhead. Evaluating this two-epoch model on the 120 test samples gives accuracy 0.19 -- very low, so the network simply needs much more training.
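As a quick check of those numbers: 1080 × 0.175 s ≈ 189 s and 1080 × 0.165 s ≈ 178 s, matching the per-epoch times, while 189 + 178 = 367 s; the 376 s total therefore implies roughly 9 s spent outside the two epochs (graph construction, data loading and the like).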
2. Displaying with TensorBoard

First install graphviz, which is used to visualize the network:

```
apt-get install graphviz
pip install graphviz
pip install pydot
```

The visualized network diagram is written to ./model.jpg by plot_model in main.py below.

The clr_callback.py file that cycles the learning rate:

```python
import numpy as np
from keras.callbacks import *
import keras.backend as K


class CyclicLR(Callback):
    """This callback implements a cyclical learning rate policy (CLR).
    The method cycles the learning rate between two boundaries with
    some constant frequency, as detailed in this paper
    (https://arxiv.org/abs/1506.01186).
    The amplitude of the cycle can be scaled on a per-iteration or
    per-cycle basis.
    This class has three built-in policies, as put forth in the paper:
    'triangular':
        A basic triangular cycle with no amplitude scaling.
    'triangular2':
        A basic triangular cycle that scales the initial amplitude by half each cycle.
    'exp_range':
        A cycle that scales the initial amplitude by gamma**(cycle iterations)
        at each cycle iteration.
    For more detail, please see the paper.

    # Example
        clr = CyclicLR(base_lr=0.001, max_lr=0.006,
                       step_size=2000., mode='triangular')
        model.fit(X_train, Y_train, callbacks=[clr])

    The class also supports custom scaling functions:
        clr_fn = lambda x: 0.5 * (1 + np.sin(x * np.pi / 2.))
        clr = CyclicLR(base_lr=0.001, max_lr=0.006,
                       step_size=2000., scale_fn=clr_fn,
                       scale_mode='cycle')
        model.fit(X_train, Y_train, callbacks=[clr])

    # Arguments
        base_lr: initial learning rate, which is the lower boundary in the cycle.
        max_lr: upper boundary in the cycle. Functionally, it defines the cycle
            amplitude (max_lr - base_lr). The lr at any cycle is the sum of base_lr
            and some scaling of the amplitude; therefore max_lr may not actually be
            reached, depending on the scaling function.
        step_size: number of training iterations per half cycle. The authors
            suggest setting step_size to 2-8 x the training iterations per epoch.
        mode: one of {'triangular', 'triangular2', 'exp_range'}. Default
            'triangular'. Values correspond to the policies detailed above.
            If scale_fn is not None, this argument is ignored.
        gamma: constant in the 'exp_range' scaling function:
            gamma**(cycle iterations)
        scale_fn: custom scaling policy defined by a single-argument lambda
            function, where 0 <= scale_fn(x) <= 1 for all x >= 0. The mode
            parameter is ignored.
        scale_mode: {'cycle', 'iterations'}. Defines whether scale_fn is
            evaluated on cycle number or cycle iterations (training iterations
            since the start of the cycle). Default is 'cycle'.
    """

    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular',
                 gamma=1., scale_fn=None, scale_mode='cycle'):
        super(CyclicLR, self).__init__()

        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        if scale_fn is None:
            if self.mode == 'triangular':
                self.scale_fn = lambda x: 1.
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = lambda x: 1 / (2. ** (x - 1))
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = lambda x: gamma ** (x)
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.clr_iterations = 0.
        self.trn_iterations = 0.
        self.history = {}

        self._reset()

    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        """Resets cycle iterations. Optional boundary/step size adjustment."""
        if new_base_lr is not None:
            self.base_lr = new_base_lr
        if new_max_lr is not None:
            self.max_lr = new_max_lr
        if new_step_size is not None:
            self.step_size = new_step_size
        self.clr_iterations = 0.

    def clr(self):
        cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
        x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
        if self.scale_mode == 'cycle':
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(cycle)
        else:
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)

    def on_train_begin(self, logs={}):
        logs = logs or {}
        if self.clr_iterations == 0:
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr())

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        K.set_value(self.model.optimizer.lr, self.clr())
```
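To get a feel for what the triangular policies produce, here is a small trace (my own sketch, not from the original post) that drives clr() by hand instead of training a model; it assumes the clr_callback.py above is importable:

```python
from clr_callback import CyclicLR

# clr() only depends on clr_iterations, so we can step the schedule manually.
schedule = CyclicLR(base_lr=1e-5, max_lr=1e-2, step_size=4., mode='triangular2')
for i in range(17):
    schedule.clr_iterations = i
    print(i, float(schedule.clr()))
# The lr climbs from base_lr to max_lr over 4 iterations, falls back over the
# next 4, then repeats with the peak amplitude halved on each new cycle.
```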
The main.py file:

```python
import keras
from keras.models import Model
import matplotlib.pyplot as plt
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
import resnets_utils
import keras.backend as K
import numpy as np
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
from clr_callback import CyclicLR


# Load the data and convert the labels to one-hot.
def convert_data():
    train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes = resnets_utils.load_dataset()
    train_x = train_set_x_orig / 255
    test_x = test_set_x_orig / 255
    train_y = resnets_utils.convert_to_one_hot(train_set_y_orig, 6).T
    test_y = resnets_utils.convert_to_one_hot(test_set_y_orig, 6).T
    # Only the first 320 training samples are used here.
    return train_x[:320, ...], train_y[:320, ...], test_x, test_y


if __name__ == '__main__':
    train_x, train_y, test_x, test_y = convert_data()
    print(train_x.shape)
    print(train_y.shape)
    print(test_x.shape)
    print(test_y.shape)

    best_score = 0
    classes = 6
    Epoch = 50
    Batch = 32
    input_shape = (64, 64, 3)

    # A deliberately tiny model: one conv layer, global average pooling, softmax.
    X_input = keras.layers.Input(input_shape)
    X = keras.layers.Conv2D(filters=6, kernel_size=(3, 3), strides=(2, 2), name='conv1',
                            kernel_initializer=glorot_uniform(seed=0))(X_input)
    print('shape after first conv: {}'.format(X.shape))
    X = keras.layers.GlobalAveragePooling2D()(X)
    print(X.shape)
    Y = keras.layers.Dense(classes, activation='softmax', name='fc_class')(X)
    model = Model(inputs=X_input, outputs=Y)
    model.summary()
    keras.utils.plot_model(model, to_file='./model.jpg')

    lr = 1e-2
    clr = CyclicLR(base_lr=1e-5, max_lr=lr, step_size=Epoch / Batch * 2, mode='triangular2')
    adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=True)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['acc'])
    # metrics=['mae', 'acc'])

    tb_callback = keras.callbacks.TensorBoard(log_dir='./logs/keras', histogram_freq=1,
                                              write_graph=True, write_images=1, write_grads=True)
    history = model.fit(x=train_x, y=train_y, batch_size=Batch,
                        validation_data=(test_x, test_y), epochs=Epoch,
                        callbacks=[tb_callback, clr])

    acc = history.history['acc']
    loss = history.history['loss']
    val_acc = history.history['val_acc']
    val_loss = history.history['val_loss']
    print('acc=', acc)
    print('loss=', loss)
    print('val_acc=', val_acc)
    print('val_loss=', val_loss)

    score = model.evaluate(x=test_x, y=test_y, batch_size=Batch)
    print('loss=', score[0])
    print('test_acc=', score[1])
    if score[1] > best_score:
        best_score = score[1]
        model.save('./model.h5', overwrite=True)
```
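Once training has produced logs under ./logs/keras, the usual way to view them is to run `tensorboard --logdir ./logs/keras` and open the address it prints (typically http://localhost:6006) in a browser.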