NN: Add missing train and test scripts.

Kwabena W. Agyeman 2018-10-08 23:34:17 -07:00 committed by iabdalkader
parent 10370fa9ce
commit 67ce39ee57
25 changed files with 293 additions and 0 deletions

View File

@@ -0,0 +1,27 @@
# reduce the learning rate after 120 epochs (60000 iters) by a factor of 10
# then another factor of 10 after 10 more epochs (5000 iters)
# The train/test net protocol buffer definition
net: "models/cifar10/cifar10_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "fixed"
# Display every 200 iterations
display: 200
# The maximum number of iterations
max_iter: 60000
# snapshot intermediate results
snapshot: 10000
snapshot_format: HDF5
snapshot_prefix: "models/cifar10/cifar10"
# solver mode: CPU or GPU
solver_mode: GPU
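For reference, the epoch figures in the comments above work out as follows, assuming the training batch size is also 100 (as in the stock Caffe CIFAR-10 nets; the train/test prototxt itself is not part of this diff). A quick sanity check in POSIX sh:

echo $(( 50000 / 100 ))   # 500 training iterations per epoch at batch size 100
echo $(( 120 * 500 ))     # 60000 iterations = 120 epochs, i.e. this stage's max_iter
echo $(( 100 * 100 ))     # test_iter 100 x test batch size 100 = all 10,000 test images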

View File

@@ -0,0 +1,27 @@
# reduce the learning rate after 120 epochs (60000 iters) by a factor of 10
# then another factor of 10 after 10 more epochs (5000 iters)
# The train/test net protocol buffer definition
net: "models/cifar10/cifar10_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.0001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "fixed"
# Display every 200 iterations
display: 200
# The maximum number of iterations
max_iter: 65000
# snapshot intermediate results
snapshot: 5000
snapshot_format: HDF5
snapshot_prefix: "models/cifar10/cifar10"
# solver mode: CPU or GPU
solver_mode: GPU

View File

@@ -0,0 +1,27 @@
# reduce the learning rate after 120 epochs (60000 iters) by a factor of 10
# then another factor of 10 after 10 more epochs (5000 iters)
# The train/test net protocol buffer definition
net: "models/cifar10/cifar10_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.00001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "fixed"
# Display every 200 iterations
display: 200
# The maximum number of iterations
max_iter: 70000
# snapshot intermediate results
snapshot: 5000
snapshot_format: HDF5
snapshot_prefix: "models/cifar10/cifar10"
# solver mode: CPU or GPU
solver_mode: GPU

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
# score the final snapshot over the TEST-phase data defined in the net prototxt
$TOOLS/caffe test \
    --model=models/cifar10/cifar10_train_test.prototxt \
    --weights=models/cifar10/cifar10_iter_70000.caffemodel.h5 "$@"
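Note that the caffe test command runs the number of forward passes given by its own --iterations flag (it does not read the solver's test_iter), so covering the full 10,000-image test set means passing it explicitly; --iterations is the standard flag of the caffe binary and is assumed here rather than shown in this commit:

./caffe/build/tools/caffe test \
    --model=models/cifar10/cifar10_train_test.prototxt \
    --weights=models/cifar10/cifar10_iter_70000.caffemodel.h5 \
    --iterations=100   # 100 passes x test batch 100 = 10,000 images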

View File

@@ -0,0 +1,17 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
# train with the base solver (base_lr 0.001) for the first 60000 iterations
$TOOLS/caffe train \
    --solver=models/cifar10/cifar10_solver.prototxt "$@"
# reduce the learning rate by a factor of 10 and continue to 65000 iterations
$TOOLS/caffe train \
    --solver=models/cifar10/cifar10_solver_lr1.prototxt \
    --snapshot=models/cifar10/cifar10_iter_60000.solverstate.h5 "$@"
# reduce the learning rate by another factor of 10 and continue to 70000 iterations
$TOOLS/caffe train \
    --solver=models/cifar10/cifar10_solver_lr2.prototxt \
    --snapshot=models/cifar10/cifar10_iter_65000.solverstate.h5 "$@"
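The three stages chain through the HDF5 solver states, and any extra arguments are forwarded to every caffe invocation via "$@". A minimal usage sketch, assuming the script is saved as models/cifar10/train_cifar10.sh (the filename is hypothetical) and that the standard --gpu flag of the caffe binary is available:

sh models/cifar10/train_cifar10.sh --gpu=0   # run the full three-stage schedule on GPU 0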

View File

@@ -0,0 +1,27 @@
# reduce the learning rate after 120 epochs (60000 iters) by a factor of 10
# then another factor of 10 after 10 more epochs (5000 iters)
# The train/test net protocol buffer definition
net: "models/cifar10_fast/cifar10_fast_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "fixed"
# Display every 200 iterations
display: 200
# The maximum number of iterations
max_iter: 60000
# snapshot intermediate results
snapshot: 10000
snapshot_format: HDF5
snapshot_prefix: "models/cifar10_fast/cifar10_fast"
# solver mode: CPU or GPU
solver_mode: GPU

View File

@@ -0,0 +1,27 @@
# reduce the learning rate after 120 epochs (60000 iters) by a factor of 10
# then another factor of 10 after 10 more epochs (5000 iters)
# The train/test net protocol buffer definition
net: "models/cifar10_fast/cifar10_fast_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.0001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "fixed"
# Display every 200 iterations
display: 200
# The maximum number of iterations
max_iter: 65000
# snapshot intermediate results
snapshot: 5000
snapshot_format: HDF5
snapshot_prefix: "models/cifar10_fast/cifar10_fast"
# solver mode: CPU or GPU
solver_mode: GPU

View File

@@ -0,0 +1,27 @@
# reduce the learning rate after 120 epochs (60000 iters) by a factor of 10
# then another factor of 10 after 10 more epochs (5000 iters)
# The train/test net protocol buffer definition
net: "models/cifar10_fast/cifar10_fast_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of CIFAR10, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 1000 training iterations.
test_interval: 1000
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.00001
momentum: 0.9
weight_decay: 0.004
# The learning rate policy
lr_policy: "fixed"
# Display every 200 iterations
display: 200
# The maximum number of iterations
max_iter: 70000
# snapshot intermediate results
snapshot: 5000
snapshot_format: HDF5
snapshot_prefix: "models/cifar10_fast/cifar10_fast"
# solver mode: CPU or GPU
solver_mode: GPU

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
# score the final snapshot over the TEST-phase data defined in the net prototxt
$TOOLS/caffe test \
    --model=models/cifar10_fast/cifar10_fast_train_test.prototxt \
    --weights=models/cifar10_fast/cifar10_fast_iter_70000.caffemodel.h5 "$@"

View File

@@ -0,0 +1,17 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
# train with the base solver (base_lr 0.001) for the first 60000 iterations
$TOOLS/caffe train \
    --solver=models/cifar10_fast/cifar10_fast_solver.prototxt "$@"
# reduce the learning rate by a factor of 10 and continue to 65000 iterations
$TOOLS/caffe train \
    --solver=models/cifar10_fast/cifar10_fast_solver_lr1.prototxt \
    --snapshot=models/cifar10_fast/cifar10_fast_iter_60000.solverstate.h5 "$@"
# reduce the learning rate by another factor of 10 and continue to 70000 iterations
$TOOLS/caffe train \
    --solver=models/cifar10_fast/cifar10_fast_solver_lr2.prototxt \
    --snapshot=models/cifar10_fast/cifar10_fast_iter_65000.solverstate.h5 "$@"

View File

@@ -0,0 +1,25 @@
# The train/test net protocol buffer definition
net: "models/lenet/lenet_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
# In the case of MNIST, we have test batch size 100 and 100 test iterations,
# covering the full 10,000 testing images.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 10000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "models/lenet/lenet"
# solver mode: CPU or GPU
solver_mode: GPU
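With the "inv" policy, Caffe decays the rate smoothly every iteration as base_lr * (1 + gamma * iter)^(-power); that is the standard formula for this policy, stated here for reference rather than taken from this diff. At the final iteration it works out to roughly:

awk 'BEGIN { print 0.01 * (1 + 0.0001 * 10000) ^ (-0.75) }'   # ~0.0059 at iter 10000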

View File

@@ -7,6 +7,9 @@ layer {
  include {
    phase: TRAIN
  }
  transform_param {
    scale: 0.00390625
  }
  data_param {
    source: "caffe/examples/mnist/mnist_train_lmdb"
    batch_size: 64
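The added scale of 0.00390625 is just 1/256, mapping the 8-bit MNIST pixel values into roughly the [0, 1) range:

awk 'BEGIN { print 1 / 256 }'   # 0.00390625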

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
# score the final snapshot over the TEST-phase data defined in the net prototxt
$TOOLS/caffe test \
    --model=models/lenet/lenet_train_test.prototxt \
    --weights=models/lenet/lenet_iter_10000.caffemodel "$@"

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
$TOOLS/caffe train \
    --solver=models/lenet/lenet_solver.prototxt "$@"

View File

@@ -0,0 +1,23 @@
# The train/test net protocol buffer definition
net: "models/smile/smile_train_test.prototxt"
# test_iter specifies how many forward passes the test should carry out.
test_iter: 100
# Carry out testing every 500 training iterations.
test_interval: 500
# The base learning rate, momentum and the weight decay of the network.
base_lr: 0.01
momentum: 0.9
weight_decay: 0.0005
# The learning rate policy
lr_policy: "inv"
gamma: 0.0001
power: 0.75
# Display every 100 iterations
display: 100
# The maximum number of iterations
max_iter: 10000
# snapshot intermediate results
snapshot: 5000
snapshot_prefix: "models/smile/smile"
# solver mode: CPU or GPU
solver_mode: GPU

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
# score the final snapshot over the TEST-phase data defined in the net prototxt
$TOOLS/caffe test \
    --model=models/smile/smile_train_test.prototxt \
    --weights=models/smile/smile_iter_10000.caffemodel "$@"

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env sh
set -e
TOOLS=./caffe/build/tools
$TOOLS/caffe train \
    --solver=models/smile/smile_solver.prototxt "$@"