18 #ifndef SINGA_MODEL_FEED_FORWARD_NET_H_ 19 #define SINGA_MODEL_FEED_FORWARD_NET_H_ 20 #include "singa/model/layer.h" 21 #include "singa/model/loss.h" 22 #include "singa/model/metric.h" 23 #include "singa/model/updater.h" 42 std::shared_ptr<Layer>
Add(std::shared_ptr<Layer> layer);
52 std::shared_ptr<Layer>
Add(
const LayerConf& conf,
53 const Shape* sample_shape =
nullptr);
73 void Compile(
bool shuffle,
bool to_register, std::shared_ptr<Updater> updater,
83 float val_split = 0.0f);
104 size_t batchsize = 128);
129 void ToDevice(std::shared_ptr<Device> device);
132 void AsType(DataType dtype);
139 [=]() {
Train(batchsize, nb_epoch, x, y, val_x, val_y); });
145 return std::thread([=]() {
Train(batchsize, nb_epoch, x, y); });
148 const vector<std::shared_ptr<Layer>> layers()
const {
return layers_; }
149 const vector<string> GetParamNames()
const;
150 const vector<ParamSpec> GetParamSpecs()
const;
151 const vector<Tensor> GetParamValues()
const;
154 vector<std::shared_ptr<Layer>> layers_;
155 std::shared_ptr<Updater> updater_;
159 bool shuffle_ =
true;
160 Device* device_ =
nullptr;
161 DataType dtype_ = kFloat32;
166 #endif // SINGA_MODEL_FEED_FORWARD_NET_H_ ~FeedForwardNet()
Delete all layers.
const vector< Tensor > Backward(int flag, const Tensor &grad)
Backward layers one by one using the gradient batch 'grad'.
const Tensor PredictOnBatch(const Tensor &x)
Predict for one batch of data.
A Tensor instance is a multi-dimensional array resident on a Device (default device is the host CPU)...
Definition: tensor.h:56
void Train(size_t batchsize, int nb_epoch, const Tensor &x, const Tensor &y, float val_split=0.0f)
Conduct the training given the training data 'x' and labels 'y'.
const std::pair< float, float > TrainOnBatch(int epoch, const Tensor &x, const Tensor &y)
Train the neural net over one batch of training data.
std::pair< Tensor, Tensor > EvaluateOnBatch(const Tensor &x, const Tensor &y)
Evaluate the neural net for one batch of data.
std::thread TrainThread(size_t batchsize, int nb_epoch, const Tensor &x, const Tensor &y)
A wrapper method to spawn a thread to execute Train() method.
Definition: feed_forward_net.h:143
std::thread TrainThread(size_t batchsize, int nb_epoch, const Tensor &x, const Tensor &y, const Tensor &val_x, const Tensor &val_y)
A wrapper method to spawn a thread to execute Train() method.
Definition: feed_forward_net.h:135
Allocate memory and execute Tensor operations.
Definition: device.h:56
void Compile(bool shuffle, Optimizer *opt, Loss *loss, Metric *metric)
Set some fields used for training and evaluating the neural net.
The base loss class, which declares the APIs for computing the objective score (loss) for a pair of p...
Definition: loss.h:31
std::shared_ptr< Device > defaultDevice
a singleton CppDevice as the host for all devices.
std::pair< Tensor, Tensor > Evaluate(const Tensor &x, const Tensor &y, size_t batchsize=128)
Evaluate the neural net with given data.
void AsType(DataType dtype)
Set the data type of each layer.
void ToDevice(std::shared_ptr< Device > device)
Move the layer data to the given device.
const Tensor Forward(int flag, const Tensor &x)
Forward layers one by one using the data batch 'x'.
const Tensor Predict(const Tensor &x, size_t batchsize=128)
Predict the probability distribution over candidate classes for each data sample.
The base metric class, which declares the APIs for computing the performance evaluation metrics given...
Definition: metric.h:32
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements...
Definition: common.h:48
The feed-forward neural net.
Definition: feed_forward_net.h:31
The base class for gradient descent algorithms used to update the model parameters in order to optimi...
Definition: optimizer.h:41
FeedForwardNet Clone(std::shared_ptr< Device > device)
Clone the neural net by cloning every layer to the given device.
std::shared_ptr< Layer > Add(std::shared_ptr< Layer > layer)
Add a layer, assuming layers are added in their connection order (each new layer follows the previously added one).