Apache SINGA
A distributed deep learning platform.
 All Classes Namespaces Files Functions Variables Typedefs Macros
neuron_layer.h
1 /************************************************************
2 *
3 * Licensed to the Apache Software Foundation (ASF) under one
4 * or more contributor license agreements. See the NOTICE file
5 * distributed with this work for additional information
6 * regarding copyright ownership. The ASF licenses this file
7 * to you under the Apache License, Version 2.0 (the
8 * "License"); you may not use this file except in compliance
9 * with the License. You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing,
14 * software distributed under the License is distributed on an
15 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
16 * KIND, either express or implied. See the License for the
17 * specific language governing permissions and limitations
18 * under the License.
19 *
20 *************************************************************/
21 
22 #ifndef SINGA_NEURALNET_NEURON_LAYER_H_
23 #define SINGA_NEURALNET_NEURON_LAYER_H_
24 
25 #include <vector>
26 #include "neuralnet/layer.h"
27 #include "proto/job.pb.h"
28 
33 namespace singa {
37 class ConvolutionLayer : public NeuronLayer {
38  public:
40 
41  void Setup(const LayerProto& proto, int npartitions) override;
42  void ComputeFeature(int flag, Metric* perf) override;
43  void ComputeGradient(int flag, Metric* perf) override;
44  const std::vector<Param*> GetParams() const override {
45  std::vector<Param*> params{weight_, bias_};
46  return params;
47  }
48  ConnectionType src_neuron_connection(int k) const override {
49  // CHECK_LT(k, srclayers_.size());
50  return kOneToAll;
51  }
52 
53  protected:
54  int kernel_, pad_, stride_;
55  int batchsize_, channels_, height_, width_;
56  int col_height_, col_width_, conv_height_, conv_width_, num_filters_;
57  Param* weight_, *bias_;
58  Blob<float> col_data_, col_grad_;
59 };
60 
// NOTE(review): the opening `class ...` line (neuron_layer.h:64) is missing
// from this extraction, so this declaration is incomplete here. The trailing
// doxygen text ("Use im2col from Caffe. Definition: neuron_layer.h:64")
// suggests it is a Caffe-style convolution variant — confirm against the
// original header before editing.
65  public:
66  void ComputeFeature(int flag, Metric* perf) override;
67  void ComputeGradient(int flag, Metric* perf) override;
68 };
69 
70 class DropoutLayer : public NeuronLayer {
71  public:
72  void Setup(const LayerProto& proto, int npartitions) override;
73  void ComputeFeature(int flag, Metric* perf) override;
74  void ComputeGradient(int flag, Metric* perf) override;
75  protected:
76  // drop probability
77  float pdrop_;
78  /* record which neuron is dropped, required for back propagating gradients,
79  * if mask[i]=0, then the i-th neuron is dropped.
80  */
81  Blob<float> mask_;
82 };
92 class LRNLayer : public NeuronLayer {
93  void Setup(const LayerProto& proto, int npartitions) override;
94  void ComputeFeature(int flag, Metric *perf) override;
95  void ComputeGradient(int flag, Metric* perf) override;
96 
97  protected:
99  int batchsize_, channels_, height_, width_;
101  int lsize_;
103  float alpha_, beta_, knorm_;
104  Blob<float> norm_;
105 };
106 
107 class PoolingLayer : public NeuronLayer {
108  public:
109  void Setup(const LayerProto& proto, int npartitions) override;
110  void ComputeFeature(int flag, Metric *perf) override;
111  void ComputeGradient(int flag, Metric* perf) override;
112 
113  protected:
114  int kernel_, pad_, stride_;
115  int batchsize_, channels_, height_, width_, pooled_height_, pooled_width_;
116  PoolingProto_PoolMethod pool_;
117 };
118 
122 class CPoolingLayer : public PoolingLayer {
123  public:
124  void Setup(const LayerProto& proto, int npartitions);
125  void ComputeFeature(int flag, Metric *perf) override;
126  void ComputeGradient(int flag, Metric* perf) override;
127  private:
128  Blob<float> mask_;
129 };
130 
131 class ReLULayer : public NeuronLayer {
132  public:
133  void Setup(const LayerProto& proto, int npartitions) override;
134  void ComputeFeature(int flag, Metric *perf) override;
135  void ComputeGradient(int flag, Metric* perf) override;
136 };
137 
// NOTE(review): the opening `class ...` line (neuron_layer.h:138) is missing
// from this extraction, so this declaration is incomplete here. The members
// (weight_/bias_, vdim_/hdim_, transpose_) suggest a fully-connected /
// inner-product layer — confirm against the original header before editing.
139  public:
141  void Setup(const LayerProto& proto, int npartitions) override;
142  void ComputeFeature(int flag, Metric* perf) override;
143  void ComputeGradient(int flag, Metric* perf) override;
// Layers that have parameters must expose them; returns weight and bias.
144  const std::vector<Param*> GetParams() const override {
145  std::vector<Param*> params{weight_, bias_};
146  return params;
147  }
148 
149  private:
150  int batchsize_;
151  int vdim_, hdim_;
152  bool transpose_;
153  Param *weight_, *bias_;
154 };
155 
160 class STanhLayer : public NeuronLayer {
161  public:
162  void Setup(const LayerProto& proto, int npartitions) override;
163  void ComputeFeature(int flag, Metric *perf) override;
164  void ComputeGradient(int flag, Metric* perf) override;
165 };
166 
172 class SigmoidLayer: public Layer {
173  public:
174  using Layer::ComputeFeature;
176 
177  void Setup(const LayerProto& proto, int npartitions) override;
178  void ComputeFeature(int flag, Metric* perf) override;
179  void ComputeGradient(int flag, Metric* perf) override;
180 };
181 
182 
186 class RBMLayer: public Layer {
187  public:
188  virtual ~RBMLayer() {}
189  void Setup(const LayerProto& proto, int npartitions) override;
190  const Blob<float>& neg_data(const Layer* layer) {
191  return neg_data_;
192  }
193  Blob<float>* mutable_neg_data(const Layer* layer) {
194  return &neg_data_;
195  }
196  const std::vector<Param*> GetParams() const override {
197  std::vector<Param*> params{weight_, bias_};
198  return params;
199  }
200  virtual Blob<float>* Sample(int flat);
201 
202  protected:
204  bool gaussian_;
206  int hdim_;
208  int vdim_;
209  int batchsize_;
210  bool first_gibbs_;
211  Param* weight_, *bias_;
212 
213  Blob<float> neg_data_;
214  Blob<float> neg_sample_;
215  Blob<float> sample_;
216 };
217 
221 class RBMVisLayer: public RBMLayer {
222  public:
223  ~RBMVisLayer();
224  void Setup(const LayerProto& proto, int npartitions) override;
225  void ComputeFeature(int flag, Metric* perf) override;
226  void ComputeGradient(int flag, Metric* perf) override;
227 
228  private:
229  RBMLayer* hid_layer_;
230  Layer* input_layer_;
231 };
235 class RBMHidLayer: public RBMLayer {
236  public:
237  ~RBMHidLayer();
238  void Setup(const LayerProto& proto, int npartitions) override;
239  void ComputeFeature(int flag, Metric* perf) override;
240  void ComputeGradient(int flag, Metric* perf) override;
241 
242  private:
243  RBMLayer *vis_layer_;
244 };
245 
246 } // namespace singa
247 
248 #endif // SINGA_NEURALNET_NEURON_LAYER_H_
Definition: neuron_layer.h:131
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
int lsize_
size local response (neighbor) area
Definition: neuron_layer.h:101
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
Definition: layer.h:206
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
float alpha_
hyper-parameter
Definition: neuron_layer.h:103
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
Definition: neuron_layer.h:107
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
int hdim_
dimension of the hidden layer
Definition: neuron_layer.h:206
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
Base parameter class.
Definition: param.h:93
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
int batchsize_
shape of the bottom layer feature
Definition: neuron_layer.h:99
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
ConnectionType src_neuron_connection(int k) const override
Return the connection type between one neuron of this layer and its source layer. ...
Definition: neuron_layer.h:48
RBM hidden layer.
Definition: neuron_layer.h:235
int vdim_
dimension of the visible layer
Definition: neuron_layer.h:208
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
Use book-keeping for BP following Caffe's pooling implementation.
Definition: neuron_layer.h:122
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
bool gaussian_
if true, sampling according to a Gaussian distribution
Definition: neuron_layer.h:204
RBM visible layer.
Definition: neuron_layer.h:221
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
This layer apply Sigmoid function to neuron activations.
Definition: neuron_layer.h:172
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
virtual void ComputeFeature(int flag, Metric *perf)=0
Compute features of this layer based on connected layers.
Local Response Normalization edge.
Definition: neuron_layer.h:92
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
virtual void ComputeGradient(int flag, Metric *perf)=0
Compute gradients for parameters and connected layers.
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
const std::vector< Param * > GetParams() const override
Layers that have parameters must override this function.
Definition: neuron_layer.h:44
Use im2col from Caffe.
Definition: neuron_layer.h:64
const std::vector< Param * > GetParams() const override
Layers that have parameters must override this function.
Definition: neuron_layer.h:196
Base layer class.
Definition: layer.h:44
Definition: neuron_layer.h:70
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
This layer apply scaled Tan function to neuron activations.
Definition: neuron_layer.h:160
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
Definition: neuron_layer.h:138
Convolution layer.
Definition: neuron_layer.h:37
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
void ComputeGradient(int flag, Metric *perf) override
Compute gradients for parameters and connected layers.
Performance metrics.
Definition: common.h:85
void ComputeFeature(int flag, Metric *perf) override
Compute features of this layer based on connected layers.
void Setup(const LayerProto &proto, int npartitions)
Setup layer properties.
Base layer for RBM models.
Definition: neuron_layer.h:186
void Setup(const LayerProto &proto, int npartitions) override
Setup layer properties.
const std::vector< Param * > GetParams() const override
Layers that have parameters must override this function.
Definition: neuron_layer.h:144