#ifndef MLPACK_METHODS_SPARSE_AUTOENCODER_SPARSE_AUTOENCODER_HPP
#define MLPACK_METHODS_SPARSE_AUTOENCODER_SPARSE_AUTOENCODER_HPP
const double lambda = 0.0001,
const double beta = 3,
const double rho = 0.01);
void Sigmoid(const arma::mat& x, arma::mat& output) const
{
  output = (1.0 / (1 + arma::exp(-x)));
}
this->visibleSize = visible;
this->hiddenSize = hidden;
#include "sparse_autoencoder_impl.hpp"

double Lambda() const
Gets the L2-regularization parameter.
void GetNewFeatures(arma::mat &data, arma::mat &features)
Transforms the provided data into the representation learned by the sparse autoencoder.
void Sigmoid(const arma::mat &x, arma::mat &output) const
Returns the elementwise sigmoid of the passed matrix, where the sigmoid function of a real number 'x'...
Linear algebra utility functions, generally performed on matrices or vectors.
The core includes that mlpack expects; standard C++ includes and Armadillo.
void VisibleSize(const size_t visible)
Sets size of the visible layer.
double Rho() const
Gets the sparsity parameter.
arma::mat parameters
Parameters after optimization.
void Rho(const double r)
Sets the sparsity parameter.
A sparse autoencoder is a neural network whose aim is to learn compressed representations of the data...
double lambda
L2-regularization parameter.
void HiddenSize(const size_t hidden)
Sets size of the hidden layer.
size_t visibleSize
Size of the visible layer.
void Lambda(const double l)
Sets the L2-regularization parameter.
size_t hiddenSize
Size of the hidden layer.
size_t VisibleSize() const
Gets size of the visible layer.
double rho
Sparsity parameter.
double Beta() const
Gets the KL divergence parameter.
double beta
KL divergence parameter.
void Beta(const double b)
Sets the KL divergence parameter.
size_t HiddenSize() const
Gets the size of the hidden layer.
SparseAutoencoder(const arma::mat &data, const size_t visibleSize, const size_t hiddenSize, const double lambda=0.0001, const double beta=3, const double rho=0.01)
Construct the sparse autoencoder model with the given training data.
The generic L-BFGS optimizer, which uses a back-tracking line search algorithm to minimize a function...