#ifndef __MLPACK_CORE_OPTIMIZERS_ADAM_ADAM_HPP
#define __MLPACK_CORE_OPTIMIZERS_ADAM_ADAM_HPP

#include <mlpack/core.hpp>

namespace mlpack {
namespace optimization {

template<typename DecomposableFunctionType>
class Adam
{
 public:
  Adam(DecomposableFunctionType& function,
       const double stepSize = 0.001,
       const double beta1 = 0.9,
       const double beta2 = 0.999,
       const double eps = 1e-8,
       const size_t maxIterations = 100000,
       const double tolerance = 1e-5,
       const bool shuffle = true);

  double Optimize(arma::mat& iterate);

  const DecomposableFunctionType& Function() const { return function; }
  DecomposableFunctionType& Function() { return function; }

 private:
  DecomposableFunctionType& function;
  // ... (other getters, setters, and members elided in this excerpt)
};

} // namespace optimization
} // namespace mlpack

#include "adam_impl.hpp"

#endif

double Beta2() const
Get the second moment coefficient.
const DecomposableFunctionType & Function() const
Get the instantiated function to be optimized.
DecomposableFunctionType & Function()
Modify the instantiated function.
double eps
The epsilon value added to the update's denominator to avoid division by zero.
size_t maxIterations
The maximum number of allowed iterations.
bool shuffle
Controls whether or not the individual functions are shuffled when iterating.
size_t MaxIterations() const
Get the maximum number of iterations (0 indicates no limit).
double & Beta2()
Modify the second moment coefficient.
size_t & MaxIterations()
Modify the maximum number of iterations (0 indicates no limit).
double stepSize
The step size for each example.
double Epsilon() const
Get the epsilon value used to avoid division by zero.
double tolerance
The tolerance for termination.
bool Shuffle() const
Get whether or not the individual functions are shuffled.
double Beta1() const
Get the first moment coefficient.
double & Tolerance()
Modify the tolerance for termination.
double & Beta1()
Modify the first moment coefficient.
bool & Shuffle()
Modify whether or not the individual functions are shuffled.
double Tolerance() const
Get the tolerance for termination.
double beta1
Exponential decay rate for the first moment estimates.
double StepSize() const
Get the step size.
double & Epsilon()
Modify the epsilon value used to avoid division by zero.
double Optimize(arma::mat &iterate)
Optimize the given function using Adam.
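For reference, one iteration of the update that Optimize() applies has the following shape. This is a sketch of the standard Adam rule of Kingma and Ba, not a copy of adam_impl.hpp; m, v, t, i, and gradient are illustrative local names:

// One Adam step on the i'th decomposable term (illustrative sketch).
function.Gradient(iterate, i, gradient);

// Exponentially decayed estimates of the first and second gradient moments.
m = beta1 * m + (1.0 - beta1) * gradient;
v = beta2 * v + (1.0 - beta2) * (gradient % gradient);

// Correct the bias from initialising m and v at zero (t is the step count).
const double biasCorrection1 = 1.0 - std::pow(beta1, (double) t);
const double biasCorrection2 = 1.0 - std::pow(beta2, (double) t);

// Element-wise update: stepSize * mHat / (sqrt(vHat) + eps).
iterate -= (stepSize / biasCorrection1) * m
    / (arma::sqrt(v / biasCorrection2) + eps);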
double & StepSize()
Modify the step size.
Adam is an optimizer that computes individual adaptive learning rates for different parameters from estimates of the first and second moments of the gradients.
double beta2
Exponential decay rate for the second moment estimates.
Adam(DecomposableFunctionType &function, const double stepSize=0.001, const double beta1=0.9, const double beta2=0.999, const double eps=1e-8, const size_t maxIterations=100000, const double tolerance=1e-5, const bool shuffle=true)
Construct the Adam optimizer with the given function and parameters.
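To show how the constructor and Optimize() fit together, here is a minimal usage sketch. The MeanSquaredFunction class and its data are illustrative assumptions following the NumFunctions()/Evaluate()/Gradient() interface expected of a DecomposableFunctionType; the include path is inferred from the include guard above.

#include <mlpack/core.hpp>
#include <mlpack/core/optimizers/adam/adam.hpp>

// A toy decomposable objective f(x) = sum_i |x - a_i|^2, minimised at the
// mean of the points a_i.  Illustrative only; any class exposing this
// interface can be used with Adam.
class MeanSquaredFunction
{
 public:
  explicit MeanSquaredFunction(const arma::mat& points) : points(points) { }

  // Number of separable terms f_i in the objective.
  size_t NumFunctions() const { return points.n_cols; }

  // Value of the i'th term at the given coordinates.
  double Evaluate(const arma::mat& coordinates, const size_t i) const
  {
    const arma::vec d = coordinates - points.col(i);
    return arma::dot(d, d);
  }

  // Gradient of the i'th term at the given coordinates.
  void Gradient(const arma::mat& coordinates, const size_t i,
                arma::mat& gradient) const
  {
    gradient = 2.0 * (coordinates - points.col(i));
  }

 private:
  const arma::mat& points;
};

int main()
{
  arma::mat points(2, 100, arma::fill::randn);
  MeanSquaredFunction f(points);

  // Default hyperparameters, as documented in the constructor above.
  mlpack::optimization::Adam<MeanSquaredFunction> opt(f);

  // Start from the origin; Optimize() overwrites the iterate in place and
  // returns the final objective value.
  arma::mat coordinates(2, 1, arma::fill::zeros);
  const double objective = opt.Optimize(coordinates);
  // coordinates is now close to the column-wise mean of points.
  (void) objective;
}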