Fine-grained training
random_indices
-
vector<int> eddl::random_indices(int batch_size, int num_samples)
Generates a random sequence of indices for a batch.
- Parameters
batch_size – Length of the random sequence to generate
num_samples – Number of samples available; indices are drawn from the half-open range [0, num_samples)
- Returns
Vector of integers
Example:
tshape s = x_train->getShape();
int num_batches = s[0]/batch_size;
for(i=0; i<epochs; i++) {
reset_loss(net);
for(j=0; j<num_batches; j++) {
vector<int> indices = random_indices(batch_size, s[0]);
train_batch(net, {x_train}, {y_train}, indices);
}
}
next_batch
-
void eddl::next_batch(vector<Tensor*> in, vector<Tensor*> out)
Loads the next batch of random samples from the input vector to the output vector.
- Parameters
in – Vector from where the samples of the next batch should be chosen from
out – Vector where the samples of the next batch should be stored
- Returns
(void)
Example:
Tensor* xbatch = new Tensor({batch_size, 784});
Tensor* ybatch = new Tensor({batch_size, 10});
tshape s = x_train->getShape();
int num_batches = s[0]/batch_size;
for(i=0; i<epochs; i++) {
reset_loss(net);
for(j=0; j<num_batches; j++) {
next_batch({x_train, y_train}, {xbatch, ybatch});
train_batch(net, {xbatch}, {ybatch});
}
}
train_batch
-
void eddl::train_batch(model net, vector<Tensor*> in, vector<Tensor*> out, vector<int> indices)
Train the model using the samples of the input vector located at the positions given by the indices vector.
- Parameters
net – Net to train
in – Vector of samples
out – Vector of labels or expected output
indices – Vector of indices of the samples to train
- Returns
(void)
Example:
tshape s = x_train->getShape();
int num_batches = s[0]/batch_size;
for(i=0; i<epochs; i++) {
reset_loss(net);
for(j=0; j<num_batches; j++) {
vector<int> indices = random_indices(batch_size, s[0]);
train_batch(net, {x_train}, {y_train}, indices);
}
}
-
void eddl::train_batch(model net, vector<Tensor*> in, vector<Tensor*> out)
Train the model using the samples of the input vector.
- Parameters
net – Net to train
in – Vector of samples
out – Vector of labels or expected output
- Returns
(void)
Example:
Tensor* xbatch = new Tensor({batch_size, 784});
Tensor* ybatch = new Tensor({batch_size, 10});
tshape s = x_train->getShape();
int num_batches = s[0]/batch_size;
for(i=0; i<epochs; i++) {
reset_loss(net);
for(j=0; j<num_batches; j++) {
next_batch({x_train, y_train}, {xbatch, ybatch});
train_batch(net, {xbatch}, {ybatch});
}
}
eval_batch
-
void eddl::eval_batch(model net, vector<Tensor*> in, vector<Tensor*> out, vector<int> indices)
Evaluate the model using the samples of the input vector located at the positions given by the indices vector.
- Parameters
net – Net to evaluate
in – Vector of samples
out – Vector of labels or expected output
indices – Vector of indices of the samples to evaluate
- Returns
(void)
Example:
for(j=0;j<num_batches;j++) {
vector<int> indices(batch_size);
for(int i=0;i<indices.size();i++)
indices[i]=(j*batch_size)+i;
eval_batch(net, {x_test}, {y_test}, indices);
}
}
-
void eddl::eval_batch(model net, vector<Tensor*> in, vector<Tensor*> out)
Evaluate the model using the samples of the input vector.
- Parameters
net – Net to evaluate
in – Vector of samples
out – Vector of labels or expected output
- Returns
(void)
set_mode
-
void eddl::set_mode(model net, int mode)
Set model mode.
- Parameters
net – Model
mode – Train 1, Test 0
- Returns
(void)
Example:
set_mode(net, 0); // Test mode
set_mode(net, 1); // Train mode
reset_loss
-
void eddl::reset_loss(model m)
Resets model loss.
- Parameters
m – Model
- Returns
(void)
Example:
reset_loss(net);
forward
-
vlayer eddl::forward(model m)
Computes the activations of the model through the forward graph.
- Parameters
m – Model
- Returns
(vlayer) Vector of the model's output layers
Example:
forward(net);
//Other ways
forward(net, layers); // Using vector of layers ``layers``
forward(net, tensors); // Using vector of tensors ``tensors``
forward(net, 10); // Forward resizing the batch size to 10
zeroGrads
-
void eddl::zeroGrads(model m)
Set model gradients to zero.
- Parameters
m – Model
- Returns
(void)
Example:
zeroGrads(net);
backward
-
void eddl::backward(model net)
Computes the gradient of the model through the backward graph.
- Parameters
net – Model
- Returns
(void)
-
void eddl::backward(model m, vector<Tensor*> target)
Computes the gradient by passing its argument (1x1 unit tensor by default) through the backward graph.
- Parameters
m – Model
target – Targets
- Returns
(void)
Example:
backward(net);
// Other ways
backward(net, target); // Using vector of tensors
backward(loss); // Computes gradients on the model associated to the loss object passed.
-
void eddl::backward(loss l)
Computes the gradient of the model associated to the given loss object through the backward graph.
- Parameters
l – Loss
- Returns
(void)
update
-
void eddl::update(model m)
Updates the weights of the model.
- Parameters
m – Model
- Returns
(void)
Example:
update(net);
print_loss
-
void eddl::print_loss(model m, int batch)
Prints model loss at some batch.
- Parameters
m – Model
batch – Batch number
- Returns
(void)
Example:
print_loss(net, j);
clamp
-
void eddl::clamp(model m, float min, float max)
Model parameters values clipping.
- Parameters
m – Model
min – Minimum value
max – Maximum value
- Returns
(void) Clamps all model parameters between min and max
Example:
clamp(net, 0.5, 0.7); // Clamps all the weights of the model between 0.5 and 0.7
compute_loss
-
float eddl::compute_loss(loss L)
Computes loss of the associated model.
- Parameters
L – Loss
- Returns
(float) Computed loss
Example:
loss mse = newloss(mse_loss, {out, target}, "mse_loss");
float my_loss = 0.0;
for(j=0; j<num_batches; j++) {
next_batch({x_train},{batch});
zeroGrads(net);
forward(net, {batch});
my_loss += compute_loss(mse)/batch_size;
update(net);
}
compute_metric
-
float eddl::compute_metric(loss L)
Computes the metric of the associated model (same as compute_loss).
- Parameters
L – Loss
- Returns
(float) Computed loss
Example:
loss mse = newloss(mse_loss, {out, target}, "mse_loss");
float my_loss = 0.0;
for(j=0; j<num_batches; j++) {
next_batch({x_train},{batch});
zeroGrads(net);
forward(net, {batch});
my_loss += compute_metric(mse)/batch_size;
update(net);
}
newloss
-
loss eddl::newloss(const std::function<Layer*(Layer*)> &f, Layer *in, string name)
Create new Loss.
- Parameters
f – Loss function
in – Loss input
name – Loss name
- Returns
Created Loss
-
loss eddl::newloss(const std::function<Layer*(vector<Layer*>)> &f, vector<Layer*> in, string name)
Create new Loss.
- Parameters
f – Loss function
in – Loss input
name – Loss name
- Returns
Created Loss
Example:
layer mse_loss(vector<layer> in) {
layer diff = Diff(in[0], in[1]);
return Mult(diff, diff);
}
loss mse = newloss(mse_loss, {out, target}, "mse_loss");
newmetric
-
loss eddl::newmetric(const std::function<Layer*(Layer*)> &f, Layer *in, string name)
Create a new Metric.
- Parameters
f – Metric function
in – Metric input
name – Metric name
- Returns
Created Metric
-
loss eddl::newmetric(const std::function<Layer*(vector<Layer*>)> &f, vector<Layer*> in, string name)
Create a new Metric.
- Parameters
f – Metric function
in – Metric input
name – Metric name
- Returns
Created Metric
detach
-
layer eddl::detach(layer l)
Sets a layer as detached, excluding it from the computation of the gradients.
- Parameters
l – Layer to detach
- Returns
Detached Layer
-
vlayer eddl::detach(vlayer l)
Sets the provided layers as detached, excluding them from the computation of the gradients.
- Parameters
l – Layers to detach
- Returns
Detached Layers