superviseddescent  0.4.0
superviseddescent.hpp
/*
 * superviseddescent: A C++11 implementation of the supervised descent
 * optimisation method
 * File: superviseddescent/superviseddescent.hpp
 *
 * Copyright 2014, 2015 Patrik Huber
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#ifndef SUPERVISEDDESCENT_HPP_
#define SUPERVISEDDESCENT_HPP_

#include "superviseddescent/utils/ThreadPool.h"

#include "cereal/cereal.hpp"
#include "cereal/types/vector.hpp"

#include "opencv2/core/core.hpp"

namespace superviseddescent {

/**
 * Default evaluation callback that does nothing. It can be passed to train() and
 * test() when no evaluation of the current predictions is needed after each
 * regressor level.
 */
inline void no_eval(const cv::Mat& current_predictions)
{
};

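// Example (added for illustration; not part of the original header): any callable
// taking the current predictions can be used in place of no_eval, e.g. a lambda that
// reports the mean absolute residual against known ground truth after each level:
//
//   cv::Mat ground_truth = ...; // same layout as the predictions, one sample per row
//   auto print_residual = [&ground_truth](const cv::Mat& current_predictions) {
//       std::cout << "Mean abs residual: "
//                 << cv::norm(current_predictions, ground_truth, cv::NORM_L1) / current_predictions.total()
//                 << std::endl;
//   };
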
/**
 * A normalisation strategy that performs no normalisation.
 */
class NoNormalisation
{
public:
    // Returns a row of ones, i.e. the update steps are left unscaled.
    inline cv::Mat operator()(cv::Mat params) {
        return cv::Mat::ones(1, params.cols, params.type());
    };
};

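// Example (added for illustration; not part of the original header): a custom
// normalisation strategy only needs an operator()(cv::Mat) that returns, for a
// single-row prediction, the factor each dimension of the update step is multiplied
// with during training (and divided by at test time). For instance, normalising the
// update steps by the L2 norm of the current parameters:
class NormaliseByParameterNorm
{
public:
    inline cv::Mat operator()(cv::Mat params) {
        auto norm = cv::norm(params, cv::NORM_L2);
        return cv::Mat::ones(1, params.cols, params.type()) / (norm > 0.0 ? norm : 1.0);
    };
};
// Note: like the regressors, a strategy also needs a cereal serialise function if the
// trained model is going to be saved via SupervisedDescentOptimiser::serialize.
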
/**
 * The main class of the library: learns a cascade of regressors from training
 * data (train()) and applies the learned cascade to new data (test(), predict()).
 */
template<class RegressorType, class NormalisationStrategy = NoNormalisation>
class SupervisedDescentOptimiser
{
public:
    SupervisedDescentOptimiser() = default;

    /**
     * Constructs an optimiser from a series of regressors (one per cascade level)
     * and an optional normalisation strategy.
     */
    SupervisedDescentOptimiser(std::vector<RegressorType> regressors, NormalisationStrategy normalisation = NoNormalisation())
        : regressors(std::move(regressors)), normalisation_strategy(std::move(normalisation))
    {
    };

    /**
     * Trains the cascade of regressors from ground-truth parameters, initialisations
     * and (possibly empty) templates, using the given projection (feature extraction)
     * function, without a per-epoch evaluation callback.
     */
    template<class ProjectionFunction>
    void train(cv::Mat parameters, cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection)
    {
        return train(parameters, initialisations, templates, projection, no_eval);
    };

    /**
     * Trains the cascade of regressors. After training each regressor level (one
     * training epoch), the given callback is invoked with the current predictions.
     *
     * If 'templates' is empty, training is performed for the case where the template
     * y is unknown at test time; otherwise 'templates' contains the known values of y.
     */
    template<class ProjectionFunction, class OnTrainingEpochCallback>
    void train(cv::Mat parameters, cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection, OnTrainingEpochCallback on_training_epoch_callback)
    {
        using cv::Mat;
        Mat current_x = initialisations;
        for (size_t regressor_level = 0; regressor_level < regressors.size(); ++regressor_level) {
            // 1) Project the current parameters x to feature space:
            // Enqueue all tasks in a thread pool:
            auto concurrent_threads_supported = std::thread::hardware_concurrency();
            if (concurrent_threads_supported == 0) {
                concurrent_threads_supported = 4;
            }
            utils::ThreadPool thread_pool(concurrent_threads_supported);
            std::vector<std::future<typename std::result_of<ProjectionFunction(Mat, size_t, int)>::type>> results; // will be float or Mat. I might remove float for the sake of code clarity, as it's only useful for very simple examples.
            results.reserve(current_x.rows);
            for (int sample_index = 0; sample_index < current_x.rows; ++sample_index) {
                results.emplace_back(
                    thread_pool.enqueue(projection, current_x.row(sample_index), regressor_level, sample_index)
                );
            }
            // Gather the results from all threads and store the features:
            Mat features;
            for (auto&& result : results) {
                features.push_back(result.get());
            }
            // Set the observed values, depending on whether a template y is used:
            Mat observed_values;
            if (templates.empty()) { // unknown template training case
                observed_values = features;
            }
            else { // known template
                observed_values = features - templates;
            }
            Mat b; // Todo: reserve() for speedup. Also below with x_k.
            // Build b = current_x - parameters and apply the normalisation strategy to each sample:
            for (int sample_index = 0; sample_index < current_x.rows; ++sample_index) {
                cv::Mat update_step = current_x.row(sample_index) - parameters.row(sample_index);
                update_step = update_step.mul(normalisation_strategy(current_x.row(sample_index)));
                b.push_back(update_step);
            }
            // 2) Learn using that data:
            regressors[regressor_level].learn(observed_values, b);
            // 3) Apply the learned regressor and use the predictions to learn the next regressor in the next loop iteration:
            Mat x_k; // x_k = current_x - R * (h(current_x) - y):
            for (int sample_index = 0; sample_index < current_x.rows; ++sample_index) {
                // No need to re-extract the features, we already did so in step 1)
                cv::Mat update_step = regressors[regressor_level].predict(observed_values.row(sample_index));
                update_step = update_step.mul(1 / normalisation_strategy(current_x.row(sample_index))); // Undo the normalisation: multiply the regressor prediction by the inverse of the normalisation factor (e.g. the IED of the current prediction in the landmark-detection case)
                x_k.push_back(Mat(current_x.row(sample_index) - update_step)); // wrap in Mat() because the subtraction yields a (non-persistent) MatExpr
            }
            current_x = x_k;
            on_training_epoch_callback(current_x);
        }
    };

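    // Added note (not in the original header): with x_k the current predictions, x* the
    // ground-truth parameters, y the template, h the projection function and n(.) the
    // normalisation strategy, each pass of the loop above
    //   1) learns a regressor R_k from the training pairs
    //        ( h(x_k) - y,  (x_k - x*) .* n(x_k) ),
    //   2) then updates the predictions with
    //        x_{k+1} = x_k - R_k(h(x_k) - y) ./ n(x_k),
    // which is the supervised descent update this class is named after. If y is unknown
    // at test time, 'templates' is left empty and h(x_k) is used in place of h(x_k) - y.
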
    /**
     * Tests the learned cascade on the given data and returns the final predictions,
     * without a per-level evaluation callback.
     */
    template<class ProjectionFunction>
    cv::Mat test(cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection)
    {
        return test(initialisations, templates, projection, no_eval);
    };

    /**
     * Tests the learned cascade on the given data and returns the final predictions.
     * After each regressor level, the given callback is invoked with the current
     * predictions. If the template y is unknown, 'templates' should be empty.
     */
    template<class ProjectionFunction, class OnRegressorIterationCallback>
    cv::Mat test(cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection, OnRegressorIterationCallback on_regressor_iteration_callback)
    {
        using cv::Mat;
        Mat current_x = initialisations;
        for (size_t regressor_level = 0; regressor_level < regressors.size(); ++regressor_level) {
            // Enqueue all tasks in a thread pool:
            auto concurrent_threads_supported = std::thread::hardware_concurrency();
            if (concurrent_threads_supported == 0) {
                concurrent_threads_supported = 4;
            }
            utils::ThreadPool thread_pool(concurrent_threads_supported);
            std::vector<std::future<typename std::result_of<ProjectionFunction(Mat, size_t, int)>::type>> results; // will be float or Mat. I might remove float for the sake of code clarity, as it's only useful for very simple examples.
            results.reserve(current_x.rows);
            for (int sample_index = 0; sample_index < current_x.rows; ++sample_index) {
                results.emplace_back(
                    thread_pool.enqueue(projection, current_x.row(sample_index), regressor_level, sample_index)
                );
            }
            // Gather the results from all threads and store the features:
            Mat features;
            for (auto&& result : results) {
                features.push_back(result.get());
            }

            Mat observed_values;
            if (templates.empty()) { // unknown template case
                observed_values = features;
            }
            else { // known template
                observed_values = features - templates;
            }
            Mat x_k;
            // Calculate x_k = current_x - R * (h(current_x) - y):
            for (int sample_index = 0; sample_index < current_x.rows; ++sample_index) {
                cv::Mat update_step = regressors[regressor_level].predict(observed_values.row(sample_index));
                update_step = update_step.mul(1 / normalisation_strategy(current_x.row(sample_index))); // Undo the normalisation: multiply the regressor prediction by the inverse of the normalisation factor (e.g. the IED of the current prediction in the landmark-detection case)
                x_k.push_back(Mat(current_x.row(sample_index) - update_step)); // we need Mat() because the subtraction yields a (non-persistent) MatExpr
            }
            current_x = x_k;
            on_regressor_iteration_callback(current_x);
        }
        return current_x; // Return the final predictions
    };

    /**
     * Optimises the given initialisation(s) using the learned cascade and returns the
     * final prediction. In contrast to test(), the projection function is invoked on
     * the whole parameter matrix at once and no callback or thread pool is used.
     */
    template<class ProjectionFunction>
    cv::Mat predict(cv::Mat initialisations, cv::Mat templates, ProjectionFunction projection)
    {
        using cv::Mat;
        Mat current_x = initialisations;
        for (size_t r = 0; r < regressors.size(); ++r) {
            // Calculate x_k = current_x - R * (h(current_x) - y):
            Mat observed_values;
            if (templates.empty()) { // unknown template case
                observed_values = projection(current_x, r);
            }
            else { // known template
                observed_values = projection(current_x, r) - templates;
            }
            cv::Mat update_step = regressors[r].predict(observed_values);
            update_step = update_step.mul(1 / normalisation_strategy(current_x)); // Undo the normalisation: multiply the regressor prediction by the inverse of the normalisation factor (e.g. the IED of the current prediction in the landmark-detection case)
            Mat x_k = current_x - update_step;
            current_x = x_k;
        }
        return current_x;
    };

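    // Added note (not in the original header): the projection/feature function h is
    // invoked differently by the member functions above. train() and test() evaluate it
    // per sample through the thread pool, so there it must be callable as
    // projection(cv::Mat single_row, size_t regressor_level, int sample_index), whereas
    // predict() calls projection(cv::Mat parameters, size_t regressor_level) on the
    // whole parameter matrix at once (typically a single row at run time).
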
private:
    std::vector<RegressorType> regressors; ///< A series of learned regressors, one per cascade level.
    NormalisationStrategy normalisation_strategy; ///< The normalisation strategy applied to the update steps.

    friend class cereal::access;
    /**
     * Serialises this class using cereal.
     */
    template<class Archive>
    void serialize(Archive& ar)
    {
        ar(regressors, normalisation_strategy);
    };
};

} /* namespace superviseddescent */
#endif /* SUPERVISEDDESCENT_HPP_ */
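
A minimal usage sketch (added for illustration): the toy regressor, the cube-root problem and all data below are made up; the only thing taken from the listing above is the interface that SupervisedDescentOptimiser requires of its RegressorType, namely learn(data, labels) and predict(values).

#include "superviseddescent/superviseddescent.hpp"
#include "opencv2/core/core.hpp"
#include <cmath>
#include <iostream>
#include <vector>

// A toy least-squares regressor modelling the RegressorType interface used above:
class ToyLinearRegressor
{
public:
    void learn(cv::Mat data, cv::Mat labels)
    {
        cv::solve(data, labels, R, cv::DECOMP_SVD); // least-squares fit of data * R = labels
    }
    cv::Mat predict(cv::Mat values)
    {
        return values * R;
    }
private:
    cv::Mat R;
};

int main()
{
    // Toy problem: recover x from an observed value y = x^3.
    auto h = [](cv::Mat value, size_t /*regressor_level*/, int /*sample_index*/) {
        return cv::Mat(1, 1, CV_32FC1, cv::Scalar(std::pow(value.at<float>(0), 3.0f)));
    };

    const int num_samples = 11;
    cv::Mat x_gt(num_samples, 1, CV_32FC1);                 // ground-truth parameters x*
    cv::Mat y(num_samples, 1, CV_32FC1);                    // templates y = h(x*)
    cv::Mat x0 = cv::Mat::zeros(num_samples, 1, CV_32FC1);  // the same initialisation for every sample
    for (int i = 0; i < num_samples; ++i) {
        x_gt.at<float>(i) = -1.0f + 2.0f * i / (num_samples - 1);
        y.at<float>(i) = std::pow(x_gt.at<float>(i), 3.0f);
    }

    std::vector<ToyLinearRegressor> regressors(10); // a cascade of 10 regressors
    superviseddescent::SupervisedDescentOptimiser<ToyLinearRegressor> optimiser(regressors);
    optimiser.train(x_gt, x0, y, h);

    // Optimise for a new observed value, starting from the same initialisation:
    cv::Mat y_new(1, 1, CV_32FC1, cv::Scalar(0.3f));
    cv::Mat x0_new = cv::Mat::zeros(1, 1, CV_32FC1);
    cv::Mat prediction = optimiser.test(x0_new, y_new, h);
    std::cout << "Predicted x: " << prediction.at<float>(0) << std::endl; // should be close to cbrt(0.3)
    return 0;
}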