superviseddescent 0.4.0
regressors.hpp
/*
 * superviseddescent: A C++11 implementation of the supervised descent
 * optimisation method
 * File: superviseddescent/regressors.hpp
 *
 * Copyright 2014, 2015 Patrik Huber
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#ifndef REGRESSORS_HPP_
#define REGRESSORS_HPP_

#include "cereal/cereal.hpp"
#include "superviseddescent/utils/mat_cerealisation.hpp"

#include "Eigen/Dense"

#include "opencv2/core/core.hpp"

#include <iostream>

namespace superviseddescent {

/**
 * An abstract base class for regressors that learn a mapping from data to labels.
 */
class Regressor
{
public:
    virtual ~Regressor() {}

    /// Learns a model from the given data and labels.
    virtual bool learn(cv::Mat data, cv::Mat labels) = 0;

    /// Tests the learned model on the given data and labels; returns an error measure.
    virtual double test(cv::Mat data, cv::Mat labels) = 0;

    /// Predicts labels for the given values using the learned model.
    virtual cv::Mat predict(cv::Mat values) = 0;
};
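
// Illustrative only, not part of the library: a minimal sketch of a conforming
// subclass. `MeanRegressor` is a hypothetical name; it simply predicts the mean
// of the training labels.
//
//   class MeanRegressor : public Regressor
//   {
//   public:
//       bool learn(cv::Mat data, cv::Mat labels) override
//       {
//           mean = cv::mean(labels)[0]; // store the average training label
//           return true;
//       }
//       double test(cv::Mat data, cv::Mat labels) override
//       {
//           // Normalised residual, like LinearRegressor::test below:
//           return cv::norm(predict(data), labels, cv::NORM_L2) / cv::norm(labels, cv::NORM_L2);
//       }
//       cv::Mat predict(cv::Mat values) override
//       {
//           return cv::Mat(values.rows, 1, CV_32FC1, cv::Scalar(mean));
//       }
//   private:
//       double mean = 0.0;
//   };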

/**
 * Regulariser that constructs a diagonal regularisation matrix from a given
 * lambda, either used as-is or scaled by the norm of the data matrix.
 */
class Regulariser
{
public:
    /// Specifies how lambda is determined.
    enum class RegularisationType
    {
        Manual, ///< Use the given param value as lambda directly.
        MatrixNorm, ///< Multiply the given param by norm(data) / num_training_elements.
    };

    /// Constructs a regulariser. A suitable default for param suggested by the SDM
    /// authors is 0.5. If regularise_last_row is false, the bias is not regularised.
    Regulariser(RegularisationType regularisation_type = RegularisationType::Manual, float param = 0.0f, bool regularise_last_row = true)
        : regularisation_type(regularisation_type), lambda(param), regularise_last_row(regularise_last_row)
    {
    }

    /// Returns the regularisation matrix: a diagonal matrix of the same
    /// dimensions as the given data, with lambda on the diagonal.
    cv::Mat get_matrix(cv::Mat data, int num_training_elements)
    {
        switch (regularisation_type)
        {
        case RegularisationType::Manual:
            // We just take lambda as it was given, no calculation necessary.
            break;
        case RegularisationType::MatrixNorm:
            // The given lambda is the factor we have to multiply the automatic value with:
            lambda = lambda * static_cast<float>(cv::norm(data)) / static_cast<float>(num_training_elements);
            break;
        default:
            break;
        }

        cv::Mat regulariser = cv::Mat::eye(data.rows, data.cols, CV_32FC1) * lambda;

        if (!regularise_last_row) {
            // no lambda for the bias:
            regulariser.at<float>(regulariser.rows - 1, regulariser.cols - 1) = 0.0f;
        }
        return regulariser;
    }

private:
    RegularisationType regularisation_type;
    float lambda;
    bool regularise_last_row;

    friend class cereal::access;
    /// Serialises this class using cereal.
    template<class Archive>
    void serialize(Archive& ar)
    {
        ar(regularisation_type, lambda, regularise_last_row);
    }
};
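
// Usage sketch (illustrative, not from the library source): construct a
// regulariser that scales lambda by the matrix norm, using the SDM authors'
// suggested param of 0.5, and leave the bias (last row/column) unregularised.
// The matrix and the element count of 100 are hypothetical stand-ins:
//
//   Regulariser reg(Regulariser::RegularisationType::MatrixNorm, 0.5f, false);
//   cv::Mat AtA = cv::Mat::eye(4, 4, CV_32FC1); // stand-in for a real A^t * A
//   cv::Mat lambda_matrix = reg.get_matrix(AtA, 100); // 100 training elements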

/**
 * A solver that the LinearRegressor can use: solves the regularised linear
 * least squares problem using Eigen's partial-pivoting LU decomposition.
 */
class PartialPivLUSolver
{
public:
    /// Solves the regularised linear least squares problem
    /// x = (A^t * A + Lambda)^-1 * A^t * b and returns x as a cv::Mat.
    cv::Mat solve(cv::Mat data, cv::Mat labels, Regulariser regulariser)
    {
        using cv::Mat;
        using RowMajorMatrixXf = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

        // Map the cv::Mat data and labels to Eigen matrices:
        Eigen::Map<RowMajorMatrixXf> A_Eigen(data.ptr<float>(), data.rows, data.cols);
        Eigen::Map<RowMajorMatrixXf> labels_Eigen(labels.ptr<float>(), labels.rows, labels.cols);

        RowMajorMatrixXf AtA_Eigen = A_Eigen.transpose() * A_Eigen;

        // Note: This is a bit of unnecessary back-and-forth mapping, just for the regularisation:
        Mat AtA_Map(static_cast<int>(AtA_Eigen.rows()), static_cast<int>(AtA_Eigen.cols()), CV_32FC1, AtA_Eigen.data());
        Mat regularisation_matrix = regulariser.get_matrix(AtA_Map, data.rows);

        // Add the regularisation values to the diagonal of AtA:
        Eigen::DiagonalMatrix<float, Eigen::Dynamic> reg_Eigen_diag(regularisation_matrix.rows);
        Eigen::VectorXf diag_vec(regularisation_matrix.rows);
        for (int i = 0; i < diag_vec.size(); ++i) {
            diag_vec(i) = regularisation_matrix.at<float>(i, i);
        }
        reg_Eigen_diag.diagonal() = diag_vec;
        AtA_Eigen = AtA_Eigen + reg_Eigen_diag.toDenseMatrix();

        // Perform a fast PartialPivLU and use ::solve() (better than inverting):
        Eigen::PartialPivLU<RowMajorMatrixXf> lu_of_AtA(AtA_Eigen);
        RowMajorMatrixXf x_Eigen = lu_of_AtA.solve(A_Eigen.transpose() * labels_Eigen);

        // Map the resulting x back to a cv::Mat by creating a Mat header:
        Mat x(static_cast<int>(x_Eigen.rows()), static_cast<int>(x_Eigen.cols()), CV_32FC1, x_Eigen.data());

        // We have to copy the data because the underlying memory is owned by
        // x_Eigen, which goes out of scope at the end of this function:
        return x.clone();
    }
};
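
// Background note (editorial): with regularisation matrix Lambda, the solver
// above computes the regularised normal-equations solution
//     x = (A^t * A + Lambda)^-1 * A^t * b,
// which minimises norm(A*x - b)^2 + x^t * Lambda * x. A sketch of calling the
// solver directly, with hypothetical dimensions:
//
//   PartialPivLUSolver solver;
//   cv::Mat A(100, 8, CV_32FC1); // 100 training examples, 8 features
//   cv::Mat b(100, 1, CV_32FC1); // one label per example
//   // ... fill A and b ...
//   cv::Mat x = solver.solve(A, b, Regulariser());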

/**
 * An alternative solver for the LinearRegressor: solves the regularised
 * linear least squares problem with a column-pivoting Householder QR
 * decomposition, which allows checking the matrix for invertibility.
 */
class ColPivHouseholderQRSolver
{
public:
    /// Solves the regularised linear least squares problem
    /// x = (A^t * A + Lambda)^-1 * A^t * b and returns x as a cv::Mat.
    /// Warns if the regularised matrix is not invertible.
    cv::Mat solve(cv::Mat data, cv::Mat labels, Regulariser regulariser)
    {
        using cv::Mat;
        using RowMajorMatrixXf = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

        // Map the cv::Mat data and labels to Eigen matrices:
        Eigen::Map<RowMajorMatrixXf> A_Eigen(data.ptr<float>(), data.rows, data.cols);
        Eigen::Map<RowMajorMatrixXf> labels_Eigen(labels.ptr<float>(), labels.rows, labels.cols);

        RowMajorMatrixXf AtA_Eigen = A_Eigen.transpose() * A_Eigen;

        // Note: This is a bit of unnecessary back-and-forth mapping, just for the regularisation:
        Mat AtA_Map(static_cast<int>(AtA_Eigen.rows()), static_cast<int>(AtA_Eigen.cols()), CV_32FC1, AtA_Eigen.data());
        Mat regularisation_matrix = regulariser.get_matrix(AtA_Map, data.rows);

        // Add the regularisation values to the diagonal of AtA:
        Eigen::DiagonalMatrix<float, Eigen::Dynamic> reg_Eigen_diag(regularisation_matrix.rows);
        Eigen::VectorXf diag_vec(regularisation_matrix.rows);
        for (int i = 0; i < diag_vec.size(); ++i) {
            diag_vec(i) = regularisation_matrix.at<float>(i, i);
        }
        reg_Eigen_diag.diagonal() = diag_vec;
        AtA_Eigen = AtA_Eigen + reg_Eigen_diag.toDenseMatrix();

        // Perform a ColPivHouseholderQR (faster than FullPivLU) that allows checking for invertibility:
        Eigen::ColPivHouseholderQR<RowMajorMatrixXf> qr_of_AtA(AtA_Eigen);
        if (!qr_of_AtA.isInvertible()) {
            // Eigen's documentation does not specify what inverse() returns for a
            // non-invertible matrix, so the result may be garbage; the best remedy
            // is to increase the regularisation.
            std::cout << "Warning: The regularised AtA is not invertible (rank "
                << qr_of_AtA.rank() << ", full rank would be " << AtA_Eigen.rows()
                << "). Learning continues, but increase lambda to get a reliable result." << std::endl;
        }
        RowMajorMatrixXf AtAInv_Eigen = qr_of_AtA.inverse();

        // x = (AtAReg)^-1 * At * b:
        RowMajorMatrixXf x_Eigen = AtAInv_Eigen * A_Eigen.transpose() * labels_Eigen;

        // Map the resulting x back to a cv::Mat by creating a Mat header:
        Mat x(static_cast<int>(x_Eigen.rows()), static_cast<int>(x_Eigen.cols()), CV_32FC1, x_Eigen.data());

        // We have to copy the data because the underlying memory is owned by
        // x_Eigen, which goes out of scope at the end of this function:
        return x.clone();
    }
};
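
// Editorial note: both solvers compute the same regularised solution.
// PartialPivLUSolver uses a fast LU decomposition but silently assumes the
// regularised AtA is invertible; ColPivHouseholderQRSolver is slower but
// warns when it is not. A sketch of picking the QR solver explicitly
// (LinearRegressor is defined below):
//
//   LinearRegressor<ColPivHouseholderQRSolver> regressor(
//       Regulariser(Regulariser::RegularisationType::MatrixNorm, 0.5f));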

/**
 * A simple linear regressor that learns a linear model from data and labels
 * using the given Solver (a fast partial-pivoting LU solver by default).
 */
template<class Solver = PartialPivLUSolver>
class LinearRegressor : public Regressor
{
public:
    /// Constructs a LinearRegressor with the given regularisation.
    LinearRegressor(Regulariser regulariser = Regulariser()) : x(), regulariser(regulariser)
    {
    }

    /// Learns a linear model from the given data and labels by solving the
    /// regularised least squares problem.
    bool learn(cv::Mat data, cv::Mat labels) override
    {
        x = solver.solve(data, labels, regulariser);
        return true; // the solvers currently don't report failure
    }

    /// Tests the learned model on the given data and returns the normalised
    /// least squares residual, norm(predictions - labels) / norm(labels).
    double test(cv::Mat data, cv::Mat labels) override
    {
        cv::Mat predictions;
        for (int i = 0; i < data.rows; ++i) {
            cv::Mat prediction = predict(data.row(i));
            predictions.push_back(prediction);
        }
        return cv::norm(predictions, labels, cv::NORM_L2) / cv::norm(labels, cv::NORM_L2);
    }

    /// Predicts labels for the given values, i.e. computes values * x.
    cv::Mat predict(cv::Mat values) override
    {
        cv::Mat prediction = values * x;
        return prediction;
    }

    cv::Mat x; ///< The learned linear model. Todo: Make this a private member variable.

private:
    Regulariser regulariser;
    Solver solver;

    friend class cereal::access;
    /// Serialises this class using cereal.
    template<class Archive>
    void serialize(Archive& ar)
    {
        ar(x, regulariser);
    }
};
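
// End-to-end usage sketch (illustrative, not from the source). All dimensions
// and values are hypothetical; the data matrix carries a final column of ones
// so the last element of x acts as a bias, matching regularise_last_row above:
//
//   using namespace superviseddescent;
//
//   cv::Mat data(100, 3, CV_32FC1);   // 100 examples: 2 features + bias column
//   cv::Mat labels(100, 1, CV_32FC1); // one label per example
//   // ... fill data (last column = 1.0f) and labels ...
//
//   LinearRegressor<> regressor(Regulariser(
//       Regulariser::RegularisationType::MatrixNorm, 0.5f, false));
//   regressor.learn(data, labels);
//   double residual = regressor.test(data, labels); // normalised residual
//   cv::Mat prediction = regressor.predict(data.row(0));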

} /* namespace superviseddescent */
#endif /* REGRESSORS_HPP_ */