先从简单的开始学习。
1.继承SizedCostFunction,构造CostFunction
#include <iostream>
#include <vector>

#include "ceres/ceres.h"
#include "glog/logging.h"
using ceres::CostFunction;
using ceres::SizedCostFunction;
using ceres::Problem;
using ceres::Solver;
using ceres::Solve;
// Cost function with an analytically-derived Jacobian: inherit from
// SizedCostFunction<num_residuals, size_of_first_parameter_block>.
class QuadraticCostFunction
: public SizedCostFunction<1 /* number of residuals */,
                           1 /* size of first parameter */> {
 public:
  ~QuadraticCostFunction() override = default;

  // Computes the residual f(x) = 10 - x and, when requested, its Jacobian.
  //
  // parameters: parameters[0][0] is the single scalar parameter x.
  // residuals:  residuals[0] receives 10 - x.
  // jacobians:  may be nullptr (Ceres does not always need derivatives);
  //             when non-null, jacobians[0][0] receives df/dx = -1.
  bool Evaluate(double const* const* parameters,
                double* residuals,
                double** jacobians) const override {
    const double x = parameters[0][0];
    // Residual function.
    residuals[0] = 10 - x;
    // f'(x) = -1. There is one parameter block of one dimension, so
    // jacobians has a single element.
    //
    // Because jacobians can be null, Evaluate must check before writing.
    // For this simple problem checking jacobians[0] is sufficient, but in
    // general Ceres may request derivatives for only a subset of the
    // parameter blocks.
    if (jacobians != nullptr && jacobians[0] != nullptr) {
      jacobians[0][0] = -1;
    }
    return true;
  }
};
// Minimizes f(x) = (10 - x)^2 / 2 starting from x = 0.5; optimum is x = 10.
int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  // Initial value of the optimization variable.
  double x = 0.5;
  const double initial_x = x;
  // Build the problem.
  Problem problem;
  // Build the CostFunction (residual function). Ceres takes ownership.
  CostFunction* cost_function = new QuadraticCostFunction;
  // QuadraticCostFunction as the cost, no LossFunction (nullptr means a
  // plain squared error), and a single parameter block &x.
  problem.AddResidualBlock(cost_function, nullptr, &x);
  // Run the solver!
  Solver::Options options;
  options.minimizer_progress_to_stdout = true;
  Solver::Summary summary;
  Solve(options, &problem, &summary);
  std::cout << summary.BriefReport() << "\n";
  std::cout << "x : " << initial_x
            << " -> " << x << "\n";
  return 0;
}
这个例子并没能解决我的疑惑,jacobians用在哪里?我们继续下一个例子。
2.曲线拟合与LossFunction
#include "ceres/ceres.h"
#include "glog/logging.h"
using ceres::AutoDiffCostFunction;
using ceres::CostFunction;
using ceres::Problem;
using ceres::Solver;
using ceres::Solve;
// Number of (x, y) observations used for the curve fit.
const int kNumObservations = 67;
// Flattened observation pairs: data[2*i] is x_i, data[2*i + 1] is y_i.
const double data[] = {
    0.000000e+00, 1.133898e+00,  // point 1
    7.500000e-02, 1.334902e+00,  // point 2
    // ..., 64 points omitted here for brevity
    4.950000e+00, 4.669206e+00,  // point 67
};  // NOTE: the terminating semicolon was missing in the original.
// Residual functor for fitting the curve y = exp(m*x + c) to one
// observed sample (x, y).
struct ExponentialResidual {
  // Captures a single observation.
  ExponentialResidual(double x, double y)
      : x_(x), y_(y) {}

  // Templated on T so Ceres can evaluate it both with plain doubles and
  // with Jets for automatic differentiation.
  // Writes the residual y_ - exp(m*x_ + c) into residual[0].
  template <typename T>
  bool operator()(const T* const m,
                  const T* const c,
                  T* residual) const {
    const T predicted = exp(m[0] * T(x_) + c[0]);
    residual[0] = T(y_) - predicted;
    return true;
  }

 private:
  const double x_;
  const double y_;
};
// Fits y = exp(m*x + c) to the observations, using a robust Cauchy loss
// to reduce the influence of outliers.
int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  // Initial values for the curve parameters m and c.
  double m = 0.0;
  double c = 0.0;
  Problem problem;
  for (int i = 0; i < kNumObservations; ++i) {
    problem.AddResidualBlock(
        // Cost functor is ExponentialResidual: 1 residual, and two
        // parameter blocks (m and c), each of dimension 1.
        // Loss function is CauchyLoss(0.5); it must be qualified with
        // ceres:: because there is no `using ceres::CauchyLoss;` above.
        new AutoDiffCostFunction<ExponentialResidual, 1, 1, 1>(
            new ExponentialResidual(data[2 * i], data[2 * i + 1])),
        new ceres::CauchyLoss(0.5),
        &m, &c);
  }
  Solver::Options options;
  options.max_num_iterations = 25;
  options.linear_solver_type = ceres::DENSE_QR;
  options.minimizer_progress_to_stdout = true;
  Solver::Summary summary;
  Solve(options, &problem, &summary);
  std::cout << summary.BriefReport() << "\n";
  std::cout << "Initial m: " << 0.0 << " c: " << 0.0 << "\n";
  std::cout << "Final m: " << m << " c: " << c << "\n";
  return 0;
}
介绍了两个简单的例子,下一节介绍更复杂一点的BA