Kalman filter
Kalman filter: requires a linear model.
EKF: linearizes the model via a Taylor expansion.
Unscented Transform -> UKF.

Unscented Transform
- Sigma points
- Cholesky matrix (matrix square-root method)
- Sigma points and weights
- Distance of the sigma points from the mean

Prediction
$$\chi_{t-1} = \left(\mu_{t-1},\;\; \mu_{t-1} + \sqrt{(n+\lambda)\Sigma_{t-1}},\;\; \mu_{t-1} - \sqrt{(n+\lambda)\Sigma_{t-1}}\right)$$
$$\bar{\chi}_t^{*} = g(u_t, \chi_{t-1})$$
$$\bar{\mu}_t = \sum_{i=0}^{2n} w_m^{[i]} \bar{\chi}_t^{*[i]}$$
$$\bar{\Sigma}_t = \sum_{i=0}^{2n} w_c^{[i]} \left(\bar{\chi}_t^{*[i]} - \bar{\mu}_t\right)\left(\bar{\chi}_t^{*[i]} - \bar{\mu}_t\right)^T + R_t$$
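For reference, the scaling term and weights used in these sums are the standard scaled unscented transform constants; they correspond exactly to kLambda, kMeanWeight0, kCovWeight0, kMeanWeightI and kCovWeightI in the code below:

$$\lambda = \alpha^2 (n + \kappa) - n$$
$$w_m^{[0]} = \frac{\lambda}{n+\lambda}, \qquad w_c^{[0]} = \frac{\lambda}{n+\lambda} + (1 - \alpha^2 + \beta)$$
$$w_m^{[i]} = w_c^{[i]} = \frac{1}{2(n+\lambda)}, \quad i = 1, \dots, 2n$$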
Correction
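The prediction equations above have a matching set of correction (measurement update) equations; these are the standard UKF update (cf. Probabilistic Robotics) and are what the Observe() step further below implements:

$$\bar{Z}_t = h(\bar{\chi}_t)$$
$$\hat{z}_t = \sum_{i=0}^{2n} w_m^{[i]} \bar{Z}_t^{[i]}$$
$$S_t = \sum_{i=0}^{2n} w_c^{[i]} \left(\bar{Z}_t^{[i]} - \hat{z}_t\right)\left(\bar{Z}_t^{[i]} - \hat{z}_t\right)^T + Q_t$$
$$\bar{\Sigma}_t^{x,z} = \sum_{i=0}^{2n} w_c^{[i]} \left(\bar{\chi}_t^{[i]} - \bar{\mu}_t\right)\left(\bar{Z}_t^{[i]} - \hat{z}_t\right)^T$$
$$K_t = \bar{\Sigma}_t^{x,z} S_t^{-1}$$
$$\mu_t = \bar{\mu}_t + K_t (z_t - \hat{z}_t), \qquad \Sigma_t = \bar{\Sigma}_t - K_t S_t K_t^T$$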
Check whether A is a symmetric matrix: A minus its transpose should be approximately zero.
// Checks if 'A' is a symmetric matrix.
template <typename FloatType, int N>
void CheckSymmetric(const Eigen::Matrix<FloatType, N, N>& A) {
  // This should be pretty much Eigen::Matrix<>::Zero() if the matrix is
  // symmetric.
  // The NaN values are used to identify undefined or non-representable values
  // for floating-point elements, such as the square root of negative numbers
  // or the result of 0/0.
  const FloatType norm = (A - A.transpose()).norm();
  CHECK(!std::isnan(norm) && std::abs(norm) < 1e-5)
      << "Symmetry check failed with norm: '" << norm << "' from matrix:\n"
      << A;
}
The code below uses the diagonalization (eigendecomposition) approach described above.

A few terms: Av = λv, where the scalar λ is called an eigenvalue of A and v is the eigenvector corresponding to λ.
Eigendecomposition (spectral decomposition) factors a matrix into a product of matrices built from its eigenvalues and eigenvectors.
Note that only diagonalizable matrices admit an eigendecomposition.
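Concretely, for a symmetric positive semi-definite matrix with eigendecomposition $A = V \Lambda V^T$ (V orthogonal, Λ diagonal with non-negative entries), the square root computed by the function below is

$$B = V \, \Lambda^{1/2} \, V^T, \qquad B B = V \Lambda^{1/2} V^T \, V \Lambda^{1/2} V^T = V \Lambda V^T = A.$$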
Code implementation: returns the square root B of a symmetric positive semi-definite matrix A, with A = B * B.
template <typename FloatType, int N>
Eigen::Matrix<FloatType, N, N> MatrixSqrt(
    const Eigen::Matrix<FloatType, N, N>& A) {
  CheckSymmetric(A);  // A must be a symmetric matrix.
  // A == A^T; symmetrize and construct the eigendecomposition object.
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix<FloatType, N, N>>
      adjoint_eigen_solver((A + A.transpose()) / 2.);
  const auto& eigenvalues = adjoint_eigen_solver.eigenvalues();  // Eigenvalues.
  CHECK_GT(eigenvalues.minCoeff(), -1e-5)
      << "MatrixSqrt failed with negative eigenvalues: "
      << eigenvalues.transpose();
  // Spectral square root: B = V * sqrt(max(Lambda, 0)) * V^T. (This is the
  // eigendecomposition-based square root, not a Cholesky factorization.)
  return adjoint_eigen_solver.eigenvectors() *
         adjoint_eigen_solver.eigenvalues()
             .cwiseMax(Eigen::Matrix<FloatType, N, 1>::Zero())
             .cwiseSqrt()
             .asDiagonal() *
         adjoint_eigen_solver.eigenvectors().transpose();
}
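A minimal smoke test for MatrixSqrt (a sketch, assuming the MatrixSqrt/CheckSymmetric definitions above are in scope and that Eigen and glog are available):

#include <Eigen/Dense>
#include "glog/logging.h"

int main() {
  // Build a symmetric positive definite matrix and verify that B * B ≈ A.
  Eigen::Matrix<double, 2, 2> A;
  A << 4., 1.,
       1., 3.;
  const Eigen::Matrix<double, 2, 2> B = MatrixSqrt(A);
  CHECK_LT((B * B - A).norm(), 1e-9);
  return 0;
}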
Gaussian distribution
Mean: N × 1
Covariance: N × N
/*
  Gaussian distribution class.
  The constructor takes an N×1 mean vector and an N×N covariance matrix.
*/
template <typename T, int N>
class GaussianDistribution {
 public:
  GaussianDistribution(const Eigen::Matrix<T, N, 1>& mean,
                       const Eigen::Matrix<T, N, N>& covariance)
      : mean_(mean), covariance_(covariance) {}

  const Eigen::Matrix<T, N, 1>& GetMean() const { return mean_; }
  const Eigen::Matrix<T, N, N>& GetCovariance() const { return covariance_; }

 private:
  Eigen::Matrix<T, N, 1> mean_;        // N×1 mean.
  Eigen::Matrix<T, N, N> covariance_;  // N×N covariance.
};
Adding two Gaussians:
Mean: mean + mean
Covariance: covariance + covariance

/* Properties of the Gaussian distribution:
https://zh.wikipedia.org/wiki/%E6%AD%A3%E6%80%81%E5%88%86%E5%B8%83
*/
template <typename T, int N>
GaussianDistribution<T, N> operator+(const GaussianDistribution<T, N>& lhs,
                                     const GaussianDistribution<T, N>& rhs) {
  return GaussianDistribution<T, N>(lhs.GetMean() + rhs.GetMean(),
                                    lhs.GetCovariance() + rhs.GetCovariance());
}
Multiplying a Gaussian by a matrix:
Mean: (N × M) · (M × 1) = (N × 1)
Covariance: (N × M) · (M × M) · (M × N) = (N × N)
template <typename T, int N, int M>
GaussianDistribution<T, N> operator*(const Eigen::Matrix<T, N, M>& lhs,
                                     const GaussianDistribution<T, M>& rhs) {
  return GaussianDistribution<T, N>(
      lhs * rhs.GetMean(),                           // (N×M) · (M×1) -> N×1
      lhs * rhs.GetCovariance() * lhs.transpose());  // (N×M) · (M×M) · (M×N) -> N×N
}
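As a hedged illustration (not part of the original file; the model and the numbers are made up), these two operators already express a linear Kalman prediction step bel' = F · bel + noise:

// Hypothetical example: propagate a 2-state belief (position, velocity)
// through a linear model x' = F x and add process noise.
Eigen::Matrix2d F;
F << 1., 0.1,   // position += 0.1 * velocity
     0., 1.;
const GaussianDistribution<double, 2> belief(
    Eigen::Vector2d(0., 1.),                  // mean: position 0, velocity 1
    Eigen::Matrix2d::Identity() * 0.01);      // covariance
const GaussianDistribution<double, 2> noise(
    Eigen::Vector2d::Zero(), Eigen::Matrix2d::Identity() * 1e-4);
const GaussianDistribution<double, 2> predicted = F * belief + noise;
// predicted.GetMean() is F * mean; predicted.GetCovariance() is
// F * Sigma * F^T + noise covariance.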
The Kalman filter operates in two phases: prediction (Predict) and correction (Observe).
template <typename FloatType, int N>
class UnscentedKalmanFilter {
 public:
  using StateType = Eigen::Matrix<FloatType, N, 1>;            // N×1 state vector.
  using StateCovarianceType = Eigen::Matrix<FloatType, N, N>;  // N×N covariance matrix.

  explicit UnscentedKalmanFilter(
      const GaussianDistribution<FloatType, N>& initial_belief,  // argument 1
      std::function<StateType(const StateType& state, const StateType& delta)>
          add_delta = [](const StateType& state,                 // argument 2
                         const StateType& delta) { return state + delta; },
      std::function<StateType(const StateType& origin, const StateType& target)>
          compute_delta =                                        // argument 3
              [](const StateType& origin, const StateType& target) {
                return target - origin;
              })
      : belief_(initial_belief),
        add_delta_(add_delta),
        compute_delta_(compute_delta) {}

  /**
   * @brief Predict  Prediction step: predicts the current state with the filter.
   * @param g        The control must be implicitly added by the function g, which
   *                 also performs the state transition.
   * @param epsilon  'epsilon' is the additive combination of control and model noise.
   */
  void Predict(std::function<StateType(const StateType&)> g,
               const GaussianDistribution<FloatType, N>& epsilon);

  /**
   * @brief Observe  Observation step.
   * @param h        'h' transforms the state into an observation that should be zero,
   *                 i.e. the sensor readings should already be included in this function.
   * @param delta    'delta' is the measurement noise and must have zero mean.
   */
  template <int K>
  void Observe(
      std::function<Eigen::Matrix<FloatType, K, 1>(const StateType&)> h,
      const GaussianDistribution<FloatType, K>& delta);

 private:
  // Computes the weighted error; the weights are precomputed constants.
  StateType ComputeWeightedError(const StateType& mean_estimate,
                                 const std::vector<StateType>& states) {
    StateType weighted_error =
        kMeanWeight0 * compute_delta_(mean_estimate, states[0]);
    for (int i = 1; i != 2 * N + 1; ++i) {
      weighted_error += kMeanWeightI * compute_delta_(mean_estimate, states[i]);
    }
    return weighted_error;
  }

  // Computes the mean, iterating until the weighted error is small enough.
  StateType ComputeMean(const std::vector<StateType>& states) {
    CHECK_EQ(states.size(), 2 * N + 1);
    StateType current_estimate = states[0];
    StateType weighted_error = ComputeWeightedError(current_estimate, states);
    int iterations = 0;
    while (weighted_error.norm() > 1e-9) {
      double step_size = 1.;
      while (true) {
        const StateType next_estimate =
            add_delta_(current_estimate, step_size * weighted_error);
        const StateType next_error =
            ComputeWeightedError(next_estimate, states);
        if (next_error.norm() < weighted_error.norm()) {
          current_estimate = next_estimate;
          weighted_error = next_error;
          break;
        }
        step_size *= 0.5;
        CHECK_GT(step_size, 1e-3) << "Step size too small, line search failed.";
      }
      ++iterations;
      CHECK_LT(iterations, 20) << "Too many iterations.";
    }
    return current_estimate;
  }

  // Constant parameters. sqr(a) = a * a; OuterProduct(v) = v * v^T,
  // i.e. (N×1) · (1×N) -> N×N outer product.
  constexpr static FloatType kAlpha = 1e-3;
  constexpr static FloatType kKappa = 0.;
  constexpr static FloatType kBeta = 2.;
  constexpr static FloatType kLambda = sqr(kAlpha) * (N + kKappa) - N;
  constexpr static FloatType kMeanWeight0 = kLambda / (N + kLambda);
  constexpr static FloatType kCovWeight0 =
      kLambda / (N + kLambda) + (1. - sqr(kAlpha) + kBeta);
  constexpr static FloatType kMeanWeightI = 1. / (2. * (N + kLambda));
  constexpr static FloatType kCovWeightI = kMeanWeightI;

  // Member variables:
  // 1) belief_: the current estimate of the N state variables (mean + covariance).
  GaussianDistribution<FloatType, N> belief_;
  // 2) add_delta_: the addition operation on the state.
  const std::function<StateType(const StateType& state, const StateType& delta)>
      add_delta_;
  // 3) compute_delta_: computes the difference between two states.
  const std::function<StateType(const StateType& origin,
                                const StateType& target)>
      compute_delta_;
};
// Out-of-class definitions of the constexpr static members. (They look
// redundant, but are needed whenever the members are ODR-used before C++17.)
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kAlpha;
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kKappa;
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kBeta;
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kLambda;
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kMeanWeight0;
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kCovWeight0;
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kMeanWeightI;
template <typename FloatType, int N>
constexpr FloatType UnscentedKalmanFilter<FloatType, N>::kCovWeightI;
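Note that ComputeMean() above does not simply form the weighted sum of the sigma points: it iterates, with a halving line search, until the weighted error is numerically zero, i.e. it solves for a mean $\mu$ with

$$\sum_{i=0}^{2n} w_m^{[i]} \, \mathrm{compute\_delta}(\mu, \chi^{[i]}) \approx 0.$$

Because the mean weights sum to 1, this reduces to the ordinary weighted mean when add_delta_ and compute_delta_ are plain vector addition and subtraction, but it also works for state parameterizations where plain addition is not the right operation (e.g. states containing angles).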
Predict steps:
1. Check that the covariance matrix is symmetric.
2. Take the current state mean and the matrix square root of its covariance (needed to generate the sigma points).
3. Pass the sigma points through the state transition function g and compute the new mean and covariance:
   the mean is first a weighted sum of the transformed sigma points, then refined until the weighted error is within bounds, giving the `optimal mean`;
   the covariance is recomputed directly from the `optimal mean`.
4. Update the belief: new mean + new covariance.

Code:
void Predict(std::function<StateType(const StateType&)> g,
             const GaussianDistribution<FloatType, N>& epsilon) {
  /// 1. The covariance must be a symmetric matrix.
  CheckSymmetric(epsilon.GetCovariance());
  // Get the state mean and matrix root of its covariance.
  /// 2. Take the current state mean and the square root of its covariance.
  const StateType& mu = belief_.GetMean();
  const StateCovarianceType sqrt_sigma = MatrixSqrt(belief_.GetCovariance());  // N×N
  // The UKF uses 2N+1 sigma points.
  std::vector<StateType> Y;  // Transformed sigma points, each an N×1 state.
  Y.reserve(2 * N + 1);      // Formula: p. 65.
  /// 3. Pass the sigma points through the state transition function.
  Y.emplace_back(g(mu));     // State transition; formulas p. 65: 3.68, p. 70: 3.
  const FloatType kSqrtNPlusLambda = std::sqrt(N + kLambda);
  for (int i = 0; i < N; ++i) {
    // Order does not matter here as all have the same weights in the
    // summation later on anyways.
    Y.emplace_back(g(add_delta_(mu, kSqrtNPlusLambda * sqrt_sigma.col(i))));
    Y.emplace_back(g(add_delta_(mu, -kSqrtNPlusLambda * sqrt_sigma.col(i))));
  }
  // Obtain the `optimal mean` (iterated until the error is below threshold).
  const StateType new_mu = ComputeMean(Y);
  // Recompute the new covariance based on this new mean.
  StateCovarianceType new_sigma =
      kCovWeight0 * OuterProduct<FloatType, N>(compute_delta_(new_mu, Y[0]));
  for (int i = 0; i < N; ++i) {
    new_sigma += kCovWeightI * OuterProduct<FloatType, N>(
                                   compute_delta_(new_mu, Y[2 * i + 1]));
    new_sigma += kCovWeightI * OuterProduct<FloatType, N>(
                                   compute_delta_(new_mu, Y[2 * i + 2]));
  }
  CheckSymmetric(new_sigma);
  /// 4. Update the belief: new mean + new covariance (plus the noise epsilon).
  belief_ = GaussianDistribution<FloatType, N>(new_mu, new_sigma) + epsilon;
}
Observe steps:
1. Check the observation noise: its mean must be near zero and its covariance must be a symmetric matrix.
2. Take the current state mean and the matrix square root of its covariance (needed to generate the sigma points).
3. Compute the zero-mean sigma points and their observations Z, and obtain the overall observation mean and covariance:
   the mean is a direct weighted sum of the sigma points passed through the observation function, giving the `new mean` z_hat;
   the covariance is recomputed directly from the `new mean`, and the observation's own covariance is added.
4. Compute the (x, z) cross covariance: x is the (zero-mean) sigma points, z is (z_t - z_hat).
5. Compute the Kalman gain K_t.
6. Update the covariance: old covariance - K S K^T.
7. Update the belief: new mean + new covariance.

Code implementation:
/**
 * @brief Observe  Observation step.
 * @param h        'h' transforms the state into an observation that should be zero,
 *                 i.e. the sensor readings should already be included in this function.
 * @param delta    'delta' is the measurement noise and must have zero mean.
 */
void Observe(
    std::function<Eigen::Matrix<FloatType, K, 1>(const StateType&)> h,
    const GaussianDistribution<FloatType, K>& delta) {
  /// 1. The observation mean must be near zero; the observation covariance
  /// must be a symmetric matrix.
  CheckSymmetric(delta.GetCovariance());
  // We expect zero mean delta.
  CHECK_NEAR(delta.GetMean().norm(), 0., 1e-9);
  /// 2. Take the current state mean and the square root of its covariance.
  // Get the state mean and matrix root of its covariance.
  const StateType& mu = belief_.GetMean();
  const StateCovarianceType sqrt_sigma = MatrixSqrt(belief_.GetCovariance());
  // As in Kraft's paper, we compute W containing the zero-mean sigma points,
  // since this is all we need.
  // Compute the zero-mean sigma points and the observations they map to.
  std::vector<StateType> W;
  W.reserve(2 * N + 1);
  W.emplace_back(StateType::Zero());
  std::vector<Eigen::Matrix<FloatType, K, 1>> Z;
  Z.reserve(2 * N + 1);
  Z.emplace_back(h(mu));
  /// 3. Compute the zero-mean sigma points W and observations Z, and the
  /// overall observation mean z_hat.
  Eigen::Matrix<FloatType, K, 1> z_hat = kMeanWeight0 * Z[0];
  const FloatType kSqrtNPlusLambda = std::sqrt(N + kLambda);
  for (int i = 0; i < N; ++i) {
    // Order does not matter here as all have the same weights in the
    // summation later on anyways.
    W.emplace_back(kSqrtNPlusLambda * sqrt_sigma.col(i));
    Z.emplace_back(h(add_delta_(mu, W.back())));
    W.emplace_back(-kSqrtNPlusLambda * sqrt_sigma.col(i));
    Z.emplace_back(h(add_delta_(mu, W.back())));
    z_hat += kMeanWeightI * Z[2 * i + 1];
    z_hat += kMeanWeightI * Z[2 * i + 2];
  }
  /// 4. Compute the overall observation covariance S
  /// (propagated covariance + measurement noise covariance).
  Eigen::Matrix<FloatType, K, K> S =
      kCovWeight0 * OuterProduct<FloatType, K>(Z[0] - z_hat);
  for (int i = 0; i < N; ++i) {
    S += kCovWeightI * OuterProduct<FloatType, K>(Z[2 * i + 1] - z_hat);
    S += kCovWeightI * OuterProduct<FloatType, K>(Z[2 * i + 2] - z_hat);
  }
  CheckSymmetric(S);  // The propagated covariance must be symmetric.
  S += delta.GetCovariance();
  /// 5. Compute the cross covariance Sigma_{x,z}.
  Eigen::Matrix<FloatType, N, K> sigma_bar_xz =
      kCovWeight0 * W[0] * (Z[0] - z_hat).transpose();
  for (int i = 0; i < N; ++i) {
    sigma_bar_xz +=
        kCovWeightI * W[2 * i + 1] * (Z[2 * i + 1] - z_hat).transpose();
    sigma_bar_xz +=
        kCovWeightI * W[2 * i + 2] * (Z[2 * i + 2] - z_hat).transpose();
  }
  /// 6. Compute the Kalman gain K_t and update the covariance.
  const Eigen::Matrix<FloatType, N, K> kalman_gain =
      sigma_bar_xz * S.inverse();
  const StateCovarianceType new_sigma =
      belief_.GetCovariance() - kalman_gain * S * kalman_gain.transpose();
  CheckSymmetric(new_sigma);
  /// 7. Update the belief (mean + covariance). Since h maps the state to a
  /// residual that should be zero, the innovation is -z_hat.
  belief_ = GaussianDistribution<FloatType, N>(
      add_delta_(mu, kalman_gain * -z_hat), new_sigma);
}
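Finally, a hedged end-to-end usage sketch (not from the original file; all model values are made up) for a 1-state filter that tracks a scalar position from noisy direct measurements. Note that h() returns the residual that the filter drives toward zero:

// Hypothetical example: random-walk motion model, direct position measurement.
using UKF1 = UnscentedKalmanFilter<double, 1>;
using State = Eigen::Matrix<double, 1, 1>;

UKF1 filter(GaussianDistribution<double, 1>(State::Constant(0.),    // initial mean
                                            State::Constant(1.)));  // initial variance

// Prediction: identity motion model with process noise variance 0.01.
filter.Predict([](const State& state) { return state; },
               GaussianDistribution<double, 1>(State::Constant(0.),
                                               State::Constant(0.01)));

// Correction: the sensor reading 1.2 is folded into h(), which returns
// (predicted position - measurement); measurement noise variance 0.1.
const double measurement = 1.2;
filter.Observe<1>(
    [measurement](const State& state) {
      return State::Constant(state[0] - measurement);
    },
    GaussianDistribution<double, 1>(State::Constant(0.),
                                    State::Constant(0.1)));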