カルマンフィルタの基礎 第13回
前回と異なる非線形モデルで、UKF(Unscented Kalman Filter)を確認する
UKFの実践(非線形モデル2)
下記のモデルを用いる
モデルに入力(制御項)が加わっているので、関数の入力を少々変更
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


def f_(x_, k_):
    """State-transition function: nonlinear growth term plus a cosine forcing input.

    Args:
        x_: state, shape (1, 1).
        k_: time step; drives the 8*cos(1.2*k) input term.

    Returns:
        Propagated state, shape (1, 1).
    """
    nonlinear = 0.2 * x_ + 25. * x_ / (1. + x_**2)
    forcing = np.array([[8.]]) * np.cos(1.2 * k_)
    return nonlinear + forcing


# Input (noise) gain matrix.
B_ = np.array([[1.]])


def h_(x_, k_=0.):
    """Observation function: quadratic measurement y = x^2 / 20."""
    return x_**2 / 20.


# System-noise and observation-noise covariances.
Q_ = np.array([[1.]])
R_ = np.array([[3.]])
カルマンフィルタの実装(EKF)
def Af(x_):
    """Jacobian df/dx of the state-transition function f_ (EKF linearization).

    d/dx [0.2x + 25x/(1+x^2)] = 0.2 + 25/(1+x^2) - 25x * 2x / (1+x^2)^2.
    """
    return 0.2 + 25. / (1. + x_**2) + 25. * x_ / (1. + x_**2)**2 * (-1) * (2 * x_)


def Ch(x_):
    """Jacobian dh/dx of the observation function h_: d(x^2/20)/dx = x/10."""
    return x_ / 10.


def ekf(f_, Af, B_, h_, Ch, Q_, R_, k_, y_, xhat, P_):
    """One predict/update step of the Extended Kalman Filter.

    Args:
        f_: state-transition function f_(x, k).
        Af: Jacobian of f_ evaluated at a state.
        B_: input (noise) gain matrix.
        h_: observation function h_(x, k).
        Ch: Jacobian of h_ evaluated at a state.
        Q_, R_: system / observation noise covariances.
        k_: current time step.
        y_: current observation.
        xhat, P_: previous state estimate and its covariance.

    Returns:
        (xhat_new, P_new, G_, Pm): updated estimate, updated covariance,
        Kalman gain, and the a-priori covariance.
    """
    # Predict: propagate the estimate and its covariance through the linearization.
    xhatm = f_(xhat, k_)
    Am = Af(xhat)
    Pm = Am @ P_ @ Am.T + B_ @ Q_ @ B_.T
    # Update: Kalman gain and measurement correction.
    C_ = Ch(xhatm)
    # np.linalg.inv is the proper matrix inverse; the original `**-1` was an
    # elementwise power that only happens to be correct for 1x1 matrices.
    G_ = Pm @ C_ @ np.linalg.inv(C_.T @ Pm @ C_ + R_)
    xhat_new = xhatm + G_ @ (y_ - h_(xhatm, k_))
    P_new = (np.eye(Am.shape[0]) - G_ @ C_.T) @ Pm
    return xhat_new, P_new, G_, Pm
カルマンフィルタの実装(UKF)
def calc_Xm(x_, P_, kappa):
    """Generate the 2n+1 sigma points for mean x_ and covariance P_.

    Points are x, x + sqrt(n+kappa)*L_i, x - sqrt(n+kappa)*L_i for each
    column L_i of the Cholesky factor of P_ (P = L L^T).

    Returns:
        Array of shape (2n+1, n, 1).
    """
    n_ = x_.shape[0]
    x_ = x_.reshape(n_)
    L = np.linalg.cholesky(P_)
    Xm = [x_]
    # Spread in the +sqrt(n+kappa) direction along each Cholesky column.
    for i in range(n_):
        mu = []
        for j in range(n_):
            mu += [x_[j] + np.sqrt(n_ + kappa) * L[j][i]]
        Xm += [mu]
    # ...and the matching -sqrt(n+kappa) direction.
    for i in range(n_):
        mu = []
        for j in range(n_):
            mu += [x_[j] - np.sqrt(n_ + kappa) * L[j][i]]
        Xm += [mu]
    Xm = np.array(Xm)
    Xm = Xm.reshape(2 * n_ + 1, n_, 1)
    return Xm


def calc_mean(Xm, kappa):
    """Weighted mean of sigma points: w0 = kappa/(n+kappa) for the center,
    wi = 1/(2(n+kappa)) for each of the 2n spread points (weights sum to 1)."""
    n2 = Xm.shape[0]
    nl = Xm.shape[1]
    n_ = int((n2 - 1) / 2)
    Xm = Xm.reshape(n2, nl)
    w0 = kappa / (n_ + kappa)
    wi = 1. / (2 * (n_ + kappa))
    xhatm = []
    for i in range(nl):
        xhatm += [w0 * Xm[0][i]]
        for j in range(n_):
            xhatm[i] += wi * Xm[j + 1][i]
            xhatm[i] += wi * Xm[j + n_ + 1][i]
    xhatm = np.array(xhatm)
    xhatm = xhatm.reshape(nl, 1)
    return xhatm


def calc_Pm(Xm, Ym, kappa):
    """Weighted (cross-)covariance of two sigma-point sets Xm, Ym
    around their respective weighted means."""
    xhatm = calc_mean(Xm, kappa)
    yhatm = calc_mean(Ym, kappa)
    Xm = Xm.reshape(Xm.shape[0], Xm.shape[1])
    Ym = Ym.reshape(Ym.shape[0], Ym.shape[1])
    xhatm = xhatm.reshape(xhatm.shape[0])
    yhatm = yhatm.reshape(yhatm.shape[0])
    l1 = Xm.shape[1]
    l2 = Ym.shape[1]
    n2 = Xm.shape[0]
    n_ = int((n2 - 1) / 2)
    w0 = kappa / (n_ + kappa)
    wi = 1. / (2 * (n_ + kappa))
    Pm = []
    for j in range(l1):
        Pm += [[]]
        for k in range(l2):
            # Center point contributes with weight w0; spread points with wi.
            Pm[j] += [w0 * (Xm[0][j] - xhatm[j]) * (Ym[0][k] - yhatm[k])]
            for i in range(2 * n_):
                Pm[j][k] += wi * (Xm[i + 1][j] - xhatm[j]) * (Ym[i + 1][k] - yhatm[k])
    return np.array(Pm)


def ut(f_, k_, x_, P_, kappa):
    """Unscented transform: propagate (x_, P_) through f_ via sigma points.

    Returns:
        (X_, Xm, xhatm, Pm): input sigma points, transformed sigma points,
        their weighted mean, and their weighted covariance.
    """
    X_ = calc_Xm(x_, P_, kappa)
    # Avoid shadowing the argument x_ while mapping f_ over the sigma points.
    Xm = np.array([f_(sigma, k_) for sigma in X_])
    xhatm = calc_mean(Xm, kappa)
    Pm = calc_Pm(Xm, Xm, kappa)
    return X_, Xm, xhatm, Pm


def ukf(f_, h_, B_, Q_, R_, k_, y_, xhat, P_, kappa):
    """One predict/update step of the Unscented Kalman Filter.

    Args:
        f_: state-transition function f_(x, k).
        h_: observation function h_(x, k).
        B_, Q_, R_: input gain and noise covariances.
        k_: current time step.
        y_: current observation.
        xhat, P_: previous state estimate and covariance.
        kappa: sigma-point spread parameter.

    Returns:
        (xhat_new, P_new, kG, Pxx): updated estimate, updated covariance,
        Kalman gain, and the a-priori covariance.
    """
    # Predict through the process model, then add process noise.
    X_, Xm, xhatm, Pxx = ut(f_, k_, xhat, P_, kappa)
    Pxx = Pxx + B_ @ Q_ @ B_.T
    # Transform the predicted distribution through the measurement model.
    Xp, Ym, yhatm, Pyy = ut(h_, k_, xhatm, Pxx, kappa)
    Pxy = calc_Pm(Xp, Ym, kappa)
    # np.linalg.inv is the proper matrix inverse; the original `**-1` was an
    # elementwise power that only happens to be correct for 1x1 matrices.
    kG = Pxy @ np.linalg.inv(Pyy + R_)
    xhat_new = xhatm + kG @ (y_ - yhatm)
    P_new = Pxx - kG @ Pxy.T
    return xhat_new, P_new, kG, Pxx
シミュレーションの実施
# Simulation: run EKF and UKF side by side on the same noisy trajectory.
N_ = 50
np.random.seed(0)


def _noise(cov):
    """Draw one Gaussian sample with variance cov[0][0], shaped (1, 1)."""
    return np.array([[np.random.randn() * np.sqrt(cov[0][0])]])


v_ = _noise(Q_)  # system noise (initial draw kept to preserve the RNG sequence)
w_ = _noise(R_)  # observation noise

x_arr = [np.array([[0.]])]
y_arr = [h_(x_arr[0]) + w_]
xhat_ekf_arr = [np.array([[0.]])]
P_ekf_arr = [np.array([[1.]])]
G_ekf_arr = []
xhat_ukf_arr = [np.array([[0.]])]
P_ukf_arr = [np.array([[1.]])]
G_ukf_arr = []

for step in range(1, N_):
    v_ = _noise(Q_)  # system noise
    w_ = _noise(R_)  # observation noise
    # Propagate the true state and produce the noisy observation.
    x_ = f_(x_arr[-1], step) + B_ @ v_
    y_ = h_(x_) + w_
    x_arr.append(x_)
    y_arr.append(y_)
    # EKF estimate.
    xhat, P_, G_, Pm = ekf(
        f_, Af, B_, h_, Ch, Q_, R_, step, y_, xhat_ekf_arr[-1], P_ekf_arr[-1])
    xhat_ekf_arr.append(xhat)
    P_ekf_arr.append(P_)
    G_ekf_arr.append(G_)
    # UKF estimate.
    xhat, P_, G_, Pm = ukf(
        f_, h_, B_, Q_, R_, step, y_, xhat_ukf_arr[-1], P_ukf_arr[-1], kappa=2)
    xhat_ukf_arr.append(xhat)
    P_ukf_arr.append(P_)
    G_ukf_arr.append(G_)

# Stack the per-step (1, 1) arrays into (N, 1, 1) arrays for plotting.
x_arr = np.array(x_arr)
y_arr = np.array(y_arr)
xhat_ekf_arr = np.array(xhat_ekf_arr)
P_ekf_arr = np.array(P_ekf_arr)
G_ekf_arr = np.array(G_ekf_arr)
xhat_ukf_arr = np.array(xhat_ukf_arr)
P_ukf_arr = np.array(P_ukf_arr)
G_ukf_arr = np.array(G_ukf_arr)
シミュレーションの確認
# Plot the states/estimates (top) and the estimation errors (bottom).
fig = plt.figure(figsize=(6, 5))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
ax1.plot(xhat_ekf_arr[:, 0, 0], ls='--', label=r'$\hat{x}_{ekf}$')
ax1.plot(xhat_ukf_arr[:, 0, 0], ls='--', label=r'$\hat{x}_{ukf}$')
ax1.plot(x_arr[:, 0, 0], ls='-', label='$x$')
ax1.plot(y_arr[:, 0, 0], '*', label='$y$')
ax1.set_ylabel('$x$')
ax1.grid(ls=':'); ax1.legend()
# EKF error with its standard deviation over steps 20+ (after transients).
std_ = (xhat_ekf_arr[:, 0, 0] - x_arr[:, 0, 0])[20:].std()
label = r'$\~{x}_{ekf} \quad \sigma =$ %7.3f' % std_
ax2.plot(xhat_ekf_arr[:, 0, 0] - x_arr[:, 0, 0], ls='--', label=label)
# UKF error; fixed copy-paste bug — the original label said "ekf" here.
std_ = (xhat_ukf_arr[:, 0, 0] - x_arr[:, 0, 0])[20:].std()
label = r'$\~{x}_{ukf} \quad \sigma =$ %7.3f' % std_
ax2.plot(xhat_ukf_arr[:, 0, 0] - x_arr[:, 0, 0], ls='--', label=label)
ax2.set_ylabel(r'$\~{x}$')
ax2.grid(ls=':'); ax2.legend()
ax2.set_xlabel('$k$')
fig.tight_layout()
# Plot error covariance P (top) and Kalman gain G (bottom) for both filters.
fig = plt.figure(figsize=(6, 5))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
ax1.plot(P_ekf_arr[:, 0, 0], '--', label='$P_{ekf}$')
ax1.plot(P_ukf_arr[:, 0, 0], '--', label='$P_{ukf}$')
ax1.set_ylabel('$P$')
ax1.grid(ls=':'); ax1.legend()
ax2.plot(G_ekf_arr[:, 0, 0], '--', label='$G_{ekf}$')
# Fixed copy-paste bug: the UKF gain was labeled "ekf" in the original.
ax2.plot(G_ukf_arr[:, 0, 0], '--', label='$G_{ukf}$')
ax2.set_ylabel('$G$')
ax2.grid(ls=':'); ax2.legend()
ax2.set_xlabel('$k$')
fig.tight_layout()
今回の例の場合、UKFの方が性能が良い
参考文献
この記事は以下の書籍を参考にしましたが、
私の拙い知識で書いておりますので、誤り等ありましたらご指摘ください