001// Copyright (c) FIRST and other WPILib contributors.
002// Open Source Software; you can modify and/or share it under the terms of
003// the WPILib BSD license file in the root directory of this project.
004
005package edu.wpi.first.math.estimator;
006
007import edu.wpi.first.math.Matrix;
008import edu.wpi.first.math.Nat;
009import edu.wpi.first.math.StateSpaceUtil;
010import edu.wpi.first.math.VecBuilder;
011import edu.wpi.first.math.geometry.Pose2d;
012import edu.wpi.first.math.geometry.Rotation2d;
013import edu.wpi.first.math.geometry.Translation2d;
014import edu.wpi.first.math.kinematics.MecanumDriveKinematics;
015import edu.wpi.first.math.kinematics.MecanumDriveWheelSpeeds;
016import edu.wpi.first.math.numbers.N1;
017import edu.wpi.first.math.numbers.N3;
018import edu.wpi.first.util.WPIUtilJNI;
019import java.util.function.BiConsumer;
020
021/**
022 * This class wraps an {@link UnscentedKalmanFilter Unscented Kalman Filter} to fuse
023 * latency-compensated vision measurements with mecanum drive encoder velocity measurements. It will
024 * correct for noisy measurements and encoder drift. It is intended to be an easy but more accurate
025 * drop-in for {@link edu.wpi.first.math.kinematics.MecanumDriveOdometry}.
026 *
027 * <p>{@link MecanumDrivePoseEstimator#update} should be called every robot loop. If your loops are
028 * faster or slower than the default of 0.02s, then you should change the nominal delta time using
029 * the secondary constructor: {@link MecanumDrivePoseEstimator#MecanumDrivePoseEstimator(Rotation2d,
030 * Pose2d, MecanumDriveKinematics, Matrix, Matrix, Matrix, double)}.
031 *
032 * <p>{@link MecanumDrivePoseEstimator#addVisionMeasurement} can be called as infrequently as you
033 * want; if you never call it, then this class will behave mostly like regular encoder odometry.
034 *
035 * <p>The state-space system used internally has the following states (x), inputs (u), and outputs
036 * (y):
037 *
038 * <p><strong> x = [x, y, theta]ᵀ </strong> in the field coordinate system containing x position, y
039 * position, and heading.
040 *
 * <p><strong> u = [v_x, v_y, omega]ᵀ </strong> containing x velocity, y velocity, and angular
 * rate in the field coordinate system.
043 *
044 * <p><strong> y = [x, y, theta]ᵀ </strong> from vision containing x position, y position, and
045 * heading; or <strong> y = [theta]ᵀ </strong> containing gyro heading.
046 */
public class MecanumDrivePoseEstimator {
  // UKF over state x = [x, y, theta]ᵀ, input u = [vx, vy, omega]ᵀ (field-relative), and local
  // measurement y = [theta] (gyro heading).
  private final UnscentedKalmanFilter<N3, N3, N1> m_observer;
  private final MecanumDriveKinematics m_kinematics;
  // Applies a vision ("global") [x, y, theta]ᵀ measurement to the observer, using
  // m_visionContR as the measurement noise covariance.
  private final BiConsumer<Matrix<N3, N1>, Matrix<N3, N1>> m_visionCorrect;
  // Buffers past observer states so a vision measurement that arrives late can be applied at
  // the time it was actually taken, then replayed forward.
  private final KalmanFilterLatencyCompensator<N3, N3, N1> m_latencyCompensator;

  private final double m_nominalDt; // Seconds
  // Timestamp of the previous updateWithTime() call; -1.0 means "no update yet", in which
  // case the nominal loop period is used for the first dt.
  private double m_prevTimeSeconds = -1.0;

  // Offset between the field-frame heading and the raw gyro reading, so user code never has
  // to reset the gyro itself.
  private Rotation2d m_gyroOffset;
  // Offset-corrected heading from the previous update; differentiated to estimate omega.
  private Rotation2d m_previousAngle;

  // Continuous measurement noise covariance for vision measurements ([x, y, theta]).
  // Mutable so vision trust can be changed at runtime via setVisionMeasurementStdDevs().
  private Matrix<N3, N3> m_visionContR;

  /**
   * Constructs a MecanumDrivePoseEstimator with the default nominal loop period of 0.02
   * seconds.
   *
   * @param gyroAngle The current gyro angle.
   * @param initialPoseMeters The starting pose estimate.
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param stateStdDevs Standard deviations of model states. Increase these numbers to trust your
   *     model's state estimates less. This matrix is in the form [x, y, theta]ᵀ, with units in
   *     meters and radians.
   * @param localMeasurementStdDevs Standard deviations of the encoder and gyro measurements.
   *     Increase these numbers to trust sensor readings from encoders and gyros less. This matrix
   *     is in the form [theta], with units in radians.
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public MecanumDrivePoseEstimator(
      Rotation2d gyroAngle,
      Pose2d initialPoseMeters,
      MecanumDriveKinematics kinematics,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N1, N1> localMeasurementStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    this(
        gyroAngle,
        initialPoseMeters,
        kinematics,
        stateStdDevs,
        localMeasurementStdDevs,
        visionMeasurementStdDevs,
        0.02);
  }

  /**
   * Constructs a MecanumDrivePoseEstimator.
   *
   * @param gyroAngle The current gyro angle.
   * @param initialPoseMeters The starting pose estimate.
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param stateStdDevs Standard deviations of model states. Increase these numbers to trust your
   *     model's state estimates less. This matrix is in the form [x, y, theta]ᵀ, with units in
   *     meters and radians.
   * @param localMeasurementStdDevs Standard deviations of the encoder and gyro measurements.
   *     Increase these numbers to trust sensor readings from encoders and gyros less. This matrix
   *     is in the form [theta], with units in radians.
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   * @param nominalDtSeconds The time in seconds between each robot loop.
   */
  @SuppressWarnings("ParameterName")
  public MecanumDrivePoseEstimator(
      Rotation2d gyroAngle,
      Pose2d initialPoseMeters,
      MecanumDriveKinematics kinematics,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N1, N1> localMeasurementStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs,
      double nominalDtSeconds) {
    m_nominalDt = nominalDtSeconds;

    m_observer =
        new UnscentedKalmanFilter<>(
            Nat.N3(),
            Nat.N1(),
            // f(x, u) = u: the state derivative is the input itself, since u already holds
            // field-relative [vx, vy, omega].
            (x, u) -> u,
            // h(x, u) = x[2]: the local measurement is the heading state (row 2).
            (x, u) -> x.extractRowVector(2),
            stateStdDevs,
            localMeasurementStdDevs,
            // Angle-aware mean/residual/add functions wrap the heading so the filter handles
            // the -pi/pi discontinuity: index 2 in the state vector, index 0 in the
            // single-row local measurement vector.
            AngleStatistics.angleMean(2),
            AngleStatistics.angleMean(0),
            AngleStatistics.angleResidual(2),
            AngleStatistics.angleResidual(0),
            AngleStatistics.angleAdd(2),
            m_nominalDt);
    m_kinematics = kinematics;
    m_latencyCompensator = new KalmanFilterLatencyCompensator<>();

    // Initialize vision R
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);

    // Vision correction: a full-state [x, y, theta]ᵀ measurement (h(x, u) = x) using the
    // vision noise covariance; heading (index 2) again uses angle-wrapped statistics.
    m_visionCorrect =
        (u, y) ->
            m_observer.correct(
                Nat.N3(),
                u,
                y,
                (x, u1) -> x,
                m_visionContR,
                AngleStatistics.angleMean(2),
                AngleStatistics.angleResidual(2),
                AngleStatistics.angleResidual(2),
                AngleStatistics.angleAdd(2));

    // Record the difference between the initial field heading and the raw gyro reading so
    // future gyro readings can be mapped into the field frame.
    m_gyroOffset = initialPoseMeters.getRotation().minus(gyroAngle);
    m_previousAngle = initialPoseMeters.getRotation();
    m_observer.setXhat(StateSpaceUtil.poseTo3dVector(initialPoseMeters));
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    // Convert the std-dev vector into a diagonal covariance matrix for the vision correction.
    m_visionContR = StateSpaceUtil.makeCovarianceMatrix(Nat.N3(), visionMeasurementStdDevs);
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>You NEED to reset your encoders (to zero) when calling this method.
   *
   * <p>The gyroscope angle does not need to be reset in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param poseMeters The position on the field that your robot is at.
   * @param gyroAngle The angle reported by the gyroscope.
   */
  public void resetPosition(Pose2d poseMeters, Rotation2d gyroAngle) {
    // Reset state estimate and error covariance
    m_observer.reset();
    // Discard buffered history; past snapshots are invalid after a pose reset.
    m_latencyCompensator.reset();

    m_observer.setXhat(StateSpaceUtil.poseTo3dVector(poseMeters));

    // Recompute the gyro offset from the freshly-set state estimate (which now reflects
    // poseMeters) so the current raw gyro reading maps to the new field heading.
    m_gyroOffset = getEstimatedPosition().getRotation().minus(gyroAngle);
    m_previousAngle = poseMeters.getRotation();
  }

  /**
   * Gets the pose of the robot at the current time as estimated by the Unscented Kalman Filter.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    // State layout: xhat(0) = x, xhat(1) = y, xhat(2) = heading (radians).
    return new Pose2d(
        m_observer.getXhat(0), m_observer.getXhat(1), new Rotation2d(m_observer.getXhat(2)));
  }

  /**
   * Add a vision measurement to the Unscented Kalman Filter. This will correct the odometry pose
   * estimate while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * MecanumDrivePoseEstimator#update} every loop.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link MecanumDrivePoseEstimator#updateWithTime}
   *     then you must use a timestamp with an epoch since FPGA startup (i.e. the epoch of this
   *     timestamp is the same epoch as Timer.getFPGATimestamp.) This means that you should use
   *     Timer.getFPGATimestamp as your time source or sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPoseMeters, double timestampSeconds) {
    // Roll the observer back to the snapshot nearest timestampSeconds, apply the vision
    // correction there, then replay buffered updates forward to the present.
    m_latencyCompensator.applyPastGlobalMeasurement(
        Nat.N3(),
        m_observer,
        m_nominalDt,
        StateSpaceUtil.poseTo3dVector(visionRobotPoseMeters),
        m_visionCorrect,
        timestampSeconds);
  }

  /**
   * Add a vision measurement to the Unscented Kalman Filter. This will correct the odometry pose
   * estimate while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * MecanumDrivePoseEstimator#update} every loop.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * MecanumDrivePoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link MecanumDrivePoseEstimator#updateWithTime}
   *     then you must use a timestamp with an epoch since FPGA startup (i.e. the epoch of this
   *     timestamp is the same epoch as Timer.getFPGATimestamp.) This means that you should use
   *     Timer.getFPGATimestamp as your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the Unscented Kalman Filter using only wheel encoder information. This should be
   * called every loop, and the correct loop period must be passed into the constructor of this
   * class.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelSpeeds The current speeds of the mecanum drive wheels.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, MecanumDriveWheelSpeeds wheelSpeeds) {
    // WPIUtilJNI.now() returns microseconds since FPGA startup; convert to seconds.
    return updateWithTime(WPIUtilJNI.now() * 1.0e-6, gyroAngle, wheelSpeeds);
  }

  /**
   * Updates the Unscented Kalman Filter using only wheel encoder information. This should be
   * called every loop, and the correct loop period must be passed into the constructor of this
   * class.
   *
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyroscope angle.
   * @param wheelSpeeds The current speeds of the mecanum drive wheels.
   * @return The estimated pose of the robot in meters.
   */
  @SuppressWarnings("LocalVariableName")
  public Pose2d updateWithTime(
      double currentTimeSeconds, Rotation2d gyroAngle, MecanumDriveWheelSpeeds wheelSpeeds) {
    // Use the measured loop time when available; fall back to the nominal period on the
    // first call (m_prevTimeSeconds starts at -1.0).
    double dt = m_prevTimeSeconds >= 0 ? currentTimeSeconds - m_prevTimeSeconds : m_nominalDt;
    m_prevTimeSeconds = currentTimeSeconds;

    // Map the raw gyro reading into the field frame.
    var angle = gyroAngle.plus(m_gyroOffset);
    // Differentiate the heading to estimate angular velocity.
    // NOTE(review): dt is zero if this is called twice with the same timestamp — presumably
    // callers invoke this once per loop; confirm upstream.
    var omega = angle.minus(m_previousAngle).getRadians() / dt;

    // Convert wheel speeds to a robot-relative chassis speed, then rotate the translational
    // component into the field frame.
    var chassisSpeeds = m_kinematics.toChassisSpeeds(wheelSpeeds);
    var fieldRelativeVelocities =
        new Translation2d(chassisSpeeds.vxMetersPerSecond, chassisSpeeds.vyMetersPerSecond)
            .rotateBy(angle);

    // u = [vx, vy, omega]ᵀ in the field frame.
    var u = VecBuilder.fill(fieldRelativeVelocities.getX(), fieldRelativeVelocities.getY(), omega);
    m_previousAngle = angle;

    // Local measurement: gyro heading only.
    var localY = VecBuilder.fill(angle.getRadians());
    // Snapshot the observer state BEFORE predict/correct so latency-compensated vision
    // measurements can roll back to this point in time.
    m_latencyCompensator.addObserverState(m_observer, u, localY, currentTimeSeconds);
    m_observer.predict(u, dt);
    m_observer.correct(u, localY);

    return getEstimatedPosition();
  }
}