Add recovery probability estimator #295

Merged
merged 8 commits on Jan 25, 2024
Changes from 2 commits
1 change: 1 addition & 0 deletions beluga/include/beluga/algorithm.hpp
@@ -20,6 +20,7 @@
 * \brief Includes all beluga algorithms.
 */

#include <beluga/algorithm/adaptive_probability_estimator.hpp>
#include <beluga/algorithm/distance_map.hpp>
#include <beluga/algorithm/effective_sample_size.hpp>
#include <beluga/algorithm/estimation.hpp>
100 changes: 100 additions & 0 deletions beluga/include/beluga/algorithm/adaptive_probability_estimator.hpp
@@ -0,0 +1,100 @@
// Copyright 2024 Ekumen, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef BELUGA_ALGORITHM_ADAPTIVE_PROBABILITY_ESTIMATOR_HPP
#define BELUGA_ALGORITHM_ADAPTIVE_PROBABILITY_ESTIMATOR_HPP

#include <algorithm>
#include <cassert>

#include <beluga/algorithm/exponential_filter.hpp>
#include <beluga/type_traits/particle_traits.hpp>
#include <beluga/views/particles.hpp>

#include <range/v3/numeric/accumulate.hpp>

namespace beluga {

/// Random particle probability estimator.
/**
 * This class implements an estimator for the probability of injecting random
 * particles (not sampled directly from the particle set) during the resampling
 * step of a particle filter.
 *
 * The addition of random samples allows the filter to recover in case it converged to
 * the wrong estimate, adding an additional level of robustness.
 *
 * This estimator tracks the average particle weight and computes the ratio between a
 * short-term and a long-term exponential average of it over time.
 *
 * See Probabilistic Robotics \cite thrun2005probabilistic, Chapter 8.3.3.
 */
class AdaptiveProbabilityEstimator {
 public:
  /// Constructor.
  /**
   * \param alpha_slow Decay rate for the long-term average.
   * \param alpha_fast Decay rate for the short-term average.
   */
  constexpr AdaptiveProbabilityEstimator(double alpha_slow, double alpha_fast) noexcept
      : slow_filter_{alpha_slow}, fast_filter_{alpha_fast} {
    assert(0 < alpha_slow);
    assert(alpha_slow < alpha_fast);
  }

  /// Reset the internal state of the estimator.
  /**
   * It is recommended to reset the estimator after injecting random particles
   * to avoid spiraling off into complete randomness.
   */
  constexpr void reset() noexcept {
    slow_filter_.reset();
    fast_filter_.reset();
    probability_ = 0.0;
  }

  /// Update the estimation based on a particle range.
  template <class Range>
  constexpr void update(Range&& range) {
    static_assert(ranges::sized_range<Range>);
    static_assert(beluga::is_particle_range_v<Range>);
    const auto size = static_cast<double>(range.size());

    if (size == 0.0) {
      reset();
      return;
    }

    const double total_weight = ranges::accumulate(beluga::views::weights(range), 0.0);
    const double average_weight = total_weight / size;
    const double fast_average = fast_filter_(average_weight);
    const double slow_average = slow_filter_(average_weight);

    if (slow_average == 0.0) {
      probability_ = 0.0;
      return;
    }

    probability_ = std::clamp(1.0 - fast_average / slow_average, 0.0, 1.0);
  }

  /// Returns the estimated random state probability to be used by the filter.
  constexpr double operator()() const noexcept { return probability_; }

 private:
  ExponentialFilter slow_filter_;
  ExponentialFilter fast_filter_;
  double probability_ = 0.0;
};

} // namespace beluga

#endif
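For reference, a minimal usage sketch of the new estimator (illustrative only, not part of this PR; the tuple-based particle type and the alpha values mirror the unit tests further down):

// Illustrative sketch: exercise AdaptiveProbabilityEstimator the way the unit tests do.
#include <tuple>
#include <vector>

#include <beluga/algorithm/adaptive_probability_estimator.hpp>

int main() {
  // alpha_slow must be positive and smaller than alpha_fast (enforced by the asserts above).
  auto estimator = beluga::AdaptiveProbabilityEstimator{0.001, 0.1};

  // Any sized particle range works; here, (state, weight) tuples as in the unit tests.
  auto particles = std::vector<std::tuple<int, beluga::Weight>>{{1, 1.0}, {2, 1.0}, {3, 1.0}};

  estimator.update(particles);                      // feeds the average weight to both filters
  const double recovery_probability = estimator();  // clamp(1 - fast / slow, 0, 1)

  if (recovery_probability > 0.0) {
    // A positive value means random particles should be injected during resampling;
    // the estimator is then reset, as recommended by the documentation above.
    estimator.reset();
  }
  return 0;
}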
6 changes: 3 additions & 3 deletions beluga/include/beluga/algorithm/exponential_filter.hpp
@@ -29,16 +29,16 @@ class ExponentialFilter {
  /**
   * \param alpha The exponential filter smoothing factor.
   */
  explicit ExponentialFilter(double alpha) : alpha_{alpha} {}
  constexpr explicit ExponentialFilter(double alpha) noexcept : alpha_{alpha} {}

  /// Resets the output of the exponential filter to zero.
  void reset() { output_ = 0.; }
  constexpr void reset() noexcept { output_ = 0.; }

  /// Updates the exponential filter output given an input.
  /**
   * \param input Next value to be exponentially filtered.
   */
  [[nodiscard]] double operator()(double input) {
  [[nodiscard]] constexpr double operator()(double input) noexcept {
    output_ += (output_ == 0.) ? input : alpha_ * (input - output_);
    return output_;
  }
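For context, the filter output follows output += (output == 0 ? input : alpha * (input - output)): the first nonzero input seeds the state, and subsequent inputs are blended in with factor alpha. A quick sketch of how a slow and a fast filter react to a drop in the average weight (the alpha values are illustrative, and it is assumed ExponentialFilter lives in namespace beluga, as its use in the estimator header suggests):

#include <cstdio>

#include <beluga/algorithm/exponential_filter.hpp>

int main() {
  auto slow = beluga::ExponentialFilter{0.001};
  auto fast = beluga::ExponentialFilter{0.1};
  // A sudden drop in the average weight moves the fast filter much more than the slow
  // one, so 1 - fast / slow turns positive and the estimator reports a nonzero
  // recovery probability.
  for (const double average_weight : {1.0, 1.0, 0.2, 0.2}) {
    std::printf("slow=%.4f fast=%.4f\n", slow(average_weight), fast(average_weight));
  }
  return 0;
}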
1 change: 1 addition & 0 deletions beluga/test/beluga/CMakeLists.txt
@@ -18,6 +18,7 @@ add_executable(
  actions/test_propagate.cpp
  actions/test_reweight.cpp
  algorithm/raycasting/test_bresenham.cpp
  algorithm/test_adaptive_probability_estimator.cpp
  algorithm/test_distance_map.cpp
  algorithm/test_effective_sample_size.cpp
  algorithm/test_estimation.cpp
88 changes: 88 additions & 0 deletions beluga/test/beluga/algorithm/test_adaptive_probability_estimator.cpp
@@ -0,0 +1,88 @@
// Copyright 2024 Ekumen, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gmock/gmock.h>

#include <beluga/algorithm/adaptive_probability_estimator.hpp>

namespace {

TEST(AdaptiveProbabilityEstimator, ProbabilityWithNoParticles) {
  // Test the probability when the particle set is empty.
  const double alpha_slow = 0.2;
  const double alpha_fast = 0.4;
  auto estimator = beluga::AdaptiveProbabilityEstimator{alpha_slow, alpha_fast};
  estimator.update(std::vector<std::tuple<int, beluga::Weight>>{});
  ASSERT_EQ(estimator(), 0.0);
}

TEST(AdaptiveProbabilityEstimator, ProbabilityWithZeroWeight) {
  // Test the probability when the total weight is zero.
  const double alpha_slow = 0.2;
  const double alpha_fast = 0.4;
  auto estimator = beluga::AdaptiveProbabilityEstimator{alpha_slow, alpha_fast};
  estimator.update(std::vector<std::tuple<int, beluga::Weight>>{{1, 0.0}, {2, 0.0}});
  ASSERT_EQ(estimator(), 0.0);
}

TEST(AdaptiveProbabilityEstimator, ProbabilityAfterUpdateAndReset) {
  const double alpha_slow = 0.5;
  const double alpha_fast = 1.0;
  auto estimator = beluga::AdaptiveProbabilityEstimator{alpha_slow, alpha_fast};

  // Test the probability after updating the estimator with particle weights.
  estimator.update(std::vector<std::tuple<int, beluga::Weight>>{{5, 1.0}, {6, 2.0}, {7, 3.0}});
  ASSERT_EQ(estimator(), 0.0);

  estimator.update(std::vector<std::tuple<int, beluga::Weight>>{{5, 0.5}, {6, 1.0}, {7, 1.5}});
  ASSERT_NEAR(estimator(), 0.33, 0.01);

  estimator.update(std::vector<std::tuple<int, beluga::Weight>>{{5, 0.5}, {6, 1.0}, {7, 1.5}});
  ASSERT_NEAR(estimator(), 0.20, 0.01);

  // Test the probability after resetting the estimator.
  estimator.reset();
  ASSERT_EQ(estimator(), 0.0);
}
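For reference, the expected values in this test follow directly from the filter recurrence: with alpha_slow = 0.5 and alpha_fast = 1.0, the first update seeds both averages at 2.0, so the probability is 0. The second update (average weight 1.0) gives fast = 1.0 and slow = 2.0 + 0.5 * (1.0 - 2.0) = 1.5, hence 1 - 1.0/1.5 ≈ 0.33. The third update leaves fast at 1.0 and moves slow to 1.25, giving 1 - 1.0/1.25 = 0.20. The parameterized cases below can be checked the same way.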

class AdaptiveProbabilityWithParam : public ::testing::TestWithParam<std::tuple<double, double, double>> {};

TEST_P(AdaptiveProbabilityWithParam, Probabilities) {
  const auto [initial_weight, final_weight, expected_probability] = GetParam();

  const double alpha_slow = 0.001;
  const double alpha_fast = 0.1;
  auto estimator = beluga::AdaptiveProbabilityEstimator{alpha_slow, alpha_fast};
  auto particles = std::vector<std::tuple<int, beluga::Weight>>{{1, initial_weight}};

  estimator.update(particles);
  ASSERT_NEAR(estimator(), 0.0, 0.01);

  beluga::weight(particles.front()) = final_weight;

  estimator.update(particles);
  ASSERT_NEAR(estimator(), expected_probability, 0.01);
}

INSTANTIATE_TEST_SUITE_P(
    AdaptiveProbability,
    AdaptiveProbabilityWithParam,
    testing::Values(
        std::make_tuple(1.0, 1.5, 0.00),
        std::make_tuple(1.0, 2.0, 0.00),
        std::make_tuple(1.0, 0.5, 0.05),
        std::make_tuple(0.5, 0.1, 0.08),
        std::make_tuple(0.5, 0.0, 0.10)));

} // namespace
16 changes: 7 additions & 9 deletions beluga_system_tests/test/test_system_new.cpp
@@ -172,6 +172,8 @@ auto particle_filter_test(
    return ++count % params.resample_interval_count == 0;
  };

  auto adaptive_probability_estimator = beluga::AdaptiveProbabilityEstimator(params.alpha_slow, params.alpha_fast);

  // Iteratively run the filter through all the data points.
  for (auto [measurement, odom, ground_truth] : datapoints) {
    if (!far_enough_to_update(odom)) {
@@ -201,10 +203,7 @@
    const double total_weight = ranges::accumulate(beluga::views::weights(particles), 0.0);
    particles |= beluga::actions::reweight([total_weight](auto) { return 1.0 / total_weight; });  // HACK

    // TODO(nahuel): Implement adaptive probability estimator.
    /**
     * adaptive_probability_estimator.update(particles);
     */
    adaptive_probability_estimator.update(particles);

    // TODO(nahuel): Sort out resampling policies.
    if (!should_resample()) {
@@ -214,11 +213,10 @@
    // Nav2 updates the filter estimates regardless of whether selective resampling actually resamples or not.
    if (!params.selective_resampling ||
        beluga::effective_sample_size(particles) < static_cast<double>(ranges::size(particles)) / 2.0) {
      // TODO(nahuel): Implement adaptive probability estimator.
      /**
       * const auto random_state_probability = adaptive_probability_estimator();
       */
      const auto random_state_probability = 0.1;
      const auto random_state_probability = adaptive_probability_estimator();
      if (random_state_probability > 0.0) {
        adaptive_probability_estimator.reset();
      }

      auto make_random_particle = [&sensor] {
        static thread_local auto engine = std::mt19937{std::random_device()()};
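The diff above cuts off before showing how random_state_probability is consumed. For context, in the augmented-MCL scheme the PR cites (Probabilistic Robotics, Chapter 8.3.3), that probability gates random-particle injection during resampling. A generic sketch of that step follows; resample_with_recovery is a hypothetical helper for illustration only, not beluga's actual range-based resampling pipeline:

#include <cstddef>
#include <random>
#include <vector>

// Illustrative only: with probability p draw a brand-new random particle, otherwise
// draw from the current set proportionally to weight.
template <class Particle, class MakeRandomParticle, class URNG>
std::vector<Particle> resample_with_recovery(
    const std::vector<Particle>& particles,
    const std::vector<double>& weights,
    double random_state_probability,
    MakeRandomParticle make_random_particle,
    URNG& engine) {
  auto inject = std::bernoulli_distribution(random_state_probability);
  auto pick = std::discrete_distribution<std::size_t>(weights.begin(), weights.end());
  std::vector<Particle> output;
  output.reserve(particles.size());
  for (std::size_t i = 0; i < particles.size(); ++i) {
    output.push_back(inject(engine) ? make_random_particle() : particles[pick(engine)]);
  }
  return output;
}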