Feature/lkw303/task estimation #408

Open
wants to merge 3 commits into base: main

233 changes: 233 additions & 0 deletions rmf_fleet_adapter/src/rmf_fleet_adapter/TaskManager.cpp
@@ -67,6 +67,9 @@
#include <rmf_api_msgs/schemas/commission.hpp>
#include <rmf_api_msgs/schemas/robot_commission_request.hpp>
#include <rmf_api_msgs/schemas/robot_commission_response.hpp>
#include <rmf_api_msgs/schemas/task_estimate_result.hpp>
#include <rmf_api_msgs/schemas/task_estimate_request.hpp>
#include <rmf_api_msgs/schemas/task_estimate_response.hpp>

namespace rmf_fleet_adapter {

@@ -231,6 +234,10 @@ TaskManagerPtr TaskManager::make(
rmf_api_msgs::schemas::commission,
rmf_api_msgs::schemas::robot_commission_request,
rmf_api_msgs::schemas::robot_commission_response,
rmf_api_msgs::schemas::error,
rmf_api_msgs::schemas::task_estimate_result,
rmf_api_msgs::schemas::task_estimate_request,
rmf_api_msgs::schemas::task_estimate_response
};

for (const auto& schema : schemas)
@@ -1170,6 +1177,198 @@ std::vector<nlohmann::json> TaskManager::task_log_updates() const
return logs;
}

//==============================================================================
nlohmann::json TaskManager::estimate_task_request(
const nlohmann::json& request,
const std::string& request_id)
{
auto fleet_handle = _fleet_handle.lock();
if (!fleet_handle)
{
return _make_error_response(
18, "Shutdown", "The fleet adapter is shutting down");
}
const auto& fleet = _context->group();
const auto& robot = _context->name();
const auto& impl =
agv::FleetUpdateHandle::Implementation::get(*fleet_handle);
std::vector<std::string> errors;
const auto new_request = impl.convert(request_id, request["task_request"],
errors);

if (!new_request)
{
RCLCPP_ERROR(
_context->node()->get_logger(),
"Unable to generate a valid request for direct task [%s]:\n%s",
request_id.c_str(),
request.dump().c_str());

nlohmann::json response_json;
response_json["success"] = false;
std::vector<nlohmann::json> json_errors = {};
for (const auto& e : errors)
{
RCLCPP_ERROR(_context->node()->get_logger(), "%s", e.c_str());
try
{
auto error = nlohmann::json::parse(e);
json_errors.push_back(error);
}
catch (const std::exception&)
{
json_errors.push_back(e);
}
}
response_json["errors"] = std::move(json_errors);

return response_json;
}
// Get finish state and deployment time
const auto task_planner = _context->task_planner();
if (!task_planner)
{
RCLCPP_ERROR(
_context->node()->get_logger(),
"Fleet [%s] is not configured with parameters for task planning."
"Use FleetUpdateHandle::set_task_planner_params(~) to set the "
"parameters required.", fleet.c_str());

return _make_error_response(
19, "Misconfigured",
"The fleet adapter is not configured for task planning");
}

// Convert JSON into rmf_task::State
rmf_task::State start_state;
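// Note: the optional "state" object is assumed, based on the field accesses
// below, to look roughly like
//   {"waypoint": 12, "orientation": 1.57, "time": 1675234567000, "battery_soc": 0.8}
// where "waypoint" is a navigation graph index, "time" is in milliseconds,
// and "battery_soc" is a fraction in [0, 1]. When "state" is omitted, the
// robot's expected finish state is used as the starting point.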

if (request.find("state") == request.end())
start_state = expected_finish_state();
else
{
if (std::size_t(request["state"]["waypoint"].get<int>()) >
(_context->navigation_graph().num_waypoints() -1))
{
return _make_error_response(
20, "Invalid", "Invalid Task State");
}
start_state.waypoint(std::size_t(request["state"]["waypoint"].get<int>()));
start_state.orientation(request["state"]["orientation"].get<double>());
std::chrono::steady_clock::time_point dp_time(
rmf_traffic::time::from_seconds(request["state"]["time"].get<double>()/
1000));
start_state.time(dp_time);
start_state.battery_soc(request["state"]["battery_soc"].get<double>());
}


const auto& constraints = task_planner->configuration().constraints();
const auto& parameters = task_planner->configuration().parameters();
const auto model = new_request->description()->make_model(
new_request->booking()->earliest_start_time(),
parameters);
const auto estimate = model->estimate_finish(
start_state,
constraints,
*_travel_estimator);

rmf_task::State finish_state;
rmf_traffic::Time deployment_time;

if (!estimate.has_value())
{
RCLCPP_WARN(
_context->node()->get_logger(),
"Unable to estimate final state for direct task request [%s]. This may "
"be due to insufficient resources to perform the task.",
request_id.c_str());
return _make_error_response(
21, "Failed", "Failed Task Estimation");
}
else
{
finish_state = estimate.value().finish_state();
deployment_time = estimate.value().wait_until();
}


// calculate estimated duration of task
nlohmann::json result_json;

RCLCPP_INFO(
_context->node()->get_logger(),
"Estimated results: \n"
);

result_json["deployment_time"] =
to_millis(deployment_time.time_since_epoch()).count();

RCLCPP_INFO(
_context->node()->get_logger(),
" estimated deployment time: [%f]\n",
result_json["deployment_time"].get<double>()
);

if (finish_state.time().has_value())
{
result_json["finish_time"] =
to_millis(finish_state.time().value().time_since_epoch()).count();

RCLCPP_INFO(
_context->node()->get_logger(),
" estimated finish time: [%f]\n",
result_json["finish_time"].get<double>()
);

auto estimate_duration_rmf =
std::chrono::duration_cast<std::chrono::milliseconds>(
finish_state.time().value() - deployment_time);
result_json["duration"] = estimate_duration_rmf.count();

RCLCPP_INFO(
_context->node()->get_logger(),
" estimated duration: [%f] seconds\n",
result_json["duration"].get<double>()
);
}

// Convert rmf_task::State into a JSON
nlohmann::json state;
if (!finish_state.waypoint().has_value() || !finish_state.orientation().has_value()
|| !finish_state.battery_soc().has_value())
{
return _make_error_response(
21, "Failed", "Failed Task Estimation");
}

state["waypoint"] = int(finish_state.waypoint().value());
RCLCPP_INFO(
_context->node()->get_logger(),
" waypoint: [%i]\n",
state["waypoint"].get<int>()
);

state["orientation"] = finish_state.orientation().value();
RCLCPP_INFO(
_context->node()->get_logger(),
" orientation: [%f]\n",
state["orientation"].get<double>()
);

state["battery_soc"] = finish_state.battery_soc().value();
RCLCPP_INFO(
_context->node()->get_logger(),
" battery soc: [%f]\n",
state["battery_soc"].get<double>()
);

result_json["state"] = state;
nlohmann::json response_json;
response_json["success"] = true;
response_json["result"] = result_json;
return response_json;
}
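// For reference, a successful call currently returns a payload shaped roughly
// like the following (a sketch inferred from the assignments above, with all
// times expressed in milliseconds; it is not a normative schema, and
// "finish_time" and "duration" only appear when the finish state carries a
// time estimate):
//
//   {
//     "success": true,
//     "result": {
//       "deployment_time": 1675234567000,
//       "finish_time": 1675234627000,
//       "duration": 60000,
//       "state": {"waypoint": 12, "orientation": 1.57, "battery_soc": 0.75}
//     }
//   }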

//==============================================================================
nlohmann::json TaskManager::submit_direct_request(
const nlohmann::json& request,
@@ -2624,6 +2823,8 @@ void TaskManager::_handle_request(
_handle_direct_request(request_json, request_id);
else if (type_str == "robot_commission_request")
_handle_commission_request(request_json, request_id);
else if (type_str == "estimate_task_request")
_handle_estimate_request(request_json, request_id);
else
return;
}
@@ -2635,6 +2836,38 @@ void TaskManager::_handle_request(
}
}

//==============================================================================
void TaskManager::_handle_estimate_request(
const nlohmann::json& request_json,
const std::string& request_id)
{
// Validate the incoming message against the task_estimate_request schema
static auto request_validator =
_make_validator(rmf_api_msgs::schemas::task_estimate_request);

static auto response_validator =
_make_validator(rmf_api_msgs::schemas::task_estimate_response);

if (!_validate_request_message(request_json, request_validator, request_id))
return;

const auto& robot = request_json["robot"].get<std::string>();
if (robot.empty() || robot != _context->name())
return;

const auto& fleet = request_json["fleet"].get<std::string>();
if (fleet.empty() || fleet != _context->group())
return;

const nlohmann::json& request = request_json["request"];

const auto response = estimate_task_request(
request,
request_id);

_validate_and_publish_api_response(response, response_validator, request_id);
}
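// For reference, the envelope dispatched to this handler by _handle_request
// is expected (inferred from the accesses above) to look roughly like:
//
//   {
//     "type": "estimate_task_request",
//     "robot": "<robot name>",
//     "fleet": "<fleet name>",
//     "request": {
//       "task_request": {"category": "...", "description": {...}},
//       "state": {...}  // optional starting state, see estimate_task_request
//     }
//   }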

//==============================================================================
void TaskManager::_handle_direct_request(
const nlohmann::json& request_json,
20 changes: 20 additions & 0 deletions rmf_fleet_adapter/src/rmf_fleet_adapter/TaskManager.hpp
@@ -191,6 +191,22 @@ class TaskManager : public std::enable_shared_from_this<TaskManager>
const nlohmann::json& task_request,
const std::string& request_id);

/// Estimate the outcome of a task request for this robot
///
/// \param[in] task_request
/// A JSON description of the estimation request. It must contain a
/// `task_request` property that matches the task_request.json schema of
/// rmf_api_msgs (in particular its `category` and `description` properties),
/// and it may contain an optional `state` property describing the robot
/// state to estimate from.
///
/// \param[in] request_id
/// The unique ID for this request.
///
/// \return A task_estimate_response.json message from rmf_api_msgs (note: this
/// message is not validated before being returned).
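///
/// Example usage (a sketch; `manager`, `task_request_json`, and the request
/// ID are placeholders, not names defined by this API):
///
///   nlohmann::json payload;
///   payload["task_request"] = task_request_json;
///   const auto response =
///     manager->estimate_task_request(payload, "estimate-001");
///   if (response["success"].get<bool>())
///   {
///     const auto duration_ms = response["result"]["duration"];
///     // ... use the estimate ...
///   }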
nlohmann::json estimate_task_request(
const nlohmann::json& task_request,
const std::string& request_id);

class Interruption
{
public:
@@ -635,6 +651,10 @@ class TaskManager : public std::enable_shared_from_this<TaskManager>
const nlohmann::json& request_json,
const std::string& request_id);

void _handle_estimate_request(
const nlohmann::json& request_json,
const std::string& request_id);

};

using TaskManagerPtr = std::shared_ptr<TaskManager>;