From 0bd233e64210c6605e8a6ec1424fa81f9ea8a681 Mon Sep 17 00:00:00 2001
From: Aaron Rossetto <aaron.rossetto@ni.com>
Date: Thu, 17 Oct 2019 08:44:11 -0500
Subject: uhd: Introduce I/O service manager

- Implement I/O service detach link methods
- The I/O service manager instantiates new I/O services or connects links to
  existing I/O services based on options provided by the user in stream_args.
- Add a streamer ID parameter to methods to create transports so that the I/O
  service manager can group transports appropriately when using offload
  threads.
- Change X300 and MPMD to use I/O service manager to connect links to I/O
  services.
- There is now a single I/O service manager per rfnoc_graph (and it is also
  stored in the graph)
- The I/O service manager now also knows the device args for the rfnoc_graph
  it was created with, and can make decisions based upon those (e.g., use a
  specific I/O service for DPDK, share cores between streamers, etc.)
- The I/O Service Manager does not get any decision logic with this commit,
  though
- The MB ifaces for mpmd and x300 now access this global I/O service manager
- Add configuration of link parameters with overrides

Co-Authored-By: Martin Braun <martin.braun@ettus.com>
Co-Authored-By: Aaron Rossetto <aaron.rossetto@ni.com>
---
 host/lib/transport/inline_io_service.cpp | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

(limited to 'host/lib/transport/inline_io_service.cpp')

diff --git a/host/lib/transport/inline_io_service.cpp b/host/lib/transport/inline_io_service.cpp
index 9dd0814ca..93967e09a 100644
--- a/host/lib/transport/inline_io_service.cpp
+++ b/host/lib/transport/inline_io_service.cpp
@@ -272,10 +272,19 @@ void inline_io_service::attach_recv_link(recv_link_if::sptr link)
 {
     auto link_ptr = link.get();
     UHD_ASSERT_THROW(_recv_tbl.count(link_ptr) == 0);
-    _recv_tbl[link_ptr] =
-        std::tuple<inline_recv_mux*, inline_recv_cb*>(nullptr, nullptr);
+    _recv_tbl[link_ptr] = std::tuple<inline_recv_mux*, inline_recv_cb*>(nullptr, nullptr);
     _recv_links.push_back(link);
-};
+}
+
+void inline_io_service::detach_recv_link(recv_link_if::sptr link)
+{
+    auto link_ptr = link.get();
+    UHD_ASSERT_THROW(_recv_tbl.count(link_ptr) != 0);
+    _recv_tbl.erase(link_ptr);
+
+    _recv_links.remove_if(
+        [link_ptr](recv_link_if::sptr& item) { return item.get() == link_ptr; });
+}
 
 recv_io_if::sptr inline_io_service::make_recv_client(recv_link_if::sptr data_link,
     size_t num_recv_frames,
@@ -301,9 +310,17 @@ recv_io_if::sptr inline_io_service::make_recv_client(recv_link_if::sptr data_lin
 
 void inline_io_service::attach_send_link(send_link_if::sptr link)
 {
-    UHD_ASSERT_THROW(std::find(_send_links.begin(), _send_links.end(), link) == _send_links.end());
+    UHD_ASSERT_THROW(
+        std::find(_send_links.begin(), _send_links.end(), link) == _send_links.end());
     _send_links.push_back(link);
-};
+}
+
+void inline_io_service::detach_send_link(send_link_if::sptr link)
+{
+    auto link_ptr = link.get();
+    _send_links.remove_if(
+        [link_ptr](send_link_if::sptr& item) { return item.get() == link_ptr; });
+}
 
 send_io_if::sptr inline_io_service::make_send_client(send_link_if::sptr send_link,
     size_t num_send_frames,
@@ -365,8 +382,7 @@ void inline_io_service::connect_receiver(
     _recv_tbl[link] = std::make_tuple(mux, rcvr);
 }
 
-void inline_io_service::disconnect_receiver(
-    recv_link_if* link, inline_recv_cb* cb)
+void inline_io_service::disconnect_receiver(recv_link_if* link, inline_recv_cb* cb)
 {
     inline_recv_mux* mux;
     inline_recv_cb* rcvr;
-- 
cgit v1.2.3