From 00cd6018405b57a0982b0ce103ff858c646ee18c Mon Sep 17 00:00:00 2001 From: Josh Blum Date: Fri, 1 Oct 2010 18:22:41 -0700 Subject: uhd: implemented a double timeout (in seconds) for send and recv chains converted all size_t timeout_ms to double timeout bounded and alignment buffer now take double timeout added timeout to device::send and zero_copy_if::get_send_buff --- host/lib/transport/udp_zero_copy_asio.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'host/lib/transport/udp_zero_copy_asio.cpp') diff --git a/host/lib/transport/udp_zero_copy_asio.cpp b/host/lib/transport/udp_zero_copy_asio.cpp index 0a6c9f2af..3130830a5 100644 --- a/host/lib/transport/udp_zero_copy_asio.cpp +++ b/host/lib/transport/udp_zero_copy_asio.cpp @@ -109,11 +109,11 @@ private: boost::asio::io_service _io_service; int _sock_fd; - ssize_t recv(const boost::asio::mutable_buffer &buff, size_t timeout_ms){ + ssize_t recv(const boost::asio::mutable_buffer &buff, double timeout){ //setup timeval for timeout timeval tv; tv.tv_sec = 0; - tv.tv_usec = timeout_ms*1000; + tv.tv_usec = long(timeout*1e6); //setup rset for timeout fd_set rset; -- cgit v1.2.3 From b57c84b34bcdd6c66eb053695b83e6bd6c481774 Mon Sep 17 00:00:00 2001 From: Josh Blum Date: Sun, 3 Oct 2010 01:41:24 -0700 Subject: uhd: implemented udp zero copy asio with async calls --- host/lib/transport/CMakeLists.txt | 2 +- host/lib/transport/libusb1_zero_copy.cpp | 6 +- host/lib/transport/udp_zero_copy_asio.cpp | 186 +++++++++++++++++++++--------- 3 files changed, 133 insertions(+), 61 deletions(-) (limited to 'host/lib/transport/udp_zero_copy_asio.cpp') diff --git a/host/lib/transport/CMakeLists.txt b/host/lib/transport/CMakeLists.txt index 2be2c89ec..bf92b0822 100644 --- a/host/lib/transport/CMakeLists.txt +++ b/host/lib/transport/CMakeLists.txt @@ -101,7 +101,7 @@ SET_SOURCE_FILES_PROPERTIES( LIBUHD_APPEND_SOURCES( ${CMAKE_SOURCE_DIR}/lib/transport/if_addrs.cpp ${CMAKE_SOURCE_DIR}/lib/transport/udp_simple.cpp - #${CMAKE_SOURCE_DIR}/lib/transport/udp_zero_copy_asio.cpp + ${CMAKE_SOURCE_DIR}/lib/transport/udp_zero_copy_asio.cpp ${CMAKE_SOURCE_DIR}/lib/transport/vrt_packet_handler.hpp ${CMAKE_SOURCE_DIR}/lib/transport/zero_copy.cpp ) diff --git a/host/lib/transport/libusb1_zero_copy.cpp b/host/lib/transport/libusb1_zero_copy.cpp index e1cc8398c..c819302b6 100644 --- a/host/lib/transport/libusb1_zero_copy.cpp +++ b/host/lib/transport/libusb1_zero_copy.cpp @@ -277,7 +277,7 @@ libusb_transfer *usb_endpoint::get_lut_with_wait(double timeout){ /*********************************************************************** * USB zero_copy device class **********************************************************************/ -class libusb_zero_copy_impl : public usb_zero_copy { +class libusb_zero_copy_impl : public usb_zero_copy, public boost::enable_shared_from_this { public: typedef boost::shared_ptr sptr; @@ -378,7 +378,7 @@ managed_recv_buffer::sptr libusb_zero_copy_impl::get_recv_buff(double timeout){ else { return managed_recv_buffer::make_safe( boost::asio::const_buffer(lut->buffer, lut->actual_length), - boost::bind(&libusb_zero_copy_impl::release, this, lut) + boost::bind(&libusb_zero_copy_impl::release, shared_from_this(), lut) ); } } @@ -398,7 +398,7 @@ managed_send_buffer::sptr libusb_zero_copy_impl::get_send_buff(double timeout){ else { return managed_send_buffer::make_safe( boost::asio::mutable_buffer(lut->buffer, _send_xfer_size), - boost::bind(&libusb_zero_copy_impl::commit, this, lut, _1) + boost::bind(&libusb_zero_copy_impl::commit, 
shared_from_this(), lut, _1) ); } } diff --git a/host/lib/transport/udp_zero_copy_asio.cpp b/host/lib/transport/udp_zero_copy_asio.cpp index 3130830a5..70e7514a1 100644 --- a/host/lib/transport/udp_zero_copy_asio.cpp +++ b/host/lib/transport/udp_zero_copy_asio.cpp @@ -17,20 +17,23 @@ #include #include //mtu +#include #include #include -#include +#include #include #include +#include #include using namespace uhd::transport; +namespace asio = boost::asio; /*********************************************************************** * Constants **********************************************************************/ //enough buffering for half a second of samples at full rate on usrp2 -static const size_t MIN_RECV_SOCK_BUFF_SIZE = size_t(sizeof(boost::uint32_t) * 25e6 * 0.5); +static const size_t MIN_RECV_SOCK_BUFF_SIZE = size_t(4 * 25e6 * 0.5); //Large buffers cause more underflow at high rates. //Perhaps this is due to the kernel scheduling, //but may change with host-based flow control. @@ -43,39 +46,55 @@ static const size_t MIN_SEND_SOCK_BUFF_SIZE = size_t(10e3); * However, it is not a true zero copy implementation as each * send and recv requires a copy operation to/from userspace. **********************************************************************/ -class udp_zero_copy_impl: - public phony_zero_copy_recv_if, - public phony_zero_copy_send_if, - public udp_zero_copy -{ +class udp_zero_copy_asio_impl : public udp_zero_copy, public boost::enable_shared_from_this { public: - typedef boost::shared_ptr sptr; - - udp_zero_copy_impl( - const std::string &addr, - const std::string &port - ): - phony_zero_copy_recv_if(udp_simple::mtu), - phony_zero_copy_send_if(udp_simple::mtu) - { + typedef boost::shared_ptr sptr; + + udp_zero_copy_asio_impl(const std::string &addr, const std::string &port){ //std::cout << boost::format("Creating udp transport for %s %s") % addr % port << std::endl; // resolve the address - boost::asio::ip::udp::resolver resolver(_io_service); - boost::asio::ip::udp::resolver::query query(boost::asio::ip::udp::v4(), addr, port); - boost::asio::ip::udp::endpoint receiver_endpoint = *resolver.resolve(query); + asio::ip::udp::resolver resolver(_io_service); + asio::ip::udp::resolver::query query(asio::ip::udp::v4(), addr, port); + asio::ip::udp::endpoint receiver_endpoint = *resolver.resolve(query); // create, open, and connect the socket - _socket = new boost::asio::ip::udp::socket(_io_service); - _socket->open(boost::asio::ip::udp::v4()); + _socket = new asio::ip::udp::socket(_io_service); + _socket->open(asio::ip::udp::v4()); _socket->connect(receiver_endpoint); - _sock_fd = _socket->native(); } - ~udp_zero_copy_impl(void){ + ~udp_zero_copy_asio_impl(void){ + _io_service.stop(); + _thread_group.join_all(); delete _socket; } + /*! + * Init, the second contructor: + * Allocate memory and spwan service thread. 
+ */ + void init(void){ + //allocate all recv frames and release them to begin xfers + _pending_recv_buffs = pending_buffs_type::make(this->get_num_recv_frames()); + for (size_t i = 0; i < this->get_num_recv_frames(); i++){ + boost::shared_array buff(new char[udp_simple::mtu]); + _buffers.push_back(buff); //store a reference to this shared array + release(buff.get()); + } + + //allocate all send frames and push them into the fifo + _pending_send_buffs = pending_buffs_type::make(this->get_num_send_frames()); + for (size_t i = 0; i < this->get_num_send_frames(); i++){ + boost::shared_array buff(new char[udp_simple::mtu]); + _buffers.push_back(buff); //store a reference to this shared array + handle_send(buff.get()); + } + + //spawn the service thread that will run the io service + _thread_group.create_thread(boost::bind(&udp_zero_copy_asio_impl::service, this)); + } + //get size for internal socket buffer template size_t get_buff_size(void) const{ Opt option; @@ -90,60 +109,111 @@ public: return get_buff_size(); } - //The number of frames is approximately the buffer size divided by the max datagram size. //In reality, this is a phony zero-copy interface and the number of frames is infinite. //However, its sensible to advertise a frame count that is approximate to buffer size. //This way, the transport caller will have an idea about how much buffering to create. size_t get_num_recv_frames(void) const{ - return this->get_buff_size()/udp_simple::mtu; + return this->get_buff_size()/udp_simple::mtu; } size_t get_num_send_frames(void) const{ - return this->get_buff_size()/udp_simple::mtu; + return this->get_buff_size()/udp_simple::mtu; + } + + //! pop a filled recv buffer off of the fifo and bind with the release callback + managed_recv_buffer::sptr get_recv_buff(double timeout){ + boost::this_thread::disable_interruption di; //disable because the wait can throw + asio::mutable_buffer buff; + if (_pending_recv_buffs->pop_with_timed_wait(buff, timeout)){ + return managed_recv_buffer::make_safe( + buff, boost::bind( + &udp_zero_copy_asio_impl::release, + shared_from_this(), + asio::buffer_cast(buff) + ) + ); + } + return managed_recv_buffer::sptr(); + } + + //! pop an empty send buffer off of the fifo and bind with the commit callback + managed_send_buffer::sptr get_send_buff(double timeout){ + boost::this_thread::disable_interruption di; //disable because the wait can throw + asio::mutable_buffer buff; + if (_pending_send_buffs->pop_with_timed_wait(buff, timeout)){ + return managed_send_buffer::make_safe( + buff, boost::bind( + &udp_zero_copy_asio_impl::commit, + shared_from_this(), + asio::buffer_cast(buff), _1 + ) + ); + } + return managed_send_buffer::sptr(); } private: - boost::asio::ip::udp::socket *_socket; - boost::asio::io_service _io_service; - int _sock_fd; - - ssize_t recv(const boost::asio::mutable_buffer &buff, double timeout){ - //setup timeval for timeout - timeval tv; - tv.tv_sec = 0; - tv.tv_usec = long(timeout*1e6); - - //setup rset for timeout - fd_set rset; - FD_ZERO(&rset); - FD_SET(_sock_fd, &rset); - - //call select to perform timed wait - if (::select(_sock_fd+1, &rset, NULL, NULL, &tv) <= 0) return 0; - - return ::recv( - _sock_fd, - boost::asio::buffer_cast(buff), - boost::asio::buffer_size(buff), 0 + void service(void){ + _io_service.run(); + } + + /******************************************************************* + * The async send and receive callbacks + ******************************************************************/ + + //! 
handle a recv callback -> push the filled memory into the fifo + void handle_recv(void *mem, size_t len){ + boost::this_thread::disable_interruption di; //disable because the wait can throw + _pending_recv_buffs->push_with_wait(boost::asio::buffer(mem, len)); + } + + //! release a recv buffer -> start an async recv on the buffer + void release(void *mem){ + _socket->async_receive( + boost::asio::buffer(mem, udp_simple::mtu), + boost::bind( + &udp_zero_copy_asio_impl::handle_recv, + shared_from_this(), mem, + asio::placeholders::bytes_transferred + ) ); } - ssize_t send(const boost::asio::const_buffer &buff){ - return ::send( - _sock_fd, - boost::asio::buffer_cast(buff), - boost::asio::buffer_size(buff), 0 + //! handle a send callback -> push the emptied memory into the fifo + void handle_send(void *mem){ + boost::this_thread::disable_interruption di; //disable because the wait can throw + _pending_send_buffs->push_with_wait(boost::asio::buffer(mem, udp_simple::mtu)); + } + + //! commit a send buffer -> start an async send on the buffer + void commit(void *mem, size_t len){ + _socket->async_send( + boost::asio::buffer(mem, len), + boost::bind( + &udp_zero_copy_asio_impl::handle_send, + shared_from_this(), mem + ) ); } + + //asio guts -> socket and service + asio::ip::udp::socket *_socket; + asio::io_service _io_service; + + //memory management -> buffers and fifos + boost::thread_group _thread_group; + std::vector > _buffers; + typedef bounded_buffer pending_buffs_type; + pending_buffs_type::sptr _pending_recv_buffs, _pending_send_buffs; }; /*********************************************************************** * UDP zero copy make function **********************************************************************/ template static void resize_buff_helper( - udp_zero_copy_impl::sptr udp_trans, + udp_zero_copy_asio_impl::sptr udp_trans, size_t target_size, const std::string &name ){ @@ -183,11 +253,13 @@ udp_zero_copy::sptr udp_zero_copy::make( size_t recv_buff_size, size_t send_buff_size ){ - udp_zero_copy_impl::sptr udp_trans(new udp_zero_copy_impl(addr, port)); + udp_zero_copy_asio_impl::sptr udp_trans(new udp_zero_copy_asio_impl(addr, port)); //call the helper to resize send and recv buffers - resize_buff_helper(udp_trans, recv_buff_size, "recv"); - resize_buff_helper (udp_trans, send_buff_size, "send"); + resize_buff_helper(udp_trans, recv_buff_size, "recv"); + resize_buff_helper (udp_trans, send_buff_size, "send"); + + udp_trans->init(); //buffers resized -> call init() to use return udp_trans; } -- cgit v1.2.3 From c6f17f1e2f908747ae1547fa43b2c22c3c20ba50 Mon Sep 17 00:00:00 2001 From: Josh Blum Date: Sun, 3 Oct 2010 19:34:41 -0700 Subject: uhd: changed buffer allocations to be in a single chunk, udp: pass frame sizes into the impl constructor --- host/lib/transport/libusb1_zero_copy.cpp | 27 +++++----- host/lib/transport/udp_zero_copy_asio.cpp | 85 ++++++++++++++++--------------- 2 files changed, 59 insertions(+), 53 deletions(-) (limited to 'host/lib/transport/udp_zero_copy_asio.cpp') diff --git a/host/lib/transport/libusb1_zero_copy.cpp b/host/lib/transport/libusb1_zero_copy.cpp index c819302b6..ab48e4fc4 100644 --- a/host/lib/transport/libusb1_zero_copy.cpp +++ b/host/lib/transport/libusb1_zero_copy.cpp @@ -100,13 +100,13 @@ private: lut_buff_type::sptr _completed_list; //! a list of all transfer structs we allocated - std::vector _all_luts; + std::vector _all_luts; - //! a list of shared arrays for the transfer buffers - std::vector > _buffers; + //! 
a block of memory for the transfer buffers + boost::shared_array _buffer; // Calls for processing asynchronous I/O - libusb_transfer *allocate_transfer(int buff_len); + libusb_transfer *allocate_transfer(void *mem, size_t len); void print_transfer_status(libusb_transfer *lut); }; @@ -154,9 +154,9 @@ usb_endpoint::usb_endpoint( _input(input) { _completed_list = lut_buff_type::make(num_transfers); - + _buffer = boost::shared_array(new char[num_transfers*transfer_size]); for (size_t i = 0; i < num_transfers; i++){ - _all_luts.push_back(allocate_transfer(transfer_size)); + _all_luts.push_back(allocate_transfer(_buffer.get() + i*transfer_size, transfer_size)); //input luts are immediately submitted to be filled //output luts go into the completed list as free buffers @@ -193,23 +193,23 @@ usb_endpoint::~usb_endpoint(void){ * Allocate a libusb transfer * The allocated transfer - and buffer it contains - is repeatedly * submitted, reaped, and reused and should not be freed until shutdown. - * \param buff_len size of the individual buffer held by each transfer + * \param mem a pointer to the buffer memory + * \param len size of the individual buffer * \return pointer to an allocated libusb_transfer */ -libusb_transfer *usb_endpoint::allocate_transfer(int buff_len){ +libusb_transfer *usb_endpoint::allocate_transfer(void *mem, size_t len){ libusb_transfer *lut = libusb_alloc_transfer(0); - - boost::shared_array buff(new boost::uint8_t[buff_len]); - _buffers.push_back(buff); //store a reference to this shared array + UHD_ASSERT_THROW(lut != NULL); unsigned int endpoint = ((_endpoint & 0x7f) | (_input ? 0x80 : 0)); + unsigned char *buff = reinterpret_cast(mem); libusb_transfer_cb_fn lut_callback = libusb_transfer_cb_fn(&callback); libusb_fill_bulk_transfer(lut, // transfer _handle->get(), // dev_handle endpoint, // endpoint - buff.get(), // buffer - buff_len, // length + buff, // buffer + len, // length lut_callback, // callback this, // user_data 0); // timeout @@ -232,6 +232,7 @@ void usb_endpoint::submit(libusb_transfer *lut){ * \param lut pointer to an libusb_transfer */ void usb_endpoint::print_transfer_status(libusb_transfer *lut){ + std::cout << "here " << lut->status << std::endl; switch (lut->status) { case LIBUSB_TRANSFER_COMPLETED: if (lut->actual_length < lut->length) { diff --git a/host/lib/transport/udp_zero_copy_asio.cpp b/host/lib/transport/udp_zero_copy_asio.cpp index 70e7514a1..a1eb516fc 100644 --- a/host/lib/transport/udp_zero_copy_asio.cpp +++ b/host/lib/transport/udp_zero_copy_asio.cpp @@ -18,6 +18,7 @@ #include #include //mtu #include +#include #include #include #include @@ -26,6 +27,7 @@ #include #include +using namespace uhd; using namespace uhd::transport; namespace asio = boost::asio; @@ -34,11 +36,15 @@ namespace asio = boost::asio; **********************************************************************/ //enough buffering for half a second of samples at full rate on usrp2 static const size_t MIN_RECV_SOCK_BUFF_SIZE = size_t(4 * 25e6 * 0.5); + //Large buffers cause more underflow at high rates. //Perhaps this is due to the kernel scheduling, //but may change with host-based flow control. 
static const size_t MIN_SEND_SOCK_BUFF_SIZE = size_t(10e3); +//the number of async frames to allocate for each send and recv +static const size_t DEFAULT_NUM_ASYNC_FRAMES = 32; + /*********************************************************************** * Zero Copy UDP implementation with ASIO: * This is the portable zero copy implementation for systems @@ -50,51 +56,52 @@ class udp_zero_copy_asio_impl : public udp_zero_copy, public boost::enable_share public: typedef boost::shared_ptr sptr; - udp_zero_copy_asio_impl(const std::string &addr, const std::string &port){ + udp_zero_copy_asio_impl( + const std::string &addr, const std::string &port, + size_t recv_frame_size, size_t num_recv_frames, + size_t send_frame_size, size_t num_send_frames + ): + _recv_frame_size(recv_frame_size), _num_recv_frames(num_recv_frames), + _send_frame_size(send_frame_size), _num_send_frames(num_send_frames) + { //std::cout << boost::format("Creating udp transport for %s %s") % addr % port << std::endl; - // resolve the address + //resolve the address asio::ip::udp::resolver resolver(_io_service); asio::ip::udp::resolver::query query(asio::ip::udp::v4(), addr, port); asio::ip::udp::endpoint receiver_endpoint = *resolver.resolve(query); - // create, open, and connect the socket + //create, open, and connect the socket _socket = new asio::ip::udp::socket(_io_service); _socket->open(asio::ip::udp::v4()); _socket->connect(receiver_endpoint); } - ~udp_zero_copy_asio_impl(void){ - _io_service.stop(); - _thread_group.join_all(); - delete _socket; - } - - /*! - * Init, the second contructor: - * Allocate memory and spwan service thread. - */ void init(void){ //allocate all recv frames and release them to begin xfers - _pending_recv_buffs = pending_buffs_type::make(this->get_num_recv_frames()); - for (size_t i = 0; i < this->get_num_recv_frames(); i++){ - boost::shared_array buff(new char[udp_simple::mtu]); - _buffers.push_back(buff); //store a reference to this shared array - release(buff.get()); + _pending_recv_buffs = pending_buffs_type::make(_num_recv_frames); + _recv_buffer = boost::shared_array(new char[_num_recv_frames*_recv_frame_size]); + for (size_t i = 0; i < _num_recv_frames; i++){ + release(_recv_buffer.get() + i*_recv_frame_size); } //allocate all send frames and push them into the fifo - _pending_send_buffs = pending_buffs_type::make(this->get_num_send_frames()); - for (size_t i = 0; i < this->get_num_send_frames(); i++){ - boost::shared_array buff(new char[udp_simple::mtu]); - _buffers.push_back(buff); //store a reference to this shared array - handle_send(buff.get()); + _pending_send_buffs = pending_buffs_type::make(_num_send_frames); + _send_buffer = boost::shared_array(new char[_num_send_frames*_send_frame_size]); + for (size_t i = 0; i < _num_send_frames; i++){ + handle_send(_send_buffer.get() + i*_send_frame_size); } //spawn the service thread that will run the io service _thread_group.create_thread(boost::bind(&udp_zero_copy_asio_impl::service, this)); } + ~udp_zero_copy_asio_impl(void){ + _io_service.stop(); + _thread_group.join_all(); + delete _socket; + } + //get size for internal socket buffer template size_t get_buff_size(void) const{ Opt option; @@ -109,19 +116,6 @@ public: return get_buff_size(); } - //The number of frames is approximately the buffer size divided by the max datagram size. - //In reality, this is a phony zero-copy interface and the number of frames is infinite. - //However, its sensible to advertise a frame count that is approximate to buffer size. 
- //This way, the transport caller will have an idea about how much buffering to create. - - size_t get_num_recv_frames(void) const{ - return this->get_buff_size()/udp_simple::mtu; - } - - size_t get_num_send_frames(void) const{ - return this->get_buff_size()/udp_simple::mtu; - } - //! pop a filled recv buffer off of the fifo and bind with the release callback managed_recv_buffer::sptr get_recv_buff(double timeout){ boost::this_thread::disable_interruption di; //disable because the wait can throw @@ -138,6 +132,8 @@ public: return managed_recv_buffer::sptr(); } + size_t get_num_recv_frames(void) const {return _num_recv_frames;} + //! pop an empty send buffer off of the fifo and bind with the commit callback managed_send_buffer::sptr get_send_buff(double timeout){ boost::this_thread::disable_interruption di; //disable because the wait can throw @@ -154,8 +150,11 @@ public: return managed_send_buffer::sptr(); } + size_t get_num_send_frames(void) const {return _num_send_frames;} + private: void service(void){ + set_thread_priority_safe(); _io_service.run(); } @@ -172,7 +171,7 @@ private: //! release a recv buffer -> start an async recv on the buffer void release(void *mem){ _socket->async_receive( - boost::asio::buffer(mem, udp_simple::mtu), + boost::asio::buffer(mem, _recv_frame_size), boost::bind( &udp_zero_copy_asio_impl::handle_recv, shared_from_this(), mem, @@ -184,7 +183,7 @@ private: //! handle a send callback -> push the emptied memory into the fifo void handle_send(void *mem){ boost::this_thread::disable_interruption di; //disable because the wait can throw - _pending_send_buffs->push_with_wait(boost::asio::buffer(mem, udp_simple::mtu)); + _pending_send_buffs->push_with_wait(boost::asio::buffer(mem, _send_frame_size)); } //! commit a send buffer -> start an async send on the buffer @@ -204,9 +203,11 @@ private: //memory management -> buffers and fifos boost::thread_group _thread_group; - std::vector > _buffers; + boost::shared_array _send_buffer, _recv_buffer; typedef bounded_buffer pending_buffs_type; pending_buffs_type::sptr _pending_recv_buffs, _pending_send_buffs; + const size_t _recv_frame_size, _num_recv_frames; + const size_t _send_frame_size, _num_send_frames; }; /*********************************************************************** @@ -253,7 +254,11 @@ udp_zero_copy::sptr udp_zero_copy::make( size_t recv_buff_size, size_t send_buff_size ){ - udp_zero_copy_asio_impl::sptr udp_trans(new udp_zero_copy_asio_impl(addr, port)); + udp_zero_copy_asio_impl::sptr udp_trans(new udp_zero_copy_asio_impl( + addr, port, + udp_simple::mtu, DEFAULT_NUM_ASYNC_FRAMES, //recv + udp_simple::mtu, DEFAULT_NUM_ASYNC_FRAMES //send + )); //call the helper to resize send and recv buffers resize_buff_helper(udp_trans, recv_buff_size, "recv"); -- cgit v1.2.3 From 57ad303e850f143573d8c6edb1a88542b7b43350 Mon Sep 17 00:00:00 2001 From: Josh Blum Date: Mon, 4 Oct 2010 09:28:26 -0700 Subject: udp: added io service work to keep service running --- host/lib/transport/udp_zero_copy_asio.cpp | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'host/lib/transport/udp_zero_copy_asio.cpp') diff --git a/host/lib/transport/udp_zero_copy_asio.cpp b/host/lib/transport/udp_zero_copy_asio.cpp index a1eb516fc..2cf7bde18 100644 --- a/host/lib/transport/udp_zero_copy_asio.cpp +++ b/host/lib/transport/udp_zero_copy_asio.cpp @@ -45,6 +45,9 @@ static const size_t MIN_SEND_SOCK_BUFF_SIZE = size_t(10e3); //the number of async frames to allocate for each send and recv static const size_t 
DEFAULT_NUM_ASYNC_FRAMES = 32; +//a single concurrent thread for io_service seems to be the fastest +static const size_t CONCURRENCY_HINT = 1; + /*********************************************************************** * Zero Copy UDP implementation with ASIO: * This is the portable zero copy implementation for systems @@ -61,6 +64,8 @@ public: size_t recv_frame_size, size_t num_recv_frames, size_t send_frame_size, size_t num_send_frames ): + _io_service(CONCURRENCY_HINT), + _work(new asio::io_service::work(_io_service)), _recv_frame_size(recv_frame_size), _num_recv_frames(num_recv_frames), _send_frame_size(send_frame_size), _num_send_frames(num_send_frames) { @@ -92,13 +97,15 @@ public: handle_send(_send_buffer.get() + i*_send_frame_size); } - //spawn the service thread that will run the io service - _thread_group.create_thread(boost::bind(&udp_zero_copy_asio_impl::service, this)); + //spawn the service threads that will run the io service + for (size_t i = 0; i < CONCURRENCY_HINT; i++) _thread_group.create_thread( + boost::bind(&udp_zero_copy_asio_impl::service, this) + ); } ~udp_zero_copy_asio_impl(void){ - _io_service.stop(); - _thread_group.join_all(); + delete _work; //allow io_service run to complete + _thread_group.join_all(); //wait for service threads to exit delete _socket; } @@ -200,6 +207,7 @@ private: //asio guts -> socket and service asio::ip::udp::socket *_socket; asio::io_service _io_service; + asio::io_service::work *_work; //memory management -> buffers and fifos boost::thread_group _thread_group; -- cgit v1.2.3
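Two patterns from this patch series can be illustrated in isolation.

First, the change from a size_t timeout_ms to a double timeout in seconds comes down to converting fractional seconds into a timeval before calling select(). The sketch below is a standalone approximation of that pattern, not UHD code; the function name recv_with_timeout is hypothetical, and unlike the patch (which writes the whole timeout into tv_usec) it splits the value into whole seconds and microseconds so timeouts of one second or more stay valid.

    // sketch: timed UDP receive with a fractional-second timeout (POSIX)
    #include <sys/select.h>
    #include <sys/socket.h>
    #include <sys/time.h>
    #include <sys/types.h>
    #include <cstddef>
    #include <cmath>

    ssize_t recv_with_timeout(int sock_fd, void *buff, size_t len, double timeout){
        //convert the timeout in seconds (double) into a timeval for select()
        timeval tv;
        tv.tv_sec  = static_cast<time_t>(timeout);
        tv.tv_usec = static_cast<suseconds_t>((timeout - std::floor(timeout)) * 1e6);

        //wait until the socket is readable or the timeout expires
        fd_set rset;
        FD_ZERO(&rset);
        FD_SET(sock_fd, &rset);
        if (::select(sock_fd + 1, &rset, NULL, NULL, &tv) <= 0) return 0; //timeout or error

        //the descriptor is readable, so this recv() will not block
        return ::recv(sock_fd, buff, len, 0);
    }

Second, the last patch keeps io_service::run() from returning between asynchronous operations by holding an io_service::work object for the lifetime of the transport, and tears the service thread down by deleting the work object before joining. The sketch below shows only that lifecycle with the Boost APIs of this era; the class name io_service_runner and the absence of any sockets are illustrative, not part of UHD.

    // sketch: keep an io_service running in a worker thread until shutdown
    #include <boost/asio.hpp>
    #include <boost/thread/thread.hpp>
    #include <boost/bind.hpp>

    class io_service_runner{
    public:
        io_service_runner(void):
            _work(new boost::asio::io_service::work(_io_service))
        {
            //without the work object, run() would return as soon as
            //the handler queue drains; the work object holds it open
            _thread_group.create_thread(boost::bind(&io_service_runner::service, this));
        }

        ~io_service_runner(void){
            delete _work;             //allow io_service::run() to complete
            _thread_group.join_all(); //wait for the service thread to exit
        }

        boost::asio::io_service &get_io_service(void){return _io_service;}

    private:
        void service(void){
            _io_service.run();
        }

        boost::asio::io_service _io_service;
        boost::asio::io_service::work *_work;
        boost::thread_group _thread_group;
    };

Posting asynchronous operations through get_io_service() (for example async_receive, as the transport does) keeps the worker thread busy; the destructor ordering mirrors the patch, releasing the work object first so run() can drain and return before join_all().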