From 4e38eef817813c1bbd8a9cf972e4cf0134d24308 Mon Sep 17 00:00:00 2001
From: Alex Williams
Date: Sun, 1 Dec 2019 21:58:13 -0800
Subject: dpdk: Add new DPDK stack to integrate with I/O services

docs: Update DPDK docs with new parameters:

Parameter names have had their hyphens changed to underscores, and the
I/O CPU argument is now named after the lcores and reflects the naming
used by DPDK.

transport: Add new udp_dpdk_link, based atop the new APIs:

This link is tightly coupled with the DPDK I/O service. The link class
carries all the address information to communicate with the other host,
and it can send packets directly through the DPDK NIC ports. However,
for receiving packets, the I/O service must pull the packets from the
DMA queue and attach them to the appropriate link object. The link
object merely formats the frame_buff object underneath, which is
embedded in the rte_mbuf container. For get_recv_buff, the link will
pull buffers only from its internal queue (the one filled by the I/O
service).

transport: Add DPDK-specific I/O service:

The I/O service is split into two parts: the user threads and the I/O
worker threads. The user threads submit requests through the
appropriate queues, and the I/O threads perform all the I/O on their
behalf. This includes routing UDP packets to the correct receiver and
getting the MAC address of a destination (by performing the ARP request
and handling the ARP replies).

The DPDK context stores the I/O services. The context spawns all I/O
services on init(), and I/O services can be fetched from the dpdk_ctx
object by using a port ID.

I/O service clients:

The clients have two lockless ring buffers: one to get a buffer from
the I/O service, and one to release a buffer back to the I/O service.
Threads sleeping on buffer I/O are kept in a separate list from the
service queue and are processed in the course of doing RX or TX. The
list nodes are embedded in the dpdk_io_if, and the head of the list is
on the dpdk_io_service. The I/O service will transfer the embedded
wait_req to the list if it cannot acquire the mutex to complete the
condition for waking.

Co-authored-by: Martin Braun
Co-authored-by: Ciro Nishiguchi
Co-authored-by: Brent Stapleton
---
 host/lib/include/uhdlib/transport/dpdk/udp.hpp | 115 +++++++++++++++++++++++++
 1 file changed, 115 insertions(+)
 create mode 100644 host/lib/include/uhdlib/transport/dpdk/udp.hpp

diff --git a/host/lib/include/uhdlib/transport/dpdk/udp.hpp b/host/lib/include/uhdlib/transport/dpdk/udp.hpp
new file mode 100644
index 000000000..65e561315
--- /dev/null
+++ b/host/lib/include/uhdlib/transport/dpdk/udp.hpp
@@ -0,0 +1,115 @@
+//
+// Copyright 2019 Ettus Research, a National Instruments brand
+//
+// SPDX-License-Identifier: GPL-3.0-or-later
+//
+#ifndef _INCLUDED_UHDLIB_TRANSPORT_DPDK_UDP_HPP_
+#define _INCLUDED_UHDLIB_TRANSPORT_DPDK_UDP_HPP_
+
+#include <uhdlib/transport/dpdk/common.hpp>
+#include <arpa/inet.h>
+#include <netinet/udp.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <boost/format.hpp>
+
+namespace uhd { namespace transport { namespace dpdk {
+
+constexpr size_t HDR_SIZE_UDP_IPV4 =
+    (sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr));
+
+/*!
+ * An enumerated type representing the type of flow for an IPv4 client.
+ * Currently, only UDP is supported (FLOW_TYPE_UDP).
+ */
+enum flow_type {
+    FLOW_TYPE_UDP,
+    FLOW_TYPE_COUNT,
+};
+
+/*!
+ * A tuple for IPv4 flows that can be used for hashing
+ */
+struct ipv4_5tuple
+{
+    enum flow_type flow_type;
+    ipv4_addr src_ip;
+    ipv4_addr dst_ip;
+    uint16_t src_port;
+    uint16_t dst_port;
+};
+
+inline void fill_ipv4_hdr(struct rte_mbuf* mbuf,
+    const dpdk_port* port,
+    uint32_t dst_ipv4_addr,
+    uint8_t proto_id,
+    uint32_t payload_len)
+{
+    struct ether_hdr* eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr*);
+    struct ipv4_hdr* ip_hdr   = (struct ipv4_hdr*)&eth_hdr[1];
+
+    ip_hdr->version_ihl     = 0x40 | 5;
+    ip_hdr->type_of_service = 0;
+    ip_hdr->total_length    = rte_cpu_to_be_16(20 + payload_len);
+    ip_hdr->packet_id       = 0;
+    ip_hdr->fragment_offset = rte_cpu_to_be_16(IPV4_HDR_DF_FLAG);
+    ip_hdr->time_to_live    = 64;
+    ip_hdr->next_proto_id   = proto_id;
+    ip_hdr->hdr_checksum    = 0; // Require HW offload
+    ip_hdr->src_addr        = port->get_ipv4();
+    ip_hdr->dst_addr        = dst_ipv4_addr;
+
+    mbuf->ol_flags = PKT_TX_IP_CKSUM | PKT_TX_IPV4;
+    mbuf->l2_len   = sizeof(struct ether_hdr);
+    mbuf->l3_len   = sizeof(struct ipv4_hdr);
+    mbuf->pkt_len  = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + payload_len;
+    mbuf->data_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + payload_len;
+}
+
+/* All values except payload length must be in network order */
+inline void fill_udp_hdr(struct rte_mbuf* mbuf,
+    const dpdk_port* port,
+    uint32_t dst_ipv4_addr,
+    uint16_t src_port,
+    uint16_t dst_port,
+    uint32_t payload_len)
+{
+    struct ether_hdr* eth_hdr;
+    struct ipv4_hdr* ip_hdr;
+    struct udp_hdr* tx_hdr;
+
+    fill_ipv4_hdr(
+        mbuf, port, dst_ipv4_addr, IPPROTO_UDP, sizeof(struct udp_hdr) + payload_len);
+
+    eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr*);
+    ip_hdr  = (struct ipv4_hdr*)&eth_hdr[1];
+    tx_hdr  = (struct udp_hdr*)&ip_hdr[1];
+
+    tx_hdr->src_port    = src_port;
+    tx_hdr->dst_port    = dst_port;
+    tx_hdr->dgram_len   = rte_cpu_to_be_16(8 + payload_len);
+    tx_hdr->dgram_cksum = 0;
+    mbuf->l4_len        = sizeof(struct udp_hdr);
+}
+
+//! Convert an IPv4 address (numeric, in network order) into a string
+inline std::string ipv4_num_to_str(const uint32_t ip_addr)
+{
+    char addr_str[INET_ADDRSTRLEN];
+    struct in_addr ipv4_addr;
+    ipv4_addr.s_addr = ip_addr;
+    inet_ntop(AF_INET, &ipv4_addr, addr_str, sizeof(addr_str));
+    return std::string(addr_str);
+}
+
+inline std::string eth_addr_to_string(const struct ether_addr mac_addr)
+{
+    auto mac_stream = boost::format("%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx");
+    mac_stream % (uint32_t)mac_addr.addr_bytes[0] % (uint32_t)mac_addr.addr_bytes[1]
+        % (uint32_t)mac_addr.addr_bytes[2] % (uint32_t)mac_addr.addr_bytes[3]
+        % (uint32_t)mac_addr.addr_bytes[4] % (uint32_t)mac_addr.addr_bytes[5];
+    return mac_stream.str();
+}
+
+}}} /* namespace uhd::transport::dpdk */
+#endif /* _INCLUDED_UHDLIB_TRANSPORT_DPDK_UDP_HPP_ */
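
Note on usage (not part of the patch): a minimal TX-side sketch of how the
helpers in this header might be combined. The make_udp_frame name and the
pool/port handles are assumptions for illustration; in the actual stack the
udp_dpdk_link does this work, and the Ethernet MAC addresses are filled in
separately once the I/O service has resolved the destination via ARP.

    // Illustrative sketch only. Assumes a valid rte_mempool and dpdk_port
    // from the surrounding DPDK context.
    #include <uhdlib/transport/dpdk/udp.hpp>
    #include <rte_mbuf.h>
    #include <cstring>

    static rte_mbuf* make_udp_frame(rte_mempool* pool,
        const uhd::transport::dpdk::dpdk_port* port,
        uint32_t dst_ip,   /* network order */
        uint16_t src_port, /* network order */
        uint16_t dst_port, /* network order */
        const void* payload,
        size_t payload_len)
    {
        rte_mbuf* mbuf = rte_pktmbuf_alloc(pool);
        if (!mbuf) {
            return nullptr;
        }
        // Writes the IPv4 and UDP headers and sets pkt_len, data_len, and
        // the checksum-offload flags. The Ethernet src/dst MACs still have
        // to be filled in (done elsewhere, after ARP resolution).
        uhd::transport::dpdk::fill_udp_hdr(
            mbuf, port, dst_ip, src_port, dst_port, (uint32_t)payload_len);
        // Payload starts right after the Ethernet + IPv4 + UDP headers
        std::memcpy(rte_pktmbuf_mtod_offset(mbuf, uint8_t*,
                        uhd::transport::dpdk::HDR_SIZE_UDP_IPV4),
            payload, payload_len);
        return mbuf;
    }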
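The ipv4_5tuple's comment says it can be used for hashing; one plausible use,
sketched below as an assumption (the patch does not show the flow table), is
keying a DPDK rte_hash that maps each RX flow to its receiving link so the
I/O service can route incoming packets. The table name and sizing are
illustrative.

    // Sketch, assuming the 5-tuple keys an rte_hash flow table.
    // Zero-initialize keys so struct padding does not perturb the hash.
    #include <uhdlib/transport/dpdk/udp.hpp>
    #include <rte_hash.h>
    #include <rte_jhash.h>
    #include <rte_lcore.h>

    static rte_hash* make_flow_table()
    {
        rte_hash_parameters params = {};
        params.name      = "udp_rx_flows"; // illustrative name
        params.entries   = 64;             // illustrative sizing
        params.key_len   = sizeof(uhd::transport::dpdk::ipv4_5tuple);
        params.hash_func = rte_jhash;
        params.socket_id = rte_socket_id();
        return rte_hash_create(&params);
    }

On RX, the I/O thread would then build a tuple from the received headers and
look up the destination with rte_hash_lookup_data(table, &tuple, &link_ptr).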
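The two lockless ring buffers per client described in the commit message map
naturally onto DPDK's rte_ring. A sketch of that arrangement follows; the
struct and ring names are assumptions, not the actual members of dpdk_io_if.
The single-producer/single-consumer flags keep each ring lockless for the one
I/O thread and one client thread that share it.

    // Sketch of the per-client queue pair, assuming exactly one producer
    // and one consumer per ring (names and sizes are illustrative).
    #include <rte_ring.h>
    #include <rte_lcore.h>
    #include <cstdio>

    struct client_queues
    {
        rte_ring* recv_ring;    // I/O thread -> client: filled RX buffers
        rte_ring* release_ring; // client -> I/O thread: buffers to recycle
    };

    static client_queues make_client_queues(unsigned client_id)
    {
        char name[RTE_RING_NAMESIZE];
        client_queues q;
        std::snprintf(name, sizeof(name), "rx_ring_%u", client_id);
        q.recv_ring = rte_ring_create(
            name, 64, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
        std::snprintf(name, sizeof(name), "rel_ring_%u", client_id);
        q.release_ring = rte_ring_create(
            name, 64, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
        return q;
    }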