
Bug#981002: marked as done (buster-pu: package dpdk/18.11.11-1~deb10u1)



Your message dated Sat, 06 Feb 2021 10:39:26 +0000
with message-id <6425525e38201ecf9a2d3e0f1e63c0d3b08e0fc0.camel@adam-barratt.org.uk>
and subject line Closing p-u bugs for updates in 10.8
has caused the Debian Bug report #981002,
regarding buster-pu: package dpdk/18.11.11-1~deb10u1
to be marked as done.

This means that you claim that the problem has been dealt with.
If this is not the case, it is now your responsibility to reopen the
Bug report if necessary, and/or fix the problem forthwith.

(NB: If you are a system administrator and have no idea what this
message is talking about, this may indicate a serious mail system
misconfiguration somewhere. Please contact owner@bugs.debian.org
immediately.)


-- 
981002: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=981002
Debian Bug Tracking System
Contact owner@bugs.debian.org with problems
--- Begin Message ---
Package: release.debian.org
Severity: normal
Tags: buster
User: release.debian.org@packages.debian.org
Usertags: pu
X-Debbugs-CC: pkg-dpdk-devel@lists.alioth.debian.org

Dear release team,

We would like to upload a new LTS release of DPDK to Buster.
Previous such updates, from 18.11.2-2+deb10u1 through
18.11.10~deb10u1, were all approved, so I have already proceeded to
upload to buster-pu in accordance with the new workflow.

As before, the LTS point release contains only bug fixes, introduces
no API changes, and has been validated with regression tests.

This will be the last point release upload for Buster, since 18.11 is
now EOL.

The source debdiff is attached. The only packaging changes were
dropping a patch that has been merged upstream and refreshing another
one due to fuzz, as sketched below.
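
For reference, refreshing a patch that only applies with fuzz is a
routine quilt operation. A minimal sketch, assuming the standard quilt
workflow for Debian source packages (the commands are illustrative,
not a transcript of the actual steps taken):

    # assuming QUILT_PATCHES=debian/patches in the unpacked source tree,
    # apply patches up to and including the one that applies with fuzz
    quilt push 0004-build-bump-minimum-Meson-version-to-0.47.1.patch
    # rewrite the patch so its context matches the new upstream sources
    quilt refresh
    # unapply all patches again before building
    quilt pop -a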

-- 
Kind regards,
Luca Boccassi
diff -Nru dpdk-18.11.10/app/test-crypto-perf/meson.build dpdk-18.11.11/app/test-crypto-perf/meson.build
--- dpdk-18.11.10/app/test-crypto-perf/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/app/test-crypto-perf/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -13,3 +13,6 @@
 		'cperf_test_verify.c',
 		'main.c')
 deps += ['cryptodev']
+if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER')
+	deps += 'pmd_crypto_scheduler'
+endif
diff -Nru dpdk-18.11.10/app/test-eventdev/evt_options.c dpdk-18.11.11/app/test-eventdev/evt_options.c
--- dpdk-18.11.10/app/test-eventdev/evt_options.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/app/test-eventdev/evt_options.c	2021-01-20 12:18:20.000000000 +0000
@@ -186,6 +186,10 @@
 	int ret;
 
 	ret = parser_read_uint8(&(opt->nb_timer_adptrs), arg);
+	if (opt->nb_timer_adptrs <= 0) {
+		evt_err("Number of timer adapters cannot be <= 0");
+		return -EINVAL;
+	}
 
 	return ret;
 }
diff -Nru dpdk-18.11.10/app/test-pmd/bpf_cmd.c dpdk-18.11.11/app/test-pmd/bpf_cmd.c
--- dpdk-18.11.10/app/test-pmd/bpf_cmd.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/app/test-pmd/bpf_cmd.c	2021-01-20 12:18:20.000000000 +0000
@@ -55,7 +55,7 @@
 struct cmd_bpf_ld_result {
 	cmdline_fixed_string_t bpf;
 	cmdline_fixed_string_t dir;
-	uint8_t port;
+	uint16_t port;
 	uint16_t queue;
 	cmdline_fixed_string_t op;
 	cmdline_fixed_string_t flags;
@@ -153,7 +153,7 @@
 struct cmd_bpf_unld_result {
 	cmdline_fixed_string_t bpf;
 	cmdline_fixed_string_t dir;
-	uint8_t port;
+	uint16_t port;
 	uint16_t queue;
 };
 
diff -Nru dpdk-18.11.10/app/test-pmd/cmdline.c dpdk-18.11.11/app/test-pmd/cmdline.c
--- dpdk-18.11.10/app/test-pmd/cmdline.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/app/test-pmd/cmdline.c	2021-01-20 12:18:20.000000000 +0000
@@ -617,7 +617,7 @@
 			"set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)"
 			"	Set Aggregation mode for IEEE802.3AD (mode 4)"
 
-			"set bonding xmit_balance_policy (port_id) (l2|l23|l34)\n"
+			"set bonding balance_xmit_policy (port_id) (l2|l23|l34)\n"
 			"	Set the transmit balance policy for bonded device running in balance mode.\n\n"
 
 			"set bonding mon_period (port_id) (value)\n"
@@ -4000,6 +4000,9 @@
 {
 	struct cmd_tx_vlan_set_result *res = parsed_result;
 
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
 	if (!port_is_stopped(res->port_id)) {
 		printf("Please stop port %d first\n", res->port_id);
 		return;
@@ -4054,6 +4057,9 @@
 {
 	struct cmd_tx_vlan_set_qinq_result *res = parsed_result;
 
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
 	if (!port_is_stopped(res->port_id)) {
 		printf("Please stop port %d first\n", res->port_id);
 		return;
@@ -4167,6 +4173,9 @@
 {
 	struct cmd_tx_vlan_reset_result *res = parsed_result;
 
+	if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+		return;
+
 	if (!port_is_stopped(res->port_id)) {
 		printf("Please stop port %d first\n", res->port_id);
 		return;
diff -Nru dpdk-18.11.10/app/test-pmd/cmdline_flow.c dpdk-18.11.11/app/test-pmd/cmdline_flow.c
--- dpdk-18.11.10/app/test-pmd/cmdline_flow.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/app/test-pmd/cmdline_flow.c	2021-01-20 12:18:20.000000000 +0000
@@ -3259,26 +3259,15 @@
 			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 			.level = 0,
 			.types = rss_hf,
-			.key_len = sizeof(action_rss_data->key),
+			.key_len = 0,
 			.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
-			.key = action_rss_data->key,
+			.key = NULL,
 			.queue = action_rss_data->queue,
 		},
-		.key = "testpmd's default RSS hash key, "
-			"override it for better balancing",
 		.queue = { 0 },
 	};
 	for (i = 0; i < action_rss_data->conf.queue_num; ++i)
 		action_rss_data->queue[i] = i;
-	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
-	    ctx->port != (portid_t)RTE_PORT_ALL) {
-		struct rte_eth_dev_info info;
-
-		rte_eth_dev_info_get(ctx->port, &info);
-		action_rss_data->conf.key_len =
-			RTE_MIN(sizeof(action_rss_data->key),
-				info.hash_key_size);
-	}
 	action->conf = &action_rss_data->conf;
 	return ret;
 }
diff -Nru dpdk-18.11.10/app/test-pmd/config.c dpdk-18.11.11/app/test-pmd/config.c
--- dpdk-18.11.10/app/test-pmd/config.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/app/test-pmd/config.c	2021-01-20 12:18:20.000000000 +0000
@@ -1398,7 +1398,7 @@
 
 /** List flow rules. */
 void
-port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
+port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group)
 {
 	struct rte_port *port;
 	struct port_flow *pf;
@@ -1709,10 +1709,17 @@
 		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
 		uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
 		uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
-		uint16_t nb_rx_desc_tmp;
-		uint16_t nb_tx_desc_tmp;
 		struct rte_eth_rxq_info rx_qinfo;
 		struct rte_eth_txq_info tx_qinfo;
+		uint16_t rx_free_thresh_tmp;
+		uint16_t tx_free_thresh_tmp;
+		uint16_t tx_rs_thresh_tmp;
+		uint16_t nb_rx_desc_tmp;
+		uint16_t nb_tx_desc_tmp;
+		uint64_t offloads_tmp;
+		uint8_t pthresh_tmp;
+		uint8_t hthresh_tmp;
+		uint8_t wthresh_tmp;
 		int32_t rc;
 
 		/* per port config */
@@ -1726,41 +1733,64 @@
 		/* per rx queue config only for first queue to be less verbose */
 		for (qid = 0; qid < 1; qid++) {
 			rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
-			if (rc)
+			if (rc) {
 				nb_rx_desc_tmp = nb_rx_desc[qid];
-			else
+				rx_free_thresh_tmp =
+					rx_conf[qid].rx_free_thresh;
+				pthresh_tmp = rx_conf[qid].rx_thresh.pthresh;
+				hthresh_tmp = rx_conf[qid].rx_thresh.hthresh;
+				wthresh_tmp = rx_conf[qid].rx_thresh.wthresh;
+				offloads_tmp = rx_conf[qid].offloads;
+			} else {
 				nb_rx_desc_tmp = rx_qinfo.nb_desc;
+				rx_free_thresh_tmp =
+						rx_qinfo.conf.rx_free_thresh;
+				pthresh_tmp = rx_qinfo.conf.rx_thresh.pthresh;
+				hthresh_tmp = rx_qinfo.conf.rx_thresh.hthresh;
+				wthresh_tmp = rx_qinfo.conf.rx_thresh.wthresh;
+				offloads_tmp = rx_qinfo.conf.offloads;
+			}
 
 			printf("    RX queue: %d\n", qid);
 			printf("      RX desc=%d - RX free threshold=%d\n",
-				nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
+				nb_rx_desc_tmp, rx_free_thresh_tmp);
 			printf("      RX threshold registers: pthresh=%d hthresh=%d "
 				" wthresh=%d\n",
-				rx_conf[qid].rx_thresh.pthresh,
-				rx_conf[qid].rx_thresh.hthresh,
-				rx_conf[qid].rx_thresh.wthresh);
-			printf("      RX Offloads=0x%"PRIx64"\n",
-				rx_conf[qid].offloads);
+				pthresh_tmp, hthresh_tmp, wthresh_tmp);
+			printf("      RX Offloads=0x%"PRIx64"\n", offloads_tmp);
 		}
 
 		/* per tx queue config only for first queue to be less verbose */
 		for (qid = 0; qid < 1; qid++) {
 			rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
-			if (rc)
+			if (rc) {
 				nb_tx_desc_tmp = nb_tx_desc[qid];
-			else
+				tx_free_thresh_tmp =
+					tx_conf[qid].tx_free_thresh;
+				pthresh_tmp = tx_conf[qid].tx_thresh.pthresh;
+				hthresh_tmp = tx_conf[qid].tx_thresh.hthresh;
+				wthresh_tmp = tx_conf[qid].tx_thresh.wthresh;
+				offloads_tmp = tx_conf[qid].offloads;
+				tx_rs_thresh_tmp = tx_conf[qid].tx_rs_thresh;
+			} else {
 				nb_tx_desc_tmp = tx_qinfo.nb_desc;
+				tx_free_thresh_tmp =
+						tx_qinfo.conf.tx_free_thresh;
+				pthresh_tmp = tx_qinfo.conf.tx_thresh.pthresh;
+				hthresh_tmp = tx_qinfo.conf.tx_thresh.hthresh;
+				wthresh_tmp = tx_qinfo.conf.tx_thresh.wthresh;
+				offloads_tmp = tx_qinfo.conf.offloads;
+				tx_rs_thresh_tmp = tx_qinfo.conf.tx_rs_thresh;
+			}
 
 			printf("    TX queue: %d\n", qid);
 			printf("      TX desc=%d - TX free threshold=%d\n",
-				nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
+				nb_tx_desc_tmp, tx_free_thresh_tmp);
 			printf("      TX threshold registers: pthresh=%d hthresh=%d "
 				" wthresh=%d\n",
-				tx_conf[qid].tx_thresh.pthresh,
-				tx_conf[qid].tx_thresh.hthresh,
-				tx_conf[qid].tx_thresh.wthresh);
+				pthresh_tmp, hthresh_tmp, wthresh_tmp);
 			printf("      TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
-				tx_conf[qid].offloads, tx_conf->tx_rs_thresh);
+				offloads_tmp, tx_rs_thresh_tmp);
 		}
 	}
 }
@@ -2404,6 +2434,10 @@
 void
 set_fwd_lcores_number(uint16_t nb_lc)
 {
+	if (test_done == 0) {
+		printf("Please stop forwarding first\n");
+		return;
+	}
 	if (nb_lc > nb_cfg_lcores) {
 		printf("nb fwd cores %u > %u (max. number of configured "
 		       "lcores) - ignored\n",
@@ -2885,9 +2919,11 @@
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
-	if (diag < 0)
+	if (diag < 0) {
 		printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
 	       "diag=%d\n", port_id, on, diag);
+		return;
+	}
 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
 }
 
@@ -2912,9 +2948,11 @@
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
-	if (diag < 0)
+	if (diag < 0) {
 		printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
 	       "diag=%d\n", port_id, on, diag);
+		return;
+	}
 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
 }
 
@@ -2953,9 +2991,11 @@
 	}
 
 	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
-	if (diag < 0)
+	if (diag < 0) {
 		printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
 	       "diag=%d\n", port_id, on, diag);
+		return;
+	}
 	ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads;
 }
 
@@ -3012,8 +3052,6 @@
 {
 	struct rte_eth_dev_info dev_info;
 
-	if (port_id_is_invalid(port_id, ENABLED_WARN))
-		return;
 	if (vlan_id_is_invalid(vlan_id))
 		return;
 
@@ -3039,8 +3077,6 @@
 {
 	struct rte_eth_dev_info dev_info;
 
-	if (port_id_is_invalid(port_id, ENABLED_WARN))
-		return;
 	if (vlan_id_is_invalid(vlan_id))
 		return;
 	if (vlan_id_is_invalid(vlan_id_outer))
@@ -3063,8 +3099,6 @@
 void
 tx_vlan_reset(portid_t port_id)
 {
-	if (port_id_is_invalid(port_id, ENABLED_WARN))
-		return;
 	ports[port_id].dev_conf.txmode.offloads &=
 				~(DEV_TX_OFFLOAD_VLAN_INSERT |
 				  DEV_TX_OFFLOAD_QINQ_INSERT);
diff -Nru dpdk-18.11.10/app/test-pmd/meson.build dpdk-18.11.11/app/test-pmd/meson.build
--- dpdk-18.11.10/app/test-pmd/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/app/test-pmd/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -33,6 +33,18 @@
 if dpdk_conf.has('RTE_LIBRTE_PDUMP')
 	deps += 'pdump'
 endif
+if dpdk_conf.has('RTE_LIBRTE_BITRATESTATS')
+	deps += 'bitratestats'
+endif
+if dpdk_conf.has('RTE_LIBRTE_LATENCYSTATS')
+	deps += 'latencystats'
+endif
+if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER')
+	deps += 'pmd_crypto_scheduler'
+endif
+if dpdk_conf.has('RTE_LIBRTE_BOND_PMD')
+	deps += 'pmd_bond'
+endif
 if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD')
 	deps += 'pmd_bnxt'
 endif
diff -Nru dpdk-18.11.10/buildtools/meson.build dpdk-18.11.11/buildtools/meson.build
--- dpdk-18.11.10/buildtools/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/buildtools/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017 Intel Corporation
 
+pkgconf = find_program('pkg-config', 'pkgconf', required: false)
+
 subdir('pmdinfogen')
 
 pmdinfo = find_program('gen-pmdinfo-cfile.sh')
diff -Nru dpdk-18.11.10/buildtools/pmdinfogen/pmdinfogen.h dpdk-18.11.11/buildtools/pmdinfogen/pmdinfogen.h
--- dpdk-18.11.10/buildtools/pmdinfogen/pmdinfogen.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/buildtools/pmdinfogen/pmdinfogen.h	2021-01-20 12:18:20.000000000 +0000
@@ -82,7 +82,7 @@
 	___x = le##width##toh(x); \
 else \
 	___x = be##width##toh(x); \
-	___x; \
+___x; \
 })
 
 #define TO_NATIVE(fend, width, x) CONVERT_NATIVE(fend, width, x)
diff -Nru dpdk-18.11.10/config/defconfig_arm-armv7a-linuxapp-gcc dpdk-18.11.11/config/defconfig_arm-armv7a-linuxapp-gcc
--- dpdk-18.11.10/config/defconfig_arm-armv7a-linuxapp-gcc	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/config/defconfig_arm-armv7a-linuxapp-gcc	2021-01-20 12:18:20.000000000 +0000
@@ -45,7 +45,6 @@
 CONFIG_RTE_LIBRTE_E1000_PMD=n
 CONFIG_RTE_LIBRTE_ENIC_PMD=n
 CONFIG_RTE_LIBRTE_FM10K_PMD=n
-CONFIG_RTE_LIBRTE_I40E_PMD=n
 CONFIG_RTE_LIBRTE_IXGBE_PMD=n
 CONFIG_RTE_LIBRTE_MLX4_PMD=n
 CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
diff -Nru dpdk-18.11.10/config/meson.build dpdk-18.11.11/config/meson.build
--- dpdk-18.11.10/config/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/config/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -90,11 +90,9 @@
 endif
 
 # check for pcap
-pcap_dep = dependency('pcap', required: false)
-if pcap_dep.found()
-	# pcap got a pkg-config file only in 1.9.0 and before that meson uses
-	# an internal pcap-config finder, which is not compatible with
-	# cross-compilation, so try to fallback to find_library
+pcap_dep = dependency('libpcap', required: false, method: 'pkg-config')
+if not pcap_dep.found()
+	# pcap got a pkg-config file only in 1.9.0
 	pcap_dep = cc.find_library('pcap', required: false)
 endif
 if pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep)
diff -Nru dpdk-18.11.10/config/rte_config.h dpdk-18.11.11/config/rte_config.h
--- dpdk-18.11.10/config/rte_config.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/config/rte_config.h	2021-01-20 12:18:20.000000000 +0000
@@ -89,6 +89,9 @@
 
 /****** driver defines ********/
 
+/* Packet prefetching in PMDs */
+#define RTE_PMD_PACKET_PREFETCH 1
+
 /* QuickAssist device */
 /* Max. number of QuickAssist devices which can be attached */
 #define RTE_PMD_QAT_MAX_PCI_DEVICES 48
diff -Nru dpdk-18.11.10/debian/changelog dpdk-18.11.11/debian/changelog
--- dpdk-18.11.10/debian/changelog	2020-10-26 10:44:57.000000000 +0000
+++ dpdk-18.11.11/debian/changelog	2021-01-25 10:52:31.000000000 +0000
@@ -1,3 +1,13 @@
+dpdk (18.11.11-1~deb10u1) buster; urgency=medium
+
+  * New upstream version 18.11.11; for a list of changes see
+    http://doc.dpdk.org/guides-18.11/rel_notes/release_18_11.html
+  * Refresh 0004-build-bump-minimum-Meson-version-to-0.47.1.patch for
+    18.11.11
+  * Drop 0008-net-i40e-support-aarch32.patch, merged upstream
+
+ -- Luca Boccassi <bluca@debian.org>  Mon, 25 Jan 2021 10:52:31 +0000
+
 dpdk (18.11.10-1~deb10u2) buster; urgency=medium
 
   * Backport patch to fix armhf build with NEON
diff -Nru dpdk-18.11.10/debian/patches/0004-build-bump-minimum-Meson-version-to-0.47.1.patch dpdk-18.11.11/debian/patches/0004-build-bump-minimum-Meson-version-to-0.47.1.patch
--- dpdk-18.11.10/debian/patches/0004-build-bump-minimum-Meson-version-to-0.47.1.patch	2020-10-26 10:30:53.000000000 +0000
+++ dpdk-18.11.11/debian/patches/0004-build-bump-minimum-Meson-version-to-0.47.1.patch	2021-01-25 10:52:31.000000000 +0000
@@ -35,7 +35,7 @@
 --- a/meson.build
 +++ b/meson.build
 @@ -5,7 +5,7 @@
- 	version: '18.11.10',
+ 	version: '18.11.11',
  	license: 'BSD',
  	default_options: ['buildtype=release', 'default_library=static'],
 -	meson_version: '>= 0.41'
diff -Nru dpdk-18.11.10/debian/patches/0008-net-i40e-support-aarch32.patch dpdk-18.11.11/debian/patches/0008-net-i40e-support-aarch32.patch
--- dpdk-18.11.10/debian/patches/0008-net-i40e-support-aarch32.patch	2020-10-26 10:44:57.000000000 +0000
+++ dpdk-18.11.11/debian/patches/0008-net-i40e-support-aarch32.patch	1970-01-01 01:00:00.000000000 +0100
@@ -1,42 +0,0 @@
-Author: Ruifeng Wang <ruifeng.wang@arm.com>
-Origin: https://git.dpdk.org/dpdk/commit/?id=78bfe1666b2063e3fc3fa51e757159f53e1fc779
-Description: fix armhf build with NEON
---- a/config/defconfig_arm-armv7a-linuxapp-gcc
-+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
-@@ -45,7 +45,6 @@ CONFIG_RTE_LIBRTE_CXGBE_PMD=n
- CONFIG_RTE_LIBRTE_E1000_PMD=n
- CONFIG_RTE_LIBRTE_ENIC_PMD=n
- CONFIG_RTE_LIBRTE_FM10K_PMD=n
--CONFIG_RTE_LIBRTE_I40E_PMD=n
- CONFIG_RTE_LIBRTE_IXGBE_PMD=n
- CONFIG_RTE_LIBRTE_MLX4_PMD=n
- CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
---- a/drivers/net/i40e/Makefile
-+++ b/drivers/net/i40e/Makefile
-@@ -74,7 +74,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
- 
- SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
- SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
--ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
-+ifneq ($(filter y,$(CONFIG_RTE_ARCH_ARM) $(CONFIG_RTE_ARCH_ARM64)),)
- SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
- else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
- SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
---- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
-+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
-@@ -6,6 +6,7 @@
- #include <stdint.h>
- #include <rte_ethdev_driver.h>
- #include <rte_malloc.h>
-+#include <rte_vect.h>
- 
- #include "base/i40e_prototype.h"
- #include "base/i40e_type.h"
-@@ -13,7 +14,6 @@
- #include "i40e_rxtx.h"
- #include "i40e_rxtx_vec_common.h"
- 
--#include <arm_neon.h>
- 
- #pragma GCC diagnostic ignored "-Wcast-qual"
- 
diff -Nru dpdk-18.11.10/debian/patches/series dpdk-18.11.11/debian/patches/series
--- dpdk-18.11.10/debian/patches/series	2020-10-26 10:44:37.000000000 +0000
+++ dpdk-18.11.11/debian/patches/series	2021-01-25 10:52:31.000000000 +0000
@@ -2,4 +2,3 @@
 0005-build-use-dependency-instead-of-find_library.patch
 0006-build-reorder-libraries-and-build-eal-before-cmdline.patch
 0007-build-use-dependency-for-libbsd-instead-of-manual-ap.patch
-0008-net-i40e-support-aarch32.patch
diff -Nru dpdk-18.11.10/devtools/check-forbidden-tokens.awk dpdk-18.11.11/devtools/check-forbidden-tokens.awk
--- dpdk-18.11.10/devtools/check-forbidden-tokens.awk	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/devtools/check-forbidden-tokens.awk	2021-01-20 12:18:20.000000000 +0000
@@ -54,7 +54,7 @@
 	}
 	for (i in deny_folders) {
 		re = "^\\+\\+\\+ b/" deny_folders[i];
-		if ($0 ~ deny_folders[i]) {
+		if ($0 ~ re) {
 			in_file = 1
 			last_file = $0
 		}
diff -Nru dpdk-18.11.10/doc/build-sdk-meson.txt dpdk-18.11.11/doc/build-sdk-meson.txt
--- dpdk-18.11.10/doc/build-sdk-meson.txt	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/build-sdk-meson.txt	2021-01-20 12:18:20.000000000 +0000
@@ -1,3 +1,6 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2018 Intel Corporation.
+
 INSTALLING DPDK USING THE MESON BUILD SYSTEM
 ---------------------------------------------
 
@@ -97,11 +100,13 @@
 
 	meson configure -Dmax_lcores=8
 
-NOTE: once meson has been run to configure a build in a directory, it
-cannot be run again on the same directory. Instead ``meson configure``
-should be used to change the build settings within the directory, and when
-``ninja`` is called to do the build itself, it will trigger the necessary
-re-scan from meson.
+.. note::
+
+        once meson has been run to configure a build in a directory, it
+        cannot be run again on the same directory. Instead ``meson configure``
+        should be used to change the build settings within the directory, and when
+        ``ninja`` is called to do the build itself, it will trigger the necessary
+        re-scan from meson.
 
 As well as those settings taken from ``meson configure``, other options
 such as the compiler to use can be passed via environment variables. For
@@ -109,9 +114,11 @@
 
 	CC=clang meson clang-build
 
-NOTE: for more comprehensive overriding of compilers or other environment
-settings, the tools for cross-compilation may be considered. However, for
-basic overriding of the compiler etc., the above form works as expected.
+.. note::
+
+        for more comprehensive overriding of compilers or other environment
+        settings, the tools for cross-compilation may be considered. However, for
+        basic overriding of the compiler etc., the above form works as expected.
 
 
 Performing the Build
diff -Nru dpdk-18.11.10/doc/guides/cryptodevs/features/octeontx.ini dpdk-18.11.11/doc/guides/cryptodevs/features/octeontx.ini
--- dpdk-18.11.10/doc/guides/cryptodevs/features/octeontx.ini	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/cryptodevs/features/octeontx.ini	2021-01-20 12:18:20.000000000 +0000
@@ -10,6 +10,7 @@
 In Place SGL           = Y
 OOP SGL In LB  Out     = Y
 OOP SGL In SGL Out     = Y
+OOP LB  In LB  Out     = Y
 
 ;
 ; Supported crypto algorithms of 'octeontx' crypto driver.
diff -Nru dpdk-18.11.10/doc/guides/linux_gsg/build_sample_apps.rst dpdk-18.11.11/doc/guides/linux_gsg/build_sample_apps.rst
--- dpdk-18.11.10/doc/guides/linux_gsg/build_sample_apps.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/linux_gsg/build_sample_apps.rst	2021-01-20 12:18:20.000000000 +0000
@@ -4,7 +4,7 @@
 Compiling and Running Sample Applications
 =========================================
 
-The chapter describes how to compile and run applications in an DPDK environment.
+The chapter describes how to compile and run applications in a DPDK environment.
 It also provides a pointer to where sample applications are stored.
 
 .. note::
@@ -185,7 +185,7 @@
 Since these logical core numbers, and their mapping to specific cores on specific NUMA sockets, can vary from platform to platform,
 it is recommended that the core layout for each platform be considered when choosing the coremask/corelist to use in each case.
 
-On initialization of the EAL layer by an DPDK application, the logical cores to be used and their socket location are displayed.
+On initialization of the EAL layer by a DPDK application, the logical cores to be used and their socket location are displayed.
 This information can also be determined for all cores on the system by examining the ``/proc/cpuinfo`` file, for example, by running cat ``/proc/cpuinfo``.
 The physical id attribute listed for each processor indicates the CPU socket to which it belongs.
 This can be useful when using other processors to understand the mapping of the logical cores to the sockets.
diff -Nru dpdk-18.11.10/doc/guides/linux_gsg/enable_func.rst dpdk-18.11.11/doc/guides/linux_gsg/enable_func.rst
--- dpdk-18.11.10/doc/guides/linux_gsg/enable_func.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/linux_gsg/enable_func.rst	2021-01-20 12:18:20.000000000 +0000
@@ -58,22 +58,51 @@
     if any, and on what is available on the system at runtime.
 
 Running DPDK Applications Without Root Privileges
---------------------------------------------------------
+-------------------------------------------------
+
+In order to run DPDK as non-root, the following Linux filesystem objects'
+permissions should be adjusted to ensure that the Linux account being used to
+run the DPDK application has access to them:
+
+*   All directories which serve as hugepage mount points, for example, ``/dev/hugepages``
+
+*   If the HPET is to be used,  ``/dev/hpet``
+
+When running as non-root user, there may be some additional resource limits
+that are imposed by the system. Specifically, the following resource limits may
+need to be adjusted in order to ensure normal DPDK operation:
+
+* RLIMIT_LOCKS (number of file locks that can be held by a process)
+
+* RLIMIT_NOFILE (number of open file descriptors that can be held open by a process)
+
+* RLIMIT_MEMLOCK (amount of pinned pages the process is allowed to have)
+
+The above limits can usually be adjusted by editing
+``/etc/security/limits.conf`` file, and rebooting.
+
+Additionally, depending on which kernel driver is in use, the relevant
+resources also should be accessible by the user running the DPDK application.
+
+For ``vfio-pci`` kernel driver, the following Linux file system objects'
+permissions should be adjusted:
+
+* The VFIO device file, ``/dev/vfio/vfio``
+
+* The directories under ``/dev/vfio`` that correspond to IOMMU group numbers of
+  devices intended to be used by DPDK, for example, ``/dev/vfio/50``
 
 .. note::
 
-    The instructions below will allow running DPDK as non-root with older
-    Linux kernel versions. However, since version 4.0, the kernel does not allow
-    unprivileged processes to read the physical address information from
-    the pagemaps file, making it impossible for those processes to use HW
-    devices which require physical addresses
-
-Although applications using the DPDK use network ports and other hardware resources directly,
-with a number of small permission adjustments it is possible to run these applications as a user other than "root".
-To do so, the ownership, or permissions, on the following Linux file system objects should be adjusted to ensure that
-the Linux user account being used to run the DPDK application has access to them:
+    The instructions below will allow running DPDK with ``igb_uio`` or
+    ``uio_pci_generic`` drivers as non-root with older Linux kernel versions.
+    However, since version 4.0, the kernel does not allow unprivileged processes
+    to read the physical address information from the pagemaps file, making it
+    impossible for those processes to be used by non-privileged users. In such
+    cases, using the VFIO driver is recommended.
 
-*   All directories which serve as hugepage mount points, for example,   ``/mnt/huge``
+For ``igb_uio`` or ``uio_pci_generic`` kernel drivers, the following Linux file
+system objects' permissions should be adjusted:
 
 *   The userspace-io device files in  ``/dev``, for example,  ``/dev/uio0``, ``/dev/uio1``, and so on
 
@@ -82,11 +111,6 @@
        /sys/class/uio/uio0/device/config
        /sys/class/uio/uio0/device/resource*
 
-*   If the HPET is to be used,  ``/dev/hpet``
-
-.. note::
-
-    On some Linux installations, ``/dev/hugepages``  is also a hugepage mount point created by default.
 
 Power Management and Power Saving Functionality
 -----------------------------------------------
@@ -112,7 +136,7 @@
 Using Linux Core Isolation to Reduce Context Switches
 -----------------------------------------------------
 
-While the threads used by an DPDK application are pinned to logical cores on the system,
+While the threads used by a DPDK application are pinned to logical cores on the system,
 it is possible for the Linux scheduler to run other tasks on those cores also.
 To help prevent additional workloads from running on those cores,
 it is possible to use the ``isolcpus`` Linux kernel parameter to isolate them from the general Linux scheduler.
diff -Nru dpdk-18.11.10/doc/guides/linux_gsg/linux_drivers.rst dpdk-18.11.11/doc/guides/linux_gsg/linux_drivers.rst
--- dpdk-18.11.10/doc/guides/linux_gsg/linux_drivers.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/linux_gsg/linux_drivers.rst	2021-01-20 12:18:20.000000000 +0000
@@ -115,7 +115,7 @@
     PMDs Which use the bifurcated driver should not be unbind from their kernel drivers. this section is for PMDs which use the UIO or VFIO drivers.
 
 As of release 1.4, DPDK applications no longer automatically unbind all supported network ports from the kernel driver in use.
-Instead, in case the PMD being used use the UIO or VFIO drivers, all ports that are to be used by an DPDK application must be bound to the
+Instead, in case the PMD being used use the UIO or VFIO drivers, all ports that are to be used by a DPDK application must be bound to the
 ``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module before the application is run.
 For such PMDs, any network ports under Linux* control will be ignored and cannot be used by the application.
 
diff -Nru dpdk-18.11.10/doc/guides/linux_gsg/nic_perf_intel_platform.rst dpdk-18.11.11/doc/guides/linux_gsg/nic_perf_intel_platform.rst
--- dpdk-18.11.10/doc/guides/linux_gsg/nic_perf_intel_platform.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/linux_gsg/nic_perf_intel_platform.rst	2021-01-20 12:18:20.000000000 +0000
@@ -1,3 +1,6 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2015 Intel Corporation.
+
 How to get best performance with NICs on Intel platforms
 ========================================================
 
diff -Nru dpdk-18.11.10/doc/guides/linux_gsg/sys_reqs.rst dpdk-18.11.11/doc/guides/linux_gsg/sys_reqs.rst
--- dpdk-18.11.10/doc/guides/linux_gsg/sys_reqs.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/linux_gsg/sys_reqs.rst	2021-01-20 12:18:20.000000000 +0000
@@ -100,7 +100,7 @@
 Running DPDK Applications
 -------------------------
 
-To run an DPDK application, some customization may be required on the target machine.
+To run a DPDK application, some customization may be required on the target machine.
 
 System Software
 ~~~~~~~~~~~~~~~
@@ -156,8 +156,36 @@
 Reserving Hugepages for DPDK Use
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The allocation of hugepages should be done at boot time or as soon as possible after system boot
-to prevent memory from being fragmented in physical memory.
+The reservation of hugepages can be performed at run time.
+This is done by echoing the number of hugepages required
+to a ``nr_hugepages`` file in the ``/sys/kernel/`` directory
+corresponding to a specific page size (in Kilobytes).
+For a single-node system, the command to use is as follows
+(assuming that 1024 of 2MB pages are required)::
+
+    echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+
+On a NUMA machine, the above command will usually divide the number of hugepages
+equally across all NUMA nodes (assuming there is enough memory on all NUMA nodes).
+However, pages can also be reserved explicitly on individual NUMA nodes
+using a ``nr_hugepages`` file in the ``/sys/devices/`` directory::
+
+    echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
+    echo 1024 > /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
+
+.. note::
+
+    Some kernel versions may not allow reserving 1 GB hugepages at run time,
+    so reserving them at boot time may be the only option.
+    Please see below for instructions.
+
+**Alternative:**
+
+In the general case, reserving hugepages at run time is perfectly fine,
+but in use cases where having lots of physically contiguous memory is required,
+it is preferable to reserve hugepages at boot time,
+as that will help in preventing physical memory from becoming heavily fragmented.
+
 To reserve hugepages at boot time, a parameter is passed to the Linux kernel on the kernel command line.
 
 For 2 MB pages, just pass the hugepages option to the kernel. For example, to reserve 1024 pages of 2 MB, use::
@@ -186,35 +214,29 @@
 
 See the Documentation/admin-guide/kernel-parameters.txt file in your Linux source tree for further details of these and other kernel options.
 
-**Alternative:**
-
-For 2 MB pages, there is also the option of allocating hugepages after the system has booted.
-This is done by echoing the number of hugepages required to a nr_hugepages file in the ``/sys/devices/`` directory.
-For a single-node system, the command to use is as follows (assuming that 1024 pages are required)::
-
-    echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
-
-On a NUMA machine, pages should be allocated explicitly on separate nodes::
-
-    echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
-    echo 1024 > /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
-
-.. note::
-
-    For 1G pages, it is not possible to reserve the hugepage memory after the system has booted.
-
 Using Hugepages with the DPDK
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Once the hugepage memory is reserved, to make the memory available for DPDK use, perform the following steps::
+If secondary process support is not required, DPDK is able to use hugepages
+without any configuration by using "in-memory" mode.
+Please see :doc:`linux_eal_parameters` for more details.
+
+If secondary process support is required,
+mount points for hugepages need to be created.
+On modern Linux distributions, a default mount point for hugepages
+is provided by the system and is located at ``/dev/hugepages``.
+This mount point will use the default hugepage size
+set by the kernel parameters as described above.
 
-    mkdir /mnt/huge
-    mount -t hugetlbfs nodev /mnt/huge
+However, in order to use hugepage sizes other than the default, it is necessary
+to manually create mount points for those hugepage sizes (e.g. 1GB pages).
 
-The mount point can be made permanent across reboots, by adding the following line to the ``/etc/fstab`` file::
+To make the hugepages of size 1GB available for DPDK use,
+following steps must be performed::
 
-    nodev /mnt/huge hugetlbfs defaults 0 0
+    mkdir /mnt/huge
+    mount -t hugetlbfs pagesize=1GB /mnt/huge
 
-For 1GB pages, the page size must be specified as a mount option::
+The mount point can be made permanent across reboots, by adding the following line to the ``/etc/fstab`` file::
 
-    nodev /mnt/huge_1GB hugetlbfs pagesize=1GB 0 0
+    nodev /mnt/huge hugetlbfs pagesize=1GB 0 0
diff -Nru dpdk-18.11.10/doc/guides/nics/dpaa2.rst dpdk-18.11.11/doc/guides/nics/dpaa2.rst
--- dpdk-18.11.10/doc/guides/nics/dpaa2.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/nics/dpaa2.rst	2021-01-20 12:18:20.000000000 +0000
@@ -1,5 +1,5 @@
 ..  SPDX-License-Identifier: BSD-3-Clause
-    Copyright 2016 NXP
+    Copyright 2016,2020 NXP
 
 
 DPAA2 Poll Mode Driver
@@ -300,7 +300,7 @@
 scenario and the objects bound to each driver.  A brief description
 of each driver follows.
 
-.. code-block: console
+.. code-block:: console
 
 
                                        +------------+
diff -Nru dpdk-18.11.10/doc/guides/nics/i40e.rst dpdk-18.11.11/doc/guides/nics/i40e.rst
--- dpdk-18.11.10/doc/guides/nics/i40e.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/nics/i40e.rst	2021-01-20 12:18:20.000000000 +0000
@@ -557,6 +557,15 @@
 However, the Rx statistics, when calling `rte_eth_stats_get` incorrectly
 shows it as received.
 
+RX/TX statistics may be incorrect when register overflowed
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rx_bytes/tx_bytes statistics register is 48 bit length.
+Although this limitation is enlarged to 64 bit length on the software side,
+but there is no way to detect if the overflow occurred more than once.
+So rx_bytes/tx_bytes statistics data is correct when statistics are
+updated at least once between two overflows.
+
 VF & TC max bandwidth setting
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff -Nru dpdk-18.11.10/doc/guides/nics/mlx5.rst dpdk-18.11.11/doc/guides/nics/mlx5.rst
--- dpdk-18.11.10/doc/guides/nics/mlx5.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/nics/mlx5.rst	2021-01-20 12:18:20.000000000 +0000
@@ -110,6 +110,30 @@
   is set to multi-packet send or Enhanced multi-packet send. Otherwise it must have
   less than 50 segments.
 
+- When using DV flow engine (``dv_flow_en`` = 1), flow pattern without VLAN item
+  will match untagged packets only.
+  The flow rule::
+
+        flow create 0 ingress pattern eth / ipv4 / end ...
+
+  Will match untagged packets only.
+  The flow rule::
+
+        flow create 0 ingress pattern eth / vlan / ipv4 / end ...
+
+  Will match tagged packets only, with any VLAN ID value.
+  The flow rule::
+
+        flow create 0 ingress pattern eth / vlan vid is 3 / ipv4 / end ...
+
+  Will only match tagged packets with VLAN ID 3.
+
+- VLAN pop offload command:
+
+  - Flow rules having a VLAN pop offload command as one of their actions and
+    are lacking a match on VLAN as one of their items are not supported.
+  - The command is not supported on egress traffic.
+
 - Count action for RTE flow is **only supported in Mellanox OFED**.
 
 - Flows with a VXLAN Network Identifier equal (or ends to be equal)
diff -Nru dpdk-18.11.10/doc/guides/nics/nfp.rst dpdk-18.11.11/doc/guides/nics/nfp.rst
--- dpdk-18.11.10/doc/guides/nics/nfp.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/nics/nfp.rst	2021-01-20 12:18:20.000000000 +0000
@@ -128,22 +128,39 @@
 more than one SmartNIC, same type of SmartNIC or different ones, and to upload a
 different firmware to each SmartNIC.
 
+   .. Note::
+      Currently the NFP PMD supports using the PF with Agilio Basic Firmware. See
+      https://help.netronome.com/support/solutions for more information on the
+      various firmwares supported by the Netronome Agilio CX smartNIC.
 
 PF multiport support
 --------------------
 
-Some NFP cards support several physical ports with just one single PCI device.
-The DPDK core is designed with a 1:1 relationship between PCI devices and DPDK
-ports, so NFP PMD PF support requires handling the multiport case specifically.
-During NFP PF initialization, the PMD will extract the information about the
-number of PF ports from the firmware and will create as many DPDK ports as
-needed.
-
-Because the unusual relationship between a single PCI device and several DPDK
-ports, there are some limitations when using more than one PF DPDK port: there
-is no support for RX interrupts and it is not possible either to use those PF
-ports with the device hotplug functionality.
+The NFP PMD can work with up to 8 ports on the same PF device. The number of
+available ports is firmware and hardware dependent, and the driver looks for a
+firmware symbol during initialization to know how many can be used.
+
+DPDK apps work with ports, and a port is usually a PF or a VF PCI device.
+However, with the NFP PF multiport there is just one PF PCI device. Supporting
+this particular configuration requires the PMD to create ports in a special way,
+although once they are created, DPDK apps should be able to use them as normal
+PCI ports.
+
+NFP ports belonging to same PF can be seen inside PMD initialization with a
+suffix added to the PCI ID: wwww:xx:yy.z_port_n. For example, a PF with PCI ID
+0000:03:00.0 and four ports is seen by the PMD code as:
+
+   .. code-block:: console
+
+      0000:03:00.0_port_0
+      0000:03:00.0_port_1
+      0000:03:00.0_port_2
+      0000:03:00.0_port_3
 
+   .. Note::
+
+      There are some limitations with multiport support: RX interrupts and
+      device hot-plugging are not supported.
 
 System configuration
 --------------------
diff -Nru dpdk-18.11.10/doc/guides/nics/pcap_ring.rst dpdk-18.11.11/doc/guides/nics/pcap_ring.rst
--- dpdk-18.11.10/doc/guides/nics/pcap_ring.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/nics/pcap_ring.rst	2021-01-20 12:18:20.000000000 +0000
@@ -138,7 +138,7 @@
 .. code-block:: console
 
     $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
-        --vdev 'net_pcap0,iface=eth0' --vdev='net_pcap1;iface=eth1'
+        --vdev 'net_pcap0,iface=eth0' --vdev='net_pcap1,iface=eth1'
 
 Enable 2 tx queues on a network interface:
 
diff -Nru dpdk-18.11.10/doc/guides/nics/sfc_efx.rst dpdk-18.11.11/doc/guides/nics/sfc_efx.rst
--- dpdk-18.11.10/doc/guides/nics/sfc_efx.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/nics/sfc_efx.rst	2021-01-20 12:18:20.000000000 +0000
@@ -314,7 +314,7 @@
 Case-insensitive 1/y/yes/on or 0/n/no/off may be used to specify
 boolean parameters value.
 
-- ``rx_datapath`` [auto|efx|ef10|ef10_esps] (default **auto**)
+- ``rx_datapath`` [auto|efx|ef10|ef10_essb] (default **auto**)
 
   Choose receive datapath implementation.
   **auto** allows the driver itself to make a choice based on firmware
@@ -323,7 +323,7 @@
   **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is
   more efficient than libefx-based and provides richer packet type
   classification.
-  **ef10_esps** chooses SFNX2xxx equal stride packed stream datapath
+  **ef10_essb** chooses SFNX2xxx equal stride super-buffer datapath
   which may be used on DPDK firmware variant only
   (see notes about its limitations above).
 
diff -Nru dpdk-18.11.10/doc/guides/prog_guide/kernel_nic_interface.rst dpdk-18.11.11/doc/guides/prog_guide/kernel_nic_interface.rst
--- dpdk-18.11.10/doc/guides/prog_guide/kernel_nic_interface.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/prog_guide/kernel_nic_interface.rst	2021-01-20 12:18:20.000000000 +0000
@@ -178,7 +178,7 @@
 -------------------------
 
 Before any KNI interfaces can be created, the ``rte_kni`` kernel module must
-be loaded into the kernel and configured withe ``rte_kni_init()`` function.
+be loaded into the kernel and configured with the ``rte_kni_init()`` function.
 
 The KNI interfaces are created by a DPDK application dynamically via the
 ``rte_kni_alloc()`` function.
diff -Nru dpdk-18.11.10/doc/guides/prog_guide/multi_proc_support.rst dpdk-18.11.11/doc/guides/prog_guide/multi_proc_support.rst
--- dpdk-18.11.10/doc/guides/prog_guide/multi_proc_support.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/prog_guide/multi_proc_support.rst	2021-01-20 12:18:20.000000000 +0000
@@ -75,7 +75,7 @@
 
 
 The EAL also supports an auto-detection mode (set by EAL ``--proc-type=auto`` flag ),
-whereby an DPDK process is started as a secondary instance if a primary instance is already running.
+whereby a DPDK process is started as a secondary instance if a primary instance is already running.
 
 Deployment Models
 -----------------
diff -Nru dpdk-18.11.10/doc/guides/prog_guide/packet_classif_access_ctrl.rst dpdk-18.11.11/doc/guides/prog_guide/packet_classif_access_ctrl.rst
--- dpdk-18.11.10/doc/guides/prog_guide/packet_classif_access_ctrl.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/prog_guide/packet_classif_access_ctrl.rst	2021-01-20 12:18:20.000000000 +0000
@@ -373,6 +373,12 @@
 
 *   **RTE_ACL_CLASSIFY_AVX2**: vector implementation, can process up to 16 flows in parallel. Requires AVX2 support.
 
+*   **RTE_ACL_CLASSIFY_NEON**: vector implementation, can process up to 8 flows
+    in parallel. Requires NEON support.
+
+*   **RTE_ACL_CLASSIFY_ALTIVEC**: vector implementation, can process up to 8
+    flows in parallel. Requires ALTIVEC support.
+
 It is purely a runtime decision which method to choose, there is no build-time difference.
 All implementations operates over the same internal RT structures and use similar principles. The main difference is that vector implementations can manually exploit IA SIMD instructions and process several input data flows in parallel.
 At startup ACL library determines the highest available classify method for the given platform and sets it as default one. Though the user has an ability to override the default classifier function for a given ACL context or perform particular search using non-default classify method. In that case it is user responsibility to make sure that given platform supports selected classify implementation.
diff -Nru dpdk-18.11.10/doc/guides/rel_notes/release_18_11.rst dpdk-18.11.11/doc/guides/rel_notes/release_18_11.rst
--- dpdk-18.11.10/doc/guides/rel_notes/release_18_11.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/rel_notes/release_18_11.rst	2021-01-20 12:18:20.000000000 +0000
@@ -3722,3 +3722,369 @@
 * 87520e59fb  net/bnxt: fix freeing filters on flow creation failure
 * 11b58ac709  net/i40e: fix filter pctype
 * 750ff30a8f  net/mlx5: fix tunnel flow priority
+
+18.11.11 Release Notes
+----------------------
+
+18.11.11 Fixes
+~~~~~~~~~~~~~~
+
+* app/eventdev: check timer adadpters number
+* app: fix ethdev port id size
+* app: fix missing dependencies
+* app/testpmd: do not allow dynamic change of core number
+* app/testpmd: fix bonding xmit balance policy command
+* app/testpmd: fix build with gcc 11
+* app/testpmd: fix descriptor id check
+* app/testpmd: fix displaying Rx/Tx queues information
+* app/testpmd: fix port id check in Tx VLAN command
+* app/testpmd: fix RSS key for flow API RSS rule
+* app/testpmd: fix VLAN configuration on failure
+* app/testpmd: remove restriction on Tx segments set
+* baseband/turbo_sw: fix memory leak in error path
+* build: skip detecting libpcap via pcap-config
+* bus/fslmc: fix dpio close
+* bus/pci: fix leak on VFIO mapping error
+* bus/pci: fix memory leak when unmapping VFIO resource
+* bus/pci: remove duplicate declaration
+* bus/pci: remove unused scan by address
+* common/qat: add missing kmod dependency info
+* compress/isal: check allocation in queue setup
+* config: enable packet prefetching with Meson
+* cryptodev: fix parameter parsing
+* crypto/dpaa2_sec: fix stats query without queue pair
+* crypto/dpaa2_sec: remove dead code
+* crypto/octeontx: fix out-of-place support
+* crypto/scheduler: fix header install with meson
+* devtools: fix directory filter in forbidden token check
+* distributor: fix API documentation
+* distributor: fix buffer use after free
+* distributor: fix clearing returns buffer
+* distributor: fix flushing in flight packets
+* distributor: fix handshake deadlock
+* distributor: fix handshake synchronization
+* distributor: fix return pkt calls in single mode
+* distributor: fix scalar matching
+* distributor: handle worker shutdown in burst mode
+* doc: add SPDX license tag header to Intel performance guide
+* doc: add SPDX license tag header to meson guide
+* doc: clarify instructions on running as non-root
+* doc: fix diagram in dpaa2 guide
+* doc: fix EF10 Rx mode name in sfc guide
+* doc: fix ethdev port id size
+* doc: fix formatting of notes in meson guide
+* doc: fix grammar
+* doc: fix missing classify methods in ACL guide
+* doc: fix rule file parameters in l3fwd-acl guide
+* doc: fix typo in KNI guide
+* doc: fix typo in pcap guide
+* doc: improve multiport PF in nfp guide
+* doc: update information on using hugepages
+* eal: fix doxygen for EAL cleanup
+* eal: fix leak on device event callback unregister
+* eal: fix parallel build
+* eal/linux: fix memory leak in uevent handling
+* eal: remove useless makefiles
+* eal/x86: fix memcpy AVX-512 enablement
+* efd: fix tailq entry leak in error path
+* ethdev: fix data type for port id
+* ethdev: fix memory ordering for callback functions
+* ethdev: move non-offload capabilities
+* ethdev: remove redundant license text
+* eventdev: check allocation in Tx adapter
+* eventdev: fix adapter leak in error path
+* event/dpaa2: fix dereference before null check
+* examples/fips_validation: fix buffer overflow
+* examples/fips_validation: fix missed version line
+* examples/kni: fix build with pkg-config
+* examples/l2fwd-crypto: fix build with pkg-config
+* examples/l2fwd-crypto: fix missing dependency
+* examples/l2fwd-keepalive: skip meson build if no librt
+* examples/l3fwd-power: check packet types after start
+* examples/multi_process: fix build on Ubuntu 20.04
+* examples/multi_process: fix compilation
+* examples/performance-thread: fix build with low core count
+* examples/performance-thread: fix build with pkg-config
+* examples/qos_sched: fix usage string
+* examples/vhost_crypto: add new line character in usage
+* examples/vm_power: fix build on Ubuntu 20.04
+* fix spellings that Lintian complains about
+* gso: fix payload unit size for UDP
+* kni: fix build on RHEL 8.3
+* kni: fix ethtool build error on kernel 5.9
+* license: add licenses for exception cases
+* maintainers: update maintainer names and emails
+* malloc: fix style in free list index computation
+* mem: fix allocation failure on non-NUMA kernel
+* mem: fix allocation in container with SELinux
+* mem: fix config name in error logs
+* mempool/octeontx: fix aura to pool mapping
+* net/avf: fix command after PF reset
+* net/avf: fix flow flush after PF reset
+* net/avf: fix iterator for RSS LUT
+* net/avf: fix performance drop after port reset
+* net/avf: fix releasing mbufs
+* net/avf: fix RSS RETA settings invalid
+* net/avf: fix scattered Rx enabling
+* net/avf: fix vector Rx
+* net/bnx2x: add QLogic vendor id for BCM57840
+* net/bnxt: add memory allocation check in VF info init
+* net/bnxt: fix boolean operator usage
+* net/bnxt: fix checking VNIC in shutdown path
+* net/bnxt: fix drop enable in get Rx queue info
+* net/bnxt: fix endianness while setting L4 destination port
+* net/bnxt: fix flow error on filter creation
+* net/bnxt: fix for memleak during queue restart
+* net/bnxt: fix memory leak when freeing VF info
+* net/bnxt: fix queue get info
+* net/bnxt: fix queue release
+* net/bnxt: fix resetting mbuf data offset
+* net/bnxt: fix shift operation
+* net/bnxt: fix structure variable initialization
+* net/bnxt: fix to advance producer index
+* net/bnxt: fix UDP tunnel port removal
+* net/bnxt: increase size of Rx CQ
+* net/bonding: fix possible unbalanced packet receiving
+* net/bonding: fix Rx queue conversion
+* net: check segment pointer in raw checksum processing
+* net/cxgbe: fix duplicate MAC addresses in MPS TCAM
+* net/cxgbe: fix queue DMA ring leaks during port close
+* net/dpaa2: fix misuse of interface index
+* net/ena/base: fix release of wait event
+* net/ena/base: specify delay operations
+* net/ena/base: use min/max macros with type conversion
+* net/ena: remove unused macro
+* net/failsafe: fix double space in warning log
+* net/failsafe: fix state synchro cleanup
+* net/fm10k: fix memory leak when thresh check fails
+* net/fm10k: fix memory leak when Tx thresh check fails
+* net/fm10k: fix vector Rx
+* net/i40e: add C++ include guard
+* net/i40e/base: fix function header arguments
+* net/i40e/base: fix Rx only for unicast promisc on VLAN
+* net/i40e: fix build for log format specifier
+* net/i40e: fix byte counters
+* net/i40e: fix filter pctype
+* net/i40e: fix flow director for eth + VLAN pattern
+* net/i40e: fix flow director initialisation
+* net/i40e: fix link status
+* net/i40e: fix QinQ flow pattern to allow non full mask
+* net/i40e: fix queue region in RSS flow
+* net/i40e: fix recreating flexible flow director rule
+* net/i40e: fix vector Rx
+* net/i40e: fix virtual channel conflict
+* net/i40e: support aarch32
+* net/iavf: downgrade error log
+* net/ixgbe: check switch domain allocation result
+* net/ixgbe: fix vector Rx
+* net/ixgbe: fix VF reset HW error handling
+* net/ixgbe: remove redundant MAC flag check
+* net/mlx5: fix doorbell register mmap offset
+* net/mlx5: fix match on empty VLAN item in DV mode
+* net/mlx5: fix netlink buffer allocation from stack
+* net/mlx5: fix PCI address lookup
+* net/mlx5: fix representor interrupts handler
+* net/mlx5: fix Rx descriptor status
+* net/mlx5: fix Rx queue count calculation
+* net/mlx5: fix Rx queue count calculation
+* net/mlx5: fix secondary process resources release
+* net/mlx5: fix selection between encap and decap
+* net/mlx5: fix tunnel flow prioriity
+* net/mlx5: fix UAR remap initialization for 32-bit systems
+* net/mlx5: fix xstats reset reinitialization
+* net/mlx5: remove unused log macros
+* net/mlx5: remove unused variable in Tx queue creation
+* net/mlx5: use open/read/close for ib stats query
+* net/mlx: fix overlinking with meson and glue dlopen
+* net/mvpp2: fix memory leak in error path
+* net/netvsc: allocate contiguous physical memory for RNDIS
+* net/netvsc: check for overflow on packet info from host
+* net/netvsc: fix multiple channel Rx
+* net/netvsc: fix stale value after free
+* net/netvsc: fix Tx queue leak in error path
+* net/netvsc: replace compiler builtin overflow check
+* net/nfp: expand device info get
+* net/qede: fix dereference before null check
+* net/qede: fix getting link details
+* net/qede: fix milliseconds sleep macro
+* net/ring: check internal arguments
+* net/ring: fix typo in log message
+* net/sfc/base: fix tunnel configuration
+* net/tap: free mempool when closing
+* net/thunderx: fix memory leak on rbdr desc ring failure
+* net/vdev_netvsc: fix device probing error flow
+* net/vhost: fix xstats after clearing stats
+* net/virtio: check raw checksum failure
+* pmdinfogen: fix build with gcc 11
+* port: remove useless assignment
+* raw/dpaa2_qdma: fix reset
+* raw/skeleton: allow closing already closed device
+* raw/skeleton: reset test statistics
+* Revert "app/testpmd: fix descriptor id check"
+* Revert "app/testpmd: remove restriction on Tx segments set"
+* table: fix hash for 32-bit
+* test/crypto: fix device number
+* test/crypto: fix stats test
+* test/distributor: collect return mbufs
+* test/distributor: fix freeing mbufs
+* test/distributor: fix lcores statistics
+* test/distributor: fix mbuf leak on failure
+* test/distributor: fix quitting workers in burst mode
+* test/distributor: fix race conditions on shutdown
+* test/distributor: fix shutdown of busy worker
+* test/event: fix function arguments for crypto adapter
+* timer: add limitation note for sync stop and reset
+* usertools: fix CPU layout script to be PEP8 compliant
+* usertools: fix pmdinfo parsing
+* vdpa/ifc: fix build with recent kernels
+* version: 18.11.11-rc1
+* vfio: fix group descriptor check
+* vhost/crypto: fix feature negotiation
+* vhost: fix error path when setting memory tables
+* vhost: fix fd leak in dirty logging setup
+* vhost: fix fd leak in kick setup
+* vhost: fix IOTLB mempool single-consumer flag
+* vhost: fix virtio-net header length with packed ring
+* vhost: fix virtqueue initialization
+* vhost: fix virtqueues metadata allocation
+* vhost: validate index in available entries API
+* vhost: validate index in guest notification API
+
+18.11.11 Validation
+~~~~~~~~~~~~~~~~~~~
+
+* Red Hat(R) Testing
+
+   * RHEL 8
+   * QEMU 5.2
+   * Functionality
+
+      * PF assignment
+      * VF assignment
+      * vhost single/multi queues and cross-NUMA
+      * vhostclient reconnect
+      * vhost live migration with single/multi queues and cross-NUMA
+      * OVS PVP
+
+   * Tested NICs
+
+      * X540-AT2 NIC(ixgbe, 10G)
+
+* Intel(R) Testing
+
+   * Basic Intel(R) NIC(ixgbe and i40e)
+
+      * PF (i40e)
+      * PF (ixgbe)
+      * VF (i40e)
+      * VF (ixgbe)
+      * Compile Testing
+      * Intel NIC single core/NIC performance
+
+   * Basic cryptodev and virtio
+
+      * vhost/virtio basic loopback, PVP and performance
+
+         * See known issues
+
+      * cryptodev function
+      * cryptodev performance
+
+* Intel(R) Testing with Open vSwitch
+
+   * OVS testing with OVS branches 2.12 and 2.11 with VSPERF
+
+   * i40e
+
+      * RFC2544 and throughput
+      * RSS validation
+      * Jumbo Frames
+      * Flow Control
+
+   * i40evf
+
+      * Basic functionality
+      * Jumbo Frames
+
+   * ixgbe
+
+      * RFC2544 and throughput
+      * RSS validation
+      * Jumbo Frames
+      * Flow Control
+      * RSS
+
+   * vhostuserclient
+
+      * Jumbo Frames
+      * Reconnect
+      * NUMA node
+
+* Mellanox(R) Testing
+
+   * testpmd send and receive multiple types of traffic
+   * testpmd xstats counter
+   * testpmd timestamp
+   * Changing/checking link status through testpmd
+   * RTE flow and flow_director
+   * RSS
+   * VLAN stripping and insertion
+   * Checksum and TSO
+   * ptype
+   * l3fwd-power example application
+   * multi-process example applications
+
+   * ConnectX-4 Lx
+
+      * RHEL 7.4
+      * driver MLNX_OFED_LINUX-5.2-1.0.4.0
+      * fw 14.29.1016
+
+   * ConnectX-5
+
+      * RHEL 7.4
+      * driver MLNX_OFED_LINUX-5.2-1.0.4.0
+      * fw 16.29.1016
+
+18.11.11 Known Issues
+~~~~~~~~~~~~~~~~~~~~~
+
+* DPDK 18.11.11 contains fixes up to DPDK v20.11
+* Issues identified/fixed in DPDK main branch after DPDK v20.11 may be present in DPDK 18.11.11
+* rss_to_rte_flow/set_key_keylen: create rule failed: Invalid argument
+
+   * https://bugs.dpdk.org/show_bug.cgi?id=573
+
+* The UDP fragmentation offload feature of the Virtio-net device cannot be turned on in the VM
+
+   * https://bugzilla.kernel.org/show_bug.cgi?id=207075
+
+18.11.11 Fixes skipped and status unresolved
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* b149a7064  eal/freebsd: add config reattach in secondary process
+* a135e050a  examples/ipsec-secgw: fix packet length
+* 9d10f53e4  test/metrics: fix second run
+* 6080796f6  mem: make base address hint OS specific
+* 6d3f9917f  eal: fix memory config allocation for multi-process
+* f0617163b  mempool/dpaa2: report error on endless loop in mbuf release
+* 7392ad06f  app/testpmd: use better randomness for Tx split
+* ec8615607  crypto/dpaa_sec: fix IOVA conversions
+* 207b1c813  test: fix build without ring PMD
+* f6752f660  net/sfc: set priority of created filters to manual
+* 4236694f0  mem: preallocate VA space in no-huge mode
+* 036d82365  mempool: remove inline functions from export list
+* be4ef2e0d  net/i40e: fix flow director enabling
+* f6e63e59e  app/testpmd: fix global variable multiple definitions
+* c6854a412  net/netvsc: fix warning when VF is removed
+* a4f53bec7  net/netvsc: do not query VF link state
+* cb4261e0b  event/octeontx2: improve datapath memory locality
+* 91d581dc1  crypto/dpaa2_sec: fix HFN override
+* 7838d3a6a  net/netvsc: check for overflow on packet info from host
+* b253a6bbf  app/testpmd: fix packet header in txonly mode
+* 07bfb9047  crypto/aesni_mb: fix CCM digest size check
+* c6887eca5  crypto/caam_jr: fix device tree parsing for SEC_ERA
+* ead06572b  net/iavf: fix performance with writeback policy
+* 856edce2b  net/ena: fix setting Rx checksum flags in mbuf
+* ceccbcd73  raw/ifpga: use trusted buffer to free
+* fe5d0e85b  net/i40e: fix flow director flex configuration
diff -Nru dpdk-18.11.10/doc/guides/sample_app_ug/flow_classify.rst dpdk-18.11.11/doc/guides/sample_app_ug/flow_classify.rst
--- dpdk-18.11.10/doc/guides/sample_app_ug/flow_classify.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/sample_app_ug/flow_classify.rst	2021-01-20 12:18:20.000000000 +0000
@@ -271,7 +271,7 @@
 .. code-block:: c
 
     static inline int
-    port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+    port_init(uint16_t port, struct rte_mempool *mbuf_pool)
     {
         struct rte_eth_conf port_conf = port_conf_default;
         const uint16_t rx_rings = 1, tx_rings = 1;
diff -Nru dpdk-18.11.10/doc/guides/sample_app_ug/flow_filtering.rst dpdk-18.11.11/doc/guides/sample_app_ug/flow_filtering.rst
--- dpdk-18.11.10/doc/guides/sample_app_ug/flow_filtering.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/sample_app_ug/flow_filtering.rst	2021-01-20 12:18:20.000000000 +0000
@@ -373,7 +373,7 @@
 .. code-block:: c
 
    static struct rte_flow *
-   generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
+   generate_ipv4_flow(uint16_t port_id, uint16_t rx_q,
                    uint32_t src_ip, uint32_t src_mask,
                    uint32_t dest_ip, uint32_t dest_mask,
                    struct rte_flow_error *error)
diff -Nru dpdk-18.11.10/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst dpdk-18.11.11/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
--- dpdk-18.11.10/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst	2021-01-20 12:18:20.000000000 +0000
@@ -236,7 +236,7 @@
 
 ..  code-block:: console
 
-    ./build/l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME rule_ipv6 FILENAME [--scalar] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa]
+    ./build/l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--scalar] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa]
 
 
 where,
@@ -268,7 +268,7 @@
 
 ..  code-block:: console
 
-    ./build/l3fwd-acl -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)" --rule_ipv4="./rule_ipv4.db" -- rule_ipv6="./rule_ipv6.db" --scalar
+    ./build/l3fwd-acl -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)" --rule_ipv4="rule_ipv4.db" --rule_ipv6="rule_ipv6.db" --scalar
 
 In this command:
 
@@ -290,9 +290,9 @@
     |          |            |           |                                     |
     +----------+------------+-----------+-------------------------------------+
 
-*   The --rule_ipv4 option specifies the reading of IPv4 rules sets from the ./ rule_ipv4.db file.
+*   The --rule_ipv4 option specifies the reading of IPv4 rule sets from the rule_ipv4.db file.
 
-*   The --rule_ipv6 option specifies the reading of IPv6 rules sets from the ./ rule_ipv6.db file.
+*   The --rule_ipv6 option specifies the reading of IPv6 rule sets from the rule_ipv6.db file.
 
 *   The --scalar option specifies the performing of rule lookup with a scalar function.
 
diff -Nru dpdk-18.11.10/doc/guides/sample_app_ug/l3_forward_power_man.rst dpdk-18.11.11/doc/guides/sample_app_ug/l3_forward_power_man.rst
--- dpdk-18.11.10/doc/guides/sample_app_ug/l3_forward_power_man.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/sample_app_ug/l3_forward_power_man.rst	2021-01-20 12:18:20.000000000 +0000
@@ -49,7 +49,7 @@
 
 This application includes a P-state power management algorithm to generate a frequency hint to be sent to CPUFreq.
 The algorithm uses the number of received and available Rx packets on recent polls to make a heuristic decision to scale frequency up/down.
-Specifically, some thresholds are checked to see whether a specific core running an DPDK polling thread needs to increase frequency
+Specifically, some thresholds are checked to see whether a specific core running a DPDK polling thread needs to increase frequency
 a step up based on the near to full trend of polled Rx queues.
 Also, it decreases frequency a step if packet processed per loop is far less than the expected threshold
 or the thread's sleeping time exceeds a threshold.
diff -Nru dpdk-18.11.10/doc/guides/testpmd_app_ug/testpmd_funcs.rst dpdk-18.11.11/doc/guides/testpmd_app_ug/testpmd_funcs.rst
--- dpdk-18.11.10/doc/guides/testpmd_app_ug/testpmd_funcs.rst	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/doc/guides/testpmd_app_ug/testpmd_funcs.rst	2021-01-20 12:18:20.000000000 +0000
@@ -2252,16 +2252,16 @@
 
    testpmd> set bonding mac 10 00:00:00:00:00:01
 
-set bonding xmit_balance_policy
+set bonding balance_xmit_policy
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Set the transmission policy for a Link Bonding device when it is in Balance XOR mode::
 
-   testpmd> set bonding xmit_balance_policy (port_id) (l2|l23|l34)
+   testpmd> set bonding balance_xmit_policy (port_id) (l2|l23|l34)
 
 For example, set a Link Bonding device (port 10) to use a balance policy of layer 3+4 (IP addresses & UDP ports)::
 
-   testpmd> set bonding xmit_balance_policy 10 l34
+   testpmd> set bonding balance_xmit_policy 10 l34
 
 
 set bonding mon_period
diff -Nru dpdk-18.11.10/drivers/baseband/turbo_sw/bbdev_turbo_software.c dpdk-18.11.11/drivers/baseband/turbo_sw/bbdev_turbo_software.c
--- dpdk-18.11.10/drivers/baseband/turbo_sw/bbdev_turbo_software.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/baseband/turbo_sw/bbdev_turbo_software.c	2021-01-20 12:18:20.000000000 +0000
@@ -10,6 +10,7 @@
 #include <rte_ring.h>
 #include <rte_kvargs.h>
 #include <rte_cycles.h>
+#include <rte_errno.h>
 
 #include <rte_bbdev.h>
 #include <rte_bbdev_pmd.h>
@@ -233,7 +234,8 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->enc_out = rte_zmalloc_socket(name,
 			((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
@@ -242,6 +244,7 @@
 	if (q->enc_out == NULL) {
 		rte_bbdev_log(ERR,
 			"Failed to allocate queue memory for %s", name);
+		ret = -ENOMEM;
 		goto free_q;
 	}
 
@@ -253,7 +256,8 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->enc_in = rte_zmalloc_socket(name,
 			(RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
@@ -261,6 +265,7 @@
 	if (q->enc_in == NULL) {
 		rte_bbdev_log(ERR,
 			"Failed to allocate queue memory for %s", name);
+		ret = -ENOMEM;
 		goto free_q;
 	}
 
@@ -271,7 +276,8 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->ag = rte_zmalloc_socket(name,
 			RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
@@ -279,6 +285,7 @@
 	if (q->ag == NULL) {
 		rte_bbdev_log(ERR,
 			"Failed to allocate queue memory for %s", name);
+		ret = -ENOMEM;
 		goto free_q;
 	}
 
@@ -289,7 +296,8 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->code_block = rte_zmalloc_socket(name,
 			RTE_BBDEV_MAX_CB_SIZE * sizeof(*q->code_block),
@@ -297,6 +305,7 @@
 	if (q->code_block == NULL) {
 		rte_bbdev_log(ERR,
 			"Failed to allocate queue memory for %s", name);
+		ret = -ENOMEM;
 		goto free_q;
 	}
 
@@ -308,7 +317,8 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->deint_input = rte_zmalloc_socket(name,
 			DEINT_INPUT_BUF_SIZE * sizeof(*q->deint_input),
@@ -316,6 +326,7 @@
 	if (q->deint_input == NULL) {
 		rte_bbdev_log(ERR,
 			"Failed to allocate queue memory for %s", name);
+		ret = -ENOMEM;
 		goto free_q;
 	}
 
@@ -327,7 +338,8 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->deint_output = rte_zmalloc_socket(NULL,
 			DEINT_OUTPUT_BUF_SIZE * sizeof(*q->deint_output),
@@ -335,6 +347,7 @@
 	if (q->deint_output == NULL) {
 		rte_bbdev_log(ERR,
 			"Failed to allocate queue memory for %s", name);
+		ret = -ENOMEM;
 		goto free_q;
 	}
 
@@ -346,7 +359,8 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->adapter_output = rte_zmalloc_socket(NULL,
 			ADAPTER_OUTPUT_BUF_SIZE * sizeof(*q->adapter_output),
@@ -354,6 +368,7 @@
 	if (q->adapter_output == NULL) {
 		rte_bbdev_log(ERR,
 			"Failed to allocate queue memory for %s", name);
+		ret = -ENOMEM;
 		goto free_q;
 	}
 
@@ -364,12 +379,14 @@
 		rte_bbdev_log(ERR,
 				"Creating queue name for device %u queue %u failed",
 				dev->data->dev_id, q_id);
-		return -ENAMETOOLONG;
+		ret = -ENAMETOOLONG;
+		goto free_q;
 	}
 	q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
 			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
 	if (q->processed_pkts == NULL) {
 		rte_bbdev_log(ERR, "Failed to create ring for %s", name);
+		ret = -rte_errno;
 		goto free_q;
 	}
 
@@ -389,7 +406,7 @@
 	rte_free(q->deint_output);
 	rte_free(q->adapter_output);
 	rte_free(q);
-	return -EFAULT;
+	return ret;
 }
 
 static const struct rte_bbdev_ops pmd_ops = {
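The turbo_sw hunk above converts every early "return -ENAMETOOLONG" into "ret = ...; goto free_q;", so the half-initialised queue is always torn down and the real error code (rather than a blanket -EFAULT) reaches the caller. A minimal sketch of that goto-cleanup pattern, using plain libc allocation and hypothetical names (setup_queue, buf_a, buf_b), not the DPDK APIs:

    #include <errno.h>
    #include <stdlib.h>

    /* Sketch of the goto-cleanup pattern: record the precise errno and
     * jump to a single label that frees whatever was allocated so far.
     * setup_queue(), buf_a and buf_b are hypothetical, not DPDK APIs. */
    static int setup_queue(size_t a_len, size_t b_len,
                           void **out_a, void **out_b)
    {
        void *buf_a = NULL;
        void *buf_b = NULL;
        int ret;

        buf_a = calloc(1, a_len);
        if (buf_a == NULL) {
            ret = -ENOMEM;
            goto cleanup;
        }

        buf_b = calloc(1, b_len);
        if (buf_b == NULL) {
            ret = -ENOMEM;
            goto cleanup;
        }

        *out_a = buf_a;
        *out_b = buf_b;
        return 0;

    cleanup:
        free(buf_a);   /* free(NULL) is a no-op, so one label covers all paths */
        free(buf_b);
        return ret;    /* propagate the specific cause, not a generic -EFAULT */
    }

Because free(NULL) is defined to do nothing, every failure path can share the same label no matter how far initialisation got.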
diff -Nru dpdk-18.11.10/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c dpdk-18.11.11/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
--- dpdk-18.11.10/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c	2021-01-20 12:18:20.000000000 +0000
@@ -531,8 +531,13 @@
 
 err:
 	if (dpio_dev->dpio) {
-		dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
-		dpio_close(dpio_dev->dpio, CMD_PRI_LOW,  dpio_dev->token);
+		if (dpio_dev->token) {
+			dpio_disable(dpio_dev->dpio, CMD_PRI_LOW,
+				     dpio_dev->token);
+			dpio_close(dpio_dev->dpio, CMD_PRI_LOW,
+				   dpio_dev->token);
+		}
+
 		free(dpio_dev->dpio);
 	}
 	rte_free(dpio_dev);
diff -Nru dpdk-18.11.10/drivers/bus/pci/bsd/pci.c dpdk-18.11.11/drivers/bus/pci/bsd/pci.c
--- dpdk-18.11.10/drivers/bus/pci/bsd/pci.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/bus/pci/bsd/pci.c	2021-01-20 12:18:20.000000000 +0000
@@ -386,55 +386,6 @@
 	return RTE_IOVA_PA;
 }
 
-int
-pci_update_device(const struct rte_pci_addr *addr)
-{
-	int fd;
-	struct pci_conf matches[2];
-	struct pci_match_conf match = {
-		.pc_sel = {
-			.pc_domain = addr->domain,
-			.pc_bus = addr->bus,
-			.pc_dev = addr->devid,
-			.pc_func = addr->function,
-		},
-	};
-	struct pci_conf_io conf_io = {
-		.pat_buf_len = 0,
-		.num_patterns = 1,
-		.patterns = &match,
-		.match_buf_len = sizeof(matches),
-		.matches = &matches[0],
-	};
-
-	fd = open("/dev/pci", O_RDONLY);
-	if (fd < 0) {
-		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
-		goto error;
-	}
-
-	if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
-		RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
-				__func__, strerror(errno));
-		goto error;
-	}
-
-	if (conf_io.num_matches != 1)
-		goto error;
-
-	if (pci_scan_one(fd, &matches[0]) < 0)
-		goto error;
-
-	close(fd);
-
-	return 0;
-
-error:
-	if (fd >= 0)
-		close(fd);
-	return -1;
-}
-
 /* Read PCI config space. */
 int rte_pci_read_config(const struct rte_pci_device *dev,
 		void *buf, size_t len, off_t offset)
diff -Nru dpdk-18.11.10/drivers/bus/pci/linux/pci.c dpdk-18.11.11/drivers/bus/pci/linux/pci.c
--- dpdk-18.11.10/drivers/bus/pci/linux/pci.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/bus/pci/linux/pci.c	2021-01-20 12:18:20.000000000 +0000
@@ -395,18 +395,6 @@
 	return 0;
 }
 
-int
-pci_update_device(const struct rte_pci_addr *addr)
-{
-	char filename[PATH_MAX];
-
-	snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
-		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
-		 addr->function);
-
-	return pci_scan_one(filename, addr);
-}
-
 /*
  * split up a pci address into its constituent parts.
  */
diff -Nru dpdk-18.11.10/drivers/bus/pci/linux/pci_vfio.c dpdk-18.11.11/drivers/bus/pci/linux/pci_vfio.c
--- dpdk-18.11.10/drivers/bus/pci/linux/pci_vfio.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/bus/pci/linux/pci_vfio.c	2021-01-20 12:18:20.000000000 +0000
@@ -809,7 +809,8 @@
 err_vfio_res:
 	rte_free(vfio_res);
 err_vfio_dev_fd:
-	close(vfio_dev_fd);
+	rte_vfio_release_device(rte_pci_get_sysfs_path(),
+			pci_addr, vfio_dev_fd);
 	return -1;
 }
 
@@ -877,7 +878,8 @@
 
 	return 0;
 err_vfio_dev_fd:
-	close(vfio_dev_fd);
+	rte_vfio_release_device(rte_pci_get_sysfs_path(),
+			pci_addr, vfio_dev_fd);
 	return -1;
 }
 
@@ -986,7 +988,7 @@
 	}
 
 	TAILQ_REMOVE(vfio_res_list, vfio_res, next);
-
+	rte_free(vfio_res);
 	return 0;
 }
 
diff -Nru dpdk-18.11.10/drivers/bus/pci/private.h dpdk-18.11.11/drivers/bus/pci/private.h
--- dpdk-18.11.10/drivers/bus/pci/private.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/bus/pci/private.h	2021-01-20 12:18:20.000000000 +0000
@@ -15,8 +15,6 @@
 struct rte_pci_driver;
 struct rte_pci_device;
 
-extern struct rte_pci_bus rte_pci_bus;
-
 /**
  * Scan the content of the PCI bus, and the devices in the devices
  * list
@@ -58,19 +56,6 @@
 		struct rte_pci_device *new_pci_dev);
 
 /**
- * Update a pci device object by asking the kernel for the latest information.
- *
- * This function is private to EAL.
- *
- * @param addr
- *	The PCI Bus-Device-Function address to look for
- * @return
- *   - 0 on success.
- *   - negative on error.
- */
-int pci_update_device(const struct rte_pci_addr *addr);
-
-/**
  * Map the PCI resource of a PCI device in virtual memory
  *
  * This function is private to EAL.
diff -Nru dpdk-18.11.10/drivers/common/qat/qat_device.c dpdk-18.11.11/drivers/common/qat/qat_device.c
--- dpdk-18.11.10/drivers/common/qat/qat_device.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/common/qat/qat_device.c	2021-01-20 12:18:20.000000000 +0000
@@ -301,3 +301,4 @@
 
 RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
+RTE_PMD_REGISTER_KMOD_DEP(QAT_PCI_NAME, "* igb_uio | uio_pci_generic | vfio-pci");
diff -Nru dpdk-18.11.10/drivers/compress/isal/isal_compress_pmd_ops.c dpdk-18.11.11/drivers/compress/isal/isal_compress_pmd_ops.c
--- dpdk-18.11.10/drivers/compress/isal/isal_compress_pmd_ops.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/compress/isal/isal_compress_pmd_ops.c	2021-01-20 12:18:20.000000000 +0000
@@ -247,16 +247,27 @@
 	qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
 			sizeof(struct isal_zstream),  RTE_CACHE_LINE_SIZE,
 			socket_id);
-
+	if (qp->stream == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate compression stream memory");
+		goto qp_setup_cleanup;
+	}
 	/* Initialize memory for compression level buffer */
 	qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
 			ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
 			socket_id);
+	if (qp->stream->level_buf == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate compression level_buf memory");
+		goto qp_setup_cleanup;
+	}
 
 	/* Initialize memory for decompression state structure */
 	qp->state = rte_zmalloc_socket("Isa-l decompression state",
 			sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
 			socket_id);
+	if (qp->state == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate decompression state memory");
+		goto qp_setup_cleanup;
+	}
 
 	qp->id = qp_id;
 	dev->data->queue_pairs[qp_id] = qp;
@@ -282,8 +293,11 @@
 	return 0;
 
 qp_setup_cleanup:
-	if (qp)
-		rte_free(qp);
+	if (qp->stream)
+		rte_free(qp->stream->level_buf);
+	rte_free(qp->stream);
+	rte_free(qp->state);
+	rte_free(qp);
 
 	return -1;
 }
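The isal queue-pair fix adds the missing NULL checks and rewrites qp_setup_cleanup so that nested allocations are released child-first: qp->stream->level_buf can only be reached, and must be freed, while qp->stream is still valid. A hedged sketch of that ordering, with hypothetical struct names:

    #include <stdlib.h>

    /* Hypothetical types illustrating child-before-parent cleanup. */
    struct stream {
        void *level_buf;    /* owned by the stream */
    };

    struct qpair {
        struct stream *stream;
        void *state;
    };

    static void qpair_cleanup(struct qpair *qp)
    {
        if (qp == NULL)
            return;
        if (qp->stream != NULL)
            free(qp->stream->level_buf); /* child first, parent still valid */
        free(qp->stream);
        free(qp->state);
        free(qp);                        /* parent last */
    }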
diff -Nru dpdk-18.11.10/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c dpdk-18.11.11/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
--- dpdk-18.11.10/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c	2021-01-20 12:18:20.000000000 +0000
@@ -2423,12 +2423,6 @@
 	return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY_TEST
-static uint8_t aes_cbc_iv[] = {
-	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
-#endif
-
 static int
 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 			    struct rte_security_session_conf *conf,
@@ -3066,7 +3060,7 @@
 		return;
 	}
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
-		if (qp[i] == NULL) {
+		if (qp == NULL || qp[i] == NULL) {
 			DPAA2_SEC_DEBUG("Uninitialised queue pair");
 			continue;
 		}
diff -Nru dpdk-18.11.10/drivers/crypto/octeontx/otx_cryptodev_ops.c dpdk-18.11.11/drivers/crypto/octeontx/otx_cryptodev_ops.c
--- dpdk-18.11.10/drivers/crypto/octeontx/otx_cryptodev_ops.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/crypto/octeontx/otx_cryptodev_ops.c	2021-01-20 12:18:20.000000000 +0000
@@ -509,6 +509,7 @@
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
 			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
 			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
 
diff -Nru dpdk-18.11.10/drivers/crypto/scheduler/meson.build dpdk-18.11.11/drivers/crypto/scheduler/meson.build
--- dpdk-18.11.10/drivers/crypto/scheduler/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/crypto/scheduler/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -13,7 +13,7 @@
 	'scheduler_roundrobin.c',
 )
 
-headers = files(
+install_headers(
 	'rte_cryptodev_scheduler.h',
 	'rte_cryptodev_scheduler_operations.h',
 )
diff -Nru dpdk-18.11.10/drivers/event/dpaa2/dpaa2_eventdev.c dpdk-18.11.11/drivers/event/dpaa2/dpaa2_eventdev.c
--- dpdk-18.11.10/drivers/event/dpaa2/dpaa2_eventdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/event/dpaa2/dpaa2_eventdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -553,14 +553,14 @@
 
 	EVENTDEV_INIT_FUNC_TRACE();
 
+	if (portal == NULL)
+		return;
+
 	/* TODO: Cleanup is required when ports are in linked state. */
 	if (portal->is_port_linked)
 		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
 
-	if (portal)
-		rte_free(portal);
-
-	portal = NULL;
+	rte_free(portal);
 }
 
 static int
diff -Nru dpdk-18.11.10/drivers/mempool/octeontx/octeontx_fpavf.c dpdk-18.11.11/drivers/mempool/octeontx/octeontx_fpavf.c
--- dpdk-18.11.10/drivers/mempool/octeontx/octeontx_fpavf.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/mempool/octeontx/octeontx_fpavf.c	2021-01-20 12:18:20.000000000 +0000
@@ -241,7 +241,7 @@
 		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
 		POOL_ENA;
 
-	cfg.aid = FPA_AURA_IDX(gpool);
+	cfg.aid = 0;
 	cfg.pool_cfg = reg;
 	cfg.pool_stack_base = phys_addr;
 	cfg.pool_stack_end = phys_addr + memsz;
@@ -325,7 +325,7 @@
 	hdr.vfid = gpool_index;
 	hdr.res_code = 0;
 	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
-	cfg.aid = FPA_AURA_IDX(gpool_index);
+	cfg.aid = 0;
 
 	ret = octeontx_mbox_send(&hdr, &cfg,
 					sizeof(struct octeontx_mbox_fpa_cfg),
@@ -354,7 +354,7 @@
 		goto err;
 	}
 
-	cfg.aid = FPA_AURA_IDX(gpool_index);
+	cfg.aid = 0;
 	hdr.coproc = FPA_COPROC;
 	hdr.msg = FPA_DETACHAURA;
 	hdr.vfid = gpool_index;
diff -Nru dpdk-18.11.10/drivers/net/avf/avf_ethdev.c dpdk-18.11.11/drivers/net/avf/avf_ethdev.c
--- dpdk-18.11.10/drivers/net/avf/avf_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/avf/avf_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -121,41 +121,11 @@
 };
 
 static int
-avf_dev_configure(struct rte_eth_dev *dev)
-{
-	struct avf_adapter *ad =
-		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	struct avf_info *vf =  AVF_DEV_PRIVATE_TO_VF(ad);
-	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
-
-	ad->rx_bulk_alloc_allowed = true;
-#ifdef RTE_LIBRTE_AVF_INC_VECTOR
-	/* Initialize to TRUE. If any of Rx queues doesn't meet the
-	 * vector Rx/Tx preconditions, it will be reset.
-	 */
-	ad->rx_vec_allowed = true;
-	ad->tx_vec_allowed = true;
-#else
-	ad->rx_vec_allowed = false;
-	ad->tx_vec_allowed = false;
-#endif
-
-	/* Vlan stripping setting */
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
-		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
-			avf_enable_vlan_strip(ad);
-		else
-			avf_disable_vlan_strip(ad);
-	}
-	return 0;
-}
-
-static int
 avf_init_rss(struct avf_adapter *adapter)
 {
 	struct avf_info *vf =  AVF_DEV_PRIVATE_TO_VF(adapter);
 	struct rte_eth_rss_conf *rss_conf;
-	uint8_t i, j, nb_q;
+	uint16_t i, j, nb_q;
 	int ret;
 
 	rss_conf = &adapter->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
@@ -207,6 +177,43 @@
 }
 
 static int
+avf_dev_configure(struct rte_eth_dev *dev)
+{
+	struct avf_adapter *ad =
+		AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct avf_info *vf =  AVF_DEV_PRIVATE_TO_VF(ad);
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+	ad->rx_bulk_alloc_allowed = true;
+#ifdef RTE_LIBRTE_AVF_INC_VECTOR
+	/* Initialize to TRUE. If any of Rx queues doesn't meet the
+	 * vector Rx/Tx preconditions, it will be reset.
+	 */
+	ad->rx_vec_allowed = true;
+	ad->tx_vec_allowed = true;
+#else
+	ad->rx_vec_allowed = false;
+	ad->tx_vec_allowed = false;
+#endif
+
+	/* Vlan stripping setting */
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
+		if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			avf_enable_vlan_strip(ad);
+		else
+			avf_disable_vlan_strip(ad);
+	}
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		if (avf_init_rss(ad) != 0) {
+			PMD_DRV_LOG(ERR, "configure rss failed");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
 avf_init_rxq(struct rte_eth_dev *dev, struct avf_rx_queue *rxq)
 {
 	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -246,7 +253,7 @@
 
 	rxq->max_pkt_len = max_pkt_len;
 	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
-	    (rxq->max_pkt_len + 2 * AVF_VLAN_TAG_SIZE) > buf_size) {
+	    rxq->max_pkt_len > buf_size) {
 		dev_data->scattered_rx = 1;
 	}
 	AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
@@ -425,14 +432,6 @@
 		PMD_DRV_LOG(ERR, "failed to do Queue init");
 		return -1;
 	}
-
-	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
-		if (avf_init_rss(adapter) != 0) {
-			PMD_DRV_LOG(ERR, "configure rss failed");
-			goto err_rss;
-		}
-	}
-
 	if (avf_configure_queues(adapter) != 0) {
 		PMD_DRV_LOG(ERR, "configure queues failed");
 		goto err_queue;
@@ -461,7 +460,6 @@
 err_mac:
 	avf_add_del_all_mac_addr(adapter, FALSE);
 err_queue:
-err_rss:
 	return -1;
 }
 
@@ -1138,6 +1136,7 @@
 			goto err_rss;
 		}
 	}
+
 	return 0;
 err_rss:
 	rte_free(vf->rss_key);
@@ -1268,6 +1267,7 @@
 static void
 avf_dev_close(struct rte_eth_dev *dev)
 {
+	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1281,6 +1281,8 @@
 	rte_intr_callback_unregister(intr_handle,
 				     avf_dev_interrupt_handler, dev);
 	avf_disable_irq0(hw);
+
+	vf->vf_reset = false;
 }
 
 static int
diff -Nru dpdk-18.11.10/drivers/net/avf/avf.h dpdk-18.11.11/drivers/net/avf/avf.h
--- dpdk-18.11.10/drivers/net/avf/avf.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/avf/avf.h	2021-01-20 12:18:20.000000000 +0000
@@ -98,7 +98,7 @@
 	enum virtchnl_link_speed link_speed;
 
 	struct avf_vsi vsi;
-	bool vf_reset;
+	bool vf_reset;	/* true for VF reset pending, false for no VF reset */
 	uint64_t flags;
 
 	uint8_t *rss_lut;
diff -Nru dpdk-18.11.10/drivers/net/avf/avf_rxtx.c dpdk-18.11.11/drivers/net/avf/avf_rxtx.c
--- dpdk-18.11.10/drivers/net/avf/avf_rxtx.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/avf/avf_rxtx.c	2021-01-20 12:18:20.000000000 +0000
@@ -169,6 +169,8 @@
 	rxq->nb_rx_hold = 0;
 	rxq->pkt_first_seg = NULL;
 	rxq->pkt_last_seg = NULL;
+	rxq->rxrearm_nb = 0;
+	rxq->rxrearm_start = 0;
 }
 
 static inline void
diff -Nru dpdk-18.11.10/drivers/net/avf/avf_rxtx_vec_sse.c dpdk-18.11.11/drivers/net/avf/avf_rxtx_vec_sse.c
--- dpdk-18.11.10/drivers/net/avf/avf_rxtx_vec_sse.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/avf/avf_rxtx_vec_sse.c	2021-01-20 12:18:20.000000000 +0000
@@ -227,10 +227,12 @@
 	rx_pkts[3]->packet_type = type_table[_mm_extract_epi8(ptype1, 8)];
 }
 
-/* Notice:
+/**
+ * vPMD raw receive routine (only accepts nb_pkts >= AVF_VPMD_DESCS_PER_LOOP)
+ *
+ * Notice:
  * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > AVF_VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
- *   numbers of DD bits
+ * - nb_pkts is floor-aligned to a multiple of AVF_VPMD_DESCS_PER_LOOP
  */
 static inline uint16_t
 _recv_raw_pkts_vec(struct avf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
@@ -260,9 +262,6 @@
 			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
 	__m128i dd_check, eop_check;
 
-	/* nb_pkts shall be less equal than AVF_VPMD_RX_MAX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, AVF_VPMD_RX_MAX_BURST);
-
 	/* nb_pkts has to be floor-aligned to AVF_VPMD_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, AVF_VPMD_DESCS_PER_LOOP);
 
@@ -486,15 +485,15 @@
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
-/* vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ *
  * Notice:
  * - nb_pkts < AVF_VPMD_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > VPMD_RX_MAX_BURST, only scan AVF_VPMD_RX_MAX_BURST
- *   numbers of DD bits
  */
-uint16_t
-avf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-			    uint16_t nb_pkts)
+static uint16_t
+avf_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			     uint16_t nb_pkts)
 {
 	struct avf_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[AVF_VPMD_RX_MAX_BURST] = {0};
@@ -527,6 +526,32 @@
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+avf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			    uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > AVF_VPMD_RX_MAX_BURST) {
+		uint16_t burst;
+
+		burst = avf_recv_scattered_burst_vec(rx_queue,
+						     rx_pkts + retval,
+						     AVF_VPMD_RX_MAX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < AVF_VPMD_RX_MAX_BURST)
+			return retval;
+	}
+
+	return retval + avf_recv_scattered_burst_vec(rx_queue,
+						     rx_pkts + retval,
+						     nb_pkts);
+}
+
 static inline void
 vtx1(volatile struct avf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
 {
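The rework above is a common vPMD idiom: the SSE routine can only handle AVF_VPMD_RX_MAX_BURST descriptors per call, so instead of silently truncating the request, the public entry point now loops over fixed-size chunks and stops as soon as a short burst indicates the ring is drained. A generic sketch of that splitting loop, with a hypothetical recv_burst() callback standing in for avf_recv_scattered_burst_vec():

    #include <stdint.h>

    #define MAX_BURST 32  /* stands in for AVF_VPMD_RX_MAX_BURST */

    /* Hypothetical low-level routine: fills up to 'n' slots (n <= MAX_BURST)
     * and returns how many packets it actually received. */
    typedef uint16_t (*recv_burst_t)(void *rxq, void **pkts, uint16_t n);

    static uint16_t
    recv_pkts_split(recv_burst_t recv_burst, void *rxq, void **pkts,
                    uint16_t nb_pkts)
    {
        uint16_t retval = 0;

        /* Full-sized chunks first; a short burst means the ring is drained. */
        while (nb_pkts > MAX_BURST) {
            uint16_t burst = recv_burst(rxq, pkts + retval, MAX_BURST);

            retval += burst;
            nb_pkts -= burst;
            if (burst < MAX_BURST)
                return retval;
        }

        /* Remaining tail, at most MAX_BURST packets. */
        return retval + recv_burst(rxq, pkts + retval, nb_pkts);
    }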
diff -Nru dpdk-18.11.10/drivers/net/avf/avf_vchnl.c dpdk-18.11.11/drivers/net/avf/avf_vchnl.c
--- dpdk-18.11.10/drivers/net/avf/avf_vchnl.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/avf/avf_vchnl.c	2021-01-20 12:18:20.000000000 +0000
@@ -73,6 +73,9 @@
 	int err = 0;
 	int i = 0;
 
+	if (vf->vf_reset)
+		return -EIO;
+
 	if (_atomic_set_cmd(vf, args->ops))
 		return -1;
 
@@ -147,6 +150,7 @@
 	switch (pf_msg->event) {
 	case VIRTCHNL_EVENT_RESET_IMPENDING:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+		vf->vf_reset = true;
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
 					      NULL);
 		break;
@@ -222,7 +226,7 @@
 			}
 			break;
 		default:
-			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+			PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
 				    aq_opc);
 			break;
 		}
diff -Nru dpdk-18.11.10/drivers/net/bnx2x/bnx2x_ethdev.c dpdk-18.11.11/drivers/net/bnx2x/bnx2x_ethdev.c
--- dpdk-18.11.10/drivers/net/bnx2x/bnx2x_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnx2x/bnx2x_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -19,6 +19,7 @@
  * The set of PCI devices this driver supports
  */
 #define BROADCOM_PCI_VENDOR_ID 0x14E4
+#define QLOGIC_PCI_VENDOR_ID 0x1077
 static const struct rte_pci_id pci_id_bnx2x_map[] = {
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
@@ -26,11 +27,13 @@
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
+	{ RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
 #ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
+	{ RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
 #endif
 	{ .vendor_id = 0, }
 };
@@ -40,6 +43,7 @@
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
 	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
+	{ RTE_PCI_DEVICE(QLOGIC_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
 	{ .vendor_id = 0, }
 };
 
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_ethdev.c dpdk-18.11.11/drivers/net/bnxt/bnxt_ethdev.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -445,8 +445,7 @@
 			.wthresh = 0,
 		},
 		.rx_free_thresh = 32,
-		/* If no descriptors available, pkts are dropped by default */
-		.rx_drop_en = 1,
+		.rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -753,7 +752,7 @@
 	struct bnxt_filter_info *filter;
 	int rc = 0;
 
-	if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
+	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
 		PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
 		return -ENOTSUP;
 	}
@@ -1308,14 +1307,6 @@
 	}
 
 	rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
-	if (!rc) {
-		if (tunnel_type ==
-		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
-			bp->vxlan_port = 0;
-		if (tunnel_type ==
-		    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
-			bp->geneve_port = 0;
-	}
 	return rc;
 }
 
@@ -1577,8 +1568,9 @@
 	qinfo->nb_desc = rxq->nb_rx_desc;
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
-	qinfo->conf.rx_drop_en = 0;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
 	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
 }
 
 static void
@@ -1598,6 +1590,7 @@
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = 0;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
 }
 
 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
@@ -3518,8 +3511,7 @@
 
 	if (bp->dev_stopped == 0)
 		bnxt_dev_close_op(eth_dev);
-	if (bp->pf.vf_info)
-		rte_free(bp->pf.vf_info);
+	bnxt_hwrm_free_vf_info(bp);
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_filter.c dpdk-18.11.11/drivers/net/bnxt/bnxt_filter.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_filter.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_filter.c	2021-01-20 12:18:20.000000000 +0000
@@ -80,6 +80,15 @@
 	struct bnxt_filter_info *filter, *temp_filter;
 	unsigned int i;
 
+	for (i = 0; i < bp->pf.max_vfs; i++) {
+		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+			bnxt_hwrm_clear_l2_filter(bp, filter);
+		}
+	}
+
+	if (bp->vnic_info == NULL)
+		return;
+
 	for (i = 0; i < bp->nr_vnics; i++) {
 		vnic = &bp->vnic_info[i];
 		filter = STAILQ_FIRST(&vnic->filter);
@@ -93,12 +102,6 @@
 		}
 		STAILQ_INIT(&vnic->filter);
 	}
-
-	for (i = 0; i < bp->pf.max_vfs; i++) {
-		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
-			bnxt_hwrm_clear_l2_filter(bp, filter);
-		}
-	}
 }
 
 void bnxt_free_filter_mem(struct bnxt *bp)
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_flow.c dpdk-18.11.11/drivers/net/bnxt/bnxt_flow.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_flow.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_flow.c	2021-01-20 12:18:20.000000000 +0000
@@ -1055,12 +1055,24 @@
 		filter->enables |=
 			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
 		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
+		if (ret != 0) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to create EM filter");
+			goto free_filter;
+		}
 	}
 
 	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
 		filter->enables |=
 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
 		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
+		if (ret != 0) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to create ntuple filter");
+			goto free_filter;
+		}
 	}
 
 	for (i = 0; i < bp->nr_vnics; i++) {
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_hwrm.c dpdk-18.11.11/drivers/net/bnxt/bnxt_hwrm.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_hwrm.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_hwrm.c	2021-01-20 12:18:20.000000000 +0000
@@ -52,7 +52,7 @@
 	if (size <= 1 << 30)
 		return 30;
 	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
-	return sizeof(void *) * 8 - 1;
+	return sizeof(int) * 8 - 1;
 }
 
 static int page_roundup(size_t size)
@@ -507,6 +507,20 @@
 	return 0;
 }
 
+void bnxt_hwrm_free_vf_info(struct bnxt *bp)
+{
+	uint16_t i;
+
+	for (i = 0; i < bp->pf.max_vfs; i++) {
+		rte_free(bp->pf.vf_info[i].vlan_table);
+		bp->pf.vf_info[i].vlan_table = NULL;
+		rte_free(bp->pf.vf_info[i].vlan_as_table);
+		bp->pf.vf_info[i].vlan_as_table = NULL;
+	}
+	rte_free(bp->pf.vf_info);
+	bp->pf.vf_info = NULL;
+}
+
 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
@@ -533,9 +547,13 @@
 		new_max_vfs = bp->pdev->max_vfs;
 		if (new_max_vfs != bp->pf.max_vfs) {
 			if (bp->pf.vf_info)
-				rte_free(bp->pf.vf_info);
-			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
+				bnxt_hwrm_free_vf_info(bp);
+			bp->pf.vf_info = rte_zmalloc("bnxt_vf_info",
 			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
+			if (bp->pf.vf_info == NULL) {
+				PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
+				return -ENOMEM;
+			}
 			bp->pf.max_vfs = new_max_vfs;
 			for (i = 0; i < new_max_vfs; i++) {
 				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
@@ -1929,23 +1947,11 @@
 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
 		ring->fw_ring_id = INVALID_HW_RING_ID;
 		bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
-		memset(rxr->rx_desc_ring, 0,
-		       rxr->rx_ring_struct->ring_size *
-		       sizeof(*rxr->rx_desc_ring));
-		memset(rxr->rx_buf_ring, 0,
-		       rxr->rx_ring_struct->ring_size *
-		       sizeof(*rxr->rx_buf_ring));
-		rxr->rx_prod = 0;
 	}
 	ring = rxr->ag_ring_struct;
 	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
 		bnxt_hwrm_ring_free(bp, ring,
 				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
-		ring->fw_ring_id = INVALID_HW_RING_ID;
-		memset(rxr->ag_buf_ring, 0,
-		       rxr->ag_ring_struct->ring_size *
-		       sizeof(*rxr->ag_buf_ring));
-		rxr->ag_prod = 0;
 		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
 	}
 	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
@@ -2107,11 +2113,10 @@
 	if (bp->vxlan_port_cnt)
 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
-	bp->vxlan_port = 0;
+
 	if (bp->geneve_port_cnt)
 		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
 			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
-	bp->geneve_port = 0;
 }
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
@@ -2864,17 +2869,19 @@
 
 	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
 	req.tunnel_type = tunnel_type;
-	req.tunnel_dst_port_val = port;
+	req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
 	switch (tunnel_type) {
 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
-		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+		bp->vxlan_fw_dst_port_id =
+			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
 		bp->vxlan_port = port;
 		break;
 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
-		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
+		bp->geneve_fw_dst_port_id =
+			rte_le_to_cpu_16(resp->tunnel_dst_port_id);
 		bp->geneve_port = port;
 		break;
 	default:
@@ -2902,6 +2909,18 @@
 	HWRM_CHECK_RESULT();
 	HWRM_UNLOCK();
 
+	if (tunnel_type ==
+	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN) {
+		bp->vxlan_port = 0;
+		bp->vxlan_port_cnt = 0;
+	}
+
+	if (tunnel_type ==
+	    HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE) {
+		bp->geneve_port = 0;
+		bp->geneve_port_cnt = 0;
+	}
+
 	return rc;
 }
 
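The tunnel-port hunks above add the byte-order conversions the firmware interface requires: the UDP port travels big-endian on the wire (rte_cpu_to_be_16()), while the returned port ID is little-endian (rte_le_to_cpu_16()), so passing raw host-order values only happened to work when host and interface endianness matched. A small portable sketch of 16-bit conversions, written with explicit byte accesses so it does not depend on the DPDK helpers or on the host's endianness:

    #include <stdint.h>

    /* Illustrative stand-ins for rte_cpu_to_be_16()/rte_le_to_cpu_16().
     * Going through byte arrays keeps the code endian-independent. */
    static void u16_store_be(uint16_t v, uint8_t out[2])
    {
        out[0] = (uint8_t)(v >> 8);     /* most significant byte first */
        out[1] = (uint8_t)(v & 0xff);
    }

    static uint16_t u16_load_le(const uint8_t in[2])
    {
        /* least significant byte first */
        return (uint16_t)(in[0] | ((uint16_t)in[1] << 8));
    }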
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_hwrm.h dpdk-18.11.11/drivers/net/bnxt/bnxt_hwrm.h
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_hwrm.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_hwrm.h	2021-01-20 12:18:20.000000000 +0000
@@ -60,6 +60,7 @@
 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
 int bnxt_hwrm_func_driver_register(struct bnxt *bp);
 int bnxt_hwrm_func_qcaps(struct bnxt *bp);
+void bnxt_hwrm_free_vf_info(struct bnxt *bp);
 int bnxt_hwrm_func_reset(struct bnxt *bp);
 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_ring.h dpdk-18.11.11/drivers/net/bnxt/bnxt_ring.h
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_ring.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_ring.h	2021-01-20 12:18:20.000000000 +0000
@@ -27,7 +27,7 @@
 #define DEFAULT_TX_RING_SIZE	256
 
 #define BNXT_TPA_MAX		64
-#define AGG_RING_SIZE_FACTOR	2
+#define AGG_RING_SIZE_FACTOR	4
 #define AGG_RING_MULTIPLIER	2
 
 /* These assume 4k pages */
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_rxq.c dpdk-18.11.11/drivers/net/bnxt/bnxt_rxq.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_rxq.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_rxq.c	2021-01-20 12:18:20.000000000 +0000
@@ -205,7 +205,7 @@
 	struct bnxt_tpa_info *tpa_info;
 	uint16_t i;
 
-	if (!rxq)
+	if (!rxq || !rxq->rx_ring)
 		return;
 
 	rte_spinlock_lock(&rxq->lock);
@@ -265,12 +265,21 @@
 		bnxt_rx_queue_release_mbufs(rxq);
 
 		/* Free RX ring hardware descriptors */
-		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
-		/* Free RX Agg ring hardware descriptors */
-		bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+		if (rxq->rx_ring) {
+			bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+			rte_free(rxq->rx_ring->rx_ring_struct);
+			/* Free RX Agg ring hardware descriptors */
+			bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
+			rte_free(rxq->rx_ring->ag_ring_struct);
 
+			rte_free(rxq->rx_ring);
+		}
 		/* Free RX completion ring hardware descriptors */
-		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+		if (rxq->cp_ring) {
+			bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
+			rte_free(rxq->cp_ring->cp_ring_struct);
+			rte_free(rxq->cp_ring);
+		}
 
 		bnxt_free_rxq_stats(rxq);
 		rte_memzone_free(rxq->mz);
@@ -302,8 +311,7 @@
 
 	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
 		PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	if (eth_dev->data->rx_queues) {
@@ -315,19 +323,26 @@
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (!rxq) {
 		PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	rxq->bp = bp;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
+	if (rx_conf->rx_drop_en != BNXT_DEFAULT_RX_DROP_EN)
+		PMD_DRV_LOG(NOTICE,
+			    "Per-queue config of drop-en is not supported.\n");
+	rxq->drop_en = BNXT_DEFAULT_RX_DROP_EN;
+
 	PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
 
 	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
-	if (rc)
-		goto out;
+	if (rc) {
+		PMD_DRV_LOG(ERR,
+			    "init_rx_ring_struct failed!\n");
+		goto err;
+	}
 
 	PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_size);
 	rxq->queue_id = queue_idx;
@@ -342,10 +357,8 @@
 	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
 			"rxr")) {
 		PMD_DRV_LOG(ERR,
-			"ring_dma_zone_reserve for rx_ring failed!\n");
-		bnxt_rx_queue_release_op(rxq);
-		rc = -ENOMEM;
-		goto out;
+			    "ring_dma_zone_reserve for rx_ring failed!\n");
+		goto err;
 	}
 	rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
 
@@ -369,7 +382,10 @@
 	/* Configure mtu if it is different from what was configured before */
 	if (!queue_idx)
 		bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
-out:
+
+	return 0;
+err:
+	bnxt_rx_queue_release_op(rxq);
 	return rc;
 }
 
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_rxq.h dpdk-18.11.11/drivers/net/bnxt/bnxt_rxq.h
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_rxq.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_rxq.h	2021-01-20 12:18:20.000000000 +0000
@@ -6,6 +6,9 @@
 #ifndef _BNXT_RQX_H_
 #define _BNXT_RQX_H_
 
+/* Drop by default when receive desc is not available. */
+#define BNXT_DEFAULT_RX_DROP_EN		1
+
 struct bnxt;
 struct bnxt_rx_ring_info;
 struct bnxt_cp_ring_info;
@@ -27,6 +30,7 @@
 	uint8_t			crc_len; /* 0 if CRC stripped, 4 otherwise */
 	uint8_t			rx_deferred_start; /* not in global dev start */
 	uint8_t			rx_started; /* RX queue is started */
+	uint8_t			drop_en; /* Drop when rx desc not available. */
 
 	struct bnxt		*bp;
 	int			index;
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_rxr.c dpdk-18.11.11/drivers/net/bnxt/bnxt_rxr.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_rxr.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_rxr.c	2021-01-20 12:18:20.000000000 +0000
@@ -141,6 +141,7 @@
 	tpa_info->mbuf = mbuf;
 	tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
 
+	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 	mbuf->nb_segs = 1;
 	mbuf->next = NULL;
 	mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
@@ -694,6 +695,7 @@
 	ring->bd_dma = rxr->rx_desc_mapping;
 	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
 	ring->vmem = (void **)&rxr->rx_buf_ring;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	cpr = rte_zmalloc_socket("bnxt_rx_ring",
 				 sizeof(struct bnxt_cp_ring_info),
@@ -715,6 +717,7 @@
 	ring->bd_dma = cpr->cp_desc_mapping;
 	ring->vmem_size = 0;
 	ring->vmem = NULL;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	/* Allocate Aggregator rings */
 	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
@@ -730,6 +733,7 @@
 	ring->bd_dma = rxr->ag_desc_mapping;
 	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd);
 	ring->vmem = (void **)&rxr->ag_buf_ring;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	return 0;
 }
@@ -768,11 +772,13 @@
 
 	prod = rxr->rx_prod;
 	for (i = 0; i < ring->ring_size; i++) {
-		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
-			PMD_DRV_LOG(WARNING,
-				"init'ed rx ring %d with %d/%d mbufs only\n",
-				rxq->queue_id, i, ring->ring_size);
-			break;
+		if (unlikely(!(rxr->rx_buf_ring[i].mbuf))) {
+			if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+				PMD_DRV_LOG(WARNING,
+					    "init'ed rx ring %d with %d/%d mbufs only\n",
+					    rxq->queue_id, i, ring->ring_size);
+				break;
+			}
 		}
 		rxr->rx_prod = prod;
 		prod = RING_NEXT(rxr->rx_ring_struct, prod);
@@ -784,11 +790,13 @@
 	prod = rxr->ag_prod;
 
 	for (i = 0; i < ring->ring_size; i++) {
-		if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
-			PMD_DRV_LOG(WARNING,
-			"init'ed AG ring %d with %d/%d mbufs only\n",
-			rxq->queue_id, i, ring->ring_size);
-			break;
+		if (unlikely(!(rxr->ag_buf_ring[i].mbuf))) {
+			if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+				PMD_DRV_LOG(WARNING,
+					    "init'ed AG ring %d with %d/%d mbufs only\n",
+					    rxq->queue_id, i, ring->ring_size);
+				break;
+			}
 		}
 		rxr->ag_prod = prod;
 		prod = RING_NEXT(rxr->ag_ring_struct, prod);
@@ -797,11 +805,13 @@
 
 	if (rxr->tpa_info) {
 		for (i = 0; i < BNXT_TPA_MAX; i++) {
-			rxr->tpa_info[i].mbuf =
-				__bnxt_alloc_rx_data(rxq->mb_pool);
-			if (!rxr->tpa_info[i].mbuf) {
-				rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
-				return -ENOMEM;
+			if (unlikely(!(rxr->tpa_info[i].mbuf))) {
+				rxr->tpa_info[i].mbuf =
+					__bnxt_alloc_rx_data(rxq->mb_pool);
+				if (!rxr->tpa_info[i].mbuf) {
+					rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+					return -ENOMEM;
+				}
 			}
 		}
 	}
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_txq.c dpdk-18.11.11/drivers/net/bnxt/bnxt_txq.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_txq.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_txq.c	2021-01-20 12:18:20.000000000 +0000
@@ -28,7 +28,7 @@
 	struct bnxt_sw_tx_bd *sw_ring;
 	uint16_t i;
 
-	if (!txq)
+	if (!txq || !txq->tx_ring)
 		return;
 
 	sw_ring = txq->tx_ring->tx_buf_ring;
@@ -60,10 +60,18 @@
 	if (txq) {
 		/* Free TX ring hardware descriptors */
 		bnxt_tx_queue_release_mbufs(txq);
-		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+		if (txq->tx_ring) {
+			bnxt_free_ring(txq->tx_ring->tx_ring_struct);
+			rte_free(txq->tx_ring->tx_ring_struct);
+			rte_free(txq->tx_ring);
+		}
 
 		/* Free TX completion ring hardware descriptors */
-		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+		if (txq->cp_ring) {
+			bnxt_free_ring(txq->cp_ring->cp_ring_struct);
+			rte_free(txq->cp_ring->cp_ring_struct);
+			rte_free(txq->cp_ring);
+		}
 
 		bnxt_free_txq_stats(txq);
 		rte_memzone_free(txq->mz);
@@ -92,8 +100,7 @@
 
 	if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
 		PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
-		rc = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	if (eth_dev->data->tx_queues) {
@@ -107,8 +114,7 @@
 				 RTE_CACHE_LINE_SIZE, socket_id);
 	if (!txq) {
 		PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	txq->bp = bp;
 	txq->nb_tx_desc = nb_desc;
@@ -117,7 +123,7 @@
 
 	rc = bnxt_init_tx_ring_struct(txq, socket_id);
 	if (rc)
-		goto out;
+		goto err;
 
 	txq->queue_id = queue_idx;
 	txq->port_id = eth_dev->data->port_id;
@@ -126,16 +132,14 @@
 	if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
 			"txr")) {
 		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
-		bnxt_tx_queue_release_op(txq);
 		rc = -ENOMEM;
-		goto out;
+		goto err;
 	}
 
 	if (bnxt_init_one_tx_ring(txq)) {
 		PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
-		bnxt_tx_queue_release_op(txq);
 		rc = -ENOMEM;
-		goto out;
+		goto err;
 	}
 
 	eth_dev->data->tx_queues[queue_idx] = txq;
@@ -145,6 +149,8 @@
 	else
 		txq->tx_started = true;
 
-out:
+	return 0;
+err:
+	bnxt_tx_queue_release_op(txq);
 	return rc;
 }
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_txr.c dpdk-18.11.11/drivers/net/bnxt/bnxt_txr.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_txr.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_txr.c	2021-01-20 12:18:20.000000000 +0000
@@ -79,6 +79,7 @@
 	ring->bd_dma = txr->tx_desc_mapping;
 	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
 	ring->vmem = (void **)&txr->tx_buf_ring;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	cpr = rte_zmalloc_socket("bnxt_tx_ring",
 				 sizeof(struct bnxt_cp_ring_info),
@@ -99,6 +100,7 @@
 	ring->bd_dma = cpr->cp_desc_mapping;
 	ring->vmem_size = 0;
 	ring->vmem = NULL;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
 
 	return 0;
 }
diff -Nru dpdk-18.11.10/drivers/net/bnxt/bnxt_vnic.c dpdk-18.11.11/drivers/net/bnxt/bnxt_vnic.c
--- dpdk-18.11.10/drivers/net/bnxt/bnxt_vnic.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bnxt/bnxt_vnic.c	2021-01-20 12:18:20.000000000 +0000
@@ -78,6 +78,9 @@
 	struct bnxt_vnic_info *temp;
 	unsigned int i;
 
+	if (bp->vnic_info == NULL)
+		return;
+
 	for (i = 0; i < bp->nr_vnics; i++) {
 		temp = &bp->vnic_info[i];
 		STAILQ_INSERT_TAIL(&bp->free_vnic_list, temp, next);
diff -Nru dpdk-18.11.10/drivers/net/bonding/rte_eth_bond_api.c dpdk-18.11.11/drivers/net/bonding/rte_eth_bond_api.c
--- dpdk-18.11.10/drivers/net/bonding/rte_eth_bond_api.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bonding/rte_eth_bond_api.c	2021-01-20 12:18:20.000000000 +0000
@@ -129,12 +129,6 @@
 	RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves));
 	internals->active_slave_count = active_count;
 
-	/* Resetting active_slave when reaches to max
-	 * no of slaves in active list
-	 */
-	if (internals->active_slave >= active_count)
-		internals->active_slave = 0;
-
 	if (eth_dev->data->dev_started) {
 		if (internals->mode == BONDING_MODE_8023AD) {
 			bond_mode_8023ad_start(eth_dev);
diff -Nru dpdk-18.11.10/drivers/net/bonding/rte_eth_bond_pmd.c dpdk-18.11.11/drivers/net/bonding/rte_eth_bond_pmd.c
--- dpdk-18.11.10/drivers/net/bonding/rte_eth_bond_pmd.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bonding/rte_eth_bond_pmd.c	2021-01-20 12:18:20.000000000 +0000
@@ -68,7 +68,7 @@
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 	internals = bd_rx_q->dev_private;
 	slave_count = internals->active_slave_count;
-	active_slave = internals->active_slave;
+	active_slave = bd_rx_q->active_slave;
 
 	for (i = 0; i < slave_count && nb_pkts; i++) {
 		uint16_t num_rx_slave;
@@ -85,8 +85,8 @@
 			active_slave = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 	return num_rx_total;
 }
 
@@ -282,9 +282,9 @@
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	idx = internals->active_slave;
+	idx = bd_rx_q->active_slave;
 	if (idx >= slave_count) {
-		internals->active_slave = 0;
+		bd_rx_q->active_slave = 0;
 		idx = 0;
 	}
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
@@ -342,8 +342,8 @@
 			idx = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 
 	return num_rx_total;
 }
@@ -508,8 +508,8 @@
 static uint16_t
 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
-	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
-	struct bond_dev_private *internals = bd_tx_q->dev_private;
+	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+	struct bond_dev_private *internals = bd_rx_q->dev_private;
 	struct ether_hdr *eth_h;
 	uint16_t ether_type, offset;
 	uint16_t nb_recv_pkts;
diff -Nru dpdk-18.11.10/drivers/net/bonding/rte_eth_bond_private.h dpdk-18.11.11/drivers/net/bonding/rte_eth_bond_private.h
--- dpdk-18.11.10/drivers/net/bonding/rte_eth_bond_private.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/bonding/rte_eth_bond_private.h	2021-01-20 12:18:20.000000000 +0000
@@ -50,6 +50,8 @@
 /** Port Queue Mapping Structure */
 struct bond_rx_queue {
 	uint16_t queue_id;
+	/**< Next active_slave to poll */
+	uint16_t active_slave;
 	/**< Queue Id */
 	struct bond_dev_private *dev_private;
 	/**< Reference to eth_dev private structure */
@@ -135,7 +137,6 @@
 	uint16_t nb_rx_queues;			/**< Total number of rx queues */
 	uint16_t nb_tx_queues;			/**< Total number of tx queues*/
 
-	uint16_t active_slave;		/**< Next active_slave to poll */
 	uint16_t active_slave_count;		/**< Number of active slaves */
 	uint16_t active_slaves[RTE_MAX_ETHPORTS];    /**< Active slave list */
 
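Moving active_slave out of bond_dev_private and into bond_rx_queue is a concurrency fix: each Rx queue may be polled from a different lcore, and a single shared cursor was read and written by all of them without synchronisation. Giving every queue its own round-robin index removes that data race without adding locks. A minimal sketch with hypothetical structures:

    #include <stdint.h>

    #define MAX_SLAVES 8    /* stands in for RTE_MAX_ETHPORTS */

    struct rxq {
        uint16_t active_slave;  /* per-queue cursor, owned by one lcore */
    };

    struct bond {
        uint16_t active_slave_count;
        uint16_t active_slaves[MAX_SLAVES];
    };

    /* Return the port id of the slave to poll next and advance this
     * queue's private cursor; no locking is needed because only the
     * lcore servicing this rxq ever touches q->active_slave. */
    static uint16_t next_slave(const struct bond *b, struct rxq *q)
    {
        uint16_t id;

        if (b->active_slave_count == 0)
            return UINT16_MAX;          /* nothing active to poll */
        if (q->active_slave >= b->active_slave_count)
            q->active_slave = 0;        /* slave set shrank since last poll */

        id = b->active_slaves[q->active_slave];
        if (++q->active_slave >= b->active_slave_count)
            q->active_slave = 0;
        return id;
    }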
diff -Nru dpdk-18.11.10/drivers/net/cxgbe/base/adapter.h dpdk-18.11.11/drivers/net/cxgbe/base/adapter.h
--- dpdk-18.11.10/drivers/net/cxgbe/base/adapter.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/cxgbe/base/adapter.h	2021-01-20 12:18:20.000000000 +0000
@@ -825,6 +825,7 @@
 int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
 void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
 void t4_sge_eth_clear_queues(struct port_info *pi);
+void t4_sge_eth_release_queues(struct port_info *pi);
 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
 			       unsigned int cnt);
 int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
diff -Nru dpdk-18.11.10/drivers/net/cxgbe/cxgbe_ethdev.c dpdk-18.11.11/drivers/net/cxgbe/cxgbe_ethdev.c
--- dpdk-18.11.10/drivers/net/cxgbe/cxgbe_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/cxgbe/cxgbe_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -329,12 +329,7 @@
 		return;
 
 	cxgbe_down(pi);
-
-	/*
-	 *  We clear queues only if both tx and rx path of the port
-	 *  have been disabled
-	 */
-	t4_sge_eth_clear_queues(pi);
+	t4_sge_eth_release_queues(pi);
 }
 
 /* Start the device.
diff -Nru dpdk-18.11.10/drivers/net/cxgbe/mps_tcam.c dpdk-18.11.11/drivers/net/cxgbe/mps_tcam.c
--- dpdk-18.11.10/drivers/net/cxgbe/mps_tcam.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/cxgbe/mps_tcam.c	2021-01-20 12:18:20.000000000 +0000
@@ -140,6 +140,7 @@
 	/* idx can now be different from what user provided */
 	entry = &mpstcam->entry[idx];
 	memcpy(entry->eth_addr, addr, ETHER_ADDR_LEN);
+	memset(entry->mask, ~0, ETHER_ADDR_LEN);
 	/* NOTE: we have considered the case that idx returned by t4_change_mac
 	 * will be different from the user provided value only if user
 	 * provided value is -1
diff -Nru dpdk-18.11.10/drivers/net/cxgbe/sge.c dpdk-18.11.11/drivers/net/cxgbe/sge.c
--- dpdk-18.11.10/drivers/net/cxgbe/sge.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/cxgbe/sge.c	2021-01-20 12:18:20.000000000 +0000
@@ -1423,16 +1423,49 @@
 	return ctrl_xmit(q, mbuf);
 }
 
+static int cxgbe_dma_mzone_name(char *name, size_t len, uint16_t port_id,
+				uint16_t queue_id, const char *ring_name)
+{
+	return snprintf(name, len, "eth_p%d_q%d_%s",
+			port_id, queue_id, ring_name);
+}
+
+static int cxgbe_dma_zone_free(const struct rte_eth_dev *dev,
+			       const char *ring_name,
+			       uint16_t queue_id)
+{
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	const struct rte_memzone *mz;
+	int rc = 0;
+
+	rc = cxgbe_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
+				  queue_id, ring_name);
+	if (rc >= RTE_MEMZONE_NAMESIZE) {
+		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
+		return -ENAMETOOLONG;
+	}
+
+	mz = rte_memzone_lookup(z_name);
+	if (mz)
+		rc = rte_memzone_free(mz);
+	else
+		rc = -ENOENT;
+
+	return rc;
+}
+
 /**
  * alloc_ring - allocate resources for an SGE descriptor ring
- * @dev: the PCI device's core device
+ * @dev: the port associated with the queue
+ * @z_name: memzone's name
+ * @queue_id: queue index
+ * @socket_id: preferred socket id for memory allocations
  * @nelem: the number of descriptors
  * @elem_size: the size of each descriptor
+ * @stat_size: extra space in HW ring for status information
  * @sw_size: the size of the SW state associated with each ring element
  * @phys: the physical address of the allocated ring
  * @metadata: address of the array holding the SW state for the ring
- * @stat_size: extra space in HW ring for status information
- * @node: preferred node for memory allocations
  *
  * Allocates resources for an SGE descriptor ring, such as Tx queues,
  * free buffer lists, or response queues.  Each SGE ring requires
@@ -1442,39 +1475,34 @@
  * of the function), the bus address of the HW ring, and the address
  * of the SW ring.
  */
-static void *alloc_ring(size_t nelem, size_t elem_size,
-			size_t sw_size, dma_addr_t *phys, void *metadata,
-			size_t stat_size, __rte_unused uint16_t queue_id,
-			int socket_id, const char *z_name,
-			const char *z_name_sw)
+static void *alloc_ring(struct rte_eth_dev *dev, const char *z_name,
+			uint16_t queue_id, int socket_id, size_t nelem,
+			size_t elem_size, size_t stat_size, size_t sw_size,
+			dma_addr_t *phys, void *metadata)
 {
 	size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
+	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 	const struct rte_memzone *tz;
 	void *s = NULL;
 
+	snprintf(z_name_sw, sizeof(z_name_sw), "eth_p%d_q%d_%s_sw_ring",
+		 dev->data->port_id, queue_id, z_name);
+
 	dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
 		  "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
 		  " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
 		  stat_size, queue_id, socket_id, z_name, z_name_sw);
 
-	tz = rte_memzone_lookup(z_name);
-	if (tz) {
-		dev_debug(adapter, "%s: tz exists...returning existing..\n",
-			  __func__);
-		goto alloc_sw_ring;
-	}
-
 	/*
 	 * Allocate TX/RX ring hardware descriptors. A memzone large enough to
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
-			RTE_MEMZONE_IOVA_CONTIG, 4096);
+	tz = rte_eth_dma_zone_reserve(dev, z_name, queue_id, len, 4096,
+				      socket_id);
 	if (!tz)
 		return NULL;
 
-alloc_sw_ring:
 	memset(tz->addr, 0, len);
 	if (sw_size) {
 		s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
@@ -1882,21 +1910,15 @@
 	struct fw_iq_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = eth_dev->data->dev_private;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 	unsigned int nb_refill;
 	u8 pciechan;
 
 	/* Size needs to be multiple of 16, including status entry. */
 	iq->size = cxgbe_roundup(iq->size, 16);
 
-	snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-			eth_dev->data->port_id, queue_id,
-			fwevtq ? "fwq_ring" : "rx_ring");
-	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-	iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
-			      queue_id, socket_id, z_name, z_name_sw);
+	iq->desc = alloc_ring(eth_dev, fwevtq ? "fwq_ring" : "rx_ring",
+			      queue_id, socket_id, iq->size, iq->iqe_len,
+			      0, 0, &iq->phys_addr, NULL);
 	if (!iq->desc)
 		return -ENOMEM;
 
@@ -1954,18 +1976,14 @@
 			fl->size = s->fl_starve_thres - 1 + 2 * 8;
 		fl->size = cxgbe_roundup(fl->size, 8);
 
-		snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-				eth_dev->data->port_id, queue_id,
-				fwevtq ? "fwq_ring" : "fl_ring");
-		snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-		fl->desc = alloc_ring(fl->size, sizeof(__be64),
+		fl->desc = alloc_ring(eth_dev, "fl_ring", queue_id, socket_id,
+				      fl->size, sizeof(__be64), s->stat_len,
 				      sizeof(struct rx_sw_desc),
-				      &fl->addr, &fl->sdesc, s->stat_len,
-				      queue_id, socket_id, z_name, z_name_sw);
-
-		if (!fl->desc)
-			goto fl_nomem;
+				      &fl->addr, &fl->sdesc);
+		if (!fl->desc) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
 		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
 		c.iqns_to_fl0congen |=
@@ -2085,8 +2103,6 @@
 refill_fl_err:
 	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 		   iq->cntxt_id, fl->cntxt_id, 0xffff);
-fl_nomem:
-	ret = -ENOMEM;
 err:
 	iq->cntxt_id = 0;
 	iq->abs_id = 0;
@@ -2152,21 +2168,15 @@
 	struct fw_eq_eth_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = eth_dev->data->dev_private;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 	u8 pciechan;
 
 	/* Add status entries */
 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
-	snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-			eth_dev->data->port_id, queue_id, "tx_ring");
-	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-	txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
-				 sizeof(struct tx_sw_desc), &txq->q.phys_addr,
-				 &txq->q.sdesc, s->stat_len, queue_id,
-				 socket_id, z_name, z_name_sw);
+	txq->q.desc = alloc_ring(eth_dev, "tx_ring", queue_id, socket_id,
+				 txq->q.size, sizeof(struct tx_desc),
+				 s->stat_len, sizeof(struct tx_sw_desc),
+				 &txq->q.phys_addr, &txq->q.sdesc);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2231,20 +2241,13 @@
 	struct fw_eq_ctrl_cmd c;
 	struct sge *s = &adap->sge;
 	struct port_info *pi = eth_dev->data->dev_private;
-	char z_name[RTE_MEMZONE_NAMESIZE];
-	char z_name_sw[RTE_MEMZONE_NAMESIZE];
 
 	/* Add status entries */
 	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
-	snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-			eth_dev->data->port_id, queue_id, "ctrl_tx_ring");
-	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
-
-	txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
-				 0, &txq->q.phys_addr,
-				 NULL, 0, queue_id,
-				 socket_id, z_name, z_name_sw);
+	txq->q.desc = alloc_ring(eth_dev, "ctrl_tx_ring", queue_id,
+				 socket_id, txq->q.size, sizeof(struct tx_desc),
+				 0, 0, &txq->q.phys_addr, NULL);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2356,6 +2359,36 @@
 	}
 }
 
+void t4_sge_eth_release_queues(struct port_info *pi)
+{
+	struct adapter *adap = pi->adapter;
+	struct sge_eth_rxq *rxq;
+	struct sge_eth_txq *txq;
+	unsigned int i;
+
+	rxq = &adap->sge.ethrxq[pi->first_qset];
+	/* clean up Ethernet Tx/Rx queues */
+	for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
+		/* Free only the queues allocated */
+		if (rxq->rspq.desc) {
+			t4_sge_eth_rxq_release(adap, rxq);
+			cxgbe_dma_zone_free(rxq->rspq.eth_dev, "fl_ring", i);
+			cxgbe_dma_zone_free(rxq->rspq.eth_dev, "rx_ring", i);
+			rxq->rspq.eth_dev = NULL;
+		}
+	}
+
+	txq = &adap->sge.ethtxq[pi->first_qset];
+	for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
+		/* Free only the queues allocated */
+		if (txq->q.desc) {
+			t4_sge_eth_txq_release(adap, txq);
+			cxgbe_dma_zone_free(txq->eth_dev, "tx_ring", i);
+			txq->eth_dev = NULL;
+		}
+	}
+}
+
 void t4_sge_tx_monitor_start(struct adapter *adap)
 {
 	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
@@ -2375,21 +2408,6 @@
 void t4_free_sge_resources(struct adapter *adap)
 {
 	unsigned int i;
-	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
-	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
-
-	/* clean up Ethernet Tx/Rx queues */
-	for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
-		/* Free only the queues allocated */
-		if (rxq->rspq.desc) {
-			t4_sge_eth_rxq_release(adap, rxq);
-			rxq->rspq.eth_dev = NULL;
-		}
-		if (txq->q.desc) {
-			t4_sge_eth_txq_release(adap, txq);
-			txq->eth_dev = NULL;
-		}
-	}
 
 	/* clean up control Tx queues */
 	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
@@ -2399,12 +2417,17 @@
 			reclaim_completed_tx_imm(&cq->q);
 			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
 					cq->q.cntxt_id);
+			cxgbe_dma_zone_free(adap->eth_dev, "ctrl_tx_ring", i);
+			rte_mempool_free(cq->mb_pool);
 			free_txq(&cq->q);
 		}
 	}
 
-	if (adap->sge.fw_evtq.desc)
+	/* clean up firmware event queue */
+	if (adap->sge.fw_evtq.desc) {
 		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+		cxgbe_dma_zone_free(adap->eth_dev, "fwq_ring", 0);
+	}
 }
 
 /**
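
The sge.c rework above reserves descriptor rings through rte_eth_dma_zone_reserve(), which derives a canonical memzone name from the port and queue, and adds cxgbe_dma_zone_free(), which rebuilds the same "eth_p%d_q%d_%s" name to look the zone up and release it at queue teardown. A reduced sketch of the free-by-reconstructed-name pattern (NAMESIZE, dma_zone_name and dma_zone_free are illustrative stand-ins):

    #include <errno.h>
    #include <stdio.h>

    #define NAMESIZE 32                     /* stand-in for RTE_MEMZONE_NAMESIZE */

    /* Rebuild the canonical zone name, mirroring the patch's format string. */
    static int dma_zone_name(char *buf, size_t len, int port, int queue,
                             const char *ring)
    {
            return snprintf(buf, len, "eth_p%d_q%d_%s", port, queue, ring);
    }

    static int dma_zone_free(int port, int queue, const char *ring)
    {
            char name[NAMESIZE];

            if (dma_zone_name(name, sizeof(name), port, queue, ring) >= NAMESIZE)
                    return -ENAMETOOLONG;   /* truncated name: refuse, as the patch does */
            /* in the driver: mz = rte_memzone_lookup(name); rte_memzone_free(mz); */
            printf("would free zone %s\n", name);
            return 0;
    }
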
diff -Nru dpdk-18.11.10/drivers/net/dpaa2/dpaa2_ethdev.c dpdk-18.11.11/drivers/net/dpaa2/dpaa2_ethdev.c
--- dpdk-18.11.10/drivers/net/dpaa2/dpaa2_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/dpaa2/dpaa2_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -192,8 +192,6 @@
 
 	PMD_INIT_FUNC_TRACE();
 
-	dev_info->if_index = priv->hw_id;
-
 	dev_info->max_mac_addrs = priv->max_mac_filters;
 	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
 	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
diff -Nru dpdk-18.11.10/drivers/net/ena/base/ena_com.c dpdk-18.11.11/drivers/net/ena/base/ena_com.c
--- dpdk-18.11.10/drivers/net/ena/base/ena_com.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ena/base/ena_com.c	2021-01-20 12:18:20.000000000 +0000
@@ -1482,9 +1482,11 @@
 	struct ena_com_aenq *aenq = &ena_dev->aenq;
 	u16 size;
 
-	ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
-	if (admin_queue->comp_ctx)
+	if (admin_queue->comp_ctx) {
+		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
 		ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
+	}
+
 	admin_queue->comp_ctx = NULL;
 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 	if (sq->entries)
diff -Nru dpdk-18.11.10/drivers/net/ena/base/ena_plat_dpdk.h dpdk-18.11.11/drivers/net/ena/base/ena_plat_dpdk.h
--- dpdk-18.11.10/drivers/net/ena/base/ena_plat_dpdk.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ena/base/ena_plat_dpdk.h	2021-01-20 12:18:20.000000000 +0000
@@ -54,8 +54,8 @@
 
 #define ENA_ABORT() abort()
 
-#define ENA_MSLEEP(x) rte_delay_ms(x)
-#define ENA_UDELAY(x) rte_delay_us(x)
+#define ENA_MSLEEP(x) rte_delay_us_sleep(x * 1000)
+#define ENA_UDELAY(x) rte_delay_us_block(x)
 
 #define ENA_TOUCH(x) ((void)(x))
 #define memcpy_toio memcpy
@@ -68,25 +68,15 @@
 #define ENA_GET_SYSTEM_USECS()						\
 	(rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz())
 
-#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
-#define ENA_ASSERT(cond, format, arg...)				\
-	do {								\
-		if (unlikely(!(cond))) {				\
-			RTE_LOG(ERR, PMD, format, ##arg);		\
-			rte_panic("line %d\tassert \"" #cond "\""	\
-					"failed\n", __LINE__);		\
-		}							\
-	} while (0)
-#else
-#define ENA_ASSERT(cond, format, arg...) do {} while (0)
-#endif
-
-#define ENA_MAX32(x, y) RTE_MAX((x), (y))
-#define ENA_MAX16(x, y) RTE_MAX((x), (y))
-#define ENA_MAX8(x, y) RTE_MAX((x), (y))
-#define ENA_MIN32(x, y) RTE_MIN((x), (y))
-#define ENA_MIN16(x, y) RTE_MIN((x), (y))
-#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+
+#define ENA_MAX_T(type, x, y) RTE_MAX((type)(x), (type)(y))
+#define ENA_MAX32(x, y) ENA_MAX_T(uint32_t, (x), (y))
+#define ENA_MAX16(x, y) ENA_MAX_T(uint16_t, (x), (y))
+#define ENA_MAX8(x, y) ENA_MAX_T(uint8_t, (x), (y))
+#define ENA_MIN_T(type, x, y) RTE_MIN((type)(x), (type)(y))
+#define ENA_MIN32(x, y) ENA_MIN_T(uint32_t, (x), (y))
+#define ENA_MIN16(x, y) ENA_MIN_T(uint16_t, (x), (y))
+#define ENA_MIN8(x, y) ENA_MIN_T(uint8_t, (x), (y))
 
 #define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
 #define U64_C(x) x ## ULL
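
The new ENA_MAX_T/ENA_MIN_T helpers above cast both operands to an explicit width before comparing, so the caller chooses the comparison type rather than relying on C's implicit promotions. A self-contained illustration of why the type parameter matters (not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_T(type, x, y) ((type)(x) > (type)(y) ? (type)(x) : (type)(y))

    int main(void)
    {
            int32_t a = -1;
            uint32_t b = 5;

            /* the chosen type decides the outcome of a signed/unsigned mix */
            printf("%d\n", MAX_T(int32_t, a, b));   /* 5: compared as signed */
            printf("%u\n", MAX_T(uint32_t, a, b));  /* 4294967295: compared as unsigned */
            return 0;
    }
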
diff -Nru dpdk-18.11.10/drivers/net/failsafe/failsafe_ether.c dpdk-18.11.11/drivers/net/failsafe/failsafe_ether.c
--- dpdk-18.11.10/drivers/net/failsafe/failsafe_ether.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/failsafe/failsafe_ether.c	2021-01-20 12:18:20.000000000 +0000
@@ -312,10 +312,10 @@
 	if (err) {
 		uint64_t timestamp = sdev->stats_snapshot.timestamp;
 
-		WARN("Could not access latest statistics from sub-device %d.\n",
+		WARN("Could not access latest statistics from sub-device %d.",
 			 SUB_ID(sdev));
 		if (timestamp != 0)
-			WARN("Using latest snapshot taken before %"PRIu64" seconds.\n",
+			WARN("Using latest snapshot taken before %"PRIu64" seconds.",
 				 (rte_rdtsc() - timestamp) / rte_get_tsc_hz());
 	}
 	failsafe_stats_increment(&PRIV(sdev->fs_dev)->stats_accumulator,
@@ -372,14 +372,23 @@
 	struct sub_device *sdev;
 	uint8_t i;
 
-	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
-		if (sdev->remove && fs_rxtx_clean(sdev)) {
-			if (fs_lock(dev, 1) != 0)
-				return;
+	FOREACH_SUBDEV(sdev, i, dev) {
+		if (!sdev->remove)
+			continue;
+
+		/* Active devices must have finished their burst and
+		 * their stats must be saved.
+		 */
+		if (sdev->state >= DEV_ACTIVE &&
+		    fs_rxtx_clean(sdev) == 0)
+			continue;
+		if (fs_lock(dev, 1) != 0)
+			return;
+		if (sdev->state >= DEV_ACTIVE)
 			fs_dev_stats_save(sdev);
-			fs_dev_remove(sdev);
-			fs_unlock(dev, 1);
-		}
+		fs_dev_remove(sdev);
+		fs_unlock(dev, 1);
+	}
 }
 
 static int
diff -Nru dpdk-18.11.10/drivers/net/fm10k/fm10k_ethdev.c dpdk-18.11.11/drivers/net/fm10k/fm10k_ethdev.c
--- dpdk-18.11.10/drivers/net/fm10k/fm10k_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/fm10k/fm10k_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -1861,9 +1861,10 @@
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
 	q->offloads = offloads;
-	if (handle_rxconf(q, conf))
+	if (handle_rxconf(q, conf)) {
+		rte_free(q);
 		return -EINVAL;
-
+	}
 	/* allocate memory for the software ring */
 	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
 			(nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
@@ -2043,8 +2044,10 @@
 	q->ops = &def_txq_ops;
 	q->tail_ptr = (volatile uint32_t *)
 		&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
-	if (handle_txconf(q, conf))
+	if (handle_txconf(q, conf)) {
+		rte_free(q);
 		return -EINVAL;
+	}
 
 	/* allocate memory for the software ring */
 	q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
diff -Nru dpdk-18.11.10/drivers/net/fm10k/fm10k_rxtx_vec.c dpdk-18.11.11/drivers/net/fm10k/fm10k_rxtx_vec.c
--- dpdk-18.11.10/drivers/net/fm10k/fm10k_rxtx_vec.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/fm10k/fm10k_rxtx_vec.c	2021-01-20 12:18:20.000000000 +0000
@@ -645,18 +645,15 @@
 	return pkt_idx;
 }
 
-/*
- * vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
  *
  * Notice:
  * - don't support ol_flags for rss and csum err
- * - nb_pkts > RTE_FM10K_MAX_RX_BURST, only scan RTE_FM10K_MAX_RX_BURST
- *   numbers of DD bit
  */
-uint16_t
-fm10k_recv_scattered_pkts_vec(void *rx_queue,
-				struct rte_mbuf **rx_pkts,
-				uint16_t nb_pkts)
+static uint16_t
+fm10k_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
 {
 	struct fm10k_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[RTE_FM10K_MAX_RX_BURST] = {0};
@@ -691,6 +688,32 @@
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+fm10k_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_FM10K_MAX_RX_BURST) {
+		uint16_t burst;
+
+		burst = fm10k_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       RTE_FM10K_MAX_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_FM10K_MAX_RX_BURST)
+			return retval;
+	}
+
+	return retval + fm10k_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       nb_pkts);
+}
+
 static const struct fm10k_txq_ops vec_txq_ops = {
 	.reset = fm10k_reset_tx_queue,
 };
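
The fm10k change above — repeated later in this diff for the i40e and ixgbe vector paths — turns the old "silently scan at most 32 descriptors" limitation into an explicit wrapper that slices any nb_pkts into bursts of at most the vector-path maximum and stops early when a burst comes back short (queue drained). The control flow, reduced to plain C with a stubbed burst function (MAX_BURST and recv_burst are stand-ins):

    #include <stdint.h>

    #define MAX_BURST 32    /* stand-in for RTE_FM10K_MAX_RX_BURST */

    /* stub: pretend the queue always fills the requested burst */
    static uint16_t recv_burst(void *q, void **pkts, uint16_t n)
    {
            (void)q; (void)pkts;
            return n;
    }

    static uint16_t recv_pkts(void *q, void **pkts, uint16_t nb_pkts)
    {
            uint16_t retval = 0;

            while (nb_pkts > MAX_BURST) {
                    uint16_t burst = recv_burst(q, pkts + retval, MAX_BURST);

                    retval += burst;
                    nb_pkts -= burst;
                    if (burst < MAX_BURST)  /* short burst: nothing left right now */
                            return retval;
            }
            /* final chunk, <= MAX_BURST */
            return retval + recv_burst(q, pkts + retval, nb_pkts);
    }
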
diff -Nru dpdk-18.11.10/drivers/net/i40e/base/i40e_adminq_cmd.h dpdk-18.11.11/drivers/net/i40e/base/i40e_adminq_cmd.h
--- dpdk-18.11.10/drivers/net/i40e/base/i40e_adminq_cmd.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/base/i40e_adminq_cmd.h	2021-01-20 12:18:20.000000000 +0000
@@ -1215,7 +1215,7 @@
 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
 #define I40E_AQC_SET_VSI_DEFAULT		0x08
 #define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
-#define I40E_AQC_SET_VSI_PROMISC_TX		0x8000
+#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY	0x8000
 	__le16	seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
 	__le16	vlan_tag;
diff -Nru dpdk-18.11.10/drivers/net/i40e/base/i40e_common.c dpdk-18.11.11/drivers/net/i40e/base/i40e_common.c
--- dpdk-18.11.10/drivers/net/i40e/base/i40e_common.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/base/i40e_common.c	2021-01-20 12:18:20.000000000 +0000
@@ -2,6 +2,8 @@
  * Copyright(c) 2001-2020 Intel Corporation
  */
 
+#include <inttypes.h>
+
 #include "i40e_type.h"
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
@@ -2225,6 +2227,22 @@
 }
 
 /**
+ * i40e_hw_ver_ge
+ * @hw: pointer to the hw struct
+ * @maj: api major value
+ * @min: api minor value
+ *
+ * Check whether the current HW API version is greater than or equal to the provided one.
+ **/
+static bool i40e_hw_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
+{
+	if (hw->aq.api_maj_ver > maj ||
+	    (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min))
+		return true;
+	return false;
+}
+
+/**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
  * @vsi_ctx: pointer to a vsi context struct
@@ -2349,18 +2367,16 @@
 
 	if (set) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-		if (rx_only_promisc &&
-		    (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
-		     (hw->aq.api_maj_ver > 1)))
-			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+		if (rx_only_promisc && i40e_hw_ver_ge(hw, 1, 5))
+			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
 	}
 
 	cmd->promiscuous_flags = CPU_TO_LE16(flags);
 
 	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-	if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
-	     (hw->aq.api_maj_ver > 1))
-		cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
+	if (i40e_hw_ver_ge(hw, 1, 5))
+		cmd->valid_flags |=
+			CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 
 	cmd->seid = CPU_TO_LE16(seid);
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2492,11 +2508,17 @@
 	i40e_fill_default_direct_cmd_desc(&desc,
 					i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-	if (enable)
+	if (enable) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+		if (i40e_hw_ver_ge(hw, 1, 5))
+			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+	}
 
 	cmd->promiscuous_flags = CPU_TO_LE16(flags);
 	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	if (i40e_hw_ver_ge(hw, 1, 5))
+		cmd->valid_flags |=
+			CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 	cmd->seid = CPU_TO_LE16(seid);
 	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
@@ -3965,7 +3987,7 @@
 			p->wr_csr_prot = (u64)number;
 			p->wr_csr_prot |= (u64)logical_id << 32;
 			i40e_debug(hw, I40E_DEBUG_INIT,
-				   "HW Capability: wr_csr_prot = 0x%llX\n\n",
+				   "HW Capability: wr_csr_prot = 0x%" PRIX64 "\n\n",
 				   (p->wr_csr_prot & 0xffff));
 			break;
 		case I40E_AQ_CAP_ID_NVM_MGMT:
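
i40e_hw_ver_ge() above replaces several open-coded major/minor checks with one helper performing a lexicographic (major, minor) comparison. Rendered standalone with a few checks (names here are mine, not the driver's):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* true iff maj.min >= want_maj.want_min, compared lexicographically */
    static bool ver_ge(uint16_t maj, uint16_t min,
                       uint16_t want_maj, uint16_t want_min)
    {
            return maj > want_maj || (maj == want_maj && min >= want_min);
    }

    int main(void)
    {
            assert(ver_ge(1, 5, 1, 5));     /* equal: accepted */
            assert(ver_ge(2, 3, 1, 5));     /* newer major, lower minor: accepted */
            assert(!ver_ge(1, 4, 1, 5));    /* older minor on same major: rejected */
            return 0;
    }
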
diff -Nru dpdk-18.11.10/drivers/net/i40e/base/i40e_dcb.c dpdk-18.11.11/drivers/net/i40e/base/i40e_dcb.c
--- dpdk-18.11.10/drivers/net/i40e/base/i40e_dcb.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/base/i40e_dcb.c	2021-01-20 12:18:20.000000000 +0000
@@ -1212,7 +1212,8 @@
 
 /**
  * i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format
- * @hw: pointer to the hw struct
+ * @lldpmib: pointer to mib to be output
+ * @miblen: pointer to u16 for length of lldpmib
  * @dcbcfg: store for LLDPDU data
  *
  * send DCB configuration to FW
diff -Nru dpdk-18.11.10/drivers/net/i40e/base/i40e_nvm.c dpdk-18.11.11/drivers/net/i40e/base/i40e_nvm.c
--- dpdk-18.11.10/drivers/net/i40e/base/i40e_nvm.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/base/i40e_nvm.c	2021-01-20 12:18:20.000000000 +0000
@@ -2,6 +2,8 @@
  * Copyright(c) 2001-2020 Intel Corporation
  */
 
+#include <inttypes.h>
+
 #include "i40e_prototype.h"
 
 /**
@@ -77,7 +79,7 @@
 
 	if (ret_code)
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+			   "NVM acquire type %d failed time_left=%" PRIu64 " ret=%d aq_err=%d\n",
 			   access, time_left, ret_code, hw->aq.asq_last_status);
 
 	if (ret_code && time_left) {
@@ -99,7 +101,7 @@
 		if (ret_code != I40E_SUCCESS) {
 			hw->nvm.hw_semaphore_timeout = 0;
 			i40e_debug(hw, I40E_DEBUG_NVM,
-				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+				   "NVM acquire timed out, wait %" PRIu64 " ms before trying again. status=%d aq_err=%d\n",
 				   time_left, ret_code, hw->aq.asq_last_status);
 		}
 	}
@@ -1203,7 +1205,7 @@
 		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
 		if (gtime >= hw->nvm.hw_semaphore_timeout) {
 			i40e_debug(hw, I40E_DEBUG_ALL,
-				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+				   "NVMUPD: write semaphore expired (%d >= %" PRIu64 "), retrying\n",
 				   gtime, hw->nvm.hw_semaphore_timeout);
 			i40e_release_nvm(hw);
 			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
diff -Nru dpdk-18.11.10/drivers/net/i40e/base/virtchnl.h dpdk-18.11.11/drivers/net/i40e/base/virtchnl.h
--- dpdk-18.11.10/drivers/net/i40e/base/virtchnl.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/base/virtchnl.h	2021-01-20 12:18:20.000000000 +0000
@@ -229,7 +229,8 @@
 #define VIRTCHNL_VF_OFFLOAD_ENCAP		0X00100000
 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM		0X00200000
 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM	0X00400000
-
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED		0x00000080
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
 			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -518,10 +519,23 @@
 struct virtchnl_pf_event {
 	enum virtchnl_event_codes event;
 	union {
+		/* If the PF driver does not support the new speed reporting
+		 * capabilities then use link_event else use link_event_adv to
+		 * get the speed and link information. The ability to understand
+		 * new speeds is indicated by setting the capability flag
+		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
+		 * in virtchnl_vf_resource struct and can be used to determine
+		 * which link event struct to use below.
+		 */
 		struct {
 			enum virtchnl_link_speed link_speed;
 			bool link_status;
 		} link_event;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u8 link_status;
+		} link_event_adv;
 	} event_data;
 
 	int severity;
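
The virtchnl addition above lets a VF that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED read the link speed in plain Mbps from link_event_adv, while older PFs keep filling the legacy enum in link_event; the negotiated capability decides which union member is valid, as the i40evf handler later in this diff shows. A reduced model of that dispatch (all types here are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>

    #define CAP_ADV_LINK_SPEED 0x00000080u

    struct pf_event {
            union {
                    struct { int speed_enum; bool up; } link_event;             /* legacy */
                    struct { uint32_t speed_mbps; uint8_t up; } link_event_adv; /* new */
            } data;
    };

    /* Read only the union member the PF actually filled in. */
    static uint32_t link_speed_mbps(uint32_t vf_caps, const struct pf_event *ev)
    {
            if (vf_caps & CAP_ADV_LINK_SPEED)
                    return ev->data.link_event_adv.speed_mbps;  /* already Mbps */
            /* legacy path: translate the enum, as the driver's switch does */
            return 0;
    }
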
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_ethdev.c dpdk-18.11.11/drivers/net/i40e/i40e_ethdev.c
--- dpdk-18.11.10/drivers/net/i40e/i40e_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -2824,6 +2824,21 @@
 	return ret;
 }
 
+static void
+i40e_stat_update_48_in_64(struct i40e_hw *hw, uint32_t hireg,
+			  uint32_t loreg, bool offset_loaded, uint64_t *offset,
+			  uint64_t *stat, uint64_t *prev_stat)
+{
+	i40e_stat_update_48(hw, hireg, loreg, offset_loaded, offset, stat);
+	/* extend the counter into 64 bits when the 48-bit value overflows */
+	if (offset_loaded) {
+		if (I40E_RXTX_BYTES_L_48_BIT(*prev_stat) > *stat)
+			*stat += (uint64_t)1 << I40E_48_BIT_WIDTH;
+		*stat += I40E_RXTX_BYTES_H_16_BIT(*prev_stat);
+	}
+	*prev_stat = *stat;
+}
+
 /* Get all the statistics of a VSI */
 void
 i40e_update_vsi_stats(struct i40e_vsi *vsi)
@@ -2833,9 +2848,9 @@
 	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
 	int idx = rte_le_to_cpu_16(vsi->info.stat_counter_idx);
 
-	i40e_stat_update_48(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
-			    vsi->offset_loaded, &oes->rx_bytes,
-			    &nes->rx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(idx), I40E_GLV_GORCL(idx),
+				  vsi->offset_loaded, &oes->rx_bytes,
+				  &nes->rx_bytes, &vsi->prev_rx_bytes);
 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(idx), I40E_GLV_UPRCL(idx),
 			    vsi->offset_loaded, &oes->rx_unicast,
 			    &nes->rx_unicast);
@@ -2856,9 +2871,9 @@
 	i40e_stat_update_32(hw, I40E_GLV_RUPP(idx), vsi->offset_loaded,
 			    &oes->rx_unknown_protocol,
 			    &nes->rx_unknown_protocol);
-	i40e_stat_update_48(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
-			    vsi->offset_loaded, &oes->tx_bytes,
-			    &nes->tx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(idx), I40E_GLV_GOTCL(idx),
+				  vsi->offset_loaded, &oes->tx_bytes,
+				  &nes->tx_bytes, &vsi->prev_tx_bytes);
 	i40e_stat_update_48(hw, I40E_GLV_UPTCH(idx), I40E_GLV_UPTCL(idx),
 			    vsi->offset_loaded, &oes->tx_unicast,
 			    &nes->tx_unicast);
@@ -2900,17 +2915,18 @@
 	struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
 
 	/* Get rx/tx bytes of internal transfer packets */
-	i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
-			I40E_GLV_GORCL(hw->port),
-			pf->offset_loaded,
-			&pf->internal_stats_offset.rx_bytes,
-			&pf->internal_stats.rx_bytes);
-
-	i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
-			I40E_GLV_GOTCL(hw->port),
-			pf->offset_loaded,
-			&pf->internal_stats_offset.tx_bytes,
-			&pf->internal_stats.tx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GORCH(hw->port),
+				  I40E_GLV_GORCL(hw->port),
+				  pf->offset_loaded,
+				  &pf->internal_stats_offset.rx_bytes,
+				  &pf->internal_stats.rx_bytes,
+				  &pf->internal_prev_rx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLV_GOTCH(hw->port),
+				  I40E_GLV_GOTCL(hw->port),
+				  pf->offset_loaded,
+				  &pf->internal_stats_offset.tx_bytes,
+				  &pf->internal_stats.tx_bytes,
+				  &pf->internal_prev_tx_bytes);
 	/* Get total internal rx packet count */
 	i40e_stat_update_48(hw, I40E_GLV_UPRCH(hw->port),
 			    I40E_GLV_UPRCL(hw->port),
@@ -2950,10 +2966,10 @@
 		pf->internal_stats.rx_broadcast) * ETHER_CRC_LEN;
 
 	/* Get statistics of struct i40e_eth_stats */
-	i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
-			    I40E_GLPRT_GORCL(hw->port),
-			    pf->offset_loaded, &os->eth.rx_bytes,
-			    &ns->eth.rx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GORCH(hw->port),
+				  I40E_GLPRT_GORCL(hw->port),
+				  pf->offset_loaded, &os->eth.rx_bytes,
+				  &ns->eth.rx_bytes, &pf->prev_rx_bytes);
 	i40e_stat_update_48(hw, I40E_GLPRT_UPRCH(hw->port),
 			    I40E_GLPRT_UPRCL(hw->port),
 			    pf->offset_loaded, &os->eth.rx_unicast,
@@ -3007,10 +3023,10 @@
 			    pf->offset_loaded,
 			    &os->eth.rx_unknown_protocol,
 			    &ns->eth.rx_unknown_protocol);
-	i40e_stat_update_48(hw, I40E_GLPRT_GOTCH(hw->port),
-			    I40E_GLPRT_GOTCL(hw->port),
-			    pf->offset_loaded, &os->eth.tx_bytes,
-			    &ns->eth.tx_bytes);
+	i40e_stat_update_48_in_64(hw, I40E_GLPRT_GOTCH(hw->port),
+				  I40E_GLPRT_GOTCL(hw->port),
+				  pf->offset_loaded, &os->eth.tx_bytes,
+				  &ns->eth.tx_bytes, &pf->prev_tx_bytes);
 	i40e_stat_update_48(hw, I40E_GLPRT_UPTCH(hw->port),
 			    I40E_GLPRT_UPTCL(hw->port),
 			    pf->offset_loaded, &os->eth.tx_unicast,
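
i40e_stat_update_48_in_64() above widens the hardware's 48-bit byte counters into 64-bit software counters: when the fresh 48-bit reading is below the previous value's low 48 bits the counter has wrapped, so 2^48 is added, and the high 16 bits accumulated so far are re-attached. The arithmetic in isolation (the helper name is mine; the driver applies this only once offsets are loaded):

    #include <stdint.h>

    #define MASK48 ((UINT64_C(1) << 48) - 1)

    /* hw48: fresh 48-bit reading; prev: last 64-bit value returned */
    static uint64_t extend_48_to_64(uint64_t hw48, uint64_t *prev)
    {
            uint64_t stat = hw48 & MASK48;

            if ((*prev & MASK48) > stat)            /* low bits went backwards: wrap */
                    stat += UINT64_C(1) << 48;
            stat += *prev & ~MASK48;                /* carry the high 16 bits over */
            *prev = stat;
            return stat;
    }
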
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_ethdev.h dpdk-18.11.11/drivers/net/i40e/i40e_ethdev.h
--- dpdk-18.11.10/drivers/net/i40e/i40e_ethdev.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_ethdev.h	2021-01-20 12:18:20.000000000 +0000
@@ -271,6 +271,9 @@
 #define I40E_ETH_OVERHEAD \
 	(ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
 
+#define I40E_RXTX_BYTES_H_16_BIT(bytes) ((bytes) & ~I40E_48_BIT_MASK)
+#define I40E_RXTX_BYTES_L_48_BIT(bytes) ((bytes) & I40E_48_BIT_MASK)
+
 struct i40e_adapter;
 
 /**
@@ -387,6 +390,8 @@
 	uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
 	uint8_t vlan_filter_on; /* The VLAN filter enabled */
 	struct i40e_bw_info bw_info; /* VSI bandwidth information */
+	uint64_t prev_rx_bytes;
+	uint64_t prev_tx_bytes;
 };
 
 struct pool_entry {
@@ -975,6 +980,10 @@
 	struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
 	/* Switch Domain Id */
 	uint16_t switch_domain_id;
+	uint64_t prev_rx_bytes;
+	uint64_t prev_tx_bytes;
+	uint64_t internal_prev_rx_bytes;
+	uint64_t internal_prev_tx_bytes;
 };
 
 enum pending_msg {
@@ -1019,6 +1028,7 @@
 	bool promisc_unicast_enabled;
 	bool promisc_multicast_enabled;
 
+	rte_spinlock_t cmd_send_lock;
 	uint32_t version_major; /* Major version number */
 	uint32_t version_minor; /* Minor version number */
 	uint16_t promisc_flags; /* Promiscuous setting */
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_ethdev_vf.c dpdk-18.11.11/drivers/net/i40e/i40e_ethdev_vf.c
--- dpdk-18.11.10/drivers/net/i40e/i40e_ethdev_vf.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_ethdev_vf.c	2021-01-20 12:18:20.000000000 +0000
@@ -259,7 +259,7 @@
 		case VIRTCHNL_EVENT_RESET_IMPENDING:
 			vf->vf_reset = true;
 			vf->pend_msg |= PFMSG_RESET_IMPENDING;
-			PMD_DRV_LOG(INFO, "vf is reseting");
+			PMD_DRV_LOG(INFO, "VF is resetting");
 			break;
 		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
 			vf->dev_closed = true;
@@ -313,7 +313,7 @@
 #define ASQ_DELAY_MS  10
 
 static int
-i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
+_i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -382,6 +382,19 @@
 	return err | vf->cmd_retval;
 }
 
+static int
+i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
+{
+	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	int err;
+
+	while (!rte_spinlock_trylock(&vf->cmd_send_lock))
+		rte_delay_us_sleep(50);
+	err = _i40evf_execute_vf_cmd(dev, args);
+	rte_spinlock_unlock(&vf->cmd_send_lock);
+	return err;
+}
+
 /*
  * Check API version with sync wait until version read or fail from admin queue
  */
@@ -442,7 +455,8 @@
 		       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
 		       VIRTCHNL_VF_OFFLOAD_RSS_REG |
 		       VIRTCHNL_VF_OFFLOAD_VLAN |
-		       VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+		       VIRTCHNL_VF_OFFLOAD_RX_POLLING |
+		       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
 		args.in_args = (uint8_t *)&caps;
 		args.in_args_size = sizeof(caps);
 	} else {
@@ -1144,6 +1158,7 @@
 
 	vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	vf->dev_data = dev->data;
+	rte_spinlock_init(&vf->cmd_send_lock);
 	err = i40e_set_mac_type(hw);
 	if (err) {
 		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
@@ -1287,8 +1302,39 @@
 		break;
 	case VIRTCHNL_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
-		vf->link_up = pf_msg->event_data.link_event.link_status;
-		vf->link_speed = pf_msg->event_data.link_event.link_speed;
+		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+			vf->link_up =
+			    pf_msg->event_data.link_event_adv.link_status;
+
+			switch (pf_msg->event_data.link_event_adv.link_speed) {
+			case ETH_SPEED_NUM_100M:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_100MB;
+				break;
+			case ETH_SPEED_NUM_1G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_1GB;
+				break;
+			case ETH_SPEED_NUM_10G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_10GB;
+				break;
+			case ETH_SPEED_NUM_20G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_20GB;
+				break;
+			case ETH_SPEED_NUM_25G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_25GB;
+				break;
+			case ETH_SPEED_NUM_40G:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_40GB;
+				break;
+			default:
+				vf->link_speed = VIRTCHNL_LINK_SPEED_UNKNOWN;
+				break;
+			}
+		} else {
+			vf->link_up =
+			    pf_msg->event_data.link_event.link_status;
+			vf->link_speed =
+			    pf_msg->event_data.link_event.link_speed;
+		}
 		break;
 	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_fdir.c dpdk-18.11.11/drivers/net/i40e/i40e_fdir.c
--- dpdk-18.11.10/drivers/net/i40e/i40e_fdir.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_fdir.c	2021-01-20 12:18:20.000000000 +0000
@@ -1549,8 +1549,8 @@
 	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
 	int ret = 0;
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
-		PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
+	if (pf->fdir.fdir_vsi == NULL) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled");
 		return -ENOTSUP;
 	}
 
@@ -1626,6 +1626,12 @@
 			rte_free(fdir_filter);
 	} else {
 		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR,
+				"Error deleting fdir rule from hash table!");
+			return -EINVAL;
+		}
+		pf->fdir.flex_mask_flag[pctype] = 0;
 	}
 
 	return ret;
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_flow.c dpdk-18.11.11/drivers/net/i40e/i40e_flow.c
--- dpdk-18.11.10/drivers/net/i40e/i40e_flow.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_flow.c	2021-01-20 12:18:20.000000000 +0000
@@ -27,7 +27,10 @@
 #define I40E_IPV6_TC_MASK	(0xFF << I40E_FDIR_IPv6_TC_OFFSET)
 #define I40E_IPV6_FRAG_HEADER	44
 #define I40E_TENANT_ARRAY_NUM	3
-#define I40E_TCI_MASK		0xFFFF
+#define I40E_VLAN_TCI_MASK	0xFFFF
+#define I40E_VLAN_PRI_MASK	0xE000
+#define I40E_VLAN_CFI_MASK	0x1000
+#define I40E_VLAN_VID_MASK	0x0FFF
 
 static int i40e_flow_validate(struct rte_eth_dev *dev,
 			      const struct rte_flow_attr *attr,
@@ -2565,12 +2568,22 @@
 
 			RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
 			if (vlan_spec && vlan_mask) {
-				if (vlan_mask->tci ==
-				    rte_cpu_to_be_16(I40E_TCI_MASK)) {
-					input_set |= I40E_INSET_VLAN_INNER;
-					filter->input.flow_ext.vlan_tci =
-						vlan_spec->tci;
+				if (vlan_mask->tci !=
+				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK) &&
+				    vlan_mask->tci !=
+				    rte_cpu_to_be_16(I40E_VLAN_PRI_MASK) &&
+				    vlan_mask->tci !=
+				    rte_cpu_to_be_16(I40E_VLAN_CFI_MASK) &&
+				    vlan_mask->tci !=
+				    rte_cpu_to_be_16(I40E_VLAN_VID_MASK)) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported TCI mask.");
 				}
+				input_set |= I40E_INSET_VLAN_INNER;
+				filter->input.flow_ext.vlan_tci =
+					vlan_spec->tci;
 			}
 			if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
 				if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
@@ -3178,8 +3191,7 @@
 
 	cons_filter_type = RTE_ETH_FILTER_FDIR;
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT ||
-		pf->fdir.fdir_vsi == NULL) {
+	if (pf->fdir.fdir_vsi == NULL) {
 		/* Enable fdir when fdir flow is added at first time. */
 		ret = i40e_fdir_setup(pf);
 		if (ret != I40E_SUCCESS) {
@@ -3195,8 +3207,6 @@
 					   NULL, "Failed to configure fdir.");
 			goto err;
 		}
-
-		dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 	}
 
 	return 0;
@@ -3394,10 +3404,10 @@
 
 			if (vlan_spec && vlan_mask) {
 				if (vlan_mask->tci ==
-				    rte_cpu_to_be_16(I40E_TCI_MASK))
+				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
-					      I40E_TCI_MASK;
+					      I40E_VLAN_TCI_MASK;
 				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
@@ -3625,10 +3635,10 @@
 
 			if (vlan_spec && vlan_mask) {
 				if (vlan_mask->tci ==
-				    rte_cpu_to_be_16(I40E_TCI_MASK))
+				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
 					filter->inner_vlan =
 					      rte_be_to_cpu_16(vlan_spec->tci) &
-					      I40E_TCI_MASK;
+					      I40E_VLAN_TCI_MASK;
 				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
 			}
 			break;
@@ -4149,14 +4159,9 @@
 	}
 
 	/* Get filter specification */
-	if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
-			rte_cpu_to_be_16(I40E_TCI_MASK)) &&
-			(i_vlan_mask != NULL) &&
-			(i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
-		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
-			& I40E_TCI_MASK;
-		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
-			& I40E_TCI_MASK;
+	if (o_vlan_mask != NULL &&  i_vlan_mask != NULL) {
+		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci);
+		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci);
 	} else {
 			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -4250,7 +4255,7 @@
 			vlan_mask = item->mask;
 			if (vlan_spec && vlan_mask) {
 				if (vlan_mask->tci ==
-					rte_cpu_to_be_16(I40E_TCI_MASK)) {
+					rte_cpu_to_be_16(I40E_VLAN_TCI_MASK)) {
 					info->region[0].user_priority[0] =
 						(rte_be_to_cpu_16(
 						vlan_spec->tci) >> 13) & 0x7;
@@ -4296,13 +4301,59 @@
 	const struct rte_flow_action *act;
 	const struct rte_flow_action_rss *rss;
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_queue_regions *info = &pf->queue_region;
 	struct i40e_rte_flow_rss_conf *rss_config =
 			&filter->rss_conf;
 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
-	uint16_t i, j, n, tmp;
+	uint16_t i, j, n, m, tmp;
 	uint32_t index = 0;
-	uint64_t hf_bit = 1;
+
+	static const struct {
+		uint64_t rss_type;
+		enum i40e_filter_pctype pctype;
+	} pctype_match_table[] = {
+	    {ETH_RSS_FRAG_IPV4,
+		I40E_FILTER_PCTYPE_FRAG_IPV4},
+	    {ETH_RSS_NONFRAG_IPV4_TCP,
+		I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
+	    {ETH_RSS_NONFRAG_IPV4_UDP,
+		I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
+	    {ETH_RSS_NONFRAG_IPV4_SCTP,
+		I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
+	    {ETH_RSS_NONFRAG_IPV4_OTHER,
+		I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
+	    {ETH_RSS_FRAG_IPV6,
+		I40E_FILTER_PCTYPE_FRAG_IPV6},
+	    {ETH_RSS_NONFRAG_IPV6_TCP,
+		I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
+	    {ETH_RSS_NONFRAG_IPV6_UDP,
+		I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
+	    {ETH_RSS_NONFRAG_IPV6_SCTP,
+		I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
+	    {ETH_RSS_NONFRAG_IPV6_OTHER,
+		I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
+	    {ETH_RSS_L2_PAYLOAD,
+		I40E_FILTER_PCTYPE_L2_PAYLOAD},
+	};
+
+	static const struct {
+		uint64_t rss_type;
+		enum i40e_filter_pctype pctype;
+	} pctype_match_table_x722[] = {
+		{ETH_RSS_NONFRAG_IPV4_TCP,
+			I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK},
+		{ETH_RSS_NONFRAG_IPV4_UDP,
+			I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP},
+		{ETH_RSS_NONFRAG_IPV4_UDP,
+			I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP},
+		{ETH_RSS_NONFRAG_IPV6_TCP,
+			I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK},
+		{ETH_RSS_NONFRAG_IPV6_UDP,
+			I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP},
+		{ETH_RSS_NONFRAG_IPV6_UDP,
+			I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP},
+	};
 
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	rss = act->conf;
@@ -4320,14 +4371,27 @@
 	}
 
 	if (action_flag) {
-		for (n = 0; n < 64; n++) {
-			if (rss->types & (hf_bit << n)) {
-				conf_info->region[0].hw_flowtype[0] = n;
+		for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
+			if (rss->types & pctype_match_table[j].rss_type) {
+				conf_info->region[0].hw_flowtype[0] =
+					(uint8_t)pctype_match_table[j].pctype;
 				conf_info->region[0].flowtype_num = 1;
 				conf_info->queue_region_number = 1;
 				break;
 			}
 		}
+
+		if (hw->mac.type == I40E_MAC_X722)
+			for (j = 0; j < RTE_DIM(pctype_match_table_x722); j++) {
+				if (rss->types &
+				    pctype_match_table_x722[j].rss_type) {
+					m = conf_info->region[0].flowtype_num;
+					conf_info->region[0].hw_flowtype[m] =
+					    pctype_match_table_x722[j].pctype;
+					conf_info->region[0].flowtype_num++;
+					conf_info->queue_region_number = 1;
+				}
+			}
 	}
 
 	/**
@@ -4425,9 +4489,12 @@
 					info->region[i].user_priority_num++;
 				}
 
-				j = info->region[i].flowtype_num;
-				tmp = conf_info->region[n].hw_flowtype[0];
-				if (conf_info->region[n].flowtype_num) {
+				for (m = 0;
+				     m < conf_info->region[n].flowtype_num;
+				     m++) {
+					j = info->region[i].flowtype_num;
+					tmp =
+					  conf_info->region[n].hw_flowtype[m];
 					info->region[i].hw_flowtype[j] = tmp;
 					info->region[i].flowtype_num++;
 				}
@@ -4440,9 +4507,12 @@
 					info->region[i].user_priority_num++;
 				}
 
-				j = info->region[i].flowtype_num;
-				tmp = conf_info->region[n].hw_flowtype[0];
-				if (conf_info->region[n].flowtype_num) {
+				for (m = 0;
+				     m < conf_info->region[n].flowtype_num;
+				     m++) {
+					j = info->region[i].flowtype_num;
+					tmp =
+					  conf_info->region[n].hw_flowtype[m];
 					info->region[i].hw_flowtype[j] = tmp;
 					info->region[i].flowtype_num++;
 				}
@@ -4763,12 +4833,6 @@
 	case RTE_ETH_FILTER_FDIR:
 		ret = i40e_flow_add_del_fdir_filter(dev,
 		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
-
-		/* If the last flow is destroyed, disable fdir. */
-		if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
-			dev->data->dev_conf.fdir_conf.mode =
-				   RTE_FDIR_MODE_NONE;
-		}
 		break;
 	case RTE_ETH_FILTER_HASH:
 		ret = i40e_config_rss_filter_del(dev,
@@ -4957,8 +5021,10 @@
 		}
 
 		for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
-		     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
+		     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
 			pf->fdir.inset_flag[pctype] = 0;
+			pf->fdir.flex_mask_flag[pctype] = 0;
+		}
 	}
 
 	return ret;
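
The i40e_flow.c rework above replaces a 64-iteration bit scan with static lookup tables mapping RSS type flags to hardware pctypes; the base table takes the first match, while the X722 table appends every match so one RSS flag can enable several pctypes. The table-driven translation, reduced to plain C (the flag and pctype values below are made up for illustration):

    #include <stddef.h>
    #include <stdint.h>

    struct rss_map {
            uint64_t rss_type;      /* RSS flag bit */
            uint8_t pctype;         /* hardware packet-classifier type */
    };

    static const struct rss_map table[] = {
            { UINT64_C(1) << 2, 31 },
            { UINT64_C(1) << 4, 33 },
            { UINT64_C(1) << 5, 34 },
    };

    /* Collect every pctype whose RSS flag is set; returns how many were written. */
    static size_t match_pctypes(uint64_t rss_types, uint8_t *out, size_t cap)
    {
            size_t i, n = 0;

            for (i = 0; i < sizeof(table) / sizeof(table[0]) && n < cap; i++)
                    if (rss_types & table[i].rss_type)
                            out[n++] = table[i].pctype;
            return n;
    }
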
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_rxtx_vec_altivec.c dpdk-18.11.11/drivers/net/i40e/i40e_rxtx_vec_altivec.c
--- dpdk-18.11.10/drivers/net/i40e/i40e_rxtx_vec_altivec.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_rxtx_vec_altivec.c	2021-01-20 12:18:20.000000000 +0000
@@ -217,11 +217,13 @@
 		ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
 }
 
- /* Notice:
-  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
-  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
-  *   numbers of DD bits
-  */
+/**
+ * vPMD raw receive routine; only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - floor align nb_pkts to a RTE_I40E_DESCS_PER_LOOP power-of-two
+ */
 static inline uint16_t
 _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		   uint16_t nb_pkts, uint8_t *split_packet)
@@ -243,9 +245,6 @@
 		};
 	vector unsigned long dd_check, eop_check;
 
-	/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
-
 	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
 
@@ -488,15 +487,15 @@
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
- /* vPMD receive routine that reassembles scattered packets
-  * Notice:
-  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
-  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
-  *   numbers of DD bits
-  */
-uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-			     uint16_t nb_pkts)
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
 {
 	struct i40e_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
@@ -529,6 +528,32 @@
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			     uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
+		uint16_t burst;
+
+		burst = i40e_recv_scattered_burst_vec(rx_queue,
+						      rx_pkts + retval,
+						      RTE_I40E_VPMD_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_I40E_VPMD_RX_BURST)
+			return retval;
+	}
+
+	return retval + i40e_recv_scattered_burst_vec(rx_queue,
+						      rx_pkts + retval,
+						      nb_pkts);
+}
+
 static inline void
 vtx1(volatile struct i40e_tx_desc *txdp,
 	struct rte_mbuf *pkt, uint64_t flags)
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_rxtx_vec_neon.c dpdk-18.11.11/drivers/net/i40e/i40e_rxtx_vec_neon.c
--- dpdk-18.11.10/drivers/net/i40e/i40e_rxtx_vec_neon.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_rxtx_vec_neon.c	2021-01-20 12:18:20.000000000 +0000
@@ -6,6 +6,7 @@
 #include <stdint.h>
 #include <rte_ethdev_driver.h>
 #include <rte_malloc.h>
+#include <rte_vect.h>
 
 #include "base/i40e_prototype.h"
 #include "base/i40e_type.h"
@@ -13,7 +14,6 @@
 #include "i40e_rxtx.h"
 #include "i40e_rxtx_vec_common.h"
 
-#include <arm_neon.h>
 
 #pragma GCC diagnostic ignored "-Wcast-qual"
 
@@ -187,11 +187,12 @@
 
 }
 
- /*
+/**
+ * vPMD raw receive routine; only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP
+ *
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- *   numbers of DD bits
+ * - floor align nb_pkts to a RTE_I40E_DESCS_PER_LOOP power-of-two
  */
 static inline uint16_t
 _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
@@ -229,9 +230,6 @@
 		0, 0, 0       /* ignore non-length fields */
 		};
 
-	/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
-
 	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
 
@@ -438,15 +436,15 @@
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
- /* vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ *
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- *   numbers of DD bits
  */
-uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-			     uint16_t nb_pkts)
+static uint16_t
+i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
 {
 
 	struct i40e_rx_queue *rxq = rx_queue;
@@ -481,6 +479,32 @@
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			     uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
+		uint16_t burst;
+
+		burst = i40e_recv_scattered_burst_vec(rx_queue,
+						      rx_pkts + retval,
+						      RTE_I40E_VPMD_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_I40E_VPMD_RX_BURST)
+			return retval;
+	}
+
+	return retval + i40e_recv_scattered_burst_vec(rx_queue,
+						      rx_pkts + retval,
+						      nb_pkts);
+}
+
 static inline void
 vtx1(volatile struct i40e_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)
diff -Nru dpdk-18.11.10/drivers/net/i40e/i40e_rxtx_vec_sse.c dpdk-18.11.11/drivers/net/i40e/i40e_rxtx_vec_sse.c
--- dpdk-18.11.10/drivers/net/i40e/i40e_rxtx_vec_sse.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/i40e_rxtx_vec_sse.c	2021-01-20 12:18:20.000000000 +0000
@@ -206,11 +206,12 @@
 	rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
 }
 
- /*
+/**
+ * vPMD raw receive routine; only accepts nb_pkts >= RTE_I40E_DESCS_PER_LOOP
+ *
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- *   numbers of DD bits
+ * - floor align nb_pkts to a RTE_I40E_DESCS_PER_LOOP power-of-two
  */
 static inline uint16_t
 _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
@@ -242,9 +243,6 @@
 			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
 	__m128i dd_check, eop_check;
 
-	/* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
-
 	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
 
@@ -469,15 +467,15 @@
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
- /* vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ *
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
- *   numbers of DD bits
  */
-uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-			     uint16_t nb_pkts)
+static uint16_t
+i40e_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
 {
 
 	struct i40e_rx_queue *rxq = rx_queue;
@@ -512,6 +510,32 @@
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			     uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
+		uint16_t burst;
+
+		burst = i40e_recv_scattered_burst_vec(rx_queue,
+						      rx_pkts + retval,
+						      RTE_I40E_VPMD_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_I40E_VPMD_RX_BURST)
+			return retval;
+	}
+
+	return retval + i40e_recv_scattered_burst_vec(rx_queue,
+						      rx_pkts + retval,
+						      nb_pkts);
+}
+
 static inline void
 vtx1(volatile struct i40e_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)
diff -Nru dpdk-18.11.10/drivers/net/i40e/Makefile dpdk-18.11.11/drivers/net/i40e/Makefile
--- dpdk-18.11.10/drivers/net/i40e/Makefile	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/Makefile	2021-01-20 12:18:20.000000000 +0000
@@ -74,7 +74,7 @@
 
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
-ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+ifneq ($(filter y,$(CONFIG_RTE_ARCH_ARM) $(CONFIG_RTE_ARCH_ARM64)),)
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
 else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
diff -Nru dpdk-18.11.10/drivers/net/i40e/rte_pmd_i40e.h dpdk-18.11.11/drivers/net/i40e/rte_pmd_i40e.h
--- dpdk-18.11.10/drivers/net/i40e/rte_pmd_i40e.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/i40e/rte_pmd_i40e.h	2021-01-20 12:18:20.000000000 +0000
@@ -14,6 +14,10 @@
  *
  */
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #include <rte_compat.h>
 #include <rte_ethdev.h>
 #include <rte_ether.h>
@@ -1063,4 +1067,8 @@
 	return 0;
 }
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _PMD_I40E_H_ */
diff -Nru dpdk-18.11.10/drivers/net/ifc/base/ifcvf.h dpdk-18.11.11/drivers/net/ifc/base/ifcvf.h
--- dpdk-18.11.10/drivers/net/ifc/base/ifcvf.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ifc/base/ifcvf.h	2021-01-20 12:18:20.000000000 +0000
@@ -13,7 +13,10 @@
 #define IFCVF_SUBSYS_DEVICE_ID	0x001A
 
 #define IFCVF_MAX_QUEUES		1
+
+#ifndef VIRTIO_F_IOMMU_PLATFORM
 #define VIRTIO_F_IOMMU_PLATFORM		33
+#endif
 
 /* Common configuration */
 #define IFCVF_PCI_CAP_COMMON_CFG	1
diff -Nru dpdk-18.11.10/drivers/net/ixgbe/base/ixgbe_x540.c dpdk-18.11.11/drivers/net/ixgbe/base/ixgbe_x540.c
--- dpdk-18.11.10/drivers/net/ixgbe/base/ixgbe_x540.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ixgbe/base/ixgbe_x540.c	2021-01-20 12:18:20.000000000 +0000
@@ -784,7 +784,7 @@
 	 * bits in the SW_FW_SYNC register.
 	 */
 	if (ixgbe_get_swfw_sync_semaphore(hw)) {
-		DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
+		DEBUGOUT("Failed to get NVM semaphore and register semaphore while forcefully ignoring FW semaphore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
 		return IXGBE_ERR_SWFW_SYNC;
 	}
 	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
diff -Nru dpdk-18.11.10/drivers/net/ixgbe/ixgbe_ethdev.c dpdk-18.11.11/drivers/net/ixgbe/ixgbe_ethdev.c
--- dpdk-18.11.10/drivers/net/ixgbe/ixgbe_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ixgbe/ixgbe_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -1075,7 +1075,7 @@
 		IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
 	uint32_t ctrl_ext;
 	uint16_t csum;
-	int diag, i;
+	int diag, i, ret;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1249,7 +1249,14 @@
 	memset(hwstrip, 0, sizeof(*hwstrip));
 
 	/* initialize PF if max_vfs not zero */
-	ixgbe_pf_host_init(eth_dev);
+	ret = ixgbe_pf_host_init(eth_dev);
+	if (ret) {
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+		rte_free(eth_dev->data->hash_mac_addrs);
+		eth_dev->data->hash_mac_addrs = NULL;
+		return ret;
+	}
 
 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 	/* let hardware know driver is loaded */
@@ -5232,10 +5239,16 @@
 	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
 
 	err = hw->mac.ops.reset_hw(hw);
-	if (err) {
+
+	/**
+	 * In this case, reuses the MAC address assigned by VF
+	 * initialization.
+	 */
+	if (err != IXGBE_SUCCESS && err != IXGBE_ERR_INVALID_MAC_ADDR) {
 		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
 		return err;
 	}
+
 	hw->mac.get_link_status = true;
 
 	/* negotiate mailbox API version to use with the PF. */
diff -Nru dpdk-18.11.10/drivers/net/ixgbe/ixgbe_ethdev.h dpdk-18.11.11/drivers/net/ixgbe/ixgbe_ethdev.h
--- dpdk-18.11.10/drivers/net/ixgbe/ixgbe_ethdev.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ixgbe/ixgbe_ethdev.h	2021-01-20 12:18:20.000000000 +0000
@@ -705,7 +705,7 @@
 
 void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
 
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
 
 void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);
 
diff -Nru dpdk-18.11.10/drivers/net/ixgbe/ixgbe_flow.c dpdk-18.11.11/drivers/net/ixgbe/ixgbe_flow.c
--- dpdk-18.11.10/drivers/net/ixgbe/ixgbe_flow.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ixgbe/ixgbe_flow.c	2021-01-20 12:18:20.000000000 +0000
@@ -870,15 +870,6 @@
 	if (ret)
 		return ret;
 
-	/* Ixgbe doesn't support MAC address. */
-	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
-		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_ITEM,
-			NULL, "Not supported by ethertype filter");
-		return -rte_errno;
-	}
-
 	if (filter->queue >= dev->data->nb_rx_queues) {
 		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
 		rte_flow_error_set(error, EINVAL,
diff -Nru dpdk-18.11.10/drivers/net/ixgbe/ixgbe_pf.c dpdk-18.11.11/drivers/net/ixgbe/ixgbe_pf.c
--- dpdk-18.11.10/drivers/net/ixgbe/ixgbe_pf.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ixgbe/ixgbe_pf.c	2021-01-20 12:18:20.000000000 +0000
@@ -66,7 +66,7 @@
 	return 0;
 }
 
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 {
 	struct ixgbe_vf_info **vfinfo =
 		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
@@ -78,19 +78,27 @@
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	uint16_t vf_num;
 	uint8_t nb_queue;
+	int ret = 0;
 
 	PMD_INIT_FUNC_TRACE();
 
 	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
 	vf_num = dev_num_vf(eth_dev);
 	if (vf_num == 0)
-		return;
+		return ret;
 
 	*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
-	rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+	ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+	if (ret) {
+		PMD_INIT_LOG(ERR,
+			"failed to allocate switch domain for device %d", ret);
+		rte_free(*vfinfo);
+		*vfinfo = NULL;
+		return ret;
+	}
 
 	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
@@ -118,6 +126,8 @@
 
 	/* set mb interrupt mask */
 	ixgbe_mb_intr_setup(eth_dev);
+
+	return ret;
 }
 
 void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
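
The two ixgbe hunks above make initialization failures unwind cleanly: ixgbe_pf_host_init() now reports switch-domain allocation errors and frees its own vfinfo allocation, and eth_dev_init in turn releases the MAC-address arrays it allocated before propagating the error. The each-level-frees-its-own shape, reduced (all names here are stand-ins):

    #include <stdlib.h>

    struct dev { void *mac_addrs; void *vfinfo; };

    static int alloc_domain(void)
    {
            return 0;       /* stand-in: 0 on success */
    }

    static int pf_host_init(struct dev *d)
    {
            d->vfinfo = calloc(1, 64);
            if (d->vfinfo == NULL)
                    return -1;
            if (alloc_domain() != 0) {      /* undo only what this level did */
                    free(d->vfinfo);
                    d->vfinfo = NULL;
                    return -1;
            }
            return 0;
    }

    static int dev_init(struct dev *d)
    {
            d->mac_addrs = calloc(1, 6);
            if (d->mac_addrs == NULL)
                    return -1;
            if (pf_host_init(d) != 0) {     /* caller frees what *it* allocated */
                    free(d->mac_addrs);
                    d->mac_addrs = NULL;
                    return -1;
            }
            return 0;
    }
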
diff -Nru dpdk-18.11.10/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c dpdk-18.11.11/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
--- dpdk-18.11.10/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c	2021-01-20 12:18:20.000000000 +0000
@@ -130,17 +130,6 @@
 	rx_pkts[3]->ol_flags = vol.e[3];
 }
 
-/*
- * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
- *
- * Notice:
- * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
- * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
- * - don't support ol_flags for rss and csum err
- */
-
 #define IXGBE_VPMD_DESC_DD_MASK		0x01010101
 #define IXGBE_VPMD_DESC_EOP_MASK	0x02020202
 
@@ -206,6 +195,13 @@
 				vgetq_lane_u32(tunnel_check, 3));
 }
 
+/**
+ * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
+ * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
+ */
 static inline uint16_t
 _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		   uint16_t nb_pkts, uint8_t *split_packet)
@@ -226,9 +222,6 @@
 	uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0,
 				 rxq->crc_len, 0, 0, 0};
 
-	/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
-
 	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
 
@@ -382,13 +375,11 @@
 	return nb_pkts_recd;
 }
 
-/*
+/**
  * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
  *
  * Notice:
  * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  * - don't support ol_flags for rss and csum err
  */
@@ -399,19 +390,17 @@
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
-/*
+/**
  * vPMD receive routine that reassembles scattered packets
  *
  * Notice:
  * - don't support ol_flags for rss and csum err
  * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
-uint16_t
-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
+static uint16_t
+ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
 {
 	struct ixgbe_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
@@ -443,6 +432,32 @@
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
+		uint16_t burst;
+
+		burst = ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       RTE_IXGBE_MAX_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_IXGBE_MAX_RX_BURST)
+			return retval;
+	}
+
+	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       nb_pkts);
+}
+
 static inline void
 vtx1(volatile union ixgbe_adv_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)
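
[Editor's note: the NEON hunk above, like the SSE hunk below, removes the
silent RTE_MIN() clamp from the raw routine and instead drains requests
larger than RTE_IXGBE_MAX_RX_BURST in a wrapper loop, so callers no longer
lose packets they asked for. A standalone sketch of that chunking pattern,
assuming a MAX_BURST of 32 for illustration; recv_burst() is a stub for the
single-burst vector receive.]

	#include <stdint.h>

	#define MAX_BURST 32

	/* stub: returns at most MAX_BURST packets per call */
	static uint16_t recv_burst(void *q, void **pkts, uint16_t n)
	{
		(void)q; (void)pkts;
		return n < MAX_BURST ? n : MAX_BURST;
	}

	static uint16_t recv_pkts(void *q, void **pkts, uint16_t nb_pkts)
	{
		uint16_t retval = 0;

		while (nb_pkts > MAX_BURST) {
			uint16_t burst = recv_burst(q, pkts + retval,
						    MAX_BURST);

			retval += burst;
			nb_pkts -= burst;
			/* a short burst means the ring drained; stop early */
			if (burst < MAX_BURST)
				return retval;
		}
		return retval + recv_burst(q, pkts + retval, nb_pkts);
	}

The early return on a short burst is what keeps the wrapper from spinning on
an empty ring.
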
diff -Nru dpdk-18.11.10/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c dpdk-18.11.11/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
--- dpdk-18.11.10/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c	2021-01-20 12:18:20.000000000 +0000
@@ -302,13 +302,11 @@
 		get_packet_type(3, pkt_info, etqf_check, tunnel_check);
 }
 
-/*
+/**
  * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
  *
  * Notice:
  * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
 static inline uint16_t
@@ -344,9 +342,6 @@
 	__m128i mbuf_init;
 	uint8_t vlan_flags;
 
-	/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
-	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
-
 	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
 	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
 
@@ -556,13 +551,11 @@
 	return nb_pkts_recd;
 }
 
-/*
+/**
  * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
  *
  * Notice:
  * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
 uint16_t
@@ -572,18 +565,16 @@
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
-/*
+/**
  * vPMD receive routine that reassembles scattered packets
  *
  * Notice:
  * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
- *   numbers of DD bit
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
-uint16_t
-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
+static uint16_t
+ixgbe_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
 {
 	struct ixgbe_rx_queue *rxq = rx_queue;
 	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
@@ -615,6 +606,32 @@
 		&split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			      uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > RTE_IXGBE_MAX_RX_BURST) {
+		uint16_t burst;
+
+		burst = ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       RTE_IXGBE_MAX_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < RTE_IXGBE_MAX_RX_BURST)
+			return retval;
+	}
+
+	return retval + ixgbe_recv_scattered_burst_vec(rx_queue,
+						       rx_pkts + retval,
+						       nb_pkts);
+}
+
 static inline void
 vtx1(volatile union ixgbe_adv_tx_desc *txdp,
 		struct rte_mbuf *pkt, uint64_t flags)
diff -Nru dpdk-18.11.10/drivers/net/mlx4/meson.build dpdk-18.11.11/drivers/net/mlx4/meson.build
--- dpdk-18.11.10/drivers/net/mlx4/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx4/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -28,12 +28,18 @@
 foreach lib:libs
 	if not lib.found()
 		build = false
+	elif not pmd_dlopen
+		ext_deps += lib
 	endif
 endforeach
+if pmd_dlopen
+	# Build without adding shared libs to Libs.private
+	ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout()
+	ext_deps += declare_dependency(compile_args: ibv_cflags.split())
+endif
 
 if build
 	allow_experimental_apis = true
-	ext_deps += libs
 	sources = files(
 		'mlx4.c',
 		'mlx4_ethdev.c',
diff -Nru dpdk-18.11.10/drivers/net/mlx5/meson.build dpdk-18.11.11/drivers/net/mlx5/meson.build
--- dpdk-18.11.10/drivers/net/mlx5/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -29,12 +29,19 @@
 foreach lib:libs
 	if not lib.found()
 		build = false
+	elif not pmd_dlopen
+		ext_deps += lib
 	endif
 endforeach
+if pmd_dlopen
+	ext_deps += libs[0] # libmnl
+	# Build without adding ibverbs libs to Libs.private
+	ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout()
+	ext_deps += declare_dependency(compile_args: ibv_cflags.split())
+endif
 
 if build
 	allow_experimental_apis = true
-	ext_deps += libs
 	sources = files(
 		'mlx5.c',
 		'mlx5_ethdev.c',
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5.c dpdk-18.11.11/drivers/net/mlx5/mlx5.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5.c	2021-01-20 12:18:20.000000000 +0000
@@ -304,6 +304,15 @@
 	unsigned int i;
 	int ret;
 
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		/* Check if process_private released. */
+		if (!dev->process_private)
+			return;
+		mlx5_tx_uar_uninit_secondary(dev);
+		mlx5_proc_priv_uninit(dev);
+		rte_eth_dev_release_port(dev);
+		return;
+	}
 	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
 		dev->data->port_id,
 		((priv->ctx != NULL) ? priv->ctx->device->name : ""));
@@ -849,26 +858,26 @@
 			DRV_LOG(ERR, "can not attach rte ethdev");
 			rte_errno = ENOMEM;
 			err = rte_errno;
-			goto error;
+			goto err_secondary;
 		}
 		eth_dev->device = dpdk_dev;
 		eth_dev->dev_ops = &mlx5_dev_sec_ops;
 		err = mlx5_proc_priv_init(eth_dev);
 		if (err) {
 			err = rte_errno;
-			goto error;
+			goto err_secondary;
 		}
 		/* Receive command fd from primary process */
 		err = mlx5_socket_connect(eth_dev);
 		if (err < 0) {
 			err = rte_errno;
-			goto error;
+			goto err_secondary;
 		}
 		/* Remap UAR for Tx queues. */
 		err = mlx5_tx_uar_init_secondary(eth_dev, err);
 		if (err) {
 			err = rte_errno;
-			goto error;
+			goto err_secondary;
 		}
 		/*
 		 * Ethdev pointer is still required as input since
@@ -879,6 +888,10 @@
 		eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
 		claim_zero(mlx5_glue->close_device(ctx));
 		return eth_dev;
+err_secondary:
+		if (eth_dev)
+			mlx5_dev_close(eth_dev);
+		return NULL;
 	}
 	/* Check port status. */
 	err = mlx5_glue->query_port(ctx, 1, &port_attr);
@@ -1499,8 +1512,17 @@
 	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
 		port = &rte_eth_devices[port_id];
 		if (port->state != RTE_ETH_DEV_UNUSED &&
-				port->device == &pci_dev->device)
-			rte_eth_dev_close(port_id);
+				port->device == &pci_dev->device) {
+			/*
+			 * mlx5_dev_close() is not registered for secondary
+			 * processes, so call the close function explicitly
+			 * for them.
+			 */
+			if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+				mlx5_dev_close(&rte_eth_devices[port_id]);
+			else
+				rte_eth_dev_close(port_id);
+		}
 	}
 	return 0;
 }
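
[Editor's note: the mlx5 close path above now distinguishes process roles: a
secondary process releases only its per-process state, while the primary
performs the full teardown. A rough sketch of that dispatch; proc_type,
free_process_private() and full_close() are invented stand-ins for the
driver's real hooks.]

	#include <stddef.h>

	enum proc_type { PROC_PRIMARY, PROC_SECONDARY };

	struct dev { void *process_private; };

	static void free_process_private(struct dev *d)
	{
		d->process_private = NULL;
	}

	static void full_close(struct dev *d)
	{
		(void)d;	/* shared-resource teardown, primary only */
	}

	static void dev_close(struct dev *d, enum proc_type t)
	{
		if (t == PROC_SECONDARY) {
			/* nothing to do if per-process state is gone */
			if (d->process_private == NULL)
				return;
			free_process_private(d);
			return;
		}
		full_close(d);
	}
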
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_ethdev.c dpdk-18.11.11/drivers/net/mlx5/mlx5_ethdev.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -968,6 +968,7 @@
 {
 	FILE *file;
 	char line[32];
+	int rc = -ENOENT;
 	MKSTR(path, "%s/device/uevent", device->ibdev_path);
 
 	file = fopen(path, "rb");
@@ -977,16 +978,19 @@
 	}
 	while (fgets(line, sizeof(line), file) == line) {
 		size_t len = strlen(line);
-		int ret;
 
 		/* Truncate long lines. */
-		if (len == (sizeof(line) - 1))
+		if (len == (sizeof(line) - 1)) {
 			while (line[(len - 1)] != '\n') {
-				ret = fgetc(file);
+				int ret = fgetc(file);
+
 				if (ret == EOF)
-					break;
+					goto exit;
 				line[(len - 1)] = ret;
 			}
+			/* No match for long lines. */
+			continue;
+		}
 		/* Extract information. */
 		if (sscanf(line,
 			   "PCI_SLOT_NAME="
@@ -995,12 +999,15 @@
 			   &pci_addr->bus,
 			   &pci_addr->devid,
 			   &pci_addr->function) == 4) {
-			ret = 0;
+			rc = 0;
 			break;
 		}
 	}
+exit:
 	fclose(file);
-	return 0;
+	if (rc)
+		rte_errno = -rc;
+	return rc;
 }
 
 /**
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_flow_dv.c dpdk-18.11.11/drivers/net/mlx5/mlx5_flow_dv.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_flow_dv.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_flow_dv.c	2021-01-20 12:18:20.000000000 +0000
@@ -37,6 +37,62 @@
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 
+#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct ether_hdr) + \
+					  sizeof(struct ipv4_hdr))
+/**
+ * Validate VLAN item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that holds the items detected until now.
+ * @param[in] dev
+ *   Ethernet device flow is being created on.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_vlan(const struct rte_flow_item *item,
+			  uint64_t item_flags,
+			  struct rte_eth_dev *dev __rte_unused,
+			  struct rte_flow_error *error)
+{
+	const struct rte_flow_item_vlan *mask = item->mask;
+	const struct rte_flow_item_vlan nic_mask = {
+		.tci = RTE_BE16(UINT16_MAX),
+		.inner_type = RTE_BE16(UINT16_MAX),
+	};
+	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	int ret;
+	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+					MLX5_FLOW_LAYER_INNER_L4) :
+				       (MLX5_FLOW_LAYER_OUTER_L3 |
+					MLX5_FLOW_LAYER_OUTER_L4);
+	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+					MLX5_FLOW_LAYER_OUTER_VLAN;
+
+	if (item_flags & vlanm)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "multiple VLAN layers not supported");
+	else if ((item_flags & l34m) != 0)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "VLAN cannot follow L3/L4 layer");
+	if (!mask)
+		mask = &rte_flow_item_vlan_mask;
+	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+					(const uint8_t *)&nic_mask,
+					sizeof(struct rte_flow_item_vlan),
+					error);
+	if (ret)
+		return ret;
+	return 0;
+}
+
 /**
  * Validate META item.
  *
@@ -248,6 +304,8 @@
 				  const struct rte_flow_attr *attr,
 				  struct rte_flow_error *error)
 {
+	const struct rte_flow_action_raw_decap *decap = action->conf;
+
 	if (action_flags & MLX5_FLOW_ACTION_DROP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -263,22 +321,16 @@
 					  "can only have a single decap"
 					  " action in a flow");
 	/* decap action is valid on egress only if it is followed by encap */
-	if (attr->egress) {
-		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
-		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
-		       action++) {
-		}
-		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
-			return rte_flow_error_set
-					(error, ENOTSUP,
-					 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
-					 NULL, "decap action not supported"
-					 " for egress");
+	if (attr->egress && decap &&
+	    decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+					  NULL, "decap action not supported"
+					  " for egress");
 	}
 	return 0;
 }
 
-
 /**
  * Find existing encap/decap resource or create and register a new one.
  *
@@ -695,9 +747,9 @@
 	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
 	res.size = encap_data->size;
 	memcpy(res.buf, encap_data->data, res.size);
-	res.reformat_type = attr->egress ?
-		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
-		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+	res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
+		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
+		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
 	res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
 				     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
@@ -814,8 +866,8 @@
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			ret = mlx5_flow_validate_item_vlan(items, item_flags,
-							   error);
+			ret = flow_dv_validate_item_vlan(items, item_flags,
+							 dev, error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
@@ -1229,10 +1281,6 @@
 	uint16_t tci_m;
 	uint16_t tci_v;
 
-	if (!vlan_v)
-		return;
-	if (!vlan_m)
-		vlan_m = &rte_flow_item_vlan_mask;
 	if (inner) {
 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
 					 inner_headers);
@@ -1242,10 +1290,18 @@
 					 outer_headers);
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
-	tci_m = rte_be_to_cpu_16(vlan_m->tci);
-	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
+	/*
+	 * When a VLAN item exists in the flow, mark the packet as tagged,
+	 * even if the TCI is not specified.
+	 */
 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+	if (!vlan_v)
+		return;
+	if (!vlan_m)
+		vlan_m = &rte_flow_item_vlan_mask;
+	tci_m = rte_be_to_cpu_16(vlan_m->tci);
+	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
@@ -2176,26 +2232,36 @@
 		case RTE_FLOW_ITEM_TYPE_GRE:
 			flow_dv_translate_item_gre(match_mask, match_value,
 						   items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
 			flow_dv_translate_item_nvgre(match_mask, match_value,
 						     items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			flow_dv_translate_item_vxlan(match_mask, match_value,
 						     items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
 			flow_dv_translate_item_vxlan(match_mask, match_value,
 						     items, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
 			flow_dv_translate_item_mpls(match_mask, match_value,
 						    items, last_item, tunnel);
+			matcher.priority = flow->rss.level >= 2 ?
+				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_MPLS;
 			break;
 		case RTE_FLOW_ITEM_TYPE_META:
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_nl.c dpdk-18.11.11/drivers/net/mlx5/mlx5_nl.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_nl.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_nl.c	2021-01-20 12:18:20.000000000 +0000
@@ -244,10 +244,10 @@
 	     void *arg)
 {
 	struct sockaddr_nl sa;
-	char buf[MLX5_RECV_BUF_SIZE];
+	void *buf = malloc(MLX5_RECV_BUF_SIZE);
 	struct iovec iov = {
 		.iov_base = buf,
-		.iov_len = sizeof(buf),
+		.iov_len = MLX5_RECV_BUF_SIZE,
 	};
 	struct msghdr msg = {
 		.msg_name = &sa,
@@ -259,6 +259,10 @@
 	int multipart = 0;
 	int ret = 0;
 
+	if (!buf) {
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
 	do {
 		struct nlmsghdr *nh;
 		int recv_bytes = 0;
@@ -267,7 +271,8 @@
 			recv_bytes = recvmsg(nlsk_fd, &msg, 0);
 			if (recv_bytes == -1) {
 				rte_errno = errno;
-				return -rte_errno;
+				ret = -rte_errno;
+				goto exit;
 			}
 			nh = (struct nlmsghdr *)buf;
 		} while (nh->nlmsg_seq != sn);
@@ -279,24 +284,30 @@
 
 				if (err_data->error < 0) {
 					rte_errno = -err_data->error;
-					return -rte_errno;
+					ret = -rte_errno;
+					goto exit;
 				}
 				/* Ack message. */
-				return 0;
+				ret = 0;
+				goto exit;
 			}
 			/* Multi-part msgs and their trailing DONE message. */
 			if (nh->nlmsg_flags & NLM_F_MULTI) {
-				if (nh->nlmsg_type == NLMSG_DONE)
-					return 0;
+				if (nh->nlmsg_type == NLMSG_DONE) {
+					ret = 0;
+					goto exit;
+				}
 				multipart = 1;
 			}
 			if (cb) {
 				ret = cb(nh, arg);
 				if (ret < 0)
-					return ret;
+					goto exit;
 			}
 		}
 	} while (multipart);
+exit:
+	free(buf);
 	return ret;
 }
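
[Editor's note: the mlx5_nl.c hunk above moves the 32 KiB receive buffer from
the stack to the heap and funnels every early return through one exit label.
A minimal sketch of the single-exit cleanup idiom it adopts; BUF_SIZE and
process() are placeholders.]

	#include <errno.h>
	#include <stdlib.h>

	#define BUF_SIZE (32 * 1024)

	/* placeholder processing step; negative errno on error */
	static int process(void *buf, size_t len)
	{
		(void)buf; (void)len;
		return 0;
	}

	static int recv_loop(void)
	{
		void *buf = malloc(BUF_SIZE);
		int ret;

		if (buf == NULL)
			return -ENOMEM;
		ret = process(buf, BUF_SIZE);
		if (ret < 0)
			goto exit;
		ret = 0;
	exit:
		free(buf);	/* every path releases the buffer once */
		return ret;
	}
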
 
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_rxq.c dpdk-18.11.11/drivers/net/mlx5/mlx5_rxq.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_rxq.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_rxq.c	2021-01-20 12:18:20.000000000 +0000
@@ -521,6 +521,9 @@
 	unsigned int count = 0;
 	struct rte_intr_handle *intr_handle = dev->intr_handle;
 
+	/* Representor shares dev->intr_handle with PF. */
+	if (priv->representor)
+		return 0;
 	if (!dev->data->dev_conf.intr_conf.rxq)
 		return 0;
 	mlx5_rx_intr_vec_disable(dev);
@@ -598,6 +601,9 @@
 	unsigned int rxqs_n = priv->rxqs_n;
 	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
 
+	/* Representor shares dev->intr_handle with PF. */
+	if (priv->representor)
+		return;
 	if (!dev->data->dev_conf.intr_conf.rxq)
 		return;
 	if (!intr_handle->intr_vec)
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_rxtx.c dpdk-18.11.11/drivers/net/mlx5/mlx5_rxtx.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_rxtx.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_rxtx.c	2021-01-20 12:18:20.000000000 +0000
@@ -431,13 +431,15 @@
 	struct rxq_zip *zip = &rxq->zip;
 	volatile struct mlx5_cqe *cqe;
 	const unsigned int cqe_n = (1 << rxq->cqe_n);
+	const unsigned int sges_n = (1 << rxq->sges_n);
+	const unsigned int elts_n = (1 << rxq->elts_n);
+	const unsigned int strd_n = (1 << rxq->strd_num_n);
 	const unsigned int cqe_cnt = cqe_n - 1;
-	unsigned int cq_ci;
-	unsigned int used;
+	unsigned int cq_ci, used;
 
 	/* if we are processing a compressed cqe */
 	if (zip->ai) {
-		used = zip->cqe_cnt - zip->ca;
+		used = zip->cqe_cnt - zip->ai;
 		cq_ci = zip->cq_ci;
 	} else {
 		used = 0;
@@ -457,7 +459,7 @@
 		used += n;
 		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
 	}
-	used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+	used = RTE_MIN(used * sges_n, elts_n * strd_n);
 	return used;
 }
 
@@ -480,11 +482,12 @@
 			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
 
-	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+	if (dev->rx_pkt_burst == NULL ||
+	    dev->rx_pkt_burst == removed_rx_burst) {
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
-	if (offset >= (1 << rxq->elts_n)) {
+	if (offset >= (1 << rxq->cqe_n)) {
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
@@ -512,7 +515,8 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_data *rxq;
 
-	if (dev->rx_pkt_burst != mlx5_rx_burst) {
+	if (dev->rx_pkt_burst == NULL ||
+	    dev->rx_pkt_burst == removed_rx_burst) {
 		rte_errno = ENOTSUP;
 		return -rte_errno;
 	}
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_rxtx.h dpdk-18.11.11/drivers/net/mlx5/mlx5_rxtx.h
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_rxtx.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_rxtx.h	2021-01-20 12:18:20.000000000 +0000
@@ -311,6 +311,7 @@
 int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
 int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
 int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
+void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
 struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_txconf *conf);
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_stats.c dpdk-18.11.11/drivers/net/mlx5/mlx5_stats.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_stats.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_stats.c	2021-01-20 12:18:20.000000000 +0000
@@ -3,11 +3,13 @@
  * Copyright 2015 Mellanox Technologies, Ltd
  */
 
+#include <fcntl.h>
 #include <inttypes.h>
 #include <linux/sockios.h>
 #include <linux/ethtool.h>
 #include <stdint.h>
 #include <stdio.h>
+#include <unistd.h>
 
 #include <rte_ethdev_driver.h>
 #include <rte_common.h>
@@ -139,19 +141,24 @@
 static inline void
 mlx5_read_ib_stat(struct mlx5_priv *priv, const char *ctr_name, uint64_t *stat)
 {
-	FILE *file;
+	int fd;
+
 	MKSTR(path, "%s/ports/1/hw_counters/%s",
 		  priv->ibdev_path,
 		  ctr_name);
 
-	file = fopen(path, "rb");
-	if (file) {
-		int n = fscanf(file, "%" SCNu64, stat);
-
-		fclose(file);
-		if (n != 1)
-			stat = 0;
+	fd = open(path, O_RDONLY);
+	if (fd != -1) {
+		char buf[21] = {'\0'};
+		ssize_t n = read(fd, buf, sizeof(buf));
+
+		close(fd);
+		if (n != -1) {
+			*stat = strtoull(buf, NULL, 10);
+			return;
+		}
 	}
+	*stat = 0;
 }
 
 /**
@@ -473,8 +480,7 @@
 	struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
 	int stats_n;
 	unsigned int i;
-	unsigned int n = xstats_ctrl->mlx5_stats_n;
-	uint64_t counters[n];
+	uint64_t *counters;
 	int ret;
 
 	stats_n = mlx5_ethtool_get_stats_n(dev);
@@ -485,14 +491,26 @@
 	}
 	if (xstats_ctrl->stats_n != stats_n)
 		mlx5_stats_init(dev);
+	counters = rte_malloc("xstats_counters",
+			       sizeof(*counters) * xstats_ctrl->mlx5_stats_n,
+			       SOCKET_ID_ANY);
+	if (!counters) {
+		DRV_LOG(WARNING, "port %u unable to allocate memory "
+				 "for xstats counters",
+				 dev->data->port_id);
+		rte_errno = ENOMEM;
+		return;
+	}
 	ret = mlx5_read_dev_counters(dev, counters);
 	if (ret) {
 		DRV_LOG(ERR, "port %u cannot read device counters: %s",
 			dev->data->port_id, strerror(rte_errno));
+		rte_free(counters);
 		return;
 	}
-	for (i = 0; i != n; ++i)
+	for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i)
 		xstats_ctrl->base[i] = counters[i];
+	rte_free(counters);
 }
 
 /**
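
[Editor's note: the mlx5_stats.c hunk above replaces the variable-length
array "uint64_t counters[n]" with an rte_malloc() allocation, bounding stack
usage when the counter set is large. A sketch of the same conversion in
plain C; read_counters() stands in for mlx5_read_dev_counters().]

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* stand-in: fills n values, returns 0 on success */
	static int read_counters(uint64_t *vals, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i != n; ++i)
			vals[i] = 0;
		return 0;
	}

	static int snapshot_base(uint64_t *base, unsigned int n)
	{
		/* a VLA here could overflow the stack for large n */
		uint64_t *counters = calloc(n, sizeof(*counters));
		unsigned int i;
		int ret;

		if (counters == NULL)
			return -ENOMEM;
		ret = read_counters(counters, n);
		if (ret == 0)
			for (i = 0; i != n; ++i)
				base[i] = counters[i];
		free(counters);
		return ret;
	}
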
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_txq.c dpdk-18.11.11/drivers/net/mlx5/mlx5_txq.c
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_txq.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_txq.c	2021-01-20 12:18:20.000000000 +0000
@@ -258,6 +258,53 @@
 }
 
 /**
+ * Find the Tx UAR mmap offset for the page containing the specified register.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param bf
+ *   Pointer to doorbell register.
+ *
+ * @return
+ *   found mmap offset, 0 otherwise.
+ *
+ * The rdma-core library may place multiple registers within one UAR page;
+ * the registers beside the first one are considered its children, and there
+ * is no need to mmap them because the page is already mapped for the first
+ * ("parent") register. The uar_mmap_offset is zero for a child register, so
+ * to perform the mapping in a secondary process we should find the
+ * uar_mmap_offset of the parent and use it for the mapping.
+ */
+static off_t
+mlx5_tx_find_uar_offset(struct rte_eth_dev *dev, void *bf)
+{
+	const size_t page_size = sysconf(_SC_PAGESIZE);
+	const uintptr_t reg = RTE_ALIGN_FLOOR((uintptr_t)bf, page_size);
+	const struct mlx5_priv *priv = dev->data->dev_private;
+	unsigned int i;
+
+	for (i = 0; i != priv->txqs_n; ++i) {
+		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
+
+		if (!txq_ctrl)
+			continue;
+		if (txq_ctrl->uar_mmap_offset && txq_ctrl->bf_reg) {
+			uintptr_t tx_reg = RTE_ALIGN_FLOOR
+						((uintptr_t)txq_ctrl->bf_reg,
+						 page_size);
+			if (tx_reg == reg) {
+				off_t offset = txq_ctrl->uar_mmap_offset;
+
+				mlx5_txq_release(dev, i);
+				return offset;
+			}
+		}
+		mlx5_txq_release(dev, i);
+	}
+	return 0;
+}
+
+/**
  * Remap UAR register of a Tx queue for secondary process.
  *
  * Remapped address is stored at the table in the process private structure of
@@ -321,6 +368,30 @@
 }
 
 /**
+ * Deinitialize Tx UAR registers for secondary process.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_txq_data *txq;
+	struct mlx5_txq_ctrl *txq_ctrl;
+	unsigned int i;
+
+	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	for (i = 0; i != priv->txqs_n; ++i) {
+		if (!(*priv->txqs)[i])
+			continue;
+		txq = (*priv->txqs)[i];
+		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+		txq_uar_uninit_secondary(txq_ctrl);
+	}
+}
+
+/**
  * Initialize Tx UAR registers for secondary process.
  *
  * @param dev
@@ -405,7 +476,6 @@
 	struct mlx5_txq_ibv *txq_ibv = NULL;
 	union {
 		struct ibv_qp_init_attr_ex init;
-		struct ibv_cq_init_attr_ex cq;
 		struct ibv_qp_attr mod;
 	} attr;
 	unsigned int cqe_n;
@@ -427,9 +497,6 @@
 		return NULL;
 	}
 	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
-	attr.cq = (struct ibv_cq_init_attr_ex){
-		.comp_mask = 0,
-	};
 	cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
 		((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
 	if (is_empw_burst_func(tx_pkt_burst))
@@ -563,9 +630,11 @@
 	txq_ibv->cq = tmpl.cq;
 	rte_atomic32_inc(&txq_ibv->refcnt);
 	txq_ctrl->bf_reg = qp.bf.reg;
-	txq_uar_init(txq_ctrl);
 	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
-		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset ?
+					    qp.uar_mmap_offset :
+					    mlx5_tx_find_uar_offset
+						(dev, txq_ctrl->bf_reg);
 		DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64,
 			dev->data->port_id, txq_ctrl->uar_mmap_offset);
 	} else {
@@ -576,6 +645,7 @@
 		rte_errno = EINVAL;
 		goto error;
 	}
+	txq_uar_init(txq_ctrl);
 	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
 	txq_ibv->txq_ctrl = txq_ctrl;
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
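
[Editor's note: a condensed model of mlx5_tx_find_uar_offset() above: align
both doorbell addresses down to their page and reuse the parent's recorded
offset on a match. The struct layout is illustrative, and the mask trick
assumes the page size is a power of two, which holds on Linux.]

	#include <stdint.h>
	#include <sys/types.h>
	#include <unistd.h>

	struct txq { void *bf_reg; off_t uar_mmap_offset; };

	static off_t find_uar_offset(const struct txq *txqs, unsigned int n,
				     void *bf)
	{
		const uintptr_t page = (uintptr_t)sysconf(_SC_PAGESIZE);
		const uintptr_t reg = (uintptr_t)bf & ~(page - 1);
		unsigned int i;

		for (i = 0; i != n; ++i) {
			uintptr_t tx_reg =
				(uintptr_t)txqs[i].bf_reg & ~(page - 1);

			/* same page as a queue with a known parent offset */
			if (txqs[i].uar_mmap_offset != 0 && tx_reg == reg)
				return txqs[i].uar_mmap_offset;
		}
		return 0;	/* not found */
	}
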
diff -Nru dpdk-18.11.10/drivers/net/mlx5/mlx5_utils.h dpdk-18.11.11/drivers/net/mlx5/mlx5_utils.h
--- dpdk-18.11.10/drivers/net/mlx5/mlx5_utils.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mlx5/mlx5_utils.h	2021-01-20 12:18:20.000000000 +0000
@@ -125,10 +125,6 @@
 
 #endif /* NDEBUG */
 
-#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
-#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
-#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
-
 /* Convenience macros for accessing mbuf fields. */
 #define NEXT(m) ((m)->next)
 #define DATA_LEN(m) ((m)->data_len)
diff -Nru dpdk-18.11.10/drivers/net/mvpp2/mrvl_mtr.c dpdk-18.11.11/drivers/net/mvpp2/mrvl_mtr.c
--- dpdk-18.11.10/drivers/net/mvpp2/mrvl_mtr.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/mvpp2/mrvl_mtr.c	2021-01-20 12:18:20.000000000 +0000
@@ -329,6 +329,12 @@
 	struct mrvl_mtr_profile *profile;
 	struct mrvl_mtr *mtr;
 
+	profile = mrvl_mtr_profile_from_id(priv, params->meter_profile_id);
+	if (!profile)
+		return -rte_mtr_error_set(error, EINVAL,
+					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+					  NULL, "Profile id does not exist\n");
+
 	mtr = mrvl_mtr_from_id(priv, mtr_id);
 	if (mtr)
 		return -rte_mtr_error_set(error, EEXIST,
@@ -341,12 +347,6 @@
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
 					  NULL, NULL);
 
-	profile = mrvl_mtr_profile_from_id(priv, params->meter_profile_id);
-	if (!profile)
-		return -rte_mtr_error_set(error, EINVAL,
-					  RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
-					  NULL, "Profile id does not exist\n");
-
 	mtr->shared = shared;
 	mtr->mtr_id = mtr_id;
 	mtr->plcr_bit = MRVL_PLCR_BIT_INVALID;
diff -Nru dpdk-18.11.10/drivers/net/netvsc/hn_nvs.c dpdk-18.11.11/drivers/net/netvsc/hn_nvs.c
--- dpdk-18.11.10/drivers/net/netvsc/hn_nvs.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/netvsc/hn_nvs.c	2021-01-20 12:18:20.000000000 +0000
@@ -223,9 +223,15 @@
 		    resp.nvs_sect[0].slotcnt);
 	hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
 
-	hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
-				    sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
-	if (!hv->rxbuf_info) {
+	/*
+	 * Primary queue's rxbuf_info is not allocated at creation time.
+	 * Now we can allocate it after we figure out the slotcnt.
+	 */
+	hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+			hv->rxbuf_section_cnt,
+			sizeof(*hv->primary->rxbuf_info),
+			RTE_CACHE_LINE_SIZE);
+	if (!hv->primary->rxbuf_info) {
 		PMD_DRV_LOG(ERR,
 			    "could not allocate rxbuf info");
 		return -ENOMEM;
@@ -255,7 +261,6 @@
 			    error);
 	}
 
-	rte_free(hv->rxbuf_info);
 	/*
 	 * Linger long enough for NVS to disconnect RXBUF.
 	 */
diff -Nru dpdk-18.11.10/drivers/net/netvsc/hn_rndis.c dpdk-18.11.11/drivers/net/netvsc/hn_rndis.c
--- dpdk-18.11.10/drivers/net/netvsc/hn_rndis.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/netvsc/hn_rndis.c	2021-01-20 12:18:20.000000000 +0000
@@ -276,7 +276,7 @@
 	sg.len  = reqlen;
 
 	if (sg.ofs + reqlen >  PAGE_SIZE) {
-		PMD_DRV_LOG(ERR, "RNDIS request crosses page bounary");
+		PMD_DRV_LOG(ERR, "RNDIS request crosses page boundary");
 		return -EINVAL;
 	}
 
diff -Nru dpdk-18.11.10/drivers/net/netvsc/hn_rxtx.c dpdk-18.11.11/drivers/net/netvsc/hn_rxtx.c
--- dpdk-18.11.10/drivers/net/netvsc/hn_rxtx.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/netvsc/hn_rxtx.c	2021-01-20 12:18:20.000000000 +0000
@@ -252,16 +252,6 @@
 
 	PMD_INIT_FUNC_TRACE();
 
-	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
-				 socket_id);
-	if (!txq)
-		return -ENOMEM;
-
-	txq->hv = hv;
-	txq->chan = hv->channels[queue_idx];
-	txq->port_id = dev->data->port_id;
-	txq->queue_id = queue_idx;
-
 	tx_free_thresh = tx_conf->tx_free_thresh;
 	if (tx_free_thresh == 0)
 		tx_free_thresh = RTE_MIN(nb_desc / 4,
@@ -276,6 +266,15 @@
 		return -EINVAL;
 	}
 
+	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!txq)
+		return -ENOMEM;
+
+	txq->hv = hv;
+	txq->chan = hv->channels[queue_idx];
+	txq->port_id = dev->data->port_id;
+	txq->queue_id = queue_idx;
 	txq->free_thresh = tx_free_thresh;
 
 	snprintf(name, sizeof(name),
@@ -284,10 +283,15 @@
 	PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu",
 		     name, nb_desc, sizeof(struct hn_txdesc));
 
-	txq->tx_rndis = rte_calloc("hn_txq_rndis", nb_desc,
-				   HN_RNDIS_PKT_ALIGNED, RTE_CACHE_LINE_SIZE);
-	if (txq->tx_rndis == NULL)
+	txq->tx_rndis_mz = rte_memzone_reserve_aligned(name,
+			nb_desc * HN_RNDIS_PKT_ALIGNED, rte_socket_id(),
+			RTE_MEMZONE_IOVA_CONTIG, HN_RNDIS_PKT_ALIGNED);
+	if (!txq->tx_rndis_mz) {
+		err = -rte_errno;
 		goto error;
+	}
+	txq->tx_rndis = txq->tx_rndis_mz->addr;
+	txq->tx_rndis_iova = txq->tx_rndis_mz->iova;
 
 	txq->txdesc_pool = rte_mempool_create(name, nb_desc,
 					      sizeof(struct hn_txdesc),
@@ -316,7 +320,7 @@
 error:
 	if (txq->txdesc_pool)
 		rte_mempool_free(txq->txdesc_pool);
-	rte_free(txq->tx_rndis);
+	rte_memzone_free(txq->tx_rndis_mz);
 	rte_free(txq);
 	return err;
 }
@@ -358,7 +362,7 @@
 	if (txq->txdesc_pool)
 		rte_mempool_free(txq->txdesc_pool);
 
-	rte_free(txq->tx_rndis);
+	rte_memzone_free(txq->tx_rndis_mz);
 	rte_free(txq);
 }
 
@@ -387,8 +391,10 @@
 		++txq->stats.errors;
 	}
 
-	if (txd->chim_index != NVS_CHIM_IDX_INVALID)
+	if (txd->chim_index != NVS_CHIM_IDX_INVALID) {
 		hn_chim_free(hv, txd->chim_index);
+		txd->chim_index = NVS_CHIM_IDX_INVALID;
+	}
 
 	rte_pktmbuf_free(txd->m);
 	hn_txd_put(txq, txd);
@@ -493,21 +499,21 @@
 static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
 {
 	struct hn_rx_bufinfo *rxb = opaque;
-	struct hn_data *hv = rxb->hv;
+	struct hn_rx_queue *rxq = rxb->rxq;
 
-	rte_atomic32_dec(&hv->rxbuf_outstanding);
+	rte_atomic32_dec(&rxq->rxbuf_outstanding);
 	hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
 }
 
-static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
 					    const struct vmbus_chanpkt_rxbuf *pkt)
 {
 	struct hn_rx_bufinfo *rxb;
 
-	rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+	rxb = rxq->rxbuf_info + pkt->hdr.xactid;
 	rxb->chan = rxq->chan;
 	rxb->xactid = pkt->hdr.xactid;
-	rxb->hv = rxq->hv;
+	rxb->rxq = rxq;
 
 	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
 	rxb->shinfo.fcb_opaque = rxb;
@@ -536,7 +542,7 @@
 	 * some space available in receive area for later packets.
 	 */
 	if (dlen >= HN_RXCOPY_THRESHOLD &&
-	    (uint32_t)rte_atomic32_read(&hv->rxbuf_outstanding) <
+	    (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
 			hv->rxbuf_section_cnt / 2) {
 		struct rte_mbuf_ext_shared_info *shinfo;
 		const void *rxbuf;
@@ -553,7 +559,7 @@
 
 		/* shinfo is already set to 1 by the caller */
 		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
-			rte_atomic32_inc(&hv->rxbuf_outstanding);
+			rte_atomic32_inc(&rxq->rxbuf_outstanding);
 
 		rte_pktmbuf_attach_extbuf(m, data, iova,
 					  dlen + headroom, shinfo);
@@ -618,7 +624,8 @@
 			     struct hn_rx_bufinfo *rxb,
 			     void *data, uint32_t dlen)
 {
-	unsigned int data_off, data_len, pktinfo_off, pktinfo_len;
+	unsigned int data_off, data_len;
+	unsigned int pktinfo_off, pktinfo_len;
 	const struct rndis_packet_msg *pkt = data;
 	struct hn_rxinfo info = {
 		.vlan_info = HN_NDIS_VLAN_INFO_INVALID,
@@ -663,7 +670,8 @@
 			goto error;
 	}
 
-	if (unlikely(data_off + data_len > pkt->len))
+	/* overflow check */
+	if (data_len > data_len + data_off || data_len + data_off > pkt->len)
 		goto error;
 
 	if (unlikely(data_len < ETHER_HDR_LEN))
@@ -842,6 +850,23 @@
 		return NULL;
 	}
 
+	/* setup rxbuf_info for non-primary queue */
+	if (queue_id) {
+		rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+					hv->rxbuf_section_cnt,
+					sizeof(*rxq->rxbuf_info),
+					RTE_CACHE_LINE_SIZE);
+
+		if (!rxq->rxbuf_info) {
+			PMD_DRV_LOG(ERR,
+				"Could not allocate rxbuf info for queue %d\n",
+				queue_id);
+			rte_free(rxq->event_buf);
+			rte_free(rxq);
+			return NULL;
+		}
+	}
+
 	return rxq;
 }
 
@@ -896,6 +921,7 @@
 
 fail:
 	rte_ring_free(rxq->rx_ring);
+	rte_free(rxq->rxbuf_info);
 	rte_free(rxq->event_buf);
 	rte_free(rxq);
 	return error;
@@ -919,6 +945,7 @@
 
 	/* Keep primary queue to allow for control operations */
 	if (rxq != rxq->hv->primary) {
+		rte_free(rxq->rxbuf_info);
 		rte_free(rxq->event_buf);
 		rte_free(rxq);
 	}
@@ -1318,12 +1345,8 @@
 	hn_rndis_dump(txd->rndis_pkt);
 
 	/* pass IOVA of rndis header in first segment */
-	addr = rte_malloc_virt2iova(txq->tx_rndis);
-	if (unlikely(addr == RTE_BAD_IOVA)) {
-		PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
-		return -EINVAL;
-	}
-	addr = addr + ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
+	addr = txq->tx_rndis_iova +
+		((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
 
 	sg[0].page = addr / PAGE_SIZE;
 	sg[0].ofs = addr & PAGE_MASK;
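
[Editor's note: the length check rewritten above guards against unsigned
wraparound: with 32-bit values, data_off + data_len can wrap past zero and
slip under pkt->len, so the sum must be validated before it is compared. A
self-contained sketch of the safe form; the types are illustrative.]

	#include <stdbool.h>
	#include <stdint.h>

	static bool range_ok(uint32_t off, uint32_t len, uint32_t total)
	{
		/* e.g. off = 0xfffffff0, len = 0x20: off + len wraps to
		 * 0x10 and would pass a naive "off + len <= total" test */
		if (len > len + off)
			return false;	/* the addition wrapped */
		return off + len <= total;
	}
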
diff -Nru dpdk-18.11.10/drivers/net/netvsc/hn_var.h dpdk-18.11.11/drivers/net/netvsc/hn_var.h
--- dpdk-18.11.10/drivers/net/netvsc/hn_var.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/netvsc/hn_var.h	2021-01-20 12:18:20.000000000 +0000
@@ -53,7 +53,9 @@
 	uint16_t	queue_id;
 	uint32_t	free_thresh;
 	struct rte_mempool *txdesc_pool;
+	const struct rte_memzone *tx_rndis_mz;
 	void		*tx_rndis;
+	rte_iova_t	tx_rndis_iova;
 
 	/* Applied packet transmission aggregation limits. */
 	uint32_t	agg_szmax;
@@ -82,13 +84,15 @@
 	struct hn_stats stats;
 
 	void *event_buf;
+	struct hn_rx_bufinfo *rxbuf_info;
+	rte_atomic32_t  rxbuf_outstanding;
 };
 
 
 /* multi-packet data from host */
 struct hn_rx_bufinfo {
 	struct vmbus_channel *chan;
-	struct hn_data *hv;
+	struct hn_rx_queue *rxq;
 	uint64_t	xactid;
 	struct rte_mbuf_ext_shared_info shinfo;
 } __rte_cache_aligned;
@@ -109,9 +113,7 @@
 	uint32_t	link_speed;
 
 	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
-	struct hn_rx_bufinfo *rxbuf_info;
 	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
-	rte_atomic32_t	rxbuf_outstanding;
 	uint16_t	max_queues;		/* Max available queues */
 	uint16_t	num_queues;
 	uint64_t	rss_offloads;
diff -Nru dpdk-18.11.10/drivers/net/nfp/nfp_net.c dpdk-18.11.11/drivers/net/nfp/nfp_net.c
--- dpdk-18.11.10/drivers/net/nfp/nfp_net.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/nfp/nfp_net.c	2021-01-20 12:18:20.000000000 +0000
@@ -1201,6 +1201,20 @@
 		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
 	};
 
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = NFP_NET_MAX_RX_DESC,
+		.nb_min = NFP_NET_MIN_RX_DESC,
+		.nb_align = NFP_ALIGN_RING_DESC,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = NFP_NET_MAX_TX_DESC,
+		.nb_min = NFP_NET_MIN_TX_DESC,
+		.nb_align = NFP_ALIGN_RING_DESC,
+		.nb_seg_max = NFP_TX_MAX_SEG,
+		.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
+	};
+
 	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
 					   ETH_RSS_NONFRAG_IPV4_TCP |
 					   ETH_RSS_NONFRAG_IPV4_UDP |
@@ -1462,15 +1476,17 @@
 	const struct rte_memzone *tz;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_hw *hw;
+	uint32_t rx_desc_sz;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
 	/* Validating number of descriptors */
-	if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
-	    (nb_desc > NFP_NET_MAX_RX_DESC) ||
-	    (nb_desc < NFP_NET_MIN_RX_DESC)) {
+	rx_desc_sz = nb_desc * sizeof(struct nfp_net_rx_desc);
+	if (rx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
+	    nb_desc > NFP_NET_MAX_RX_DESC ||
+	    nb_desc < NFP_NET_MIN_RX_DESC) {
 		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
 		return -EINVAL;
 	}
@@ -1609,15 +1625,17 @@
 	struct nfp_net_txq *txq;
 	uint16_t tx_free_thresh;
 	struct nfp_net_hw *hw;
+	uint32_t tx_desc_sz;
 
 	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
 	/* Validating number of descriptors */
-	if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
-	    (nb_desc > NFP_NET_MAX_TX_DESC) ||
-	    (nb_desc < NFP_NET_MIN_TX_DESC)) {
+	tx_desc_sz = nb_desc * sizeof(struct nfp_net_tx_desc);
+	if (tx_desc_sz % NFP_ALIGN_RING_DESC != 0 ||
+	    nb_desc > NFP_NET_MAX_TX_DESC ||
+	    nb_desc < NFP_NET_MIN_TX_DESC) {
 		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
 		return -EINVAL;
 	}
diff -Nru dpdk-18.11.10/drivers/net/nfp/nfp_net_pmd.h dpdk-18.11.11/drivers/net/nfp/nfp_net_pmd.h
--- dpdk-18.11.10/drivers/net/nfp/nfp_net_pmd.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/nfp/nfp_net_pmd.h	2021-01-20 12:18:20.000000000 +0000
@@ -59,6 +59,12 @@
 #define NFP_NET_MAX_RX_DESC (32 * 1024)
 #define NFP_NET_MIN_RX_DESC 64
 
+/* Descriptor alignment */
+#define NFP_ALIGN_RING_DESC 128
+
+#define NFP_TX_MAX_SEG     UINT8_MAX
+#define NFP_TX_MAX_MTU_SEG 8
+
 /* Bar allocation */
 #define NFP_NET_CRTL_BAR        0
 #define NFP_NET_TX_BAR          2
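
[Editor's note: with NFP_ALIGN_RING_DESC defined above, the queue setup
paths validate that the descriptor ring's byte size is a multiple of the
alignment and that the count sits within the advertised limits. A small
sketch of that check; the helper itself is invented.]

	#include <stdbool.h>
	#include <stdint.h>

	#define ALIGN_RING_DESC 128

	static bool nb_desc_valid(uint16_t nb_desc, uint32_t desc_sz,
				  uint16_t nb_min, uint16_t nb_max)
	{
		uint32_t ring_sz = (uint32_t)nb_desc * desc_sz;

		/* ring must be alignment-sized and within the limits */
		return ring_sz % ALIGN_RING_DESC == 0 &&
		       nb_desc >= nb_min && nb_desc <= nb_max;
	}
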
diff -Nru dpdk-18.11.10/drivers/net/qede/base/bcm_osal.h dpdk-18.11.11/drivers/net/qede/base/bcm_osal.h
--- dpdk-18.11.10/drivers/net/qede/base/bcm_osal.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/qede/base/bcm_osal.h	2021-01-20 12:18:20.000000000 +0000
@@ -81,9 +81,8 @@
 
 #define DELAY(x) rte_delay_us(x)
 #define usec_delay(x) DELAY(x)
-#define msec_delay(x) DELAY(1000 * (x))
 #define OSAL_UDELAY(time) usec_delay(time)
-#define OSAL_MSLEEP(time) msec_delay(time)
+#define OSAL_MSLEEP(time) rte_delay_us_sleep(1000 * (time))
 
 /* Memory allocations and deallocations */
 
diff -Nru dpdk-18.11.10/drivers/net/qede/base/ecore_sriov.c dpdk-18.11.11/drivers/net/qede/base/ecore_sriov.c
--- dpdk-18.11.10/drivers/net/qede/base/ecore_sriov.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/qede/base/ecore_sriov.c	2021-01-20 12:18:20.000000000 +0000
@@ -4009,7 +4009,7 @@
 		rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
 		if (rc) {
 			/* TODO - again, a mess... */
-			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
 			       vfid);
 			return rc;
 		}
diff -Nru dpdk-18.11.10/drivers/net/qede/qede_main.c dpdk-18.11.11/drivers/net/qede/qede_main.c
--- dpdk-18.11.10/drivers/net/qede/qede_main.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/qede/qede_main.c	2021-01-20 12:18:20.000000000 +0000
@@ -575,13 +575,12 @@
 	hwfn = &edev->hwfns[0];
 	if (IS_PF(edev)) {
 		ptt = ecore_ptt_acquire(hwfn);
-		if (!ptt)
-			DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n");
-
+		if (ptt) {
 			qed_fill_link(hwfn, ptt, if_link);
-
-		if (ptt)
 			ecore_ptt_release(hwfn, ptt);
+		} else {
+			DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n");
+		}
 	} else {
 		qed_fill_link(hwfn, NULL, if_link);
 	}
diff -Nru dpdk-18.11.10/drivers/net/qede/qede_rxtx.c dpdk-18.11.11/drivers/net/qede/qede_rxtx.c
--- dpdk-18.11.10/drivers/net/qede/qede_rxtx.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/qede/qede_rxtx.c	2021-01-20 12:18:20.000000000 +0000
@@ -676,9 +676,9 @@
 
 	for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
 		fp = &qdev->fp_array[sb_idx];
-		DP_INFO(edev, "Free sb_info index 0x%x\n",
-				fp->sb_info->igu_sb_id);
 		if (fp->sb_info) {
+			DP_INFO(edev, "Free sb_info index 0x%x\n",
+					fp->sb_info->igu_sb_id);
 			OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
 				fp->sb_info->sb_phys,
 				sizeof(struct status_block_e4));
diff -Nru dpdk-18.11.10/drivers/net/ring/rte_eth_ring.c dpdk-18.11.11/drivers/net/ring/rte_eth_ring.c
--- dpdk-18.11.10/drivers/net/ring/rte_eth_ring.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/ring/rte_eth_ring.c	2021-01-20 12:18:20.000000000 +0000
@@ -16,6 +16,7 @@
 #define ETH_RING_ACTION_CREATE		"CREATE"
 #define ETH_RING_ACTION_ATTACH		"ATTACH"
 #define ETH_RING_INTERNAL_ARG		"internal"
+#define ETH_RING_INTERNAL_ARG_MAX_LEN	19 /* "0x..16chars..\0" */
 
 static const char *valid_arguments[] = {
 	ETH_RING_NUMA_NODE_ACTION_ARG,
@@ -541,8 +542,21 @@
 {
 	struct ring_internal_args **internal_args = data;
 	void *args;
+	int ret, n;
 
-	sscanf(value, "%p", &args);
+	/* make sure 'value' is valid pointer length */
+	if (strnlen(value, ETH_RING_INTERNAL_ARG_MAX_LEN) >=
+			ETH_RING_INTERNAL_ARG_MAX_LEN) {
+		PMD_LOG(ERR, "Error parsing internal args, argument is too long");
+		return -1;
+	}
+
+	ret = sscanf(value, "%p%n", &args, &n);
+	if (ret == 0 || (size_t)n != strlen(value)) {
+		PMD_LOG(ERR, "Error parsing internal args");
+
+		return -1;
+	}
 
 	*internal_args = args;
 
@@ -581,7 +595,7 @@
 
 		if (!kvlist) {
 			PMD_LOG(INFO,
-				"Ignoring unsupported parameters when creatingrings-backed ethernet device");
+				"Ignoring unsupported parameters when creating rings-backed ethernet device");
 			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
 						  DEV_CREATE, &eth_dev);
 			if (ret == -1) {
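
[Editor's note: the parser hardening above is twofold: strnlen() rejects
strings longer than any pointer literal, and the %n conversion confirms
sscanf() consumed the whole token, so trailing junk such as "0x1234xyz" no
longer parses. A standalone sketch of the same checks; parse_ptr and the
length limit are illustrative, and this version also requires exactly one
successful conversion.]

	#include <stdio.h>
	#include <string.h>

	#define PTR_ARG_MAX_LEN 19	/* "0x" + 16 hex digits + '\0' */

	static int parse_ptr(const char *value, void **out)
	{
		int n = 0;

		if (strnlen(value, PTR_ARG_MAX_LEN) >= PTR_ARG_MAX_LEN)
			return -1;	/* longer than any pointer literal */
		/* %n records how many characters were consumed */
		if (sscanf(value, "%p%n", out, &n) != 1 ||
		    (size_t)n != strlen(value))
			return -1;	/* nothing parsed, or trailing junk */
		return 0;
	}
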
diff -Nru dpdk-18.11.10/drivers/net/sfc/base/efx_tunnel.c dpdk-18.11.11/drivers/net/sfc/base/efx_tunnel.c
--- dpdk-18.11.10/drivers/net/sfc/base/efx_tunnel.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/sfc/base/efx_tunnel.c	2021-01-20 12:18:20.000000000 +0000
@@ -421,7 +421,7 @@
 {
 	efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
 	efx_rc_t rc;
-	boolean_t resetting;
+	boolean_t resetting = B_FALSE;
 	efsys_lock_state_t state;
 	efx_tunnel_cfg_t etc;
 
@@ -446,8 +446,14 @@
 		 */
 		rc = efx_mcdi_set_tunnel_encap_udp_ports(enp, &etc, B_FALSE,
 		    &resetting);
-		if (rc != 0)
-			goto fail2;
+		if (rc != 0) {
+			/*
+			 * Do not fail if the access is denied when no
+			 * tunnel encap UDP ports are configured.
+			 */
+			if (rc != EACCES || etc.etc_udp_entries_num != 0)
+				goto fail2;
+		}
 
 		/*
 		 * Although the caller should be able to handle MC reboot,
diff -Nru dpdk-18.11.10/drivers/net/tap/rte_eth_tap.c dpdk-18.11.11/drivers/net/tap/rte_eth_tap.c
--- dpdk-18.11.10/drivers/net/tap/rte_eth_tap.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/tap/rte_eth_tap.c	2021-01-20 12:18:20.000000000 +0000
@@ -1068,6 +1068,9 @@
 				&internals->remote_initial_flags);
 	}
 
+	rte_mempool_free(internals->gso_ctx_mp);
+	internals->gso_ctx_mp = NULL;
+
 	if (internals->ka_fd != -1) {
 		close(internals->ka_fd);
 		internals->ka_fd = -1;
@@ -1249,26 +1252,31 @@
 {
 	uint32_t gso_types;
 	char pool_name[64];
-
-	/*
-	 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
-	 * size per mbuf use this pool for both direct and indirect mbufs
-	 */
-
-	struct rte_mempool *mp;      /* Mempool for GSO packets */
+	struct pmd_internals *pmd = dev->data->dev_private;
+	int ret;
 
 	/* initialize GSO context */
 	gso_types = DEV_TX_OFFLOAD_TCP_TSO;
-	snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
-	mp = rte_mempool_lookup((const char *)pool_name);
-	if (!mp) {
-		mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
-			TAP_GSO_MBUF_CACHE_SIZE, 0,
+	if (!pmd->gso_ctx_mp) {
+		/*
+		 * Create private mbuf pool with TAP_GSO_MBUF_SEG_SIZE
+		 * bytes size per mbuf use this pool for both direct and
+		 * indirect mbufs
+		 */
+		ret = snprintf(pool_name, sizeof(pool_name), "mp_%s",
+				dev->device->name);
+		if (ret < 0 || ret >= (int)sizeof(pool_name)) {
+			TAP_LOG(ERR,
+				"%s: failed to create mbuf pool name for device %s,"
+				"device name too long or output error, ret: %d\n",
+				pmd->name, dev->device->name, ret);
+			return -ENAMETOOLONG;
+		}
+		pmd->gso_ctx_mp = rte_pktmbuf_pool_create(pool_name,
+			TAP_GSO_MBUFS_NUM, TAP_GSO_MBUF_CACHE_SIZE, 0,
 			RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
 			SOCKET_ID_ANY);
-		if (!mp) {
-			struct pmd_internals *pmd = dev->data->dev_private;
-
+		if (!pmd->gso_ctx_mp) {
 			TAP_LOG(ERR,
 				"%s: failed to create mbuf pool for device %s\n",
 				pmd->name, dev->device->name);
@@ -1276,8 +1284,8 @@
 		}
 	}
 
-	gso_ctx->direct_pool = mp;
-	gso_ctx->indirect_pool = mp;
+	gso_ctx->direct_pool = pmd->gso_ctx_mp;
+	gso_ctx->indirect_pool = pmd->gso_ctx_mp;
 	gso_ctx->gso_types = gso_types;
 	gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
 	gso_ctx->flag = 0;
@@ -1770,6 +1778,7 @@
 	pmd->type = type;
 	pmd->ka_fd = -1;
 	pmd->nlsk_fd = -1;
+	pmd->gso_ctx_mp = NULL;
 
 	pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
 	if (pmd->ioctl_sock == -1) {
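
[Editor's note: beyond caching the GSO mempool in the pmd internals, so that
close can free it and a restart cannot leak it, the hunk above checks
snprintf() for truncation: the return value is the length the full string
would have had, so a value >= sizeof(buf) means the pool name was cut short.
A minimal sketch of that check; make_pool_name is invented.]

	#include <errno.h>
	#include <stdio.h>

	static int make_pool_name(char *buf, size_t sz, const char *dev_name)
	{
		int ret = snprintf(buf, sz, "mp_%s", dev_name);

		/* snprintf reports the untruncated length */
		if (ret < 0 || (size_t)ret >= sz)
			return -ENAMETOOLONG;
		return 0;
	}
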
diff -Nru dpdk-18.11.10/drivers/net/tap/rte_eth_tap.h dpdk-18.11.11/drivers/net/tap/rte_eth_tap.h
--- dpdk-18.11.10/drivers/net/tap/rte_eth_tap.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/tap/rte_eth_tap.h	2021-01-20 12:18:20.000000000 +0000
@@ -91,6 +91,7 @@
 	struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
 	struct rte_intr_handle intr_handle;          /* LSC interrupt handle. */
 	int ka_fd;                        /* keep-alive file descriptor */
+	struct rte_mempool *gso_ctx_mp;     /* Mempool for GSO packets */
 };
 
 struct pmd_process_private {
diff -Nru dpdk-18.11.10/drivers/net/thunderx/nicvf_ethdev.c dpdk-18.11.11/drivers/net/thunderx/nicvf_ethdev.c
--- dpdk-18.11.10/drivers/net/thunderx/nicvf_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/thunderx/nicvf_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -645,6 +645,7 @@
 				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
 	if (rz == NULL) {
 		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
+		rte_free(rbdr);
 		return -ENOMEM;
 	}
 
diff -Nru dpdk-18.11.10/drivers/net/vdev_netvsc/vdev_netvsc.c dpdk-18.11.11/drivers/net/vdev_netvsc/vdev_netvsc.c
--- dpdk-18.11.10/drivers/net/vdev_netvsc/vdev_netvsc.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/vdev_netvsc/vdev_netvsc.c	2021-01-20 12:18:20.000000000 +0000
@@ -684,6 +684,7 @@
 	int ret;
 
 	DRV_LOG(DEBUG, "invoked as \"%s\", using arguments \"%s\"", name, args);
+	rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
 	if (!kvargs) {
 		DRV_LOG(ERR, "cannot parse arguments list");
 		goto error;
@@ -699,17 +700,13 @@
 			 !strcmp(pair->key, VDEV_NETVSC_ARG_MAC))
 			++specified;
 	}
-	if (ignore) {
-		if (kvargs)
-			rte_kvargs_free(kvargs);
-		return 0;
-	}
+	if (ignore)
+		goto ignore;
 	if (specified > 1) {
 		DRV_LOG(ERR, "More than one way used to specify the netvsc"
 			" device.");
 		goto error;
 	}
-	rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
 	/* Gather interfaces. */
 	ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 1, name,
 					kvargs, specified, &matched);
@@ -730,17 +727,19 @@
 		}
 		DRV_LOG(WARNING, "non-netvsc device was probed as netvsc");
 	}
-	ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
-				vdev_netvsc_alarm, NULL);
-	if (ret < 0) {
-		DRV_LOG(ERR, "unable to schedule alarm callback: %s",
-			rte_strerror(-ret));
-		goto error;
-	}
 error:
+	++vdev_netvsc_ctx_inst;
+ignore:
 	if (kvargs)
 		rte_kvargs_free(kvargs);
-	++vdev_netvsc_ctx_inst;
+	/* Reset the alarm if any device contexts were created */
+	if (vdev_netvsc_ctx_count) {
+		ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
+					vdev_netvsc_alarm, NULL);
+		if (ret < 0)
+			DRV_LOG(ERR, "unable to schedule alarm callback: %s",
+				rte_strerror(-ret));
+	}
 	return 0;
 }
 
diff -Nru dpdk-18.11.10/drivers/net/vhost/rte_eth_vhost.c dpdk-18.11.11/drivers/net/vhost/rte_eth_vhost.c
--- dpdk-18.11.10/drivers/net/vhost/rte_eth_vhost.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/vhost/rte_eth_vhost.c	2021-01-20 12:18:20.000000000 +0000
@@ -66,6 +66,9 @@
 	VHOST_BROADCAST_PKT,
 	VHOST_MULTICAST_PKT,
 	VHOST_UNICAST_PKT,
+	VHOST_PKT,
+	VHOST_BYTE,
+	VHOST_MISSED_PKT,
 	VHOST_ERRORS_PKT,
 	VHOST_ERRORS_FRAGMENTED,
 	VHOST_ERRORS_JABBER,
@@ -139,11 +142,11 @@
 /* [rx]_is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -179,11 +182,11 @@
 /* [tx]_ is prepended to the name string here */
 static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
 	{"good_packets",
-	 offsetof(struct vhost_queue, stats.pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
 	{"total_bytes",
-	 offsetof(struct vhost_queue, stats.bytes)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
 	{"missed_pkts",
-	 offsetof(struct vhost_queue, stats.missed_pkts)},
+	 offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
 	{"broadcast_packets",
 	 offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
 	{"multicast_packets",
@@ -275,23 +278,6 @@
 	if (n < nxstats)
 		return nxstats;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		vq = dev->data->rx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
-	}
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		vq = dev->data->tx_queues[i];
-		if (!vq)
-			continue;
-		vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
-				+ vq->stats.missed_pkts
-				- (vq->stats.xstats[VHOST_BROADCAST_PKT]
-				+ vq->stats.xstats[VHOST_MULTICAST_PKT]);
-	}
 	for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
 		xstats[count].value = 0;
 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -322,7 +308,7 @@
 }
 
 static inline void
-vhost_count_multicast_broadcast(struct vhost_queue *vq,
+vhost_count_xcast_packets(struct vhost_queue *vq,
 				struct rte_mbuf *mbuf)
 {
 	struct ether_addr *ea = NULL;
@@ -334,20 +320,27 @@
 			pstats->xstats[VHOST_BROADCAST_PKT]++;
 		else
 			pstats->xstats[VHOST_MULTICAST_PKT]++;
+	} else {
+		pstats->xstats[VHOST_UNICAST_PKT]++;
 	}
 }
 
 static void
-vhost_update_packet_xstats(struct vhost_queue *vq,
-			   struct rte_mbuf **bufs,
-			   uint16_t count)
+vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
+			   uint16_t count, uint64_t nb_bytes,
+			   uint64_t nb_missed)
 {
 	uint32_t pkt_len = 0;
 	uint64_t i = 0;
 	uint64_t index;
 	struct vhost_stats *pstats = &vq->stats;
 
+	pstats->xstats[VHOST_BYTE] += nb_bytes;
+	pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
+	pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
+
 	for (i = 0; i < count ; i++) {
+		pstats->xstats[VHOST_PKT]++;
 		pkt_len = bufs[i]->pkt_len;
 		if (pkt_len == 64) {
 			pstats->xstats[VHOST_64_PKT]++;
@@ -363,7 +356,7 @@
 			else if (pkt_len > 1522)
 				pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
 		}
-		vhost_count_multicast_broadcast(vq, bufs[i]);
+		vhost_count_xcast_packets(vq, bufs[i]);
 	}
 }
 
@@ -373,6 +366,7 @@
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 	uint16_t nb_receive = nb_bufs;
+	uint64_t nb_bytes = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -407,10 +401,11 @@
 		if (r->internal->vlan_strip)
 			rte_vlan_strip(bufs[i]);
 
-		r->stats.bytes += bufs[i]->pkt_len;
+		nb_bytes += bufs[i]->pkt_len;
 	}
 
-	vhost_update_packet_xstats(r, bufs, nb_rx);
+	r->stats.bytes += nb_bytes;
+	vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
 
 out:
 	rte_atomic32_set(&r->while_queuing, 0);
@@ -424,6 +419,8 @@
 	struct vhost_queue *r = q;
 	uint16_t i, nb_tx = 0;
 	uint16_t nb_send = 0;
+	uint64_t nb_bytes = 0;
+	uint64_t nb_missed = 0;
 
 	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
 		return 0;
@@ -464,20 +461,23 @@
 			break;
 	}
 
+	for (i = 0; likely(i < nb_tx); i++)
+		nb_bytes += bufs[i]->pkt_len;
+
+	nb_missed = nb_bufs - nb_tx;
+
 	r->stats.pkts += nb_tx;
+	r->stats.bytes += nb_bytes;
 	r->stats.missed_pkts += nb_bufs - nb_tx;
 
-	for (i = 0; likely(i < nb_tx); i++)
-		r->stats.bytes += bufs[i]->pkt_len;
-
-	vhost_update_packet_xstats(r, bufs, nb_tx);
+	vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
 
-	/* According to RFC2863 page42 section ifHCOutMulticastPkts and
-	 * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
-	 * are increased when packets are not transmitted successfully.
+	/* According to RFC2863, ifHCOutUcastPkts, ifHCOutMulticastPkts and
+	 * ifHCOutBroadcastPkts counters are increased when packets are not
+	 * transmitted successfully.
 	 */
 	for (i = nb_tx; i < nb_bufs; i++)
-		vhost_count_multicast_broadcast(r, bufs[i]);
+		vhost_count_xcast_packets(r, bufs[i]);
 
 	for (i = 0; likely(i < nb_tx); i++)
 		rte_pktmbuf_free(bufs[i]);
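
The net/vhost hunks above fold byte and missed-packet accounting into
vhost_update_packet_xstats() and classify every frame at RX/TX time instead
of deriving unicast counts afterwards. A minimal sketch of the per-packet
classification step, assuming the DPDK 18.11 ether helpers
(is_multicast_ether_addr(), is_broadcast_ether_addr()); the counter
parameters are placeholders, not the driver's fields:

	#include <rte_ether.h>
	#include <rte_mbuf.h>

	/* Illustrative only: mirrors what the renamed
	 * vhost_count_xcast_packets() now does for each packet. */
	static void
	count_xcast(struct rte_mbuf *m, uint64_t *ucast, uint64_t *mcast,
			uint64_t *bcast)
	{
		struct ether_addr *ea =
			rte_pktmbuf_mtod(m, struct ether_addr *);

		if (is_multicast_ether_addr(ea)) {
			if (is_broadcast_ether_addr(ea))
				(*bcast)++;
			else
				(*mcast)++;
		} else {
			(*ucast)++;	/* the branch this fix adds */
		}
	}

Note that the TX path also runs this classification over the nb_missed
tail, matching the RFC2863 comment in the hunk above.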
diff -Nru dpdk-18.11.10/drivers/net/virtio/virtio_rxtx.c dpdk-18.11.11/drivers/net/virtio/virtio_rxtx.c
--- dpdk-18.11.10/drivers/net/virtio/virtio_rxtx.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/virtio/virtio_rxtx.c	2021-01-20 12:18:20.000000000 +0000
@@ -843,9 +843,10 @@
 			 */
 			uint16_t csum = 0, off;
 
-			rte_raw_cksum_mbuf(m, hdr->csum_start,
+			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
-				&csum);
+				&csum) < 0)
+				return -EINVAL;
 			if (likely(csum != 0xffff))
 				csum = ~csum;
 			off = hdr->csum_offset + hdr->csum_start;
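
The virtio hunk above stops ignoring rte_raw_cksum_mbuf()'s return value;
the function (declared in rte_ip.h) returns a negative value when the
requested offset/length does not fit the mbuf chain, in which case csum
would otherwise be used uninitialized. The defensive pattern in isolation,
with the caller context assumed:

	uint16_t csum = 0;

	if (rte_raw_cksum_mbuf(m, off, rte_pktmbuf_pkt_len(m) - off,
			&csum) < 0)
		return -EINVAL;	/* range outside the mbuf chain */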
diff -Nru dpdk-18.11.10/drivers/net/virtio/virtio_user/vhost_kernel_tap.c dpdk-18.11.11/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
--- dpdk-18.11.10/drivers/net/virtio/virtio_user/vhost_kernel_tap.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/net/virtio/virtio_user/vhost_kernel_tap.c	2021-01-20 12:18:20.000000000 +0000
@@ -39,7 +39,7 @@
 
 	/* Check if our kernel supports TUNSETOFFLOAD */
 	if (ioctl(fd, TUNSETOFFLOAD, 0) != 0 && errno == EINVAL) {
-		PMD_DRV_LOG(ERR, "Kernel does't support TUNSETOFFLOAD\n");
+		PMD_DRV_LOG(ERR, "Kernel doesn't support TUNSETOFFLOAD\n");
 		return -ENOTSUP;
 	}
 
diff -Nru dpdk-18.11.10/drivers/raw/dpaa2_qdma/dpaa2_qdma.c dpdk-18.11.11/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
--- dpdk-18.11.10/drivers/raw/dpaa2_qdma/dpaa2_qdma.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/raw/dpaa2_qdma/dpaa2_qdma.c	2021-01-20 12:18:20.000000000 +0000
@@ -179,9 +179,10 @@
 	/* In case there are pending jobs on any VQ, return -EBUSY */
 	for (i = 0; i < qdma_dev.max_vqs; i++) {
 		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
-		    qdma_vqs[i].num_dequeues))
+		    qdma_vqs[i].num_dequeues)) {
 			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
 			return -EBUSY;
+		}
 	}
 
 	/* Reset HW queues */
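
The dpaa2_qdma hunk is a classic missing-braces fix: previously only the
log call was guarded, so the indented return executed on every iteration's
first match regardless of the condition. In miniature (busy and
log_error() are hypothetical stand-ins, not the driver code):

	/* Bug pattern (what the old code did): */
	if (busy)
		log_error();
		return -EBUSY;	/* unconditionally executed */

	/* Fix: brace the compound body. */
	if (busy) {
		log_error();
		return -EBUSY;
	}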
diff -Nru dpdk-18.11.10/drivers/raw/skeleton_rawdev/skeleton_rawdev.c dpdk-18.11.11/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
--- dpdk-18.11.10/drivers/raw/skeleton_rawdev/skeleton_rawdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/raw/skeleton_rawdev/skeleton_rawdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -189,9 +189,11 @@
 		}
 		break;
 	case SKELETON_FW_READY:
+		SKELETON_PMD_DEBUG("Device already in stopped state");
+		break;
 	case SKELETON_FW_ERROR:
 	default:
-		SKELETON_PMD_DEBUG("Device already in stopped state");
+		SKELETON_PMD_DEBUG("Device in impossible state");
 		ret = -EINVAL;
 		break;
 	}
diff -Nru dpdk-18.11.10/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c dpdk-18.11.11/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
--- dpdk-18.11.10/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c	2021-01-20 12:18:20.000000000 +0000
@@ -41,6 +41,12 @@
 testsuite_setup(void)
 {
 	uint8_t count;
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+	unsupported = 0;
+
 	count = rte_rawdev_count();
 	if (!count) {
 		SKELDEV_TEST_INFO("\tNo existing rawdev; "
diff -Nru dpdk-18.11.10/examples/fips_validation/fips_validation.c dpdk-18.11.11/examples/fips_validation/fips_validation.c
--- dpdk-18.11.10/examples/fips_validation/fips_validation.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/fips_validation/fips_validation.c	2021-01-20 12:18:20.000000000 +0000
@@ -105,7 +105,7 @@
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < info.nb_vec_lines; i++) {
+	for (i = 1; i < info.nb_vec_lines; i++) {
 		if (strstr(info.vec[i], "AESVS")) {
 			info.algo = FIPS_TEST_ALGO_AES;
 			ret = parse_test_aes_init();
@@ -255,7 +255,11 @@
 		return -ENOMEM;
 	}
 
-	strlcpy(info.device_name, device_name, sizeof(info.device_name));
+	if (rte_strscpy(info.device_name, device_name,
+				sizeof(info.device_name)) < 0) {
+		RTE_LOG(ERR, USER1, "Device name %s too long\n", device_name);
+		return -EINVAL;
+	}
 
 	if (fips_test_parse_header() < 0) {
 		RTE_LOG(ERR, USER1, "Failed parsing header\n");
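
strlcpy() truncates silently; rte_strscpy() (rte_string_fns.h, available
in DPDK 18.11) instead returns a negative errno on truncation, which the
fips_validation hunk uses to reject over-long device names rather than
continue with a mangled one. Usage sketch with a hypothetical buffer:

	#include <rte_string_fns.h>

	char name[64];

	if (rte_strscpy(name, input, sizeof(name)) < 0)
		return -EINVAL;	/* input did not fit; refuse it */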
diff -Nru dpdk-18.11.10/examples/kni/Makefile dpdk-18.11.11/examples/kni/Makefile
--- dpdk-18.11.10/examples/kni/Makefile	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/kni/Makefile	2021-01-20 12:18:20.000000000 +0000
@@ -25,6 +25,8 @@
 LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
 LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
 
+LDFLAGS += -pthread
+
 build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
 	$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
 
diff -Nru dpdk-18.11.10/examples/l2fwd-crypto/Makefile dpdk-18.11.11/examples/l2fwd-crypto/Makefile
--- dpdk-18.11.10/examples/l2fwd-crypto/Makefile	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/l2fwd-crypto/Makefile	2021-01-20 12:18:20.000000000 +0000
@@ -24,6 +24,12 @@
 LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
 LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
 
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CONFIG_DEFINES = $(shell $(CC) $(CFLAGS) -dM -E - < /dev/null)
+ifneq ($(findstring RTE_CRYPTO_SCHEDULER,$(CONFIG_DEFINES)),)
+LDFLAGS_SHARED += -lrte_crypto_scheduler
+endif
+
 build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
 	$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
 
diff -Nru dpdk-18.11.10/examples/l2fwd-crypto/meson.build dpdk-18.11.11/examples/l2fwd-crypto/meson.build
--- dpdk-18.11.10/examples/l2fwd-crypto/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/l2fwd-crypto/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -7,6 +7,9 @@
 # DPDK instance, use 'make'
 
 deps += 'cryptodev'
+if dpdk_conf.has('RTE_LIBRTE_PMD_CRYPTO_SCHEDULER')
+	deps += 'pmd_crypto_scheduler'
+endif
 sources = files(
 	'main.c'
 )
diff -Nru dpdk-18.11.10/examples/l2fwd-keepalive/meson.build dpdk-18.11.11/examples/l2fwd-keepalive/meson.build
--- dpdk-18.11.10/examples/l2fwd-keepalive/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/l2fwd-keepalive/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -6,7 +6,13 @@
 # To build this example as a standalone application with an already-installed
 # DPDK instance, use 'make'
 
-ext_deps += cc.find_library('rt')
+librt = cc.find_library('rt', required: false)
+if not librt.found()
+	build = false
+	subdir_done()
+endif
+
+ext_deps += librt
 deps += 'timer'
 sources = files(
 	'main.c', 'shm.c'
diff -Nru dpdk-18.11.10/examples/l3fwd-power/main.c dpdk-18.11.11/examples/l3fwd-power/main.c
--- dpdk-18.11.10/examples/l3fwd-power/main.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/l3fwd-power/main.c	2021-01-20 12:18:20.000000000 +0000
@@ -2149,9 +2149,7 @@
 				if (add_cb_parse_ptype(portid, queueid) < 0)
 					rte_exit(EXIT_FAILURE,
 						 "Fail to add ptype cb\n");
-			} else if (!check_ptype(portid))
-				rte_exit(EXIT_FAILURE,
-					 "PMD can not provide needed ptypes\n");
+			}
 		}
 	}
 
@@ -2177,6 +2175,11 @@
 			rte_eth_promiscuous_enable(portid);
 		/* initialize spinlock for each port */
 		rte_spinlock_init(&(locks[portid]));
+
+		if (!parse_ptype)
+			if (!check_ptype(portid))
+				rte_exit(EXIT_FAILURE,
+					"PMD can not provide needed ptypes\n");
 	}
 
 	check_all_ports_link_status(enabled_port_mask);
diff -Nru dpdk-18.11.10/examples/multi_process/client_server_mp/mp_server/main.c dpdk-18.11.11/examples/multi_process/client_server_mp/mp_server/main.c
--- dpdk-18.11.10/examples/multi_process/client_server_mp/mp_server/main.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/multi_process/client_server_mp/mp_server/main.c	2021-01-20 12:18:20.000000000 +0000
@@ -59,18 +59,21 @@
 static const char *
 get_printable_mac_addr(uint16_t port)
 {
-	static const char err_address[] = "00:00:00:00:00:00";
-	static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
+	static const struct ether_addr null_mac; /* static defaults to 0 */
+	static char err_address[32];
+	static char addresses[RTE_MAX_ETHPORTS][32];
 
-	if (unlikely(port >= RTE_MAX_ETHPORTS))
+	if (unlikely(port >= RTE_MAX_ETHPORTS)) {
+		if (err_address[0] == '\0')
+			ether_format_addr(err_address,
+					sizeof(err_address), &null_mac);
 		return err_address;
+	}
 	if (unlikely(addresses[port][0]=='\0')){
 		struct ether_addr mac;
 		rte_eth_macaddr_get(port, &mac);
-		snprintf(addresses[port], sizeof(addresses[port]),
-				"%02x:%02x:%02x:%02x:%02x:%02x\n",
-				mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2],
-				mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]);
+		ether_format_addr(addresses[port],
+				sizeof(addresses[port]), &mac);
 	}
 	return addresses[port];
 }
diff -Nru dpdk-18.11.10/examples/multi_process/client_server_mp/shared/common.h dpdk-18.11.11/examples/multi_process/client_server_mp/shared/common.h
--- dpdk-18.11.10/examples/multi_process/client_server_mp/shared/common.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/multi_process/client_server_mp/shared/common.h	2021-01-20 12:18:20.000000000 +0000
@@ -43,7 +43,7 @@
  * Given the rx queue name template above, get the queue name
  */
 static inline const char *
-get_rx_queue_name(unsigned id)
+get_rx_queue_name(uint8_t id)
 {
 	/* buffer for return value. Size calculated by %u being replaced
 	 * by maximum 3 digits (plus an extra byte for safety) */
diff -Nru dpdk-18.11.10/examples/performance-thread/l3fwd-thread/main.c dpdk-18.11.11/examples/performance-thread/l3fwd-thread/main.c
--- dpdk-18.11.10/examples/performance-thread/l3fwd-thread/main.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/performance-thread/l3fwd-thread/main.c	2021-01-20 12:18:20.000000000 +0000
@@ -2,6 +2,10 @@
  * Copyright(c) 2010-2016 Intel Corporation
  */
 
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
@@ -12,6 +16,7 @@
 #include <stdarg.h>
 #include <errno.h>
 #include <getopt.h>
+#include <sched.h>
 
 #include <rte_common.h>
 #include <rte_vect.h>
@@ -599,8 +604,8 @@
 struct thread_tx_conf {
 	struct thread_conf conf;
 
-	uint16_t tx_queue_id[RTE_MAX_LCORE];
-	struct mbuf_table tx_mbufs[RTE_MAX_LCORE];
+	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
+	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
 
 	struct rte_ring *ring;
 	struct lthread_cond **ready;
diff -Nru dpdk-18.11.10/examples/qos_sched/args.c dpdk-18.11.11/examples/qos_sched/args.c
--- dpdk-18.11.10/examples/qos_sched/args.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/qos_sched/args.c	2021-01-20 12:18:20.000000000 +0000
@@ -39,7 +39,7 @@
 	"           multiple pfc can be configured in command line                      \n"
 	"                                                                               \n"
 	"Application optional parameters:                                               \n"
-        "    --i     : run in interactive mode (default value is %u)                    \n"
+	"    -i      : run in interactive mode (default value is %u)                    \n"
 	"    --mst I : master core index (default value is %u)                          \n"
 	"    --rsz \"A, B, C\" :   Ring sizes                                           \n"
 	"           A = Size (in number of buffer descriptors) of each of the NIC RX    \n"
diff -Nru dpdk-18.11.10/examples/vhost_crypto/main.c dpdk-18.11.11/examples/vhost_crypto/main.c
--- dpdk-18.11.10/examples/vhost_crypto/main.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/vhost_crypto/main.c	2021-01-20 12:18:20.000000000 +0000
@@ -194,7 +194,7 @@
 {
 	printf("%s [EAL options] --\n"
 		"  --%s <lcore>,SOCKET-FILE-PATH\n"
-		"  --%s (lcore,cdev_id,queue_id)[,(lcore,cdev_id,queue_id)]"
+		"  --%s (lcore,cdev_id,queue_id)[,(lcore,cdev_id,queue_id)]\n"
 		"  --%s: zero copy\n"
 		"  --%s: guest polling\n",
 		prgname, SOCKET_FILE_KEYWORD, CONFIG_KEYWORD,
diff -Nru dpdk-18.11.10/examples/vm_power_manager/channel_manager.c dpdk-18.11.11/examples/vm_power_manager/channel_manager.c
--- dpdk-18.11.10/examples/vm_power_manager/channel_manager.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/examples/vm_power_manager/channel_manager.c	2021-01-20 12:18:20.000000000 +0000
@@ -461,9 +461,15 @@
 			continue;
 		}
 
-		snprintf(chan_info->channel_path,
+		if ((size_t)snprintf(chan_info->channel_path,
 				sizeof(chan_info->channel_path), "%s%s",
-				CHANNEL_MGR_SOCKET_PATH, dir->d_name);
+				CHANNEL_MGR_SOCKET_PATH, dir->d_name)
+					>= sizeof(chan_info->channel_path)) {
+			RTE_LOG(ERR, CHANNEL_MANAGER, "Pathname too long for channel '%s%s'\n",
+					CHANNEL_MGR_SOCKET_PATH, dir->d_name);
+			rte_free(chan_info);
+			continue;
+		}
 
 		if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) {
 			rte_free(chan_info);
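
The channel_manager hunk relies on snprintf() returning the length the
output would have had: a result >= the buffer size means the socket path
was truncated, where the old code would silently use the cut-off string.
The idiom in isolation (buffer size and error code are placeholders):

	char path[256];

	if ((size_t)snprintf(path, sizeof(path), "%s%s", dir, name)
			>= sizeof(path))
		return -ENAMETOOLONG;	/* output would have been truncated */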
diff -Nru dpdk-18.11.10/kernel/linux/kni/compat.h dpdk-18.11.11/kernel/linux/kni/compat.h
--- dpdk-18.11.10/kernel/linux/kni/compat.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/kernel/linux/kni/compat.h	2021-01-20 12:18:20.000000000 +0000
@@ -121,6 +121,8 @@
 #define HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
 #endif
 
-#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE || \
+	(defined(RHEL_RELEASE_CODE) && \
+	 RHEL_RELEASE_VERSION(8, 3) <= RHEL_RELEASE_CODE)
 #define HAVE_TX_TIMEOUT_TXQUEUE
 #endif
diff -Nru dpdk-18.11.10/kernel/linux/kni/ethtool/igb/kcompat.h dpdk-18.11.11/kernel/linux/kni/ethtool/igb/kcompat.h
--- dpdk-18.11.10/kernel/linux/kni/ethtool/igb/kcompat.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/kernel/linux/kni/ethtool/igb/kcompat.h	2021-01-20 12:18:20.000000000 +0000
@@ -3973,4 +3973,8 @@
 	pci_aer_clear_nonfatal_status
 #endif
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)
+#define read_barrier_depends() rmb()
+#endif
+
 #endif /* _KCOMPAT_H_ */
diff -Nru dpdk-18.11.10/lib/librte_cryptodev/rte_cryptodev_pmd.h dpdk-18.11.11/lib/librte_cryptodev/rte_cryptodev_pmd.h
--- dpdk-18.11.10/lib/librte_cryptodev/rte_cryptodev_pmd.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_cryptodev/rte_cryptodev_pmd.h	2021-01-20 12:18:20.000000000 +0000
@@ -41,7 +41,8 @@
 static const char * const cryptodev_pmd_valid_params[] = {
 	RTE_CRYPTODEV_PMD_NAME_ARG,
 	RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
-	RTE_CRYPTODEV_PMD_SOCKET_ID_ARG
+	RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+	NULL
 };
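
The one-line cryptodev change matters because rte_kvargs_parse() walks a
valid-keys array until it finds a NULL entry; without the sentinel the
scan reads past the end of the array. Sketch with placeholder key names:

	static const char * const valid_keys[] = {
		"max_nb_queue_pairs",
		"socket_id",
		NULL	/* required terminator for rte_kvargs_parse() */
	};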
 
 /**
diff -Nru dpdk-18.11.10/lib/librte_distributor/rte_distributor.c dpdk-18.11.11/lib/librte_distributor/rte_distributor.c
--- dpdk-18.11.10/lib/librte_distributor/rte_distributor.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_distributor/rte_distributor.c	2021-01-20 12:18:20.000000000 +0000
@@ -43,7 +43,7 @@
 
 	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
 		rte_distributor_request_pkt_v20(d->d_v20,
-			worker_id, oldpkt[0]);
+			worker_id, count ? oldpkt[0] : NULL);
 		return;
 	}
 
@@ -52,7 +52,7 @@
 	 * Sync with worker on GET_BUF flag.
 	 */
 	while (unlikely(__atomic_load_n(retptr64, __ATOMIC_ACQUIRE)
-			& RTE_DISTRIB_GET_BUF)) {
+			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
 		rte_pause();
 		uint64_t t = rte_rdtsc()+100;
 
@@ -68,11 +68,11 @@
 	for (i = count; i < RTE_DIST_BURST_SIZE; i++)
 		buf->retptr64[i] = 0;
 
-	/* Set Return bit for each packet returned */
+	/* Set VALID_BUF bit for each packet returned */
 	for (i = count; i-- > 0; )
 		buf->retptr64[i] =
 			(((int64_t)(uintptr_t)(oldpkt[i])) <<
-			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;
+			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;
 
 	/*
 	 * Finally, set the GET_BUF  to signal to distributor that cache
@@ -102,11 +102,13 @@
 		return (pkts[0]) ? 1 : 0;
 	}
 
-	/* If bit is set, return
+	/* If any of below bits is set, return.
+	 * GET_BUF is set when distributor hasn't sent any packets yet
+	 * RETURN_BUF is set when distributor must retrieve in-flight packets
 	 * Sync with distributor to acquire bufptrs
 	 */
 	if (__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
-		& RTE_DISTRIB_GET_BUF)
+		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))
 		return -1;
 
 	/* since bufptr64 is signed, this should be an arithmetic shift */
@@ -118,7 +120,7 @@
 	}
 
 	/*
-	 * so now we've got the contents of the cacheline into an  array of
+	 * so now we've got the contents of the cacheline into an array of
 	 * mbuf pointers, so toggle the bit so scheduler can start working
 	 * on the next cacheline while we're working.
 	 * Sync with distributor on GET_BUF flag. Release bufptrs.
@@ -143,7 +145,7 @@
 	if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
 		if (return_count <= 1) {
 			pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
-				worker_id, oldpkt[0]);
+				worker_id, return_count ? oldpkt[0] : NULL);
 			return (pkts[0]) ? 1 : 0;
 		} else
 			return -EINVAL;
@@ -179,25 +181,48 @@
 		if (num == 1)
 			return rte_distributor_return_pkt_v20(d->d_v20,
 				worker_id, oldpkt[0]);
+		else if (num == 0)
+			return rte_distributor_return_pkt_v20(d->d_v20,
+				worker_id, NULL);
 		else
 			return -EINVAL;
 	}
 
+	/* Spin while handshake bits are set (scheduler clears it).
+	 * Sync with worker on GET_BUF flag.
+	 */
+	while (unlikely(__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_RELAXED)
+			& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF))) {
+		rte_pause();
+		uint64_t t = rte_rdtsc()+100;
+
+		while (rte_rdtsc() < t)
+			rte_pause();
+	}
+
 	/* Sync with distributor to acquire retptrs */
 	__atomic_thread_fence(__ATOMIC_ACQUIRE);
 	for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
 		/* Switch off the return bit first */
-		buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
+		buf->retptr64[i] = 0;
 
 	for (i = num; i-- > 0; )
 		buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
-			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;
+			RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_VALID_BUF;
 
-	/* set the GET_BUF but even if we got no returns.
-	 * Sync with distributor on GET_BUF flag. Release retptrs.
+	/* Use RETURN_BUF on bufptr64 to notify distributor that
+	 * we won't read any mbufs from there even if GET_BUF is set.
+	 * This allows distributor to retrieve in-flight already sent packets.
+	 */
+	__atomic_or_fetch(&(buf->bufptr64[0]), RTE_DISTRIB_RETURN_BUF,
+		__ATOMIC_ACQ_REL);
+
+	/* set the RETURN_BUF on retptr64 even if we got no returns.
+	 * Sync with distributor on RETURN_BUF flag. Release retptrs.
+	 * Notify distributor that we don't request more packets any more.
 	 */
 	__atomic_store_n(&(buf->retptr64[0]),
-		buf->retptr64[0] | RTE_DISTRIB_GET_BUF, __ATOMIC_RELEASE);
+		buf->retptr64[0] | RTE_DISTRIB_RETURN_BUF, __ATOMIC_RELEASE);
 
 	return 0;
 }
@@ -252,13 +277,13 @@
 
 		for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
 			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
-				if (d->in_flight_tags[i][j] == data_ptr[w]) {
+				if (d->in_flight_tags[i][w] == data_ptr[j]) {
 					output_ptr[j] = i+1;
 					break;
 				}
 		for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
 			for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
-				if (bl->tags[j] == data_ptr[w]) {
+				if (bl->tags[w] == data_ptr[j]) {
 					output_ptr[j] = i+1;
 					break;
 				}
@@ -271,6 +296,59 @@
 	 */
 }
 
+/*
+ * When worker called rte_distributor_return_pkt()
+ * and passed RTE_DISTRIB_RETURN_BUF handshake through retptr64,
+ * distributor must retrieve both inflight and backlog packets assigned
+ * to the worker and reprocess them to another worker.
+ */
+static void
+handle_worker_shutdown(struct rte_distributor *d, unsigned int wkr)
+{
+	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
+	/* double BURST size for storing both inflights and backlog */
+	struct rte_mbuf *pkts[RTE_DIST_BURST_SIZE * 2];
+	unsigned int pkts_count = 0;
+	unsigned int i;
+
+	/* If GET_BUF is cleared there are in-flight packets sent
+	 * to worker which does not require new packets.
+	 * They must be retrieved and assigned to another worker.
+	 */
+	if (!(__atomic_load_n(&(buf->bufptr64[0]), __ATOMIC_ACQUIRE)
+		& RTE_DISTRIB_GET_BUF))
+		for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
+			if (buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)
+				pkts[pkts_count++] = (void *)((uintptr_t)
+					(buf->bufptr64[i]
+						>> RTE_DISTRIB_FLAG_BITS));
+
+	/* Make following operations on handshake flags on bufptr64:
+	 * - set GET_BUF to indicate that distributor can overwrite buffer
+	 *     with new packets if worker will make a new request.
+	 * - clear RETURN_BUF to unlock reads on worker side.
+	 */
+	__atomic_store_n(&(buf->bufptr64[0]), RTE_DISTRIB_GET_BUF,
+		__ATOMIC_RELEASE);
+
+	/* Collect backlog packets from worker */
+	for (i = 0; i < d->backlog[wkr].count; i++)
+		pkts[pkts_count++] = (void *)((uintptr_t)
+			(d->backlog[wkr].pkts[i] >> RTE_DISTRIB_FLAG_BITS));
+
+	d->backlog[wkr].count = 0;
+
+	/* Clear both inflight and backlog tags */
+	for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
+		d->in_flight_tags[wkr][i] = 0;
+		d->backlog[wkr].tags[i] = 0;
+	}
+
+	/* Recursive call */
+	if (pkts_count > 0)
+		rte_distributor_process(d, pkts, pkts_count);
+}
+
 
 /*
  * When the handshake bits indicate that there are packets coming
@@ -289,19 +367,33 @@
 
 	/* Sync on GET_BUF flag. Acquire retptrs. */
 	if (__atomic_load_n(&(buf->retptr64[0]), __ATOMIC_ACQUIRE)
-		& RTE_DISTRIB_GET_BUF) {
+		& (RTE_DISTRIB_GET_BUF | RTE_DISTRIB_RETURN_BUF)) {
 		for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
-			if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
+			if (buf->retptr64[i] & RTE_DISTRIB_VALID_BUF) {
 				oldbuf = ((uintptr_t)(buf->retptr64[i] >>
 					RTE_DISTRIB_FLAG_BITS));
 				/* store returns in a circular buffer */
 				store_return(oldbuf, d, &ret_start, &ret_count);
 				count++;
-				buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
+				buf->retptr64[i] &= ~RTE_DISTRIB_VALID_BUF;
 			}
 		}
 		d->returns.start = ret_start;
 		d->returns.count = ret_count;
+
+		/* If worker requested packets with GET_BUF, set it to active
+		 * otherwise (RETURN_BUF), set it to not active.
+		 */
+		d->activesum -= d->active[wkr];
+		d->active[wkr] = !!(buf->retptr64[0] & RTE_DISTRIB_GET_BUF);
+		d->activesum += d->active[wkr];
+
+		/* If worker returned packets without requesting new ones,
+		 * handle all in-flights and backlog packets assigned to it.
+		 */
+		if (unlikely(buf->retptr64[0] & RTE_DISTRIB_RETURN_BUF))
+			handle_worker_shutdown(d, wkr);
+
 		/* Clear for the worker to populate with more returns.
 		 * Sync with distributor on GET_BUF flag. Release retptrs.
 		 */
@@ -325,12 +417,18 @@
 	struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
 	unsigned int i;
 
+	handle_returns(d, wkr);
+	if (unlikely(!d->active[wkr]))
+		return 0;
+
 	/* Sync with worker on GET_BUF flag */
 	while (!(__atomic_load_n(&(d->bufs[wkr].bufptr64[0]), __ATOMIC_ACQUIRE)
-		& RTE_DISTRIB_GET_BUF))
+		& RTE_DISTRIB_GET_BUF)) {
+		handle_returns(d, wkr);
+		if (unlikely(!d->active[wkr]))
+			return 0;
 		rte_pause();
-
-	handle_returns(d, wkr);
+	}
 
 	buf->count = 0;
 
@@ -368,19 +466,23 @@
 	int64_t next_value = 0;
 	uint16_t new_tag = 0;
 	uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
-	unsigned int i, j, w, wid;
+	unsigned int i, j, w, wid, matching_required;
 
 	if (d->alg_type == RTE_DIST_ALG_SINGLE) {
 		/* Call the old API */
 		return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
 	}
 
+	for (wid = 0 ; wid < d->num_workers; wid++)
+		handle_returns(d, wid);
+
 	if (unlikely(num_mbufs == 0)) {
 		/* Flush out all non-full cache-lines to workers. */
 		for (wid = 0 ; wid < d->num_workers; wid++) {
 			/* Sync with worker on GET_BUF flag. */
 			if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
 				__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
+				d->bufs[wid].count = 0;
 				release(d, wid);
 				handle_returns(d, wid);
 			}
@@ -388,15 +490,13 @@
 		return 0;
 	}
 
+	if (unlikely(!d->activesum))
+		return 0;
+
 	while (next_idx < num_mbufs) {
 		uint16_t matches[RTE_DIST_BURST_SIZE];
 		unsigned int pkts;
 
-		/* Sync with worker on GET_BUF flag. */
-		if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
-			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
-			d->bufs[wkr].count = 0;
-
 		if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
 			pkts = num_mbufs - next_idx;
 		else
@@ -412,22 +512,30 @@
 		for (; i < RTE_DIST_BURST_SIZE; i++)
 			flows[i] = 0;
 
-		switch (d->dist_match_fn) {
-		case RTE_DIST_MATCH_VECTOR:
-			find_match_vec(d, &flows[0], &matches[0]);
-			break;
-		default:
-			find_match_scalar(d, &flows[0], &matches[0]);
-		}
+		matching_required = 1;
+
+		for (j = 0; j < pkts; j++) {
+			if (unlikely(!d->activesum))
+				return next_idx;
 
+			if (unlikely(matching_required)) {
+				switch (d->dist_match_fn) {
+				case RTE_DIST_MATCH_VECTOR:
+					find_match_vec(d, &flows[0],
+						&matches[0]);
+					break;
+				default:
+					find_match_scalar(d, &flows[0],
+						&matches[0]);
+				}
+				matching_required = 0;
+			}
 		/*
 		 * Matches array now contain the intended worker ID (+1) of
 		 * the incoming packets. Any zeroes need to be assigned
 		 * workers.
 		 */
 
-		for (j = 0; j < pkts; j++) {
-
 			next_mb = mbufs[next_idx++];
 			next_value = (((int64_t)(uintptr_t)next_mb) <<
 					RTE_DISTRIB_FLAG_BITS);
@@ -447,12 +555,18 @@
 			 */
 			/* matches[j] = 0; */
 
-			if (matches[j]) {
+			if (matches[j] && d->active[matches[j]-1]) {
 				struct rte_distributor_backlog *bl =
 						&d->backlog[matches[j]-1];
 				if (unlikely(bl->count ==
 						RTE_DIST_BURST_SIZE)) {
 					release(d, matches[j]-1);
+					if (!d->active[matches[j]-1]) {
+						j--;
+						next_idx--;
+						matching_required = 1;
+						continue;
+					}
 				}
 
 				/* Add to worker that already has flow */
@@ -462,11 +576,21 @@
 				bl->pkts[idx] = next_value;
 
 			} else {
-				struct rte_distributor_backlog *bl =
-						&d->backlog[wkr];
+				struct rte_distributor_backlog *bl;
+
+				while (unlikely(!d->active[wkr]))
+					wkr = (wkr + 1) % d->num_workers;
+				bl = &d->backlog[wkr];
+
 				if (unlikely(bl->count ==
 						RTE_DIST_BURST_SIZE)) {
 					release(d, wkr);
+					if (!d->active[wkr]) {
+						j--;
+						next_idx--;
+						matching_required = 1;
+						continue;
+					}
 				}
 
 				/* Add to current worker worker */
@@ -485,17 +609,17 @@
 						matches[w] = wkr+1;
 			}
 		}
-		wkr++;
-		if (wkr >= d->num_workers)
-			wkr = 0;
+		wkr = (wkr + 1) % d->num_workers;
 	}
 
 	/* Flush out all non-full cache-lines to workers. */
 	for (wid = 0 ; wid < d->num_workers; wid++)
 		/* Sync with worker on GET_BUF flag. */
 		if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
-			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
+			__ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
+			d->bufs[wid].count = 0;
 			release(d, wid);
+		}
 
 	return num_mbufs;
 }
@@ -546,7 +670,7 @@
 	unsigned int wkr, total_outstanding = 0;
 
 	for (wkr = 0; wkr < d->num_workers; wkr++)
-		total_outstanding += d->backlog[wkr].count;
+		total_outstanding += d->backlog[wkr].count + d->bufs[wkr].count;
 
 	return total_outstanding;
 }
@@ -606,6 +730,8 @@
 		/* Sync with worker. Release retptrs. */
 		__atomic_store_n(&(d->bufs[wkr].retptr64[0]), 0,
 				__ATOMIC_RELEASE);
+
+	d->returns.start = d->returns.count = 0;
 }
 BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
 MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
@@ -677,6 +803,9 @@
 	for (i = 0 ; i < num_workers ; i++)
 		d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];
 
+	memset(d->active, 0, sizeof(d->active));
+	d->activesum = 0;
+
 	dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
 					  rte_dist_burst_list);
 
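
The distributor changes above are easier to follow with the three
handshake bits spelled out. A paraphrase as C (the flag names are the
library's; the bit values here are placeholders, not the real positions):

	enum {
		GET_BUF    = 1 << 0, /* worker requests a fresh burst        */
		RETURN_BUF = 1 << 1, /* worker returns pkts, stops requesting */
		VALID_BUF  = 1 << 2, /* this 64-bit slot carries an mbuf      */
	};

RETURN_BUF on bufptr64 is the new shutdown signal: handle_worker_shutdown()
reclaims that worker's in-flight and backlog packets and feeds them back
through rte_distributor_process(), while active[]/activesum keep the
scheduler from spinning forever on retired workers.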
diff -Nru dpdk-18.11.10/lib/librte_distributor/rte_distributor.h dpdk-18.11.11/lib/librte_distributor/rte_distributor.h
--- dpdk-18.11.10/lib/librte_distributor/rte_distributor.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_distributor/rte_distributor.h	2021-01-20 12:18:20.000000000 +0000
@@ -155,7 +155,7 @@
  * @param pkts
  *   The mbufs pointer array to be filled in (up to 8 packets)
  * @param oldpkt
- *   The previous packet, if any, being processed by the worker
+ *   The previous packets, if any, being processed by the worker
  * @param retcount
  *   The number of packets being returned
  *
@@ -187,15 +187,15 @@
 
 /**
  * API called by a worker to request a new packet to process.
- * Any previous packet given to the worker is assumed to have completed
+ * Any previous packets given to the worker are assumed to have completed
  * processing, and may be optionally returned to the distributor via
  * the oldpkt parameter.
- * Unlike rte_distributor_get_pkt_burst(), this function does not wait for a
- * new packet to be provided by the distributor.
+ * Unlike rte_distributor_get_pkt(), this function does not wait for
+ * new packets to be provided by the distributor.
  *
- * NOTE: after calling this function, rte_distributor_poll_pkt_burst() should
- * be used to poll for the packet requested. The rte_distributor_get_pkt_burst()
- * API should *not* be used to try and retrieve the new packet.
+ * NOTE: after calling this function, rte_distributor_poll_pkt() should
+ * be used to poll for the packets requested. The rte_distributor_get_pkt()
+ * API should *not* be used to try and retrieve the new packets.
  *
  * @param d
  *   The distributor instance to be used
@@ -213,9 +213,9 @@
 		unsigned int count);
 
 /**
- * API called by a worker to check for a new packet that was previously
+ * API called by a worker to check for new packets that were previously
  * requested by a call to rte_distributor_request_pkt(). It does not wait
- * for the new packet to be available, but returns NULL if the request has
+ * for the new packets to be available, but returns if the request has
  * not yet been fulfilled by the distributor.
  *
  * @param d
@@ -227,8 +227,9 @@
  *   The array of mbufs being given to the worker
  *
  * @return
- *   The number of packets being given to the worker thread, zero if no
- *   packet is yet available.
+ *   The number of packets being given to the worker thread,
+ *   -1 if no packets are yet available (burst API - RTE_DIST_ALG_BURST)
+ *   0 if no packets are yet available (legacy single API - RTE_DIST_ALG_SINGLE)
  */
 int
 rte_distributor_poll_pkt(struct rte_distributor *d,
diff -Nru dpdk-18.11.10/lib/librte_distributor/rte_distributor_private.h dpdk-18.11.11/lib/librte_distributor/rte_distributor_private.h
--- dpdk-18.11.10/lib/librte_distributor/rte_distributor_private.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_distributor/rte_distributor_private.h	2021-01-20 12:18:20.000000000 +0000
@@ -155,6 +155,9 @@
 	enum rte_distributor_match_function dist_match_fn;
 
 	struct rte_distributor_v20 *d_v20;
+
+	uint8_t active[RTE_DISTRIB_MAX_WORKERS];
+	uint8_t activesum;
 };
 
 void
diff -Nru dpdk-18.11.10/lib/librte_distributor/rte_distributor_v20.c dpdk-18.11.11/lib/librte_distributor/rte_distributor_v20.c
--- dpdk-18.11.10/lib/librte_distributor/rte_distributor_v20.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_distributor/rte_distributor_v20.c	2021-01-20 12:18:20.000000000 +0000
@@ -77,6 +77,10 @@
 	union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
 	uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
 			| RTE_DISTRIB_RETURN_BUF;
+	while (unlikely(__atomic_load_n(&buf->bufptr64, __ATOMIC_RELAXED)
+			& RTE_DISTRIB_FLAGS_MASK))
+		rte_pause();
+
 	/* Sync with distributor on RETURN_BUF flag. */
 	__atomic_store_n(&(buf->bufptr64), req, __ATOMIC_RELEASE);
 	return 0;
diff -Nru dpdk-18.11.10/lib/librte_eal/bsdapp/BSDmakefile.meson dpdk-18.11.11/lib/librte_eal/bsdapp/BSDmakefile.meson
--- dpdk-18.11.10/lib/librte_eal/bsdapp/BSDmakefile.meson	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/bsdapp/BSDmakefile.meson	1970-01-01 01:00:00.000000000 +0100
@@ -1,43 +0,0 @@
-#   BSD LICENSE
-#
-#   Copyright(c) 2017 Intel Corporation. All rights reserved.
-#   All rights reserved.
-#
-#   Redistribution and use in source and binary forms, with or without
-#   modification, are permitted provided that the following conditions
-#   are met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in
-#       the documentation and/or other materials provided with the
-#       distribution.
-#     * Neither the name of Intel Corporation nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-# makefile for building kernel modules using meson
-# takes parameters from the environment
-
-# source file is passed via KMOD_SRC as full path, we only use final
-# component of it, as VPATH is used to find actual file, so as to
-# have the .o files placed in the build, not source directory
-VPATH = ${KMOD_SRC:H}
-SRCS = ${KMOD_SRC:T} device_if.h bus_if.h pci_if.h
-CFLAGS += $(KMOD_CFLAGS)
-
-.include <bsd.kmod.mk>
diff -Nru dpdk-18.11.10/lib/librte_eal/bsdapp/eal/eal_memory.c dpdk-18.11.11/lib/librte_eal/bsdapp/eal/eal_memory.c
--- dpdk-18.11.10/lib/librte_eal/bsdapp/eal/eal_memory.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/bsdapp/eal/eal_memory.c	2021-01-20 12:18:20.000000000 +0000
@@ -180,7 +180,7 @@
 			if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
 				RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
 					RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
-					RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
+					RTE_STR(CONFIG_RTE_MAX_MEM_MB_PER_TYPE));
 				return -1;
 			}
 			arr = &msl->memseg_arr;
diff -Nru dpdk-18.11.10/lib/librte_eal/bsdapp/Makefile dpdk-18.11.11/lib/librte_eal/bsdapp/Makefile
--- dpdk-18.11.10/lib/librte_eal/bsdapp/Makefile	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/bsdapp/Makefile	1970-01-01 01:00:00.000000000 +0100
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2010-2014 Intel Corporation
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal
-
-include $(RTE_SDK)/mk/rte.subdir.mk
diff -Nru dpdk-18.11.10/lib/librte_eal/common/eal_common_dev.c dpdk-18.11.11/lib/librte_eal/common/eal_common_dev.c
--- dpdk-18.11.10/lib/librte_eal/common/eal_common_dev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/common/eal_common_dev.c	2021-01-20 12:18:20.000000000 +0000
@@ -526,6 +526,7 @@
 		 */
 		if (event_cb->active == 0) {
 			TAILQ_REMOVE(&dev_event_cbs, event_cb, next);
+			free(event_cb->dev_name);
 			free(event_cb);
 			ret++;
 		} else {
diff -Nru dpdk-18.11.10/lib/librte_eal/common/include/arch/x86/rte_memcpy.h dpdk-18.11.11/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
--- dpdk-18.11.10/lib/librte_eal/common/include/arch/x86/rte_memcpy.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/common/include/arch/x86/rte_memcpy.h	2021-01-20 12:18:20.000000000 +0000
@@ -45,7 +45,7 @@
 static __rte_always_inline void *
 rte_memcpy(void *dst, const void *src, size_t n);
 
-#ifdef RTE_MACHINE_CPUFLAG_AVX512F
+#if defined RTE_MACHINE_CPUFLAG_AVX512F && defined RTE_MEMCPY_AVX512
 
 #define ALIGNMENT_MASK 0x3F
 
diff -Nru dpdk-18.11.10/lib/librte_eal/common/include/generic/rte_memcpy.h dpdk-18.11.11/lib/librte_eal/common/include/generic/rte_memcpy.h
--- dpdk-18.11.10/lib/librte_eal/common/include/generic/rte_memcpy.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/common/include/generic/rte_memcpy.h	2021-01-20 12:18:20.000000000 +0000
@@ -95,6 +95,10 @@
  * @note This is implemented as a macro, so it's address should not be taken
  * and care is needed as parameter expressions may be evaluated multiple times.
  *
+ * @note For x86 platforms to enable the AVX-512 memcpy implementation, set
+ * -DRTE_MEMCPY_AVX512 macro in CFLAGS, or define the RTE_MEMCPY_AVX512 macro
+ * explicitly in the source file before including the rte_memcpy header file.
+ *
  * @param dst
  *   Pointer to the destination of the data.
  * @param src
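
The new @note above documents that the AVX-512 rte_memcpy() path is
opt-in. A sketch of the two ways to enable it on a capable build (the
source-level define must precede the include):

	/* Either build with CFLAGS += -DRTE_MEMCPY_AVX512, or: */
	#define RTE_MEMCPY_AVX512
	#include <rte_memcpy.h>

	char dst[64], src[64] = "payload";

	rte_memcpy(dst, src, sizeof(src));	/* may take the AVX-512 path */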
diff -Nru dpdk-18.11.10/lib/librte_eal/common/include/rte_eal.h dpdk-18.11.11/lib/librte_eal/common/include/rte_eal.h
--- dpdk-18.11.10/lib/librte_eal/common/include/rte_eal.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/common/include/rte_eal.h	2021-01-20 12:18:20.000000000 +0000
@@ -184,8 +184,9 @@
  * be made. It is expected that common usage of this function is to call it
  * just before terminating the process.
  *
- * @return 0 Successfully released all internal EAL resources
- * @return -EFAULT There was an error in releasing all resources.
+ * @return
+ *  - 0 Successfully released all internal EAL resources.
+ *  - -EFAULT There was an error in releasing all resources.
  */
 int __rte_experimental rte_eal_cleanup(void);
 
diff -Nru dpdk-18.11.10/lib/librte_eal/common/include/rte_version.h dpdk-18.11.11/lib/librte_eal/common/include/rte_version.h
--- dpdk-18.11.10/lib/librte_eal/common/include/rte_version.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/common/include/rte_version.h	2021-01-20 12:18:20.000000000 +0000
@@ -37,7 +37,7 @@
 /**
  * Patch level number i.e. the z in yy.mm.z
  */
-#define RTE_VER_MINOR 10
+#define RTE_VER_MINOR 11
 
 /**
  * Extra string to be appended to version number
diff -Nru dpdk-18.11.10/lib/librte_eal/common/malloc_elem.c dpdk-18.11.11/lib/librte_eal/common/malloc_elem.c
--- dpdk-18.11.10/lib/librte_eal/common/malloc_elem.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/common/malloc_elem.c	2021-01-20 12:18:20.000000000 +0000
@@ -363,14 +363,14 @@
 		return 0;
 
 	/* Find next power of 2 >= size. */
-	log2 = sizeof(size) * 8 - __builtin_clzl(size-1);
+	log2 = sizeof(size) * 8 - __builtin_clzl(size - 1);
 
 	/* Compute freelist index, based on log2(size). */
 	index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
-	        MALLOC_LOG2_INCREMENT;
+			MALLOC_LOG2_INCREMENT;
 
-	return index <= RTE_HEAP_NUM_FREELISTS-1?
-	        index: RTE_HEAP_NUM_FREELISTS-1;
+	return index <= RTE_HEAP_NUM_FREELISTS - 1 ?
+			index : RTE_HEAP_NUM_FREELISTS - 1;
 }
 
 /*
diff -Nru dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_dev.c dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_dev.c
--- dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_dev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_dev.c	2021-01-20 12:18:20.000000000 +0000
@@ -189,7 +189,7 @@
 	else if (!strncmp(subsystem, "vfio", 4))
 		event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_VFIO;
 	else
-		return -1;
+		goto err;
 
 	/* parse the action type */
 	if (!strncmp(action, "add", 3))
@@ -197,8 +197,11 @@
 	else if (!strncmp(action, "remove", 6))
 		event->type = RTE_DEV_EVENT_REMOVE;
 	else
-		return -1;
+		goto err;
 	return 0;
+err:
+	free(event->devname);
+	return -1;
 }
 
 static void
@@ -277,12 +280,14 @@
 			rte_spinlock_unlock(&failure_handle_lock);
 		}
 		rte_dev_event_callback_process(uevent.devname, uevent.type);
+		free(uevent.devname);
 	}
 
 	return;
 
 failure_handle_err:
 	rte_spinlock_unlock(&failure_handle_lock);
+	free(uevent.devname);
 }
 
 int __rte_experimental
diff -Nru dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_memalloc.c dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_memalloc.c
--- dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_memalloc.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_memalloc.c	2021-01-20 12:18:20.000000000 +0000
@@ -419,6 +419,21 @@
 		fd = fd_list[list_idx].fds[seg_idx];
 
 		if (fd < 0) {
+			/* A primary process is the only one creating these
+			 * files. If there is a leftover that was not cleaned
+			 * by clear_hugedir(), we must *now* make sure to drop
+			 * the file or we will remap old stuff while the rest
+			 * of the code is built on the assumption that a new
+			 * page is clean.
+			 */
+			if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
+					unlink(path) == -1 &&
+					errno != ENOENT) {
+				RTE_LOG(DEBUG, EAL, "%s(): could not remove '%s': %s\n",
+					__func__, path, strerror(errno));
+				return -1;
+			}
+
 			fd = open(path, O_CREAT | O_RDWR, 0600);
 			if (fd < 0) {
 				RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
@@ -731,17 +746,25 @@
 	}
 
 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
-	ret = get_mempolicy(&cur_socket_id, NULL, 0, addr,
-			    MPOL_F_NODE | MPOL_F_ADDR);
-	if (ret < 0) {
-		RTE_LOG(DEBUG, EAL, "%s(): get_mempolicy: %s\n",
-			__func__, strerror(errno));
-		goto mapped;
-	} else if (cur_socket_id != socket_id) {
-		RTE_LOG(DEBUG, EAL,
-				"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
-			__func__, socket_id, cur_socket_id);
-		goto mapped;
+	/*
+	 * If the kernel has been built without NUMA support, get_mempolicy()
+	 * will return an error. If check_numa() returns false, memory
+	 * allocation is not NUMA aware and the socket_id should not be
+	 * checked.
+	 */
+	if (check_numa()) {
+		ret = get_mempolicy(&cur_socket_id, NULL, 0, addr,
+					MPOL_F_NODE | MPOL_F_ADDR);
+		if (ret < 0) {
+			RTE_LOG(DEBUG, EAL, "%s(): get_mempolicy: %s\n",
+				__func__, strerror(errno));
+			goto mapped;
+		} else if (cur_socket_id != socket_id) {
+			RTE_LOG(DEBUG, EAL,
+					"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
+				__func__, socket_id, cur_socket_id);
+			goto mapped;
+		}
 	}
 #else
 	if (rte_socket_count() > 1)
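
The eal_memalloc hunks fix two independent problems: a leftover hugepage
file from a crashed run must be unlinked before open(O_CREAT) so the new
mapping is actually a clean page, and the NUMA placement check must be
skipped on kernels built without NUMA support, where get_mempolicy()
always fails. The unlink-before-create idiom in isolation (the real code
additionally restricts the unlink to the primary process):

	if (unlink(path) == -1 && errno != ENOENT)
		return -1;	/* real failure, not just "file absent" */
	fd = open(path, O_CREAT | O_RDWR, 0600);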
diff -Nru dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_memory.c dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_memory.c
--- dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_memory.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_memory.c	2021-01-20 12:18:20.000000000 +0000
@@ -704,7 +704,7 @@
 	if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
 		RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
 				RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
-				RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
+				RTE_STR(CONFIG_RTE_MAX_MEM_MB_PER_TYPE));
 		return -1;
 	}
 
diff -Nru dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_vfio.c dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_vfio.c
--- dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_vfio.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_vfio.c	2021-01-20 12:18:20.000000000 +0000
@@ -292,7 +292,7 @@
 							strerror(errno));
 					return -1;
 				}
-				return 0;
+				return -ENOENT;
 			}
 			/* noiommu group found */
 		}
@@ -317,12 +317,12 @@
 			vfio_group_fd = mp_rep->fds[0];
 		} else if (p->result == SOCKET_NO_FD) {
 			RTE_LOG(ERR, EAL, "  bad VFIO group fd\n");
-			vfio_group_fd = 0;
+			vfio_group_fd = -ENOENT;
 		}
 	}
 
 	free(mp_reply.msgs);
-	if (vfio_group_fd < 0)
+	if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
 		RTE_LOG(ERR, EAL, "  cannot request group fd\n");
 	return vfio_group_fd;
 }
@@ -378,9 +378,9 @@
 	}
 
 	vfio_group_fd = vfio_open_group_fd(iommu_group_num);
-	if (vfio_group_fd <= 0) {
+	if (vfio_group_fd < 0) {
 		RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
-		return -1;
+		return vfio_group_fd;
 	}
 
 	cur_grp->group_num = iommu_group_num;
@@ -704,11 +704,14 @@
 
 	/* get the actual group fd */
 	vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
-	if (vfio_group_fd < 0)
+	if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
 		return -1;
 
-	/* if group_fd == 0, that means the device isn't managed by VFIO */
-	if (vfio_group_fd == 0) {
+	/*
+	 * if vfio_group_fd == -ENOENT, that means the device
+	 * isn't managed by VFIO
+	 */
+	if (vfio_group_fd == -ENOENT) {
 		RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
 				dev_addr);
 		return 1;
@@ -928,10 +931,10 @@
 
 	/* get the actual group fd */
 	vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
-	if (vfio_group_fd <= 0) {
+	if (vfio_group_fd < 0) {
 		RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
 				   dev_addr);
-		ret = -1;
+		ret = vfio_group_fd;
 		goto out;
 	}
 
diff -Nru dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
--- dpdk-18.11.10/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c	2021-01-20 12:18:20.000000000 +0000
@@ -43,9 +43,9 @@
 		r->req = SOCKET_REQ_GROUP;
 		r->group_num = m->group_num;
 		fd = rte_vfio_get_group_fd(m->group_num);
-		if (fd < 0)
+		if (fd < 0 && fd != -ENOENT)
 			r->result = SOCKET_ERR;
-		else if (fd == 0)
+		else if (fd == -ENOENT)
 			/* if VFIO group exists but isn't bound to VFIO driver */
 			r->result = SOCKET_NO_FD;
 		else {
diff -Nru dpdk-18.11.10/lib/librte_eal/linuxapp/Makefile dpdk-18.11.11/lib/librte_eal/linuxapp/Makefile
--- dpdk-18.11.10/lib/librte_eal/linuxapp/Makefile	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/linuxapp/Makefile	1970-01-01 01:00:00.000000000 +0100
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2010-2014 Intel Corporation
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal
-DEPDIRS-kni := eal
-
-CFLAGS += -DALLOW_EXPERIMENTAL_API
-
-include $(RTE_SDK)/mk/rte.subdir.mk
diff -Nru dpdk-18.11.10/lib/librte_eal/Makefile dpdk-18.11.11/lib/librte_eal/Makefile
--- dpdk-18.11.10/lib/librte_eal/Makefile	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eal/Makefile	2021-01-20 12:18:20.000000000 +0000
@@ -4,9 +4,9 @@
 include $(RTE_SDK)/mk/rte.vars.mk
 
 DIRS-y += common
-DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
-DEPDIRS-linuxapp := common
-DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
-DEPDIRS-bsdapp := common
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp/eal
+DEPDIRS-linuxapp/eal := common
+DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp/eal
+DEPDIRS-bsdapp/eal := common
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff -Nru dpdk-18.11.10/lib/librte_efd/rte_efd.c dpdk-18.11.11/lib/librte_efd/rte_efd.c
--- dpdk-18.11.10/lib/librte_efd/rte_efd.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_efd/rte_efd.c	2021-01-20 12:18:20.000000000 +0000
@@ -705,6 +705,7 @@
 
 error_unlock_exit:
 	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_free(te);
 	rte_efd_free(table);
 
 	return NULL;
diff -Nru dpdk-18.11.10/lib/librte_ethdev/rte_ethdev.c dpdk-18.11.11/lib/librte_ethdev/rte_ethdev.c
--- dpdk-18.11.10/lib/librte_ethdev/rte_ethdev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_ethdev/rte_ethdev.c	2021-01-20 12:18:20.000000000 +0000
@@ -381,7 +381,9 @@
 static struct rte_eth_dev *
 _rte_eth_dev_allocated(const char *name)
 {
-	unsigned i;
+	uint16_t i;
+
+	RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
 
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
 		if (rte_eth_devices[i].data != NULL &&
@@ -410,7 +412,7 @@
 static uint16_t
 rte_eth_dev_find_free_port(void)
 {
-	unsigned i;
+	uint16_t i;
 
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
 		/* Using shared name field to find a free port. */
@@ -772,7 +774,7 @@
 int
 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
 {
-	uint32_t pid;
+	uint16_t pid;
 
 	if (name == NULL) {
 		RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
@@ -3356,7 +3358,7 @@
 
 RTE_INIT(eth_dev_init_cb_lists)
 {
-	int i;
+	uint16_t i;
 
 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
@@ -3369,7 +3371,7 @@
 {
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_callback *user_cb;
-	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
+	uint16_t next_port;
 	uint16_t last_port;
 
 	if (!cb_fn)
@@ -3432,7 +3434,7 @@
 	int ret;
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_callback *cb, *next;
-	uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
+	uint16_t next_port;
 	uint16_t last_port;
 
 	if (!cb_fn)
@@ -3824,12 +3826,20 @@
 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
 
 	if (!tail) {
-		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+		/* Stores to cb->fn and cb->param should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(
+			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
+			cb, __ATOMIC_RELEASE);
 
 	} else {
 		while (tail->next)
 			tail = tail->next;
-		tail->next = cb;
+		/* Stores to cb->fn and cb->param should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
 	}
 	rte_spinlock_unlock(&rte_eth_rx_cb_lock);
 
@@ -3902,12 +3912,20 @@
 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
 
 	if (!tail) {
-		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
+		/* Stores to cb->fn and cb->param should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(
+			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
+			cb, __ATOMIC_RELEASE);
 
 	} else {
 		while (tail->next)
 			tail = tail->next;
-		tail->next = cb;
+		/* Stores to cb->fn and cb->param should complete before
+		 * cb is visible to data plane.
+		 */
+		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
 	}
 	rte_spinlock_unlock(&rte_eth_tx_cb_lock);
 
@@ -3938,7 +3956,7 @@
 		cb = *prev_cb;
 		if (cb == user_cb) {
 			/* Remove the user cb from the callback list. */
-			*prev_cb = cb->next;
+			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
 			ret = 0;
 			break;
 		}
@@ -3972,7 +3990,7 @@
 		cb = *prev_cb;
 		if (cb == user_cb) {
 			/* Remove the user cb from the callback list. */
-			*prev_cb = cb->next;
+			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
 			ret = 0;
 			break;
 		}
@@ -4357,7 +4375,7 @@
 int __rte_experimental
 rte_eth_switch_domain_alloc(uint16_t *domain_id)
 {
-	unsigned int i;
+	uint16_t i;
 
 	*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
 
diff -Nru dpdk-18.11.10/lib/librte_ethdev/rte_ethdev.h dpdk-18.11.11/lib/librte_ethdev/rte_ethdev.h
--- dpdk-18.11.10/lib/librte_ethdev/rte_ethdev.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_ethdev/rte_ethdev.h	2021-01-20 12:18:20.000000000 +0000
@@ -1018,17 +1018,20 @@
  * Application must set PKT_TX_METADATA and mbuf metadata field.
  */
 #define DEV_TX_OFFLOAD_MATCH_METADATA   0x00200000
-
-#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
-/**< Device supports Rx queue setup after device started*/
-#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
-/**< Device supports Tx queue setup after device started*/
-
 /*
  * If new Tx offload capabilities are defined, they also must be
  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
  */
 
+/**@{@name Device capabilities
+ * Non-offload capabilities reported in rte_eth_dev_info.dev_capa.
+ */
+/** Device supports Rx queue setup after device started. */
+#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
+/** Device supports Tx queue setup after device started. */
+#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
+/**@}*/
+
 /*
  * Fallback default preferred Rx/Tx port parameters.
  * These are used if an application requests default parameters
@@ -3215,7 +3218,8 @@
  *   The callback function
  * @param user_param
  *   A generic pointer parameter which will be passed to each invocation of the
- *   callback function on this port and queue.
+ *   callback function on this port and queue. Inter-thread synchronization
+ *   of any user data changes is the responsibility of the user.
  *
  * @return
  *   NULL on error.
@@ -3244,7 +3248,8 @@
  *   The callback function
  * @param user_param
  *   A generic pointer parameter which will be passed to each invocation of the
- *   callback function on this port and queue.
+ *   callback function on this port and queue. Inter-thread synchronization
+ *   of any user data changes is the responsibility of the user.
  *
  * @return
  *   NULL on error.
@@ -3272,7 +3277,8 @@
  *   The callback function
  * @param user_param
  *   A generic pointer parameter which will be passed to each invocation of the
- *   callback function on this port and queue.
+ *   callback function on this port and queue. Inter-thread synchronization
+ *   of any user data changes is the responsibility of the user.
  *
  * @return
  *   NULL on error.
@@ -3297,7 +3303,9 @@
  *   on that queue.
  *
  * - After a short delay - where the delay is sufficient to allow any
- *   in-flight callbacks to complete.
+ *   in-flight callbacks to complete. Alternately, the RCU mechanism can be
+ *   used to detect when data plane threads have ceased referencing the
+ *   callback memory.
  *
  * @param port_id
  *   The port identifier of the Ethernet device.
@@ -3330,7 +3338,9 @@
  *   on that queue.
  *
  * - After a short delay - where the delay is sufficient to allow any
- *   in-flight callbacks to complete.
+ *   in-flight callbacks to complete. Alternately, the RCU mechanism can be
+ *   used to detect when data plane threads have ceased referencing the
+ *   callback memory.
  *
  * @param port_id
  *   The port identifier of the Ethernet device.
@@ -3880,10 +3890,18 @@
 				     rx_pkts, nb_pkts);
 
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
-	if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
-		struct rte_eth_rxtx_callback *cb =
-				dev->post_rx_burst_cbs[queue_id];
+	struct rte_eth_rxtx_callback *cb;
+
+	/* __ATOMIC_RELEASE memory order was used when the
+	 * call back was inserted into the list.
+	 * Since there is a clear dependency between loading
+	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+	 * not required.
+	 */
+	cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
+				__ATOMIC_RELAXED);
 
+	if (unlikely(cb != NULL)) {
 		do {
 			nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
 						nb_pkts, cb->param);
@@ -4144,7 +4162,16 @@
 #endif
 
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
-	struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
+	struct rte_eth_rxtx_callback *cb;
+
+	/* __ATOMIC_RELEASE memory order was used when the
+	 * call back was inserted into the list.
+	 * Since there is a clear dependency between loading
+	 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+	 * not required.
+	 */
+	cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
+				__ATOMIC_RELAXED);
 
 	if (unlikely(cb != NULL)) {
 		do {
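
The ethdev hunks convert the per-queue RX/TX callback lists to a lock-free
publish scheme: writers fully initialize the node and then make it visible
with a release store; data-plane readers load the list head relaxed,
relying on the address dependency to observe the initialized fields; and
removal merely unlinks the node, so (per the updated doc comments) the
caller must wait, or use RCU, before freeing it. The publish half, reduced
to a sketch with a hypothetical node type:

	struct cb_node {
		int (*fn)(void *);
		void *param;
		struct cb_node *next;
	};

	static void
	cb_publish(struct cb_node **head, struct cb_node *cb)
	{
		cb->next = NULL;	/* initialize fully ... */
		__atomic_store_n(head, cb, __ATOMIC_RELEASE); /* ... then publish */
	}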
diff -Nru dpdk-18.11.10/lib/librte_ethdev/rte_ethdev_pci.h dpdk-18.11.11/lib/librte_ethdev/rte_ethdev_pci.h
--- dpdk-18.11.10/lib/librte_ethdev/rte_ethdev_pci.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_ethdev/rte_ethdev_pci.h	2021-01-20 12:18:20.000000000 +0000
@@ -3,32 +3,6 @@
  *
  *   Copyright(c) 2017 Brocade Communications Systems, Inc.
  *   Author: Jan Blunck <jblunck@infradead.org>
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of the copyright holder nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _RTE_ETHDEV_PCI_H_
diff -Nru dpdk-18.11.10/lib/librte_ethdev/rte_ethdev_vdev.h dpdk-18.11.11/lib/librte_ethdev/rte_ethdev_vdev.h
--- dpdk-18.11.10/lib/librte_ethdev/rte_ethdev_vdev.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_ethdev/rte_ethdev_vdev.h	2021-01-20 12:18:20.000000000 +0000
@@ -3,32 +3,6 @@
  *
  *   Copyright(c) 2017 Brocade Communications Systems, Inc.
  *   Author: Jan Blunck <jblunck@infradead.org>
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of the copyright holder nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _RTE_ETHDEV_VDEV_H_
diff -Nru dpdk-18.11.10/lib/librte_ethdev/rte_flow.c dpdk-18.11.11/lib/librte_ethdev/rte_flow.c
--- dpdk-18.11.10/lib/librte_ethdev/rte_flow.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_ethdev/rte_flow.c	2021-01-20 12:18:20.000000000 +0000
@@ -557,7 +557,7 @@
 			   }),
 			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
 		off = sizeof(*dst.rss);
-		if (src.rss->key_len) {
+		if (src.rss->key_len && src.rss->key) {
 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
 			tmp = sizeof(*src.rss->key) * src.rss->key_len;
 			if (size >= off + tmp)
diff -Nru dpdk-18.11.10/lib/librte_eventdev/rte_event_crypto_adapter.c dpdk-18.11.11/lib/librte_eventdev/rte_event_crypto_adapter.c
--- dpdk-18.11.10/lib/librte_eventdev/rte_event_crypto_adapter.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eventdev/rte_event_crypto_adapter.c	2021-01-20 12:18:20.000000000 +0000
@@ -240,6 +240,7 @@
 	if (ret < 0) {
 		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
 				 dev_id, dev_info.driver_name);
+		rte_free(adapter);
 		return ret;
 	}
 
diff -Nru dpdk-18.11.10/lib/librte_eventdev/rte_event_eth_tx_adapter.c dpdk-18.11.11/lib/librte_eventdev/rte_event_eth_tx_adapter.c
--- dpdk-18.11.10/lib/librte_eventdev/rte_event_eth_tx_adapter.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_eventdev/rte_event_eth_tx_adapter.c	2021-01-20 12:18:20.000000000 +0000
@@ -733,6 +733,8 @@
 
 		qdone = rte_zmalloc(txa->mem_name,
 				nb_queues * sizeof(*qdone), 0);
+		if (qdone == NULL)
+			return -ENOMEM;
 		j = 0;
 		for (i = 0; i < nb_queues; i++) {
 			if (txa_service_is_queue_added(txa, eth_dev, i))
diff -Nru dpdk-18.11.10/lib/librte_gso/gso_udp4.c dpdk-18.11.11/lib/librte_gso/gso_udp4.c
--- dpdk-18.11.10/lib/librte_gso/gso_udp4.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_gso/gso_udp4.c	2021-01-20 12:18:20.000000000 +0000
@@ -69,7 +69,10 @@
 		return 1;
 	}
 
-	pyld_unit_size = gso_size - hdr_offset;
+	/* pyld_unit_size must be a multiple of 8 because frag_off
+	 * counts in units of 8 bytes.
+	 */
+	pyld_unit_size = (gso_size - hdr_offset) & ~7U;
 
 	/* Segment the payload */
 	ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool,
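
A small standalone check (assumed values, not part of the patch) of what the mask above does: the IPv4 fragment offset field counts 8-byte blocks, so the payload of every non-final segment must be rounded down to a multiple of 8.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t gso_size = 1500, hdr_offset = 42;	/* assumed values */
	/* Same rounding as the hunk above: clear the low three bits. */
	uint16_t pyld_unit_size = (gso_size - hdr_offset) & ~7U;

	/* Prints "1458 -> 1456 (182 eight-byte blocks)". */
	printf("%u -> %u (%u eight-byte blocks)\n",
	       gso_size - hdr_offset, pyld_unit_size, pyld_unit_size / 8);
	return 0;
}
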
diff -Nru dpdk-18.11.10/lib/librte_net/rte_ip.h dpdk-18.11.11/lib/librte_net/rte_ip.h
--- dpdk-18.11.10/lib/librte_net/rte_ip.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_net/rte_ip.h	2021-01-20 12:18:20.000000000 +0000
@@ -217,6 +217,9 @@
 			break;
 		off -= seglen;
 	}
+	RTE_ASSERT(seg != NULL);
+	if (seg == NULL)
+		return -1;
 	seglen -= off;
 	buf = rte_pktmbuf_mtod_offset(seg, const char *, off);
 	if (seglen >= len) {
diff -Nru dpdk-18.11.10/lib/librte_port/rte_port_source_sink.c dpdk-18.11.11/lib/librte_port/rte_port_source_sink.c
--- dpdk-18.11.10/lib/librte_port/rte_port_source_sink.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_port/rte_port_source_sink.c	2021-01-20 12:18:20.000000000 +0000
@@ -116,7 +116,7 @@
 	}
 
 	for (i = 0; i < n_pkts; i++) {
-		pkt = pcap_next(pcap_handle, &pcap_hdr);
+		pcap_next(pcap_handle, &pcap_hdr);
 		port->pkt_len[i] = RTE_MIN(max_len, pcap_hdr.len);
 		pkt_len_aligns[i] = RTE_CACHE_LINE_ROUNDUP(
 			port->pkt_len[i]);
diff -Nru dpdk-18.11.10/lib/librte_table/rte_table_hash_key16.c dpdk-18.11.11/lib/librte_table/rte_table_hash_key16.c
--- dpdk-18.11.10/lib/librte_table/rte_table_hash_key16.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_table/rte_table_hash_key16.c	2021-01-20 12:18:20.000000000 +0000
@@ -33,6 +33,7 @@
 
 #endif
 
+#ifdef RTE_ARCH_64
 struct rte_bucket_4_16 {
 	/* Cache line 0 */
 	uint64_t signature[4 + 1];
@@ -46,6 +47,22 @@
 	/* Cache line 2 */
 	uint8_t data[0];
 };
+#else
+struct rte_bucket_4_16 {
+	/* Cache line 0 */
+	uint64_t signature[4 + 1];
+	uint64_t lru_list;
+	struct rte_bucket_4_16 *next;
+	uint32_t pad;
+	uint64_t next_valid;
+
+	/* Cache line 1 */
+	uint64_t key[4][2];
+
+	/* Cache line 2 */
+	uint8_t data[0];
+};
+#endif
 
 struct rte_table_hash {
 	struct rte_table_stats stats;
diff -Nru dpdk-18.11.10/lib/librte_table/rte_table_hash_key32.c dpdk-18.11.11/lib/librte_table/rte_table_hash_key32.c
--- dpdk-18.11.10/lib/librte_table/rte_table_hash_key32.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_table/rte_table_hash_key32.c	2021-01-20 12:18:20.000000000 +0000
@@ -33,6 +33,7 @@
 
 #endif
 
+#ifdef RTE_ARCH_64
 struct rte_bucket_4_32 {
 	/* Cache line 0 */
 	uint64_t signature[4 + 1];
@@ -46,6 +47,22 @@
 	/* Cache line 3 */
 	uint8_t data[0];
 };
+#else
+struct rte_bucket_4_32 {
+	/* Cache line 0 */
+	uint64_t signature[4 + 1];
+	uint64_t lru_list;
+	struct rte_bucket_4_32 *next;
+	uint32_t pad;
+	uint64_t next_valid;
+
+	/* Cache lines 1 and 2 */
+	uint64_t key[4][4];
+
+	/* Cache line 3 */
+	uint8_t data[0];
+};
+#endif
 
 struct rte_table_hash {
 	struct rte_table_stats stats;
diff -Nru dpdk-18.11.10/lib/librte_table/rte_table_hash_key8.c dpdk-18.11.11/lib/librte_table/rte_table_hash_key8.c
--- dpdk-18.11.10/lib/librte_table/rte_table_hash_key8.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_table/rte_table_hash_key8.c	2021-01-20 12:18:20.000000000 +0000
@@ -31,6 +31,7 @@
 
 #endif
 
+#ifdef RTE_ARCH_64
 struct rte_bucket_4_8 {
 	/* Cache line 0 */
 	uint64_t signature;
@@ -43,6 +44,21 @@
 	/* Cache line 1 */
 	uint8_t data[0];
 };
+#else
+struct rte_bucket_4_8 {
+	/* Cache line 0 */
+	uint64_t signature;
+	uint64_t lru_list;
+	struct rte_bucket_4_8 *next;
+	uint32_t pad;
+	uint64_t next_valid;
+
+	uint64_t key[4];
+
+	/* Cache line 1 */
+	uint8_t data[0];
+};
+#endif
 
 struct rte_table_hash {
 	struct rte_table_stats stats;
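
Why the 32-bit variants above carry an explicit pad word, shown as a reduced sketch (hypothetical struct, not part of the patch): on an ILP32 target the 'next' pointer is 4 bytes, so the pad keeps the following 64-bit field at the same offset the 64-bit layout produces, preserving the documented cache-line placement.

#include <stdint.h>
#include <stddef.h>

struct bucket32 {
	uint64_t signature;
	uint64_t lru_list;
	struct bucket32 *next;	/* 4 bytes on a 32-bit target... */
	uint32_t pad;		/* ...so pad restores 8-byte alignment */
	uint64_t next_valid;
};

#if UINTPTR_MAX == UINT32_MAX
_Static_assert(offsetof(struct bucket32, next_valid) == 24,
	       "matches the offset of next_valid in the 64-bit layout");
#endif
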
diff -Nru dpdk-18.11.10/lib/librte_timer/rte_timer.h dpdk-18.11.11/lib/librte_timer/rte_timer.h
--- dpdk-18.11.10/lib/librte_timer/rte_timer.h	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_timer/rte_timer.h	2021-01-20 12:18:20.000000000 +0000
@@ -223,6 +223,12 @@
  *   The callback function of the timer.
  * @param arg
  *   The user argument of the callback function.
+ *
+ * @note
+ *   This API should not be called inside a timer's callback function to
+ *   reset another timer; doing so could hang in certain scenarios. Instead,
+ *   the rte_timer_reset() API can be called directly and its return code
+ *   can be checked for success or failure.
  */
 void
 rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
@@ -263,6 +269,12 @@
  *
  * @param tim
  *   The timer handle.
+ *
+ * @note
+ *   This API should not be called inside a timer's callback function to
+ *   stop another timer; doing so could hang in certain scenarios. Instead, the
+ *   rte_timer_stop() API can be called directly and its return code can
+ *   be checked for success or failure.
  */
 void rte_timer_stop_sync(struct rte_timer *tim);
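
A hedged usage sketch (hypothetical callback and logging, not part of the patch) of what the two notes above recommend instead of the _sync variants inside a timer callback: call the non-blocking API and check its return code.

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_timer.h>

static struct rte_timer other_tim;	/* assumed initialized elsewhere */

static void
my_timer_cb(struct rte_timer *tim, void *arg)
{
	RTE_SET_USED(tim);
	RTE_SET_USED(arg);
	/* Rearm the other timer without blocking; on failure it is
	 * still running on another lcore, so report rather than spin. */
	if (rte_timer_reset(&other_tim, rte_get_timer_hz(), SINGLE,
			    rte_lcore_id(), my_timer_cb, NULL) < 0)
		RTE_LOG(WARNING, USER1, "timer busy, not rearmed\n");
}
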
 
diff -Nru dpdk-18.11.10/lib/librte_vhost/iotlb.c dpdk-18.11.11/lib/librte_vhost/iotlb.c
--- dpdk-18.11.10/lib/librte_vhost/iotlb.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_vhost/iotlb.c	2021-01-20 12:18:20.000000000 +0000
@@ -349,8 +349,7 @@
 			IOTLB_CACHE_SIZE, sizeof(struct vhost_iotlb_entry), 0,
 			0, 0, NULL, NULL, NULL, socket,
 			MEMPOOL_F_NO_CACHE_ALIGN |
-			MEMPOOL_F_SP_PUT |
-			MEMPOOL_F_SC_GET);
+			MEMPOOL_F_SP_PUT);
 	if (!vq->iotlb_pool) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 				"Failed to create IOTLB cache pool (%s)\n",
diff -Nru dpdk-18.11.10/lib/librte_vhost/vhost.c dpdk-18.11.11/lib/librte_vhost/vhost.c
--- dpdk-18.11.10/lib/librte_vhost/vhost.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_vhost/vhost.c	2021-01-20 12:18:20.000000000 +0000
@@ -533,22 +533,29 @@
 alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 {
 	struct vhost_virtqueue *vq;
+	uint32_t i;
 
-	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
-	if (vq == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"Failed to allocate memory for vring:%u.\n", vring_idx);
-		return -1;
+	/* Also allocate holes, if any, up to the requested vring index. */
+	for (i = 0; i <= vring_idx; i++) {
+		if (dev->virtqueue[i])
+			continue;
+
+		vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
+		if (vq == NULL) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Failed to allocate memory for vring:%u.\n", i);
+			return -1;
+		}
+
+		dev->virtqueue[i] = vq;
+		init_vring_queue(dev, i);
+		rte_spinlock_init(&vq->access_lock);
+		vq->avail_wrap_counter = 1;
+		vq->used_wrap_counter = 1;
+		vq->signalled_used_valid = false;
 	}
 
-	dev->virtqueue[vring_idx] = vq;
-	init_vring_queue(dev, vring_idx);
-	rte_spinlock_init(&vq->access_lock);
-	vq->avail_wrap_counter = 1;
-	vq->used_wrap_counter = 1;
-	vq->signalled_used_valid = false;
-
-	dev->nr_vring += 1;
+	dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);
 
 	return 0;
 }
@@ -901,7 +908,12 @@
 	if (!dev)
 		return 0;
 
+	if (queue_id >= VHOST_MAX_VRING)
+		return 0;
+
 	vq = dev->virtqueue[queue_id];
+	if (!vq)
+		return 0;
 
 	rte_spinlock_lock(&vq->access_lock);
 
@@ -971,7 +983,12 @@
 	if (!dev)
 		return -1;
 
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
 	vq = dev->virtqueue[queue_id];
+	if (!vq)
+		return -1;
 
 	rte_spinlock_lock(&vq->access_lock);
 
diff -Nru dpdk-18.11.10/lib/librte_vhost/vhost_crypto.c dpdk-18.11.11/lib/librte_vhost/vhost_crypto.c
--- dpdk-18.11.10/lib/librte_vhost/vhost_crypto.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_vhost/vhost_crypto.c	2021-01-20 12:18:20.000000000 +0000
@@ -35,13 +35,12 @@
 #define VC_LOG_DBG(fmt, args...)
 #endif
 
-#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
-		(1 << VIRTIO_RING_F_INDIRECT_DESC) |			\
-		(1 << VIRTIO_RING_F_EVENT_IDX) |			\
-		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |			\
-		(1 << VIRTIO_CRYPTO_SERVICE_MAC) |			\
-		(1 << VIRTIO_NET_F_CTRL_VQ) |				\
-		(1 << VHOST_USER_PROTOCOL_F_CONFIG))
+#define VIRTIO_CRYPTO_FEATURES ((1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
+		(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |			\
+		(1ULL << VIRTIO_RING_F_EVENT_IDX) |			\
+		(1ULL << VIRTIO_NET_F_CTRL_VQ) |			\
+		(1ULL << VIRTIO_F_VERSION_1) |				\
+		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))
 
 #define IOVA_TO_VVA(t, r, a, l, p)					\
 	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
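
A minimal demonstration (standalone, not part of the patch) of the bug class the 1ULL change above fixes: a plain 1 is an int, so shifting it by 31 or more positions is undefined, and feature bits above 30 never reach a 64-bit mask.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* VIRTIO_F_VERSION_1 is bit 32; (1 << 32) is undefined for a
	 * 32-bit int, while the ULL form sets the intended bit. */
	uint64_t features = (1ULL << 32) | (1ULL << 30);

	printf("features = %#" PRIx64 "\n", features); /* 0x140000000 */
	return 0;
}
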
diff -Nru dpdk-18.11.10/lib/librte_vhost/vhost_user.c dpdk-18.11.11/lib/librte_vhost/vhost_user.c
--- dpdk-18.11.10/lib/librte_vhost/vhost_user.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/lib/librte_vhost/vhost_user.c	2021-01-20 12:18:20.000000000 +0000
@@ -88,8 +88,15 @@
 {
 	int i;
 
-	for (i = 0; i < msg->fd_num; i++)
-		close(msg->fds[i]);
+	for (i = 0; i < msg->fd_num; i++) {
+		int fd = msg->fds[i];
+
+		if (fd == -1)
+			continue;
+
+		msg->fds[i] = -1;
+		close(fd);
+	}
 }
 
 /*
@@ -325,7 +332,9 @@
 
 	dev->features = features;
 	if (dev->features &
-		((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
+		((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+		 (1ULL << VIRTIO_F_VERSION_1) |
+		 (1ULL << VIRTIO_F_RING_PACKED))) {
 		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	} else {
 		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
@@ -1002,7 +1011,6 @@
 	uint64_t alignment;
 	uint32_t i;
 	int populate;
-	int fd;
 
 	if (validate_msg_fds(msg, memory->nregions) != 0)
 		return VH_RESULT_ERR;
@@ -1010,7 +1018,7 @@
 	if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"too many memory regions (%u)\n", memory->nregions);
-		return VH_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
@@ -1043,7 +1051,7 @@
 				"(%d) failed to allocate memory "
 				"for dev->guest_pages\n",
 				dev->vid);
-			return VH_RESULT_ERR;
+			goto close_msg_fds;
 		}
 	}
 
@@ -1053,18 +1061,23 @@
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to allocate memory for dev->mem\n",
 			dev->vid);
-		return VH_RESULT_ERR;
+		goto free_guest_pages;
 	}
 	dev->mem->nregions = memory->nregions;
 
 	for (i = 0; i < memory->nregions; i++) {
-		fd  = msg->fds[i];
 		reg = &dev->mem->regions[i];
 
 		reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
 		reg->guest_user_addr = memory->regions[i].userspace_addr;
 		reg->size            = memory->regions[i].memory_size;
-		reg->fd              = fd;
+		reg->fd              = msg->fds[i];
+
+		/*
+		 * Assign an invalid file descriptor value to avoid a
+		 * double close on the error path.
+		 */
+		msg->fds[i] = -1;
 
 		mmap_offset = memory->regions[i].mmap_offset;
 
@@ -1074,7 +1087,7 @@
 				"mmap_offset (%#"PRIx64") and memory_size "
 				"(%#"PRIx64") overflow\n",
 				mmap_offset, reg->size);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		mmap_size = reg->size + mmap_offset;
@@ -1087,11 +1100,11 @@
 		 * to avoid failure, make sure in caller to keep length
 		 * aligned.
 		 */
-		alignment = get_blk_size(fd);
+		alignment = get_blk_size(reg->fd);
 		if (alignment == (uint64_t)-1) {
 			RTE_LOG(ERR, VHOST_CONFIG,
 				"couldn't get hugepage size through fstat\n");
-			goto err_mmap;
+			goto free_mem_table;
 		}
 		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
 		if (mmap_size == 0) {
@@ -1107,17 +1120,17 @@
 			RTE_LOG(ERR, VHOST_CONFIG, "mmap size (0x%" PRIx64 ") "
 					"or alignment (0x%" PRIx64 ") is invalid\n",
 					reg->size + mmap_offset, alignment);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
 		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
-				 MAP_SHARED | populate, fd, 0);
+				 MAP_SHARED | populate, reg->fd, 0);
 
 		if (mmap_addr == MAP_FAILED) {
 			RTE_LOG(ERR, VHOST_CONFIG,
 				"mmap region %u failed.\n", i);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		reg->mmap_addr = mmap_addr;
@@ -1130,7 +1143,7 @@
 				RTE_LOG(ERR, VHOST_CONFIG,
 					"adding guest pages to region %u failed.\n",
 					i);
-				goto err_mmap;
+				goto free_mem_table;
 			}
 
 		RTE_LOG(INFO, VHOST_CONFIG,
@@ -1173,17 +1186,17 @@
 		if (read_vhost_message(main_fd, &ack_msg) <= 0) {
 			RTE_LOG(ERR, VHOST_CONFIG,
 				"Failed to read qemu ack on postcopy set-mem-table\n");
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		if (validate_msg_fds(&ack_msg, 0) != 0)
-			goto err_mmap;
+			goto free_mem_table;
 
 		if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
 			RTE_LOG(ERR, VHOST_CONFIG,
 				"Bad qemu ack on postcopy set-mem-table (%d)\n",
 				ack_msg.request.master);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		/* Now userfault register and we can use the memory */
@@ -1207,7 +1220,7 @@
 					"Failed to register ufd for region %d: (ufd = %d) %s\n",
 					i, dev->postcopy_ufd,
 					strerror(errno));
-				goto err_mmap;
+				goto free_mem_table;
 			}
 			RTE_LOG(INFO, VHOST_CONFIG,
 				"\t userfaultfd registered for range : "
@@ -1216,7 +1229,7 @@
 				(uint64_t)reg_struct.range.start +
 				(uint64_t)reg_struct.range.len - 1);
 #else
-			goto err_mmap;
+			goto free_mem_table;
 #endif
 		}
 	}
@@ -1235,7 +1248,7 @@
 			dev = translate_ring_addresses(dev, i);
 			if (!dev) {
 				dev = *pdev;
-				goto err_mmap;
+				goto free_mem_table;
 			}
 
 			*pdev = dev;
@@ -1246,10 +1259,15 @@
 
 	return VH_RESULT_OK;
 
-err_mmap:
+free_mem_table:
 	free_mem_region(dev);
 	rte_free(dev->mem);
 	dev->mem = NULL;
+free_guest_pages:
+	rte_free(dev->guest_pages);
+	dev->guest_pages = NULL;
+close_msg_fds:
+	close_msg_fds(msg);
 	return VH_RESULT_ERR;
 }
 
@@ -1363,8 +1381,12 @@
 
 	/* Interpret ring addresses only when ring is started. */
 	dev = translate_ring_addresses(dev, file.index);
-	if (!dev)
+	if (!dev) {
+		if (file.fd != VIRTIO_INVALID_EVENTFD)
+			close(file.fd);
+
 		return VH_RESULT_ERR;
+	}
 
 	*pdev = dev;
 
@@ -1590,7 +1612,7 @@
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"invalid log base msg size: %"PRId32" != %d\n",
 			msg->size, (int)sizeof(VhostUserLog));
-		return VH_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	size = msg->payload.log.mmap_size;
@@ -1601,7 +1623,7 @@
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
 			off, size);
-		return VH_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	RTE_LOG(INFO, VHOST_CONFIG,
@@ -1638,6 +1660,10 @@
 	msg->fd_num = 0;
 
 	return VH_RESULT_REPLY;
+
+close_msg_fds:
+	close_msg_fds(msg);
+	return VH_RESULT_ERR;
 }
 
 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
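
The fd handling across the vhost_user.c hunks above follows one rule, sketched here in reduced form (hypothetical struct, not part of the patch): each descriptor has exactly one owner, and a slot is poisoned with -1 the moment ownership moves, so no cleanup path can close it twice.

#include <unistd.h>

struct fd_msg {
	int fds[8];
	int fd_num;
};

/* Take ownership of slot i; this message can no longer close it. */
static int
take_fd(struct fd_msg *msg, int i)
{
	int fd = msg->fds[i];

	msg->fds[i] = -1;
	return fd;
}

/* Close whatever the message still owns; taken slots are skipped. */
static void
close_remaining_fds(struct fd_msg *msg)
{
	int i;

	for (i = 0; i < msg->fd_num; i++) {
		int fd = take_fd(msg, i);

		if (fd != -1)
			close(fd);
	}
}
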
diff -Nru dpdk-18.11.10/license/bsd-2-clause.txt dpdk-18.11.11/license/bsd-2-clause.txt
--- dpdk-18.11.10/license/bsd-2-clause.txt	1970-01-01 01:00:00.000000000 +0100
+++ dpdk-18.11.11/license/bsd-2-clause.txt	2021-01-20 12:18:20.000000000 +0000
@@ -0,0 +1,20 @@
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    1. Redistributions of source code must retain the above copyright notice,
+       this list of conditions and the following disclaimer.
+
+    2. Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff -Nru dpdk-18.11.10/license/isc.txt dpdk-18.11.11/license/isc.txt
--- dpdk-18.11.10/license/isc.txt	1970-01-01 01:00:00.000000000 +0100
+++ dpdk-18.11.11/license/isc.txt	2021-01-20 12:18:20.000000000 +0000
@@ -0,0 +1,11 @@
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD
+TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+OF THIS SOFTWARE.
diff -Nru dpdk-18.11.10/license/mit.txt dpdk-18.11.11/license/mit.txt
--- dpdk-18.11.10/license/mit.txt	1970-01-01 01:00:00.000000000 +0100
+++ dpdk-18.11.11/license/mit.txt	2021-01-20 12:18:20.000000000 +0000
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff -Nru dpdk-18.11.10/MAINTAINERS dpdk-18.11.11/MAINTAINERS
--- dpdk-18.11.10/MAINTAINERS	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/MAINTAINERS	2021-01-20 12:18:20.000000000 +0000
@@ -24,34 +24,41 @@
 
 Main Branch
 M: Thomas Monjalon <thomas@monjalon.net>
-M: Ferruh Yigit <ferruh.yigit@intel.com>
+M: David Marchand <david.marchand@redhat.com>
 T: git://dpdk.org/dpdk
 
 Next-net Tree
 M: Ferruh Yigit <ferruh.yigit@intel.com>
+M: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
 T: git://dpdk.org/next/dpdk-next-net
 
+Next-net-brcm Tree
+M: Ajit Khaparde <ajit.khaparde@broadcom.com>
+T: git://dpdk.org/next/dpdk-next-net-brcm
+
 Next-net-intel Tree
 M: Qi Zhang <qi.z.zhang@intel.com>
-M: Beilei Xing <beilei.xing@intel.com>
 T: git://dpdk.org/next/dpdk-next-net-intel
 
+Next-net-mrvl Tree
+M: Jerin Jacob <jerinj@marvell.com>
+T: git://dpdk.org/next/dpdk-next-net-mrvl
+
 Next-net-mlx Tree
-M: Shahaf Shuler <shahafs@mellanox.com>
+M: Raslan Darawsheh <rasland@nvidia.com>
 T: git://dpdk.org/next/dpdk-next-net-mlx
 
 Next-virtio Tree
 M: Maxime Coquelin <maxime.coquelin@redhat.com>
-M: Tiwei Bie <tiwei.bie@intel.com>
+M: Chenbo Xia <chenbo.xia@intel.com>
 T: git://dpdk.org/next/dpdk-next-virtio
 
 Next-crypto Tree
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 M: Akhil Goyal <akhil.goyal@nxp.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 
 Next-eventdev Tree
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
 
 Next-qos Tree
@@ -71,8 +78,6 @@
 M: maintainers@dpdk.org
 
 Documentation (with overlaps)
-M: John McNamara <john.mcnamara@intel.com>
-M: Marko Kovacevic <marko.kovacevic@intel.com>
 F: README
 F: doc/
 
@@ -114,7 +119,6 @@
 Meson build
 M: Bruce Richardson <bruce.richardson@intel.com>
 F: meson.build
-F: lib/librte_eal/bsdapp/BSDmakefile.meson
 F: meson_options.txt
 F: config/rte_config.h
 F: buildtools/gen-pmdinfo-cfile.sh
@@ -122,11 +126,12 @@
 
 Public CI
 M: Aaron Conole <aconole@redhat.com>
-M: Michael Santana <msantana@redhat.com>
+M: Michael Santana <maicolgabriel@hotmail.com>
 F: .travis.yml
 F: .ci/
 
-ABI versioning
+ABI Policy & Versioning
+M: Ray Kinsella <mdr@ashroe.eu>
 M: Neil Horman <nhorman@tuxdriver.com>
 F: lib/librte_compat/
 F: doc/guides/rel_notes/deprecation.rst
@@ -191,8 +196,12 @@
 F: test/test/test_memory.c
 F: test/test/test_memzone.c
 
+Interrupt Subsystem
+M: Harman Kalra <hkalra@marvell.com>
+F: lib/librte_eal/*/*interrupts.*
+F: app/test/test_interrupts.c
+
 Keep alive
-M: Remy Horton <remy.horton@intel.com>
 F: lib/librte_eal/common/include/rte_keepalive.h
 F: lib/librte_eal/common/rte_keepalive.c
 F: examples/l2fwd-keepalive/
@@ -221,13 +230,13 @@
 
 ARM v7
 M: Jan Viktorin <viktorin@rehivetech.com>
-M: Gavin Hu <gavin.hu@arm.com>
+M: Ruifeng Wang <ruifeng.wang@arm.com>
 F: lib/librte_eal/common/arch/arm/
 F: lib/librte_eal/common/include/arch/arm/
 
 ARM v8
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
-M: Gavin Hu <gavin.hu@arm.com>
+M: Jerin Jacob <jerinj@marvell.com>
+M: Ruifeng Wang <ruifeng.wang@arm.com>
 F: lib/librte_eal/common/include/arch/arm/*_64.h
 F: lib/librte_net/net_crc_neon.h
 F: lib/librte_acl/acl_run_neon.*
@@ -287,7 +296,7 @@
 
 Memory pool
 M: Olivier Matz <olivier.matz@6wind.com>
-M: Andrew Rybchenko <arybchenko@solarflare.com>
+M: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
 F: lib/librte_mempool/
 F: drivers/mempool/Makefile
 F: drivers/mempool/ring/
@@ -297,7 +306,8 @@
 F: test/test/test_func_reentrancy.c
 
 Ring queue
-M: Olivier Matz <olivier.matz@6wind.com>
+M: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
+M: Konstantin Ananyev <konstantin.ananyev@intel.com>
 F: lib/librte_ring/
 F: doc/guides/prog_guide/ring_lib.rst
 F: test/test/test_ring*
@@ -312,13 +322,13 @@
 Ethernet API
 M: Thomas Monjalon <thomas@monjalon.net>
 M: Ferruh Yigit <ferruh.yigit@intel.com>
-M: Andrew Rybchenko <arybchenko@solarflare.com>
+M: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
 T: git://dpdk.org/next/dpdk-next-net
 F: lib/librte_ethdev/
 F: devtools/test-null.sh
 
 Flow API
-M: Ori Kam <orika@mellanox.com>
+M: Ori Kam <orika@nvidia.com>
 T: git://dpdk.org/next/dpdk-next-net
 F: app/test-pmd/cmdline_flow.c
 F: doc/guides/prog_guide/rte_flow.rst
@@ -334,7 +344,7 @@
 F: lib/librte_ethdev/rte_mtr*
 
 Baseband API - EXPERIMENTAL
-M: Amr Mokhtar <amr.mokhtar@intel.com>
+M: Nicolas Chautru <nicolas.chautru@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: lib/librte_bbdev/
 F: doc/guides/prog_guide/bbdev.rst
@@ -346,7 +356,6 @@
 F: doc/guides/sample_app_ug/bbdev_app.rst
 
 Crypto API
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 M: Declan Doherty <declan.doherty@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: lib/librte_cryptodev/
@@ -362,8 +371,7 @@
 
 Compression API - EXPERIMENTAL
 M: Fiona Trahe <fiona.trahe@intel.com>
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-M: Ashish Gupta <ashish.gupta@caviumnetworks.com>
+M: Ashish Gupta <ashish.gupta@marvell.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: lib/librte_compressdev/
 F: drivers/compress/
@@ -372,7 +380,7 @@
 F: doc/guides/compressdevs/features/default.ini
 
 Eventdev API
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
 F: lib/librte_eventdev/
 F: drivers/event/skeleton/
@@ -407,7 +415,7 @@
 F: doc/guides/prog_guide/event_crypto_adapter.rst
 
 Raw device API
-M: Shreyansh Jain <shreyansh.jain@nxp.com>
+M: Nipun Gupta <nipun.gupta@nxp.com>
 M: Hemant Agrawal <hemant.agrawal@nxp.com>
 F: lib/librte_rawdev/
 F: drivers/raw/skeleton_rawdev/
@@ -420,7 +428,7 @@
 
 Bucket memory pool
 M: Artem V. Andreev <artem.andreev@oktetlabs.ru>
-M: Andrew Rybchenko <arybchenko@solarflare.com>
+M: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
 F: drivers/mempool/bucket/
 
 
@@ -433,7 +441,7 @@
 
 NXP buses
 M: Hemant Agrawal <hemant.agrawal@nxp.com>
-M: Shreyansh Jain <shreyansh.jain@nxp.com>
+M: Sachin Saxena <sachin.saxena@oss.nxp.com>
 F: drivers/common/dpaax/
 F: drivers/bus/dpaa/
 F: drivers/bus/fslmc/
@@ -446,6 +454,7 @@
 
 VMBUS bus driver
 M: Stephen Hemminger <sthemmin@microsoft.com>
+M: Long Li <longli@microsoft.com>
 F: drivers/bus/vmbus/
 
 
@@ -456,8 +465,8 @@
 F: doc/guides/nics/features/default.ini
 
 Link bonding
-M: Declan Doherty <declan.doherty@intel.com>
 M: Chas Williams <chas3@att.com>
+M: Min Hu (Connor) <humin29@huawei.com>
 F: drivers/net/bonding/
 F: doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
 F: test/test/test_link_bonding*
@@ -483,12 +492,13 @@
 M: Michal Krawczyk <mk@semihalf.com>
 M: Guy Tzalik <gtzalik@amazon.com>
 M: Evgeny Schemeilin <evgenys@amazon.com>
+M: Igor Chauskin <igorch@amazon.com>
 F: drivers/net/ena/
 F: doc/guides/nics/ena.rst
 F: doc/guides/nics/features/ena.ini
 
 AMD axgbe
-M: Ravi Kumar <ravi1.kumar@amd.com>
+M: Somalapuram Amaranath <asomalap@amd.com>
 F: drivers/net/axgbe/
 F: doc/guides/nics/axgbe.rst
 F: doc/guides/nics/features/axgbe.ini
@@ -516,21 +526,21 @@
 F: doc/guides/nics/features/bnxt.ini
 
 Cavium ThunderX nicvf
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
-M: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
+M: Jerin Jacob <jerinj@marvell.com>
+M: Maciej Czekaj <mczekaj@marvell.com>
 F: drivers/net/thunderx/
 F: doc/guides/nics/thunderx.rst
 F: doc/guides/nics/features/thunderx.ini
 
 Cavium LiquidIO
-M: Shijith Thotton <shijith.thotton@cavium.com>
-M: Srisivasubramanian Srinivasan <ssrinivasan@cavium.com>
+M: Shijith Thotton <sthotton@marvell.com>
+M: Srisivasubramanian Srinivasan <srinivasan@marvell.com>
 F: drivers/net/liquidio/
 F: doc/guides/nics/liquidio.rst
 F: doc/guides/nics/features/liquidio.ini
 
 Cavium OCTEON TX
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Harman Kalra <hkalra@marvell.com>
 F: drivers/common/octeontx/
 F: drivers/mempool/octeontx/
 F: drivers/net/octeontx/
@@ -551,7 +561,8 @@
 F: doc/guides/nics/features/enic.ini
 
 Intel e1000
-M: Wenzhuo Lu <wenzhuo.lu@intel.com>
+M: Jeff Guo <jia.guo@intel.com>
+M: Haiyue Wang <haiyue.wang@intel.com>
 T: git://dpdk.org/next/dpdk-next-net-intel
 F: drivers/net/e1000/
 F: doc/guides/nics/e1000em.rst
@@ -560,8 +571,8 @@
 F: doc/guides/nics/features/igb*.ini
 
 Intel ixgbe
-M: Wenzhuo Lu <wenzhuo.lu@intel.com>
-M: Konstantin Ananyev <konstantin.ananyev@intel.com>
+M: Jeff Guo <jia.guo@intel.com>
+M: Haiyue Wang <haiyue.wang@intel.com>
 T: git://dpdk.org/next/dpdk-next-net-intel
 F: drivers/net/ixgbe/
 F: doc/guides/nics/ixgbe.rst
@@ -570,7 +581,7 @@
 
 Intel i40e
 M: Beilei Xing <beilei.xing@intel.com>
-M: Qi Zhang <qi.z.zhang@intel.com>
+M: Jeff Guo <jia.guo@intel.com>
 T: git://dpdk.org/next/dpdk-next-net-intel
 F: drivers/net/i40e/
 F: doc/guides/nics/i40e.rst
@@ -587,7 +598,7 @@
 
 Intel avf
 M: Jingjing Wu <jingjing.wu@intel.com>
-M: Wenzhuo Lu <wenzhuo.lu@intel.com>
+M: Beilei Xing <beilei.xing@intel.com>
 T: git://dpdk.org/next/dpdk-next-net-intel
 F: drivers/net/avf/
 F: doc/guides/nics/features/avf*.ini
@@ -600,9 +611,7 @@
 F: doc/guides/nics/features/ifc*.ini
 
 Marvell mvpp2
-M: Tomasz Duszynski <tdu@semihalf.com>
-M: Dmitri Epshtein <dima@marvell.com>
-M: Natalie Samsonov <nsamsono@marvell.com>
+M: Liron Himi <lironh@marvell.com>
 F: drivers/common/mvep/
 F: drivers/net/mvpp2/
 F: doc/guides/nics/mvpp2.rst
@@ -610,30 +619,30 @@
 
 Marvell mvneta
 M: Zyta Szpak <zr@semihalf.com>
-M: Dmitri Epshtein <dima@marvell.com>
-M: Natalie Samsonov <nsamsono@marvell.com>
+M: Liron Himi <lironh@marvell.com>
 F: drivers/net/mvneta/
 F: doc/guides/nics/mvneta.rst
 F: doc/guides/nics/features/mvneta.ini
 
 Mellanox mlx4
-M: Matan Azrad <matan@mellanox.com>
-M: Shahaf Shuler <shahafs@mellanox.com>
+M: Matan Azrad <matan@nvidia.com>
+M: Shahaf Shuler <shahafs@nvidia.com>
 T: git://dpdk.org/next/dpdk-next-net-mlx
 F: drivers/net/mlx4/
 F: doc/guides/nics/mlx4.rst
 F: doc/guides/nics/features/mlx4.ini
 
 Mellanox mlx5
-M: Shahaf Shuler <shahafs@mellanox.com>
-M: Yongseok Koh <yskoh@mellanox.com>
+M: Matan Azrad <matan@nvidia.com>
+M: Shahaf Shuler <shahafs@nvidia.com>
+M: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
 T: git://dpdk.org/next/dpdk-next-net-mlx
 F: drivers/net/mlx5/
 F: doc/guides/nics/mlx5.rst
 F: doc/guides/nics/features/mlx5.ini
 
 Microsoft vdev_netvsc - EXPERIMENTAL
-M: Matan Azrad <matan@mellanox.com>
+M: Matan Azrad <matan@nvidia.com>
 F: drivers/net/vdev_netvsc/
 F: doc/guides/nics/vdev_netvsc.rst
 F: doc/guides/nics/features/vdev_netvsc.ini
@@ -642,25 +651,26 @@
 M: Stephen Hemminger <sthemmin@microsoft.com>
 M: K. Y. Srinivasan <kys@microsoft.com>
 M: Haiyang Zhang <haiyangz@microsoft.com>
+M: Long Li <longli@microsoft.com>
 F: drivers/net/netvsc/
 F: doc/guides/nics/netvsc.rst
 F: doc/guides/nics/features/netvsc.ini
 
 Netcope szedata2
-M: Jan Remes <remes@netcope.com>
+M: Martin Spinler <spinler@cesnet.cz>
 F: drivers/net/szedata2/
 F: doc/guides/nics/szedata2.rst
 F: doc/guides/nics/features/szedata2.ini
 
-Netronome nfp
-M: Alejandro Lucero <alejandro.lucero@netronome.com>
+Netronome nfp - UNMAINTAINED
+M: Heinrich Kuhn <heinrich.kuhn@netronome.com>
 F: drivers/net/nfp/
 F: doc/guides/nics/nfp.rst
 F: doc/guides/nics/features/nfp*.ini
 
 NXP dpaa
 M: Hemant Agrawal <hemant.agrawal@nxp.com>
-M: Shreyansh Jain <shreyansh.jain@nxp.com>
+M: Sachin Saxena <sachin.saxena@oss.nxp.com>
 F: drivers/mempool/dpaa/
 F: drivers/net/dpaa/
 F: doc/guides/nics/dpaa.rst
@@ -668,7 +678,7 @@
 
 NXP dpaa2
 M: Hemant Agrawal <hemant.agrawal@nxp.com>
-M: Shreyansh Jain <shreyansh.jain@nxp.com>
+M: Sachin Saxena <sachin.saxena@oss.nxp.com>
 F: drivers/mempool/dpaa2/
 F: drivers/net/dpaa2/
 F: doc/guides/nics/dpaa2.rst
@@ -676,28 +686,28 @@
 
 NXP enetc
 M: Gagandeep Singh <g.singh@nxp.com>
-M: Pankaj Chauhan <pankaj.chauhan@nxp.com>
+M: Sachin Saxena <sachin.saxena@oss.nxp.com>
 F: drivers/net/enetc/
 F: doc/guides/nics/enetc.rst
 F: doc/guides/nics/features/enetc.ini
 
 QLogic bnx2x
-M: Harish Patil <harish.patil@cavium.com>
-M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Rasesh Mody <rmody@marvell.com>
+M: Shahed Shaikh <shshaikh@marvell.com>
 F: drivers/net/bnx2x/
 F: doc/guides/nics/bnx2x.rst
 F: doc/guides/nics/features/bnx2x*.ini
 
 QLogic qede PMD
-M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Harish Patil <harish.patil@cavium.com>
+M: Rasesh Mody <rmody@marvell.com>
+M: Shahed Shaikh <shshaikh@marvell.com>
 M: Shahed Shaikh <shahed.shaikh@cavium.com>
 F: drivers/net/qede/
 F: doc/guides/nics/qede.rst
 F: doc/guides/nics/features/qede*.ini
 
 Solarflare sfc_efx
-M: Andrew Rybchenko <arybchenko@solarflare.com>
+M: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
 F: drivers/net/sfc/
 F: doc/guides/nics/sfc_efx.rst
 F: doc/guides/nics/features/sfc_efx.ini
@@ -710,8 +720,7 @@
 
 Vhost-user
 M: Maxime Coquelin <maxime.coquelin@redhat.com>
-M: Tiwei Bie <tiwei.bie@intel.com>
-M: Zhihong Wang <zhihong.wang@intel.com>
+M: Chenbo Xia <chenbo.xia@intel.com>
 T: git://dpdk.org/next/dpdk-next-virtio
 F: lib/librte_vhost/
 F: doc/guides/prog_guide/vhost_lib.rst
@@ -725,8 +734,7 @@
 
 Vhost PMD
 M: Maxime Coquelin <maxime.coquelin@redhat.com>
-M: Tiwei Bie <tiwei.bie@intel.com>
-M: Zhihong Wang <zhihong.wang@intel.com>
+M: Chenbo Xia <chenbo.xia@intel.com>
 T: git://dpdk.org/next/dpdk-next-virtio
 F: drivers/net/vhost/
 F: doc/guides/nics/vhost.rst
@@ -734,15 +742,14 @@
 
 Virtio PMD
 M: Maxime Coquelin <maxime.coquelin@redhat.com>
-M: Tiwei Bie <tiwei.bie@intel.com>
-M: Zhihong Wang <zhihong.wang@intel.com>
+M: Chenbo Xia <chenbo.xia@intel.com>
 T: git://dpdk.org/next/dpdk-next-virtio
 F: drivers/net/virtio/
 F: doc/guides/nics/virtio.rst
 F: doc/guides/nics/features/virtio*.ini
 
 Wind River AVP
-M: Allain Legacy <allain.legacy@windriver.com>
+M: Steven Webster <steven.webster@windriver.com>
 M: Matt Peters <matt.peters@windriver.com>
 F: drivers/net/avp/
 F: doc/guides/nics/avp.rst
@@ -795,24 +802,23 @@
 
 Crypto Drivers
 --------------
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 F: doc/guides/cryptodevs/features/default.ini
 
 AMD CCP Crypto
-M: Ravi Kumar <ravi1.kumar@amd.com>
+M: Somalapuram Amaranath <asomalap@amd.com>
 F: drivers/crypto/ccp/
 F: doc/guides/cryptodevs/ccp.rst
 F: doc/guides/cryptodevs/features/ccp.ini
 
 ARMv8 Crypto
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Ruifeng Wang <ruifeng.wang@arm.com>
 F: drivers/crypto/armv8/
 F: doc/guides/cryptodevs/armv8.rst
 F: doc/guides/cryptodevs/features/armv8.ini
 
 Cavium OCTEON TX crypto
-M: Anoob Joseph <anoob.joseph@caviumnetworks.com>
+M: Anoob Joseph <anoobj@marvell.com>
 F: drivers/common/cpt/
 F: drivers/crypto/octeontx/
 F: doc/guides/cryptodevs/octeontx.rst
@@ -825,12 +831,14 @@
 
 Intel AES-NI GCM
 M: Declan Doherty <declan.doherty@intel.com>
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: drivers/crypto/aesni_gcm/
 F: doc/guides/cryptodevs/aesni_gcm.rst
 F: doc/guides/cryptodevs/features/aesni_gcm.ini
 
 Intel AES-NI Multi-Buffer
 M: Declan Doherty <declan.doherty@intel.com>
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: drivers/crypto/aesni_mb/
 F: doc/guides/cryptodevs/aesni_mb.rst
 F: doc/guides/cryptodevs/features/aesni_mb.ini
@@ -851,9 +859,8 @@
 F: doc/guides/cryptodevs/features/kasumi.ini
 
 Marvell Mrvl
-M: Tomasz Duszynski <tdu@semihalf.com>
-M: Dmitri Epshtein <dima@marvell.com>
-M: Natalie Samsonov <nsamsono@marvell.com>
+M: Michael Shamis <michaelsh@marvell.com>
+M: Liron Himi <lironh@marvell.com>
 F: drivers/crypto/mvsam/
 F: doc/guides/cryptodevs/mvsam.rst
 F: doc/guides/cryptodevs/features/mvsam.ini
@@ -912,11 +919,10 @@
 
 Compression Drivers
 -------------------
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 T: git://dpdk.org/next/dpdk-next-crypto
 
 Cavium OCTEON TX zipvf
-M: Ashish Gupta <ashish.gupta@cavium.com>
+M: Ashish Gupta <ashish.gupta@marvell.com>
 F: drivers/compress/octeontx/
 F: doc/guides/compressdevs/octeontx.rst
 F: doc/guides/compressdevs/features/octeontx.ini
@@ -933,7 +939,7 @@
 F: doc/guides/compressdevs/features/isal.ini
 
 ZLIB
-M: Sunila Sahu <sunila.sahu@caviumnetworks.com>
+M: Sunila Sahu <ssahu@marvell.com>
 F: drivers/compress/zlib/
 F: doc/guides/compressdevs/zlib.rst
 F: doc/guides/compressdevs/features/zlib.ini
@@ -941,21 +947,21 @@
 
 Eventdev Drivers
 ----------------
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
 
 Cavium OCTEON TX ssovf
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Jerin Jacob <jerinj@marvell.com>
 F: drivers/event/octeontx/
 F: doc/guides/eventdevs/octeontx.rst
 
 Cavium OCTEON TX timvf
-M: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
+M: Pavan Nikhilesh <pbhagavatula@marvell.com>
 F: drivers/event/octeontx/timvf_*
 
 NXP DPAA eventdev
 M: Hemant Agrawal <hemant.agrawal@nxp.com>
-M: Sunil Kumar Kori <sunil.kori@nxp.com>
+M: Nipun Gupta <nipun.gupta@nxp.com>
 F: drivers/event/dpaa/
 F: doc/guides/eventdevs/dpaa.rst
 
@@ -1045,7 +1051,6 @@
 F: doc/guides/sample_app_ug/flow_classify.rst
 
 Distributor
-M: Bruce Richardson <bruce.richardson@intel.com>
 M: David Hunt <david.hunt@intel.com>
 F: lib/librte_distributor/
 F: doc/guides/prog_guide/packet_distrib_lib.rst
@@ -1063,6 +1068,7 @@
 
 Hierarchical scheduler
 M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
+M: Jasvinder Singh <jasvinder.singh@intel.com>
 F: lib/librte_sched/
 F: doc/guides/prog_guide/qos_framework.rst
 F: test/test/test_red.c
@@ -1106,7 +1112,7 @@
 
 EFD
 M: Byron Marohn <byron.marohn@intel.com>
-M: Pablo de Lara Guarch <pablo.de.lara.guarch@intel.com>
+M: Yipeng Wang <yipeng1.wang@intel.com>
 F: lib/librte_efd/
 F: doc/guides/prog_guide/efd_lib.rst
 F: test/test/test_efd*
@@ -1114,8 +1120,9 @@
 F: doc/guides/sample_app_ug/server_node_efd.rst
 
 Hashes
+M: Yipeng Wang <yipeng1.wang@intel.com>
+M: Sameh Gobriel <sameh.gobriel@intel.com>
 M: Bruce Richardson <bruce.richardson@intel.com>
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
 F: lib/librte_hash/
 F: doc/guides/prog_guide/hash_lib.rst
 F: test/test/test_*hash*
@@ -1123,6 +1130,7 @@
 
 LPM
 M: Bruce Richardson <bruce.richardson@intel.com>
+M: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
 F: lib/librte_lpm/
 F: doc/guides/prog_guide/lpm*
 F: test/test/test_lpm*
@@ -1183,6 +1191,7 @@
 
 Timers
 M: Robert Sanford <rsanford@akamai.com>
+M: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
 F: lib/librte_timer/
 F: doc/guides/prog_guide/timer_lib.rst
 F: test/test/test_timer*
@@ -1243,7 +1252,7 @@
 
 Driver testing tool
 M: Wenzhuo Lu <wenzhuo.lu@intel.com>
-M: Jingjing Wu <jingjing.wu@intel.com>
+M: Beilei Xing <beilei.xing@intel.com>
 M: Bernard Iremonger <bernard.iremonger@intel.com>
 F: app/test-pmd/
 F: doc/guides/testpmd_app_ug/
@@ -1254,7 +1263,7 @@
 F: doc/guides/tools/cryptoperf.rst
 
 Eventdev test application
-M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Jerin Jacob <jerinj@marvell.com>
 F: app/test-eventdev/
 F: doc/guides/tools/testeventdev.rst
 F: doc/guides/tools/img/eventdev_*
@@ -1270,7 +1279,6 @@
 Other Example Applications
 --------------------------
 
-M: Remy Horton <remy.horton@intel.com>
 F: examples/ethtool/
 F: doc/guides/sample_app_ug/ethtool.rst
 
@@ -1281,7 +1289,7 @@
 F: examples/fips_validation/
 F: doc/guides/sample_app_ug/fips_validation.rst
 
-M: Ori Kam <orika@mellanox.com>
+M: Ori Kam <orika@nvidia.com>
 F: examples/flow_filtering/
 F: doc/guides/sample_app_ug/flow_filtering.rst
 
@@ -1327,7 +1335,7 @@
 F: examples/performance-thread/
 F: doc/guides/sample_app_ug/performance_thread.rst
 
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+M: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
 F: examples/ptpclient/
 
 F: examples/quota_watermark/
diff -Nru dpdk-18.11.10/meson.build dpdk-18.11.11/meson.build
--- dpdk-18.11.10/meson.build	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/meson.build	2021-01-20 12:18:20.000000000 +0000
@@ -2,7 +2,7 @@
 # Copyright(c) 2017 Intel Corporation
 
 project('DPDK', 'C',
-	version: '18.11.10',
+	version: '18.11.11',
 	license: 'BSD',
 	default_options: ['buildtype=release', 'default_library=static'],
 	meson_version: '>= 0.41'
diff -Nru dpdk-18.11.10/pkg/dpdk.spec dpdk-18.11.11/pkg/dpdk.spec
--- dpdk-18.11.10/pkg/dpdk.spec	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/pkg/dpdk.spec	2021-01-20 12:18:20.000000000 +0000
@@ -2,7 +2,7 @@
 # Copyright 2014 6WIND S.A.
 
 Name: dpdk
-Version: 18.11.10
+Version: 18.11.11
 Release: 1
 Packager: packaging@6wind.com
 URL: http://dpdk.org
diff -Nru dpdk-18.11.10/test/test/test_cryptodev.c dpdk-18.11.11/test/test/test_cryptodev.c
--- dpdk-18.11.10/test/test/test_cryptodev.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/test/test/test_cryptodev.c	2021-01-20 12:18:20.000000000 +0000
@@ -597,7 +597,7 @@
 			"Need at least %d devices for test", 1);
 
 	/* valid dev_id values */
-	dev_id = ts_params->valid_devs[ts_params->valid_dev_count - 1];
+	dev_id = ts_params->valid_devs[0];
 
 	/* Stop the device in case it's started so it can be configured */
 	rte_cryptodev_stop(dev_id);
@@ -6508,9 +6508,7 @@
 	dev->dev_ops->stats_get = temp_pfn;
 
 	/* Test expected values */
-	ut_setup();
 	test_AES_CBC_HMAC_SHA1_encrypt_digest();
-	ut_teardown();
 	TEST_ASSERT_SUCCESS(rte_cryptodev_stats_get(ts_params->valid_devs[0],
 			&stats),
 		"rte_cryptodev_stats_get failed");
diff -Nru dpdk-18.11.10/test/test/test_distributor.c dpdk-18.11.11/test/test/test_distributor.c
--- dpdk-18.11.10/test/test/test_distributor.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/test/test/test_distributor.c	2021-01-20 12:18:20.000000000 +0000
@@ -27,7 +27,9 @@
 /* statics - all zero-initialized by default */
 static volatile int quit;      /**< general quit variable for all threads */
 static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
+static volatile int zero_sleep; /**< thr0 has quit the basic loop and is sleeping */
 static volatile unsigned worker_idx;
+static volatile unsigned zero_idx;
 
 struct worker_stats {
 	volatile unsigned handled_packets;
@@ -42,7 +44,8 @@
 {
 	unsigned i, count = 0;
 	for (i = 0; i < worker_idx; i++)
-		count += worker_stats[i].handled_packets;
+		count += __atomic_load_n(&worker_stats[i].handled_packets,
+				__ATOMIC_RELAXED);
 	return count;
 }
 
@@ -50,7 +53,10 @@
 static inline void
 clear_packet_count(void)
 {
-	memset(&worker_stats, 0, sizeof(worker_stats));
+	unsigned int i;
+	for (i = 0; i < RTE_MAX_LCORE; i++)
+		__atomic_store_n(&worker_stats[i].handled_packets, 0,
+			__ATOMIC_RELAXED);
 }
 
 /* this is the basic worker function for sanity test
@@ -62,23 +68,18 @@
 	struct rte_mbuf *buf[8] __rte_cache_aligned;
 	struct worker_params *wp = arg;
 	struct rte_distributor *db = wp->dist;
-	unsigned int count = 0, num = 0;
+	unsigned int num;
 	unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
-	int i;
 
-	for (i = 0; i < 8; i++)
-		buf[i] = NULL;
-	num = rte_distributor_get_pkt(db, id, buf, buf, num);
+	num = rte_distributor_get_pkt(db, id, buf, NULL, 0);
 	while (!quit) {
 		__atomic_fetch_add(&worker_stats[id].handled_packets, num,
 				__ATOMIC_RELAXED);
-		count += num;
 		num = rte_distributor_get_pkt(db, id,
 				buf, buf, num);
 	}
 	__atomic_fetch_add(&worker_stats[id].handled_packets, num,
 			__ATOMIC_RELAXED);
-	count += num;
 	rte_distributor_return_pkt(db, id, buf, num);
 	return 0;
 }
@@ -128,12 +129,14 @@
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
 				__LINE__, BURST, total_packet_count());
+		rte_mempool_put_bulk(p, (void *)bufs, BURST);
 		return -1;
 	}
 
 	for (i = 0; i < rte_lcore_count() - 1; i++)
 		printf("Worker %u handled %u packets\n", i,
-				worker_stats[i].handled_packets);
+			__atomic_load_n(&worker_stats[i].handled_packets,
+					__ATOMIC_RELAXED));
 	printf("Sanity test with all zero hashes done.\n");
 
 	/* pick two flows and check they go correctly */
@@ -153,12 +156,15 @@
 			printf("Line %d: Error, not all packets flushed. "
 					"Expected %u, got %u\n",
 					__LINE__, BURST, total_packet_count());
+			rte_mempool_put_bulk(p, (void *)bufs, BURST);
 			return -1;
 		}
 
 		for (i = 0; i < rte_lcore_count() - 1; i++)
 			printf("Worker %u handled %u packets\n", i,
-					worker_stats[i].handled_packets);
+				__atomic_load_n(
+					&worker_stats[i].handled_packets,
+					__ATOMIC_RELAXED));
 		printf("Sanity test with two hash values done\n");
 	}
 
@@ -179,12 +185,14 @@
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
 				__LINE__, BURST, total_packet_count());
+		rte_mempool_put_bulk(p, (void *)bufs, BURST);
 		return -1;
 	}
 
 	for (i = 0; i < rte_lcore_count() - 1; i++)
 		printf("Worker %u handled %u packets\n", i,
-				worker_stats[i].handled_packets);
+			__atomic_load_n(&worker_stats[i].handled_packets,
+					__ATOMIC_RELAXED));
 	printf("Sanity test with non-zero hashes done\n");
 
 	rte_mempool_put_bulk(p, (void *)bufs, BURST);
@@ -233,6 +241,7 @@
 	if (num_returned != BIG_BATCH) {
 		printf("line %d: Missing packets, expected %d\n",
 				__LINE__, num_returned);
+		rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
 		return -1;
 	}
 
@@ -247,6 +256,7 @@
 
 		if (j == BIG_BATCH) {
 			printf("Error: could not find source packet #%u\n", i);
+			rte_mempool_put_bulk(p, (void *)many_bufs, BIG_BATCH);
 			return -1;
 		}
 	}
@@ -270,24 +280,20 @@
 	struct rte_mbuf *buf[8] __rte_cache_aligned;
 	struct worker_params *wp = arg;
 	struct rte_distributor *d = wp->dist;
-	unsigned int count = 0;
 	unsigned int i;
-	unsigned int num = 0;
+	unsigned int num;
 	unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
 
-	for (i = 0; i < 8; i++)
-		buf[i] = NULL;
-	num = rte_distributor_get_pkt(d, id, buf, buf, num);
+	num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
 	while (!quit) {
-		worker_stats[id].handled_packets += num;
-		count += num;
+		__atomic_fetch_add(&worker_stats[id].handled_packets, num,
+				__ATOMIC_RELAXED);
 		for (i = 0; i < num; i++)
 			rte_pktmbuf_free(buf[i]);
-		num = rte_distributor_get_pkt(d,
-				id, buf, buf, num);
+		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
 	}
-	worker_stats[id].handled_packets += num;
-	count += num;
+	__atomic_fetch_add(&worker_stats[id].handled_packets, num,
+			__ATOMIC_RELAXED);
 	rte_distributor_return_pkt(d, id, buf, num);
 	return 0;
 }
@@ -313,7 +319,6 @@
 			rte_distributor_process(d, NULL, 0);
 		for (j = 0; j < BURST; j++) {
 			bufs[j]->hash.usr = (i+j) << 1;
-			rte_mbuf_refcnt_set(bufs[j], 1);
 		}
 
 		rte_distributor_process(d, bufs, BURST);
@@ -337,54 +342,60 @@
 static int
 handle_work_for_shutdown_test(void *arg)
 {
-	struct rte_mbuf *pkt = NULL;
 	struct rte_mbuf *buf[8] __rte_cache_aligned;
 	struct worker_params *wp = arg;
 	struct rte_distributor *d = wp->dist;
-	unsigned int count = 0;
-	unsigned int num = 0;
-	unsigned int total = 0;
-	unsigned int i;
-	unsigned int returned = 0;
+	unsigned int num;
+	unsigned int zero_id = 0;
+	unsigned int zero_unset;
 	const unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
 
-	num = rte_distributor_get_pkt(d, id, buf, buf, num);
+	num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
+
+	if (num > 0) {
+		zero_unset = RTE_MAX_LCORE;
+		__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
+			0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+	}
+	zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
 
 	/* wait for the quit signal globally, or for worker zero, wait
 	 * for zero_quit */
-	while (!quit && !(id == 0 && zero_quit)) {
-		worker_stats[id].handled_packets += num;
-		count += num;
-		for (i = 0; i < num; i++)
-			rte_pktmbuf_free(buf[i]);
-		num = rte_distributor_get_pkt(d,
-				id, buf, buf, num);
-		total += num;
-	}
-	worker_stats[id].handled_packets += num;
-	count += num;
-	returned = rte_distributor_return_pkt(d, id, buf, num);
+	while (!quit && !(id == zero_id && zero_quit)) {
+		__atomic_fetch_add(&worker_stats[id].handled_packets, num,
+				__ATOMIC_RELAXED);
+		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
+
+		if (num > 0) {
+			zero_unset = RTE_MAX_LCORE;
+			__atomic_compare_exchange_n(&zero_idx, &zero_unset, id,
+				0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+		}
+		zero_id = __atomic_load_n(&zero_idx, __ATOMIC_ACQUIRE);
+	}
+
+	__atomic_fetch_add(&worker_stats[id].handled_packets, num,
+			__ATOMIC_RELAXED);
+	if (id == zero_id) {
+		rte_distributor_return_pkt(d, id, NULL, 0);
 
-	if (id == 0) {
 		/* for worker zero, allow it to restart to pick up last packet
 		 * when all workers are shutting down.
 		 */
+		__atomic_store_n(&zero_sleep, 1, __ATOMIC_RELEASE);
 		while (zero_quit)
 			usleep(100);
+		__atomic_store_n(&zero_sleep, 0, __ATOMIC_RELEASE);
 
-		num = rte_distributor_get_pkt(d,
-				id, buf, buf, num);
+		num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
 
 		while (!quit) {
-			worker_stats[id].handled_packets += num;
-			count += num;
-			rte_pktmbuf_free(pkt);
-			num = rte_distributor_get_pkt(d, id, buf, buf, num);
+			__atomic_fetch_add(&worker_stats[id].handled_packets,
+					num, __ATOMIC_RELAXED);
+			num = rte_distributor_get_pkt(d, id, buf, NULL, 0);
 		}
-		returned = rte_distributor_return_pkt(d,
-				id, buf, num);
-		printf("Num returned = %d\n", returned);
 	}
+	rte_distributor_return_pkt(d, id, buf, num);
 	return 0;
 }
 
@@ -400,7 +411,9 @@
 {
 	struct rte_distributor *d = wp->dist;
 	struct rte_mbuf *bufs[BURST];
-	unsigned i;
+	struct rte_mbuf *bufs2[BURST];
+	unsigned int i;
+	unsigned int failed = 0;
 
 	printf("=== Sanity test of worker shutdown ===\n");
 
@@ -426,32 +439,45 @@
 	 */
 
 	/* get more buffers to queue up, again setting them to the same flow */
-	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
+	if (rte_mempool_get_bulk(p, (void *)bufs2, BURST) != 0) {
 		printf("line %d: Error getting mbufs from pool\n", __LINE__);
+		rte_mempool_put_bulk(p, (void *)bufs, BURST);
 		return -1;
 	}
 	for (i = 0; i < BURST; i++)
-		bufs[i]->hash.usr = 1;
+		bufs2[i]->hash.usr = 1;
 
 	/* get worker zero to quit */
 	zero_quit = 1;
-	rte_distributor_process(d, bufs, BURST);
+	rte_distributor_process(d, bufs2, BURST);
 
 	/* flush the distributor */
 	rte_distributor_flush(d);
-	rte_delay_us(10000);
+	while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_distributor_flush(d);
+
+	zero_quit = 0;
+	while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_delay_us(100);
 
 	for (i = 0; i < rte_lcore_count() - 1; i++)
 		printf("Worker %u handled %u packets\n", i,
-				worker_stats[i].handled_packets);
+			__atomic_load_n(&worker_stats[i].handled_packets,
+					__ATOMIC_RELAXED));
 
 	if (total_packet_count() != BURST * 2) {
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
 				__LINE__, BURST * 2, total_packet_count());
-		return -1;
+		failed = 1;
 	}
 
+	rte_mempool_put_bulk(p, (void *)bufs, BURST);
+	rte_mempool_put_bulk(p, (void *)bufs2, BURST);
+
+	if (failed)
+		return -1;
+
 	printf("Sanity test with worker shutdown passed\n\n");
 	return 0;
 }
@@ -465,7 +491,8 @@
 {
 	struct rte_distributor *d = wp->dist;
 	struct rte_mbuf *bufs[BURST];
-	unsigned i;
+	unsigned int i;
+	unsigned int failed = 0;
 
 	printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);
 
@@ -491,20 +518,31 @@
 	/* flush the distributor */
 	rte_distributor_flush(d);
 
-	rte_delay_us(10000);
+	while (!__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_distributor_flush(d);
 
 	zero_quit = 0;
+
+	while (__atomic_load_n(&zero_sleep, __ATOMIC_ACQUIRE))
+		rte_delay_us(100);
+
 	for (i = 0; i < rte_lcore_count() - 1; i++)
 		printf("Worker %u handled %u packets\n", i,
-				worker_stats[i].handled_packets);
+			__atomic_load_n(&worker_stats[i].handled_packets,
+					__ATOMIC_RELAXED));
 
 	if (total_packet_count() != BURST) {
 		printf("Line %d: Error, not all packets flushed. "
 				"Expected %u, got %u\n",
 				__LINE__, BURST, total_packet_count());
-		return -1;
+		failed = 1;
 	}
 
+	rte_mempool_put_bulk(p, (void *)bufs, BURST);
+
+	if (failed)
+		return -1;
+
 	printf("Flush test with worker shutdown passed\n\n");
 	return 0;
 }
@@ -570,21 +608,34 @@
 	const unsigned num_workers = rte_lcore_count() - 1;
 	unsigned i;
 	struct rte_mbuf *bufs[RTE_MAX_LCORE];
-	rte_mempool_get_bulk(p, (void *)bufs, num_workers);
+	struct rte_mbuf *returns[RTE_MAX_LCORE];
+	if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
+		printf("line %d: Error getting mbufs from pool\n", __LINE__);
+		return;
+	}
 
 	zero_quit = 0;
 	quit = 1;
-	for (i = 0; i < num_workers; i++)
+	for (i = 0; i < num_workers; i++) {
 		bufs[i]->hash.usr = i << 1;
-	rte_distributor_process(d, bufs, num_workers);
-
-	rte_mempool_put_bulk(p, (void *)bufs, num_workers);
+		rte_distributor_process(d, &bufs[i], 1);
+	}
 
 	rte_distributor_process(d, NULL, 0);
 	rte_distributor_flush(d);
 	rte_eal_mp_wait_lcore();
+
+	while (rte_distributor_returned_pkts(d, returns, RTE_MAX_LCORE))
+		;
+
+	rte_distributor_clear_returns(d);
+	rte_mempool_put_bulk(p, (void *)bufs, num_workers);
+
 	quit = 0;
 	worker_idx = 0;
+	zero_idx = RTE_MAX_LCORE;
+	zero_quit = 0;
+	zero_sleep = 0;
 }
 
 static int
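
A reduced sketch (hypothetical worker count, not part of the patch) of the counter discipline the distributor-test hunks above switch to: each worker bumps its own slot with a relaxed fetch-add and the control thread sums the slots with relaxed loads, which is sufficient because the values are statistics, not synchronization state.

#include <stdio.h>

#define MAX_WORKERS 8

static unsigned int handled[MAX_WORKERS];

static void
worker_add(unsigned int id, unsigned int n)
{
	__atomic_fetch_add(&handled[id], n, __ATOMIC_RELAXED);
}

static unsigned int
total_packets(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < MAX_WORKERS; i++)
		count += __atomic_load_n(&handled[i], __ATOMIC_RELAXED);
	return count;
}

int main(void)
{
	worker_add(0, 32);
	worker_add(1, 32);
	printf("total=%u\n", total_packets()); /* total=64 */
	return 0;
}
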
diff -Nru dpdk-18.11.10/test/test/test_event_crypto_adapter.c dpdk-18.11.11/test/test/test_event_crypto_adapter.c
--- dpdk-18.11.10/test/test/test_event_crypto_adapter.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/test/test/test_event_crypto_adapter.c	2021-01-20 12:18:20.000000000 +0000
@@ -200,8 +200,8 @@
 		rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
 				&cipher_xform, params.session_mpool);
 
-		ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
-							evdev, &cap);
+		ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
+							&cap);
 		TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
 
 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
@@ -287,7 +287,7 @@
 	uint32_t cap;
 	int ret;
 
-	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
 	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
 
 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
@@ -307,7 +307,7 @@
 	uint32_t cap;
 	int ret;
 
-	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
 	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
 
 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
@@ -384,8 +384,8 @@
 		sess = rte_cryptodev_sym_session_create(params.session_mpool);
 		TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
 
-		ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
-							evdev, &cap);
+		ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID,
+							&cap);
 		TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
 
 		if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
@@ -432,7 +432,7 @@
 	uint32_t cap;
 	int ret;
 
-	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
 	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
 
 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
@@ -454,7 +454,7 @@
 	uint32_t cap;
 	int ret;
 
-	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
 	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
 
 	if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
@@ -663,7 +663,7 @@
 
 	/* Create adapter with default port creation callback */
 	ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
-					      TEST_CDEV_ID,
+					      evdev,
 					      &conf, 0);
 	TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
 
@@ -676,7 +676,7 @@
 	uint32_t cap;
 	int ret;
 
-	ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+	ret = rte_event_crypto_adapter_caps_get(evdev, TEST_CDEV_ID, &cap);
 	TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
 
 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
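
The test_event_crypto_adapter.c hunks above correct a swapped
argument order: rte_event_crypto_adapter_caps_get() takes the event
device id first and the crypto device id second, while the old test
passed the adapter id and the event device id (and, in the create
call, the crypto device id where the event device id belongs). A
hedged sketch of the intended call, with placeholder device ids:

	#include <rte_event_crypto_adapter.h>

	uint32_t caps;
	uint8_t evdev_id = 0; /* event device id - placeholder */
	uint8_t cdev_id = 0;  /* crypto device id - placeholder */

	/* Correct order: (event dev id, crypto dev id, &caps). Passing
	 * the adapter id here, as the old test did, queried the wrong
	 * device. */
	int ret = rte_event_crypto_adapter_caps_get(evdev_id, cdev_id, &caps);
	if (ret == 0 &&
	    (caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)) {
		/* the adapter can forward events via an internal port */
	}
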
diff -Nru dpdk-18.11.10/test/test/test_event_eth_tx_adapter.c dpdk-18.11.11/test/test/test_event_eth_tx_adapter.c
--- dpdk-18.11.10/test/test/test_event_eth_tx_adapter.c	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/test/test/test_event_eth_tx_adapter.c	2021-01-20 12:18:20.000000000 +0000
@@ -45,7 +45,7 @@
 static uint32_t tid;
 
 static inline int
-port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
+port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
 		struct rte_mempool *mp)
 {
 	const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
@@ -100,7 +100,7 @@
 }
 
 static inline int
-port_init(uint8_t port, struct rte_mempool *mp)
+port_init(uint16_t port, struct rte_mempool *mp)
 {
 	struct rte_eth_conf conf = { 0 };
 	return port_init_common(port, &conf, mp);
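
The port_init()/port_init_common() change above widens the port
parameter from uint8_t to uint16_t: ethdev port ids have been 16-bit
since DPDK 17.11, so an 8-bit parameter silently truncates ids above
255. A minimal sketch under that assumption:

	#include <rte_ethdev.h>

	/* Port ids are uint16_t; iterating with a narrower type risks
	 * truncation once more than 256 ports are present. */
	uint16_t port;

	RTE_ETH_FOREACH_DEV(port) {
		/* per-port setup; details elided */
	}
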
diff -Nru dpdk-18.11.10/usertools/cpu_layout.py dpdk-18.11.11/usertools/cpu_layout.py
--- dpdk-18.11.10/usertools/cpu_layout.py	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/usertools/cpu_layout.py	2021-01-20 12:18:20.000000000 +0000
@@ -22,8 +22,6 @@
         fd = open("{}/cpu{}/topology/core_id".format(base_path, cpu))
     except IOError:
         continue
-    except:
-        break
     core = int(fd.read())
     fd.close()
     fd = open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu))
@@ -66,7 +64,7 @@
 for c in cores:
     output = "Core %s" % str(c).ljust(max_core_id_len)
     for s in sockets:
-        if (s,c) in core_map:
+        if (s, c) in core_map:
             output += " " + str(core_map[(s, c)]).ljust(max_core_map_len)
         else:
             output += " " * (max_core_map_len + 1)
diff -Nru dpdk-18.11.10/usertools/dpdk-pmdinfo.py dpdk-18.11.11/usertools/dpdk-pmdinfo.py
--- dpdk-18.11.10/usertools/dpdk-pmdinfo.py	2020-09-28 11:38:03.000000000 +0100
+++ dpdk-18.11.11/usertools/dpdk-pmdinfo.py	2021-01-20 12:18:20.000000000 +0000
@@ -352,7 +352,7 @@
             mystring = force_unicode(data[dataptr:endptr])
             rc = mystring.find("PMD_INFO_STRING")
             if (rc != -1):
-                self.parse_pmd_info_string(mystring)
+                self.parse_pmd_info_string(mystring[rc:])
 
             dataptr = endptr
 



--- End Message ---
--- Begin Message ---
Package: release.debian.org
Version: 10.8

Hi,

Each of the updates referenced by these bugs was included in today's
10.8 point release.

Regards,

Adam

--- End Message ---
